      1 /*	$NetBSD: if_fwip.c,v 1.23 2010/05/10 12:17:32 kiyohara Exp $	*/
      2 /*-
      3  * Copyright (c) 2004
      4  *	Doug Rabson
      5  * Copyright (c) 2002-2003
      6  * 	Hidetoshi Shimokawa. All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. All advertising materials mentioning features or use of this software
     17  *    must display the following acknowledgement:
     18  *
     19  *	This product includes software developed by Hidetoshi Shimokawa.
     20  *
     21  * 4. Neither the name of the author nor the names of its contributors
     22  *    may be used to endorse or promote products derived from this software
     23  *    without specific prior written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     35  * SUCH DAMAGE.
     36  *
     37  * $FreeBSD: src/sys/dev/firewire/if_fwip.c,v 1.18 2009/02/09 16:58:18 fjoe Exp $
     38  */
     39 
     40 #include <sys/cdefs.h>
     41 __KERNEL_RCSID(0, "$NetBSD: if_fwip.c,v 1.23 2010/05/10 12:17:32 kiyohara Exp $");
     42 
     43 #include <sys/param.h>
     44 #include <sys/bus.h>
     45 #include <sys/device.h>
     46 #include <sys/errno.h>
     47 #include <sys/kmem.h>
     48 #include <sys/mbuf.h>
     49 #include <sys/mutex.h>
     50 #include <sys/sysctl.h>
     51 
     52 #include <net/bpf.h>
     53 #include <net/if.h>
     54 #include <net/if_ieee1394.h>
     55 #include <net/if_types.h>
     56 
     57 #include <dev/ieee1394/firewire.h>
     58 #include <dev/ieee1394/firewirereg.h>
     59 #include <dev/ieee1394/iec13213.h>
     60 #include <dev/ieee1394/if_fwipvar.h>
     61 
     62 /*
     63  * We really need a mechanism for allocating regions in the FIFO
      64  * address space. We pick an address in the OHCI controller's 'middle'
     65  * address space. This means that the controller will automatically
     66  * send responses for us, which is fine since we don't have any
     67  * important information to put in the response anyway.
     68  */
     69 #define INET_FIFO	0xfffe00000000LL
     70 
     71 #define FWIPDEBUG	if (fwipdebug) aprint_debug_ifnet
     72 #define TX_MAX_QUEUE	(FWMAXQUEUE - 1)
     73 
     74 
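         /*
          * Our link-level address as it appears in RFC 2734/RFC 3146 ARP and
          * NDP packets: layout-compatible with struct ieee1394_hwaddr (see
          * the cast in fwipattach()) and kept in network byte order.
          */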
     75 struct fw_hwaddr {
     76 	uint32_t		sender_unique_ID_hi;
     77 	uint32_t		sender_unique_ID_lo;
     78 	uint8_t			sender_max_rec;
     79 	uint8_t			sspd;
     80 	uint16_t		sender_unicast_FIFO_hi;
     81 	uint32_t		sender_unicast_FIFO_lo;
     82 };
     83 
     84 
     85 static int fwipmatch(device_t, cfdata_t, void *);
     86 static void fwipattach(device_t, device_t, void *);
     87 static int fwipdetach(device_t, int);
     88 static int fwipactivate(device_t, enum devact);
     89 
     90 /* network interface */
     91 static void fwip_start(struct ifnet *);
     92 static int fwip_ioctl(struct ifnet *, u_long, void *);
     93 static int fwip_init(struct ifnet *);
     94 static void fwip_stop(struct ifnet *, int);
     95 
     96 static void fwip_post_busreset(void *);
     97 static void fwip_output_callback(struct fw_xfer *);
     98 static void fwip_async_output(struct fwip_softc *, struct ifnet *);
     99 static void fwip_stream_input(struct fw_xferq *);
    100 static void fwip_unicast_input(struct fw_xfer *);
    101 
    102 static int fwipdebug = 0;
    103 static int broadcast_channel = 0xc0 | 0x1f; /*  tag | channel(XXX) */
    104 static int tx_speed = 2;
    105 static int rx_queue_len = FWMAXQUEUE;
    106 
    107 MALLOC_DEFINE(M_FWIP, "if_fwip", "IP over IEEE1394 interface");
    108 /*
     109  * Set up the sysctl(3) MIB, hw.fwip.*
    110  *
    111  * TBD condition CTLFLAG_PERMANENT on being a module or not
    112  */
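         /*
          * Both knobs are writable at run time via sysctl(8), e.g.
          *	sysctl -w hw.fwip.if_fwip_debug=1
          * A new rx_queue_len takes effect the next time the interface is
          * initialized.
          */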
    113 SYSCTL_SETUP(sysctl_fwip, "sysctl fwip(4) subtree setup")
    114 {
    115 	int rc, fwip_node_num;
    116 	const struct sysctlnode *node;
    117 
    118 	if ((rc = sysctl_createv(clog, 0, NULL, NULL,
    119 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
    120 	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
    121 		goto err;
    122 	}
    123 
    124 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
    125 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "fwip",
    126 	    SYSCTL_DESCR("fwip controls"),
    127 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
    128 		goto err;
    129 	}
    130 	fwip_node_num = node->sysctl_num;
    131 
    132 	/* fwip RX queue length */
    133 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
    134 	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
    135 	    "rx_queue_len", SYSCTL_DESCR("Length of the receive queue"),
    136 	    NULL, 0, &rx_queue_len,
    137 	    0, CTL_HW, fwip_node_num, CTL_CREATE, CTL_EOL)) != 0) {
    138 		goto err;
    139 	}
    140 
     141 	/* fwip debug flag */
    142 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
    143 	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
    144 	    "if_fwip_debug", SYSCTL_DESCR("fwip driver debug flag"),
    145 	    NULL, 0, &fwipdebug,
    146 	    0, CTL_HW, fwip_node_num, CTL_CREATE, CTL_EOL)) != 0) {
    147 		goto err;
    148 	}
    149 
    150 	return;
    151 
    152 err:
    153 	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
    154 }
    155 
    156 
    157 CFATTACH_DECL_NEW(fwip, sizeof(struct fwip_softc),
    158     fwipmatch, fwipattach, fwipdetach, fwipactivate);
    159 
    160 
    161 static int
    162 fwipmatch(device_t parent, cfdata_t cf, void *aux)
    163 {
    164 	struct fw_attach_args *fwa = aux;
    165 
    166 	if (strcmp(fwa->name, "fwip") == 0)
    167 		return 1;
    168 	return 0;
    169 }
    170 
    171 static void
    172 fwipattach(device_t parent, device_t self, void *aux)
    173 {
    174 	struct fwip_softc *sc = device_private(self);
    175 	struct fw_attach_args *fwa = (struct fw_attach_args *)aux;
    176 	struct fw_hwaddr *hwaddr;
    177 	struct ifnet *ifp;
    178 
    179 	aprint_naive("\n");
    180 	aprint_normal(": IP over IEEE1394\n");
    181 
    182 	sc->sc_fd.dev = self;
    183 	sc->sc_eth.fwip_ifp = &sc->sc_eth.fwcom.fc_if;
    184 	hwaddr = (struct fw_hwaddr *)&sc->sc_eth.fwcom.ic_hwaddr;
    185 
    186 	ifp = sc->sc_eth.fwip_ifp;
    187 
    188 	mutex_init(&sc->sc_fwb.fwb_mtx, MUTEX_DEFAULT, IPL_NET);
    189 	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
    190 
    191 	/* XXX */
    192 	sc->sc_dma_ch = -1;
    193 
    194 	sc->sc_fd.fc = fwa->fc;
    195 	if (tx_speed < 0)
    196 		tx_speed = sc->sc_fd.fc->speed;
    197 
    198 	sc->sc_fd.post_explore = NULL;
    199 	sc->sc_fd.post_busreset = fwip_post_busreset;
    200 	sc->sc_eth.fwip = sc;
    201 
    202 	/*
     203 	 * Encode our hardware address the way that arp likes it.
    204 	 */
    205 	hwaddr->sender_unique_ID_hi = htonl(sc->sc_fd.fc->eui.hi);
    206 	hwaddr->sender_unique_ID_lo = htonl(sc->sc_fd.fc->eui.lo);
    207 	hwaddr->sender_max_rec = sc->sc_fd.fc->maxrec;
    208 	hwaddr->sspd = sc->sc_fd.fc->speed;
    209 	hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32));
    210 	hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO);
    211 
    212 	/* fill the rest and attach interface */
    213 	ifp->if_softc = &sc->sc_eth;
    214 
    215 	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
    216 	ifp->if_start = fwip_start;
    217 	ifp->if_ioctl = fwip_ioctl;
    218 	ifp->if_init = fwip_init;
    219 	ifp->if_stop = fwip_stop;
    220 	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
    221 	IFQ_SET_READY(&ifp->if_snd);
    222 	IFQ_SET_MAXLEN(&ifp->if_snd, TX_MAX_QUEUE);
    223 
    224 	if_attach(ifp);
    225 	ieee1394_ifattach(ifp, (const struct ieee1394_hwaddr *)hwaddr);
    226 
    227 	if (!pmf_device_register(self, NULL, NULL))
    228 		aprint_error_dev(self, "couldn't establish power handler\n");
    229 	else
    230 		pmf_class_network_register(self, ifp);
    231 
    232 	FWIPDEBUG(ifp, "interface created\n");
    233 	return;
    234 }
    235 
    236 static int
    237 fwipdetach(device_t self, int flags)
    238 {
    239 	struct fwip_softc *sc = device_private(self);
    240 	struct ifnet *ifp = sc->sc_eth.fwip_ifp;
    241 
    242 	fwip_stop(sc->sc_eth.fwip_ifp, 1);
    243 	ieee1394_ifdetach(ifp);
    244 	if_detach(ifp);
    245 	mutex_destroy(&sc->sc_mtx);
    246 	mutex_destroy(&sc->sc_fwb.fwb_mtx);
    247 	return 0;
    248 }
    249 
    250 static int
    251 fwipactivate(device_t self, enum devact act)
    252 {
    253 	struct fwip_softc *sc = device_private(self);
    254 
    255 	switch (act) {
    256 	case DVACT_DEACTIVATE:
    257 		if_deactivate(sc->sc_eth.fwip_ifp);
    258 		return 0;
    259 	default:
    260 		return EOPNOTSUPP;
    261 	}
    262 }
    263 
    264 static void
    265 fwip_start(struct ifnet *ifp)
    266 {
    267 	struct fwip_softc *sc = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
    268 
    269 	FWIPDEBUG(ifp, "starting\n");
    270 
    271 	if (sc->sc_dma_ch < 0) {
    272 		struct mbuf *m = NULL;
    273 
    274 		FWIPDEBUG(ifp, "not ready\n");
    275 
     276 		do {
     277 			IF_DEQUEUE(&ifp->if_snd, m);
     278 			if (m != NULL) {
     279 				m_freem(m);
     280 				ifp->if_oerrors++;
         			}
     281 		} while (m != NULL);
    282 
    283 		return;
    284 	}
    285 
    286 	ifp->if_flags |= IFF_OACTIVE;
    287 
    288 	if (ifp->if_snd.ifq_len != 0)
    289 		fwip_async_output(sc, ifp);
    290 
    291 	ifp->if_flags &= ~IFF_OACTIVE;
    292 }
    293 
    294 static int
    295 fwip_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    296 {
    297 	int s, error = 0;
    298 
    299 	s = splnet();
    300 
    301 	switch (cmd) {
    302 	case SIOCSIFFLAGS:
    303 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
    304 			break;
    305 		switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
    306 		case IFF_RUNNING:
    307 			fwip_stop(ifp, 0);
    308 			break;
    309 		case IFF_UP:
    310 			fwip_init(ifp);
    311 			break;
    312 		default:
    313 			break;
    314 		}
    315 		break;
    316 
    317 	case SIOCADDMULTI:
    318 	case SIOCDELMULTI:
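         		/*
         		 * IP multicast is sent on the broadcast channel by this
         		 * driver, so there is no receive filter to update here.
         		 */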
    319 		break;
    320 
    321 	default:
    322 		error = ieee1394_ioctl(ifp, cmd, data);
    323 		if (error == ENETRESET)
    324 			error = 0;
    325 		break;
    326 	}
    327 
    328 	splx(s);
    329 
    330 	return error;
    331 }
    332 
    333 static int
    334 fwip_init(struct ifnet *ifp)
    335 {
    336 	struct fwip_softc *sc = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
    337 	struct firewire_comm *fc;
    338 	struct fw_xferq *xferq;
    339 	struct fw_xfer *xfer;
    340 	struct mbuf *m;
    341 	int i;
    342 
    343 	FWIPDEBUG(ifp, "initializing\n");
    344 
    345 	fc = sc->sc_fd.fc;
    346 	if (sc->sc_dma_ch < 0) {
    347 		const size_t size = sizeof(struct fw_bulkxfer) * rx_queue_len;
    348 
    349 		sc->sc_dma_ch = fw_open_isodma(fc, /* tx */0);
    350 		if (sc->sc_dma_ch < 0)
    351 			return ENXIO;
    352 		xferq = fc->ir[sc->sc_dma_ch];
    353 		xferq->flag |=
    354 		    FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_STREAM;
    355 		xferq->flag &= ~0xff;
    356 		xferq->flag |= broadcast_channel & 0xff;
    357 		/* register fwip_input handler */
    358 		xferq->sc = (void *) sc;
    359 		xferq->hand = fwip_stream_input;
    360 		xferq->bnchunk = rx_queue_len;
    361 		xferq->bnpacket = 1;
    362 		xferq->psize = MCLBYTES;
    363 		xferq->queued = 0;
    364 		xferq->buf = NULL;
    365 		xferq->bulkxfer = kmem_alloc(size, KM_SLEEP);
    366 		if (xferq->bulkxfer == NULL) {
    367 			aprint_error_ifnet(ifp, "if_fwip: kmem alloc failed\n");
    368 			return ENOMEM;
    369 		}
    370 		STAILQ_INIT(&xferq->stvalid);
    371 		STAILQ_INIT(&xferq->stfree);
    372 		STAILQ_INIT(&xferq->stdma);
    373 		xferq->stproc = NULL;
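         		/* Give every receive chunk its own cluster mbuf. */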
    374 		for (i = 0; i < xferq->bnchunk; i++) {
    375 			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
    376 			xferq->bulkxfer[i].mbuf = m;
    377 			if (m != NULL) {
    378 				m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
    379 				STAILQ_INSERT_TAIL(&xferq->stfree,
    380 						&xferq->bulkxfer[i], link);
    381 			} else
    382 				aprint_error_ifnet(ifp,
     383 				    "fwip_init: m_getcl failed\n");
    384 		}
    385 
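         		/*
         		 * Claim a window at INET_FIFO for incoming unicast block
         		 * writes; fwip_unicast_input handles packets posted there.
         		 */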
    386 		sc->sc_fwb.start = INET_FIFO;
    387 		sc->sc_fwb.end = INET_FIFO + 16384; /* S3200 packet size */
    388 
    389 		/* pre-allocate xfer */
    390 		STAILQ_INIT(&sc->sc_fwb.xferlist);
    391 		for (i = 0; i < rx_queue_len; i++) {
    392 			xfer = fw_xfer_alloc(M_FWIP);
    393 			if (xfer == NULL)
    394 				break;
    395 			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
    396 			xfer->recv.payload = mtod(m, uint32_t *);
    397 			xfer->recv.pay_len = MCLBYTES;
    398 			xfer->hand = fwip_unicast_input;
    399 			xfer->fc = fc;
    400 			xfer->sc = (void *) sc;
    401 			xfer->mbuf = m;
    402 			STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
    403 		}
    404 		fw_bindadd(fc, &sc->sc_fwb);
    405 
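         		/*
         		 * Pre-allocate transmit xfers, one per slot in the
         		 * software transmit queue.
         		 */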
    406 		STAILQ_INIT(&sc->sc_xferlist);
    407 		for (i = 0; i < TX_MAX_QUEUE; i++) {
    408 			xfer = fw_xfer_alloc(M_FWIP);
    409 			if (xfer == NULL)
    410 				break;
    411 			xfer->send.spd = tx_speed;
    412 			xfer->fc = sc->sc_fd.fc;
    413 			xfer->sc = (void *)sc;
    414 			xfer->hand = fwip_output_callback;
    415 			STAILQ_INSERT_TAIL(&sc->sc_xferlist, xfer, link);
    416 		}
    417 	} else
    418 		xferq = fc->ir[sc->sc_dma_ch];
    419 
    420 	sc->sc_last_dest.hi = 0;
    421 	sc->sc_last_dest.lo = 0;
    422 
    423 	/* start dma */
    424 	if ((xferq->flag & FWXFERQ_RUNNING) == 0)
    425 		fc->irx_enable(fc, sc->sc_dma_ch);
    426 
    427 	ifp->if_flags |= IFF_RUNNING;
    428 	ifp->if_flags &= ~IFF_OACTIVE;
    429 
    430 #if 0
    431 	/* attempt to start output */
    432 	fwip_start(ifp);
    433 #endif
    434 	return 0;
    435 }
    436 
    437 static void
    438 fwip_stop(struct ifnet *ifp, int disable)
    439 {
    440 	struct fwip_softc *sc = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
    441 	struct firewire_comm *fc = sc->sc_fd.fc;
    442 	struct fw_xferq *xferq;
    443 	struct fw_xfer *xfer, *next;
    444 	int i;
    445 
    446 	if (sc->sc_dma_ch >= 0) {
    447 		xferq = fc->ir[sc->sc_dma_ch];
    448 
    449 		if (xferq->flag & FWXFERQ_RUNNING)
    450 			fc->irx_disable(fc, sc->sc_dma_ch);
    451 		xferq->flag &=
    452 			~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
    453 			FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
    454 		xferq->hand = NULL;
    455 
    456 		for (i = 0; i < xferq->bnchunk; i++)
    457 			m_freem(xferq->bulkxfer[i].mbuf);
    458 		kmem_free(xferq->bulkxfer,
    459 		    sizeof(struct fw_bulkxfer) * xferq->bnchunk);
    460 
    461 		fw_bindremove(fc, &sc->sc_fwb);
    462 		for (xfer = STAILQ_FIRST(&sc->sc_fwb.xferlist); xfer != NULL;
    463 		    xfer = next) {
    464 			next = STAILQ_NEXT(xfer, link);
    465 			fw_xfer_free(xfer);
    466 		}
    467 
    468 		for (xfer = STAILQ_FIRST(&sc->sc_xferlist); xfer != NULL;
    469 		    xfer = next) {
    470 			next = STAILQ_NEXT(xfer, link);
    471 			fw_xfer_free(xfer);
    472 		}
    473 
    474 		xferq->bulkxfer = NULL;
    475 		sc->sc_dma_ch = -1;
    476 	}
    477 
    478 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
    479 }
    480 
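         /*
          * Post-bus-reset hook: re-register the RFC 2734 (IPv4) and RFC 3146
          * (IPv6) unit directories in the configuration ROM and forget the
          * cached unicast destination, since node IDs may have changed.
          */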
    481 static void
    482 fwip_post_busreset(void *arg)
    483 {
    484 	struct fwip_softc *sc = arg;
    485 	struct crom_src *src;
    486 	struct crom_chunk *root;
    487 
    488 	src = sc->sc_fd.fc->crom_src;
    489 	root = sc->sc_fd.fc->crom_root;
    490 
    491 	/* RFC2734 IPv4 over IEEE1394 */
    492 	memset(&sc->sc_unit4, 0, sizeof(struct crom_chunk));
    493 	crom_add_chunk(src, root, &sc->sc_unit4, CROM_UDIR);
    494 	crom_add_entry(&sc->sc_unit4, CSRKEY_SPEC, CSRVAL_IETF);
    495 	crom_add_simple_text(src, &sc->sc_unit4, &sc->sc_spec4, "IANA");
    496 	crom_add_entry(&sc->sc_unit4, CSRKEY_VER, 1);
    497 	crom_add_simple_text(src, &sc->sc_unit4, &sc->sc_ver4, "IPv4");
    498 
    499 	/* RFC3146 IPv6 over IEEE1394 */
    500 	memset(&sc->sc_unit6, 0, sizeof(struct crom_chunk));
    501 	crom_add_chunk(src, root, &sc->sc_unit6, CROM_UDIR);
    502 	crom_add_entry(&sc->sc_unit6, CSRKEY_SPEC, CSRVAL_IETF);
    503 	crom_add_simple_text(src, &sc->sc_unit6, &sc->sc_spec6, "IANA");
    504 	crom_add_entry(&sc->sc_unit6, CSRKEY_VER, 2);
    505 	crom_add_simple_text(src, &sc->sc_unit6, &sc->sc_ver6, "IPv6");
    506 
    507 	sc->sc_last_dest.hi = 0;
    508 	sc->sc_last_dest.lo = 0;
    509 	ieee1394_drain(sc->sc_eth.fwip_ifp);
    510 }
    511 
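         /*
          * Transmit completion handler: account for errors, release the mbuf,
          * return the xfer to the free list, and restart output if packets
          * are still queued.
          */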
    512 static void
    513 fwip_output_callback(struct fw_xfer *xfer)
    514 {
    515 	struct fwip_softc *sc = (struct fwip_softc *)xfer->sc;
    516 	struct ifnet *ifp;
    517 
    518 	ifp = sc->sc_eth.fwip_ifp;
    519 	/* XXX error check */
    520 	FWIPDEBUG(ifp, "resp = %d\n", xfer->resp);
    521 	if (xfer->resp != 0)
    522 		ifp->if_oerrors++;
    523 
    524 	m_freem(xfer->mbuf);
    525 	fw_xfer_unload(xfer);
    526 
    527 	mutex_enter(&sc->sc_mtx);
    528 	STAILQ_INSERT_TAIL(&sc->sc_xferlist, xfer, link);
    529 	mutex_exit(&sc->sc_mtx);
    530 
    531 	/* for queue full */
    532 	if (ifp->if_snd.ifq_head != NULL)
    533 		fwip_start(ifp);
    534 }
    535 
     536 /* Async. output: GASP streams for broadcast/multicast, block writes for unicast */
    537 static void
    538 fwip_async_output(struct fwip_softc *sc, struct ifnet *ifp)
    539 {
    540 	struct firewire_comm *fc = sc->sc_fd.fc;
    541 	struct mbuf *m;
    542 	struct m_tag *mtag;
    543 	struct fw_hwaddr *destfw;
    544 	struct fw_xfer *xfer;
    545 	struct fw_xferq *xferq;
    546 	struct fw_pkt *fp;
    547 	uint16_t nodeid;
    548 	int error;
    549 	int i = 0;
    550 
    551 	xfer = NULL;
    552 	xferq = fc->atq;
    553 	while ((xferq->queued < xferq->maxq - 1) &&
    554 	    (ifp->if_snd.ifq_head != NULL)) {
    555 		mutex_enter(&sc->sc_mtx);
    556 		if (STAILQ_EMPTY(&sc->sc_xferlist)) {
    557 			mutex_exit(&sc->sc_mtx);
    558 #if 0
    559 			aprint_normal("if_fwip: lack of xfer\n");
    560 #endif
    561 			break;
    562 		}
    563 		IF_DEQUEUE(&ifp->if_snd, m);
    564 		if (m == NULL) {
    565 			mutex_exit(&sc->sc_mtx);
    566 			break;
    567 		}
    568 		xfer = STAILQ_FIRST(&sc->sc_xferlist);
    569 		STAILQ_REMOVE_HEAD(&sc->sc_xferlist, link);
    570 		mutex_exit(&sc->sc_mtx);
    571 
    572 		/*
    573 		 * Dig out the link-level address which
    574 		 * firewire_output got via arp or neighbour
    575 		 * discovery. If we don't have a link-level address,
    576 		 * just stick the thing on the broadcast channel.
    577 		 */
    578 		mtag = m_tag_find(m, MTAG_FIREWIRE_HWADDR, 0);
    579 		if (mtag == NULL)
     580 			destfw = NULL;
    581 		else
    582 			destfw = (struct fw_hwaddr *) (mtag + 1);
    583 
    584 		/*
    585 		 * Put the mbuf in the xfer early in case we hit an
    586 		 * error case below - fwip_output_callback will free
    587 		 * the mbuf.
    588 		 */
    589 		xfer->mbuf = m;
    590 
    591 		/*
    592 		 * We use the arp result (if any) to add a suitable firewire
    593 		 * packet header before handing off to the bus.
    594 		 */
    595 		fp = &xfer->send.hdr;
    596 		nodeid = FWLOCALBUS | fc->nodeid;
    597 		if ((m->m_flags & M_BCAST) || !destfw) {
    598 			/*
    599 			 * Broadcast packets are sent as GASP packets with
    600 			 * specifier ID 0x00005e, version 1 on the broadcast
    601 			 * channel. To be conservative, we send at the
    602 			 * slowest possible speed.
    603 			 */
    604 			uint32_t *p;
    605 
     606 			M_PREPEND(m, 2 * sizeof(uint32_t), M_DONTWAIT);
         			if (m == NULL) {
         				/*
         				 * M_PREPEND failed and freed the chain;
         				 * clear the stale pointer and recycle
         				 * the xfer.
         				 */
         				xfer->mbuf = NULL;
         				ifp->if_oerrors++;
         				fwip_output_callback(xfer);
         				continue;
         			}
     607 			p = mtod(m, uint32_t *);
    608 			fp->mode.stream.len = m->m_pkthdr.len;
    609 			fp->mode.stream.chtag = broadcast_channel;
    610 			fp->mode.stream.tcode = FWTCODE_STREAM;
    611 			fp->mode.stream.sy = 0;
    612 			xfer->send.spd = 0;
    613 			p[0] = htonl(nodeid << 16);
    614 			p[1] = htonl((0x5e << 24) | 1);
    615 		} else {
    616 			/*
    617 			 * Unicast packets are sent as block writes to the
    618 			 * target's unicast fifo address. If we can't
    619 			 * find the node address, we just give up. We
    620 			 * could broadcast it but that might overflow
    621 			 * the packet size limitations due to the
    622 			 * extra GASP header. Note: the hardware
    623 			 * address is stored in network byte order to
    624 			 * make life easier for ARP.
    625 			 */
    626 			struct fw_device *fd;
    627 			struct fw_eui64 eui;
    628 
    629 			eui.hi = ntohl(destfw->sender_unique_ID_hi);
    630 			eui.lo = ntohl(destfw->sender_unique_ID_lo);
    631 			if (sc->sc_last_dest.hi != eui.hi ||
    632 			    sc->sc_last_dest.lo != eui.lo) {
    633 				fd = fw_noderesolve_eui64(fc, &eui);
    634 				if (!fd) {
    635 					/* error */
    636 					ifp->if_oerrors++;
    637 					/* XXX set error code */
    638 					fwip_output_callback(xfer);
    639 					continue;
    640 
    641 				}
    642 				sc->sc_last_hdr.mode.wreqb.dst =
    643 				    FWLOCALBUS | fd->dst;
    644 				sc->sc_last_hdr.mode.wreqb.tlrt = 0;
    645 				sc->sc_last_hdr.mode.wreqb.tcode =
    646 				    FWTCODE_WREQB;
    647 				sc->sc_last_hdr.mode.wreqb.pri = 0;
    648 				sc->sc_last_hdr.mode.wreqb.src = nodeid;
    649 				sc->sc_last_hdr.mode.wreqb.dest_hi =
    650 					ntohs(destfw->sender_unicast_FIFO_hi);
    651 				sc->sc_last_hdr.mode.wreqb.dest_lo =
    652 					ntohl(destfw->sender_unicast_FIFO_lo);
    653 				sc->sc_last_hdr.mode.wreqb.extcode = 0;
    654 				sc->sc_last_dest = eui;
    655 			}
    656 
    657 			fp->mode.wreqb = sc->sc_last_hdr.mode.wreqb;
    658 			fp->mode.wreqb.len = m->m_pkthdr.len;
    659 			xfer->send.spd = min(destfw->sspd, fc->speed);
    660 		}
    661 
    662 		xfer->send.pay_len = m->m_pkthdr.len;
    663 
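         		/*
         		 * Queue the request on the async transmit queue;
         		 * completion is reported through fwip_output_callback.
         		 */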
    664 		error = fw_asyreq(fc, -1, xfer);
    665 		if (error == EAGAIN) {
    666 			/*
    667 			 * We ran out of tlabels - requeue the packet
    668 			 * for later transmission.
    669 			 */
     670 			xfer->mbuf = NULL;
    671 			mutex_enter(&sc->sc_mtx);
    672 			STAILQ_INSERT_TAIL(&sc->sc_xferlist, xfer, link);
    673 			mutex_exit(&sc->sc_mtx);
    674 			IF_PREPEND(&ifp->if_snd, m);
    675 			break;
    676 		}
    677 		if (error) {
    678 			/* error */
    679 			ifp->if_oerrors++;
    680 			/* XXX set error code */
    681 			fwip_output_callback(xfer);
    682 			continue;
    683 		} else {
    684 			ifp->if_opackets++;
    685 			i++;
    686 		}
    687 	}
    688 #if 0
    689 	if (i > 1)
    690 		aprint_normal("%d queued\n", i);
    691 #endif
    692 	if (i > 0)
    693 		xferq->start(fc);
    694 }
    695 
     696 /* Async. stream input: GASP packets received on the broadcast channel */
    697 static void
    698 fwip_stream_input(struct fw_xferq *xferq)
    699 {
    700 	struct mbuf *m, *m0;
    701 	struct m_tag *mtag;
    702 	struct ifnet *ifp;
    703 	struct fwip_softc *sc;
    704 	struct fw_bulkxfer *sxfer;
    705 	struct fw_pkt *fp;
    706 	uint16_t src;
    707 	uint32_t *p;
    708 
    709 	sc = (struct fwip_softc *)xferq->sc;
    710 	ifp = sc->sc_eth.fwip_ifp;
    711 	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
    712 		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
    713 		fp = mtod(sxfer->mbuf, struct fw_pkt *);
    714 		if (sc->sc_fd.fc->irx_post != NULL)
    715 			sc->sc_fd.fc->irx_post(sc->sc_fd.fc, fp->mode.ld);
    716 		m = sxfer->mbuf;
    717 
    718 		/* insert new rbuf */
    719 		sxfer->mbuf = m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
    720 		if (m0 != NULL) {
    721 			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
    722 			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
    723 		} else
    724 			aprint_error_ifnet(ifp,
     725 			    "fwip_stream_input: m_getcl failed\n");
    726 
    727 		/*
    728 		 * We must have a GASP header - leave the
    729 		 * encapsulation sanity checks to the generic
     730 	 * code. Remember that we also have the firewire async
    731 		 * stream header even though that isn't accounted for
    732 		 * in mode.stream.len.
    733 		 */
    734 		if (sxfer->resp != 0 ||
    735 		    fp->mode.stream.len < 2 * sizeof(uint32_t)) {
    736 			m_freem(m);
    737 			ifp->if_ierrors++;
    738 			continue;
    739 		}
    740 		m->m_len = m->m_pkthdr.len = fp->mode.stream.len
    741 			+ sizeof(fp->mode.stream);
    742 
    743 		/*
    744 		 * If we received the packet on the broadcast channel,
    745 		 * mark it as broadcast, otherwise we assume it must
    746 		 * be multicast.
    747 		 */
    748 		if (fp->mode.stream.chtag == broadcast_channel)
    749 			m->m_flags |= M_BCAST;
    750 		else
    751 			m->m_flags |= M_MCAST;
    752 
    753 		/*
    754 		 * Make sure we recognise the GASP specifier and
    755 		 * version.
    756 		 */
    757 		p = mtod(m, uint32_t *);
    758 		if ((((ntohl(p[1]) & 0xffff) << 8) | ntohl(p[2]) >> 24) !=
    759 								0x00005e ||
    760 		    (ntohl(p[2]) & 0xffffff) != 1) {
    761 			FWIPDEBUG(ifp, "Unrecognised GASP header %#08x %#08x\n",
    762 			    ntohl(p[1]), ntohl(p[2]));
    763 			m_freem(m);
    764 			ifp->if_ierrors++;
    765 			continue;
    766 		}
    767 
    768 		/*
    769 		 * Record the sender ID for possible BPF usage.
    770 		 */
    771 		src = ntohl(p[1]) >> 16;
    772 		if (ifp->if_bpf) {
    773 			mtag = m_tag_get(MTAG_FIREWIRE_SENDER_EUID,
    774 			    2 * sizeof(uint32_t), M_NOWAIT);
    775 			if (mtag) {
    776 				/* bpf wants it in network byte order */
    777 				struct fw_device *fd;
    778 				uint32_t *p2 = (uint32_t *) (mtag + 1);
    779 
    780 				fd = fw_noderesolve_nodeid(sc->sc_fd.fc,
    781 				    src & 0x3f);
    782 				if (fd) {
    783 					p2[0] = htonl(fd->eui.hi);
    784 					p2[1] = htonl(fd->eui.lo);
    785 				} else {
    786 					p2[0] = 0;
    787 					p2[1] = 0;
    788 				}
    789 				m_tag_prepend(m, mtag);
    790 			}
    791 		}
    792 
    793 		/*
    794 		 * Trim off the GASP header
    795 		 */
    796 		m_adj(m, 3*sizeof(uint32_t));
    797 		m->m_pkthdr.rcvif = ifp;
    798 		ieee1394_input(ifp, m, src);
    799 		ifp->if_ipackets++;
    800 	}
    801 	if (STAILQ_FIRST(&xferq->stfree) != NULL)
    802 		sc->sc_fd.fc->irx_enable(sc->sc_fd.fc, sc->sc_dma_ch);
    803 }
    804 
    805 static inline void
    806 fwip_unicast_input_recycle(struct fwip_softc *sc, struct fw_xfer *xfer)
    807 {
    808 	struct mbuf *m;
    809 
    810 	/*
    811 	 * We have finished with a unicast xfer. Allocate a new
    812 	 * cluster and stick it on the back of the input queue.
    813 	 */
     814 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
     815 	if (m == NULL) {
     816 		aprint_error_dev(sc->sc_fd.dev,
     817 		    "fwip_unicast_input_recycle: m_getcl failed\n");
         		/*
         		 * Without a fresh cluster this xfer cannot be reused;
         		 * free it rather than queue it with a NULL payload.
         		 */
         		fw_xfer_free(xfer);
         		return;
         	}
     818 	xfer->recv.payload = mtod(m, uint32_t *);
    819 	xfer->recv.pay_len = MCLBYTES;
    820 	xfer->mbuf = m;
    821 	mutex_enter(&sc->sc_fwb.fwb_mtx);
    822 	STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
    823 	mutex_exit(&sc->sc_fwb.fwb_mtx);
    824 }
    825 
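         /*
          * Handler for asynchronous block write requests addressed to our
          * unicast FIFO (INET_FIFO): validate the request, recycle the
          * receive buffer, and pass good packets to ieee1394_input().
          */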
    826 static void
    827 fwip_unicast_input(struct fw_xfer *xfer)
    828 {
    829 	uint64_t address;
    830 	struct mbuf *m;
    831 	struct m_tag *mtag;
    832 	struct ifnet *ifp;
    833 	struct fwip_softc *sc;
    834 	struct fw_pkt *fp;
    835 	int rtcode;
    836 
    837 	sc = (struct fwip_softc *)xfer->sc;
    838 	ifp = sc->sc_eth.fwip_ifp;
    839 	m = xfer->mbuf;
     840 	xfer->mbuf = NULL;
    841 	fp = &xfer->recv.hdr;
    842 
    843 	/*
    844 	 * Check the fifo address - we only accept addresses of
    845 	 * exactly INET_FIFO.
    846 	 */
    847 	address = ((uint64_t)fp->mode.wreqb.dest_hi << 32)
    848 		| fp->mode.wreqb.dest_lo;
    849 	if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
    850 		rtcode = FWRCODE_ER_TYPE;
    851 	} else if (address != INET_FIFO) {
    852 		rtcode = FWRCODE_ER_ADDR;
    853 	} else {
    854 		rtcode = FWRCODE_COMPLETE;
    855 	}
    856 
    857 	/*
    858 	 * Pick up a new mbuf and stick it on the back of the receive
    859 	 * queue.
    860 	 */
    861 	fwip_unicast_input_recycle(sc, xfer);
    862 
    863 	/*
    864 	 * If we've already rejected the packet, give up now.
    865 	 */
    866 	if (rtcode != FWRCODE_COMPLETE) {
    867 		m_freem(m);
    868 		ifp->if_ierrors++;
    869 		return;
    870 	}
    871 
    872 	if (ifp->if_bpf) {
    873 		/*
    874 		 * Record the sender ID for possible BPF usage.
    875 		 */
    876 		mtag = m_tag_get(MTAG_FIREWIRE_SENDER_EUID,
    877 		    2 * sizeof(uint32_t), M_NOWAIT);
    878 		if (mtag) {
    879 			/* bpf wants it in network byte order */
    880 			struct fw_device *fd;
    881 			uint32_t *p = (uint32_t *) (mtag + 1);
    882 
    883 			fd = fw_noderesolve_nodeid(sc->sc_fd.fc,
    884 			    fp->mode.wreqb.src & 0x3f);
    885 			if (fd) {
    886 				p[0] = htonl(fd->eui.hi);
    887 				p[1] = htonl(fd->eui.lo);
    888 			} else {
    889 				p[0] = 0;
    890 				p[1] = 0;
    891 			}
    892 			m_tag_prepend(m, mtag);
    893 		}
    894 	}
    895 
    896 	/*
    897 	 * Hand off to the generic encapsulation code. We don't use
    898 	 * ifp->if_input so that we can pass the source nodeid as an
    899 	 * argument to facilitate link-level fragment reassembly.
    900 	 */
    901 	m->m_len = m->m_pkthdr.len = fp->mode.wreqb.len;
    902 	m->m_pkthdr.rcvif = ifp;
    903 	ieee1394_input(ifp, m, fp->mode.wreqb.src);
    904 	ifp->if_ipackets++;
    905 }
    906