/*	$NetBSD: if_uba.c,v 1.29 2007/10/19 12:01:09 ad Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)if_uba.c	7.16 (Berkeley) 12/16/90
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_uba.c,v 1.29 2007/10/19 12:01:09 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>

#include <sys/bus.h>

#include <dev/qbus/if_uba.h>
#include <dev/qbus/ubareg.h>
#include <dev/qbus/ubavar.h>

static	struct mbuf *getmcl(void);

/*
 * Routines supporting UNIBUS network interfaces.
 *
 * TODO:
 *	Support interfaces using only one BDP statically.
 */

/*
 * Init UNIBUS for an interface whose headers of size hlen are to
 * end on a page boundary.  We allocate a UNIBUS map register for the page
 * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
 * doing this once for each read and once for each write buffer.  We also
 * allocate page frames in the mbuf pool for these pages.
 *
 * Recent changes:
 *	No special "header pages" anymore.
 *	Recv packets are always put in clusters.
 *	"size" is the maximum buffer size; it must not be larger than MCLBYTES.
 */
int
if_ubaminit(struct ifubinfo *ifu, struct uba_softc *uh, int size,
    struct ifrw *ifr, int nr, struct ifxmt *ifw, int nw)
{
	struct mbuf *m;
	int totsz, i, error, rseg, nm = nr;
	bus_dma_segment_t seg;
	void *vaddr;

#ifdef DIAGNOSTIC
	if (size > MCLBYTES)
		panic("if_ubaminit: size > MCLBYTES");
#endif
	ifu->iff_softc = uh;
	/*
	 * Get DMA memory for transmit buffers.
	 * Buffer sizes are rounded up to a multiple of the UBA page size,
	 * then the buffers are allocated contiguously.
	 */
	size = (size + UBA_PGOFSET) & ~UBA_PGOFSET;
	totsz = size * nw;
	if ((error = bus_dmamem_alloc(uh->uh_dmat, totsz, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)))
		return error;
	if ((error = bus_dmamem_map(uh->uh_dmat, &seg, rseg, totsz, &vaddr,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT))) {
		bus_dmamem_free(uh->uh_dmat, &seg, rseg);
		return error;
	}

	/*
	 * Create receive and transmit maps.
	 * Alloc all resources now so we won't fail in the future.
	 */

	for (i = 0; i < nr; i++) {
		if ((error = bus_dmamap_create(uh->uh_dmat, size, 1,
		    size, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &ifr[i].ifrw_map))) {
			nr = i;
			nm = nw = 0;
			goto bad;
		}
	}
	for (i = 0; i < nw; i++) {
		if ((error = bus_dmamap_create(uh->uh_dmat, size, 1,
		    size, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &ifw[i].ifw_map))) {
			nw = i;
			nm = 0;
			goto bad;
		}
	}
	/*
	 * Preload the rx maps with mbuf clusters.
	 */
	for (i = 0; i < nm; i++) {
		if ((m = getmcl()) == NULL) {
			nm = i;
			goto bad;
		}
		ifr[i].ifrw_mbuf = m;
		bus_dmamap_load(uh->uh_dmat, ifr[i].ifrw_map,
		    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	}
	/*
	 * Load the tx maps with DMA memory (common case).
	 */
	for (i = 0; i < nw; i++) {
		ifw[i].ifw_vaddr = (char *)vaddr + size * i;
		ifw[i].ifw_size = size;
		bus_dmamap_load(uh->uh_dmat, ifw[i].ifw_map,
		    ifw[i].ifw_vaddr, ifw[i].ifw_size, NULL, BUS_DMA_NOWAIT);
	}
	return 0;
bad:
	while (--nm >= 0) {
		bus_dmamap_unload(uh->uh_dmat, ifr[nm].ifrw_map);
		m_freem(ifr[nm].ifrw_mbuf);
	}
	while (--nw >= 0)
		bus_dmamap_destroy(uh->uh_dmat, ifw[nw].ifw_map);
	while (--nr >= 0)
		bus_dmamap_destroy(uh->uh_dmat, ifr[nr].ifrw_map);
	return error;
}
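
/*
 * Example (sketch only): a UNIBUS/Q-bus network driver would typically call
 * if_ubaminit() once from its attach routine, before enabling the device.
 * The softc layout, the "xx" driver name and the XX_NRECV/XX_NXMT constants
 * below are hypothetical and not part of this file:
 *
 *	struct xx_softc {
 *		struct device	sc_dev;
 *		struct ifubinfo	sc_ifuba;
 *		struct ifrw	sc_ifr[XX_NRECV];
 *		struct ifxmt	sc_ifw[XX_NXMT];
 *	};
 *
 *	struct uba_softc *uh;		(the parent UBA adapter's softc)
 *
 *	if (if_ubaminit(&sc->sc_ifuba, uh, MCLBYTES,
 *	    sc->sc_ifr, XX_NRECV, sc->sc_ifw, XX_NXMT) != 0) {
 *		printf("xx: can't allocate UNIBUS DMA resources\n");
 *		return;
 *	}
 */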

static struct mbuf *
getmcl(void)
{
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	return m;
}

/*
 * Pull read data off an interface.
 * Totlen is the length of the data, with the local net header stripped.
 * When full cluster-sized units are present
 * on the interface on cluster boundaries we can get them more
 * easily by remapping, and take advantage of this here.
 * Save a pointer to the interface structure and the total length,
 * so that protocols can determine where incoming packets arrived.
 * Note: we may be called to receive from a transmit buffer by some
 * devices.  In that case, we must force normal mapping of the buffer,
 * so that the correct data will appear (only unibus maps are
 * changed when remapping the transmit buffers).
 */
struct mbuf *
if_ubaget(struct ifubinfo *ifu, struct ifrw *ifr, struct ifnet *ifp, int len)
{
	struct uba_softc *uh = ifu->iff_softc;
	struct mbuf *m, *mn;

	if ((mn = getmcl()) == NULL)
		return NULL;	/* Leave the old */

	bus_dmamap_unload(uh->uh_dmat, ifr->ifrw_map);
	m = ifr->ifrw_mbuf;
	ifr->ifrw_mbuf = mn;
	if ((bus_dmamap_load(uh->uh_dmat, ifr->ifrw_map,
	    mn->m_ext.ext_buf, mn->m_ext.ext_size, NULL, BUS_DMA_NOWAIT)))
		panic("if_ubaget"); /* Cannot happen */
	m->m_pkthdr.rcvif = ifp;
	m->m_len = m->m_pkthdr.len = len;
	return m;
}
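
/*
 * Example (sketch only): on the receive side, a driver's interrupt handler
 * would hand each completed receive slot to if_ubaget() and pass the
 * returned packet up the stack.  If no replacement cluster is available,
 * if_ubaget() returns NULL and the old buffer stays loaded, so the frame
 * is simply dropped.  The names sc, sc_if, sc_ifr, i and len below are
 * hypothetical:
 *
 *	m = if_ubaget(&sc->sc_ifuba, &sc->sc_ifr[i], &sc->sc_if, len);
 *	if (m == NULL)
 *		sc->sc_if.if_ierrors++;
 *	else
 *		(*sc->sc_if.if_input)(&sc->sc_if, m);
 */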

/*
 * Called after a packet is sent.  Releases any resources held for the
 * transmission.
 */
void
if_ubaend(struct ifubinfo *ifu, struct ifxmt *ifw)
{
	struct uba_softc *uh = ifu->iff_softc;

	if (ifw->ifw_flags & IFRW_MBUF) {
		bus_dmamap_unload(uh->uh_dmat, ifw->ifw_map);
		m_freem(ifw->ifw_mbuf);
		ifw->ifw_mbuf = NULL;
	}
}

/*
 * Map a chain of mbufs onto a network interface
 * in preparation for an i/o operation.
 * The argument chain of mbufs includes the local network
 * header, which is copied into the mapped, aligned
 * i/o space.
 */
int
if_ubaput(struct ifubinfo *ifu, struct ifxmt *ifw, struct mbuf *m)
{
	struct uba_softc *uh = ifu->iff_softc;
	int len;

	/*
	 * The single-mbuf fast path below is disabled (the condition is
	 * commented out), so every packet currently takes the copy path.
	 */
	if (/* m->m_next ==*/ 0) {
		/*
		 * Map the outgoing packet directly.
		 */
		if ((ifw->ifw_flags & IFRW_MBUF) == 0) {
			bus_dmamap_unload(uh->uh_dmat, ifw->ifw_map);
			ifw->ifw_flags |= IFRW_MBUF;
		}
		bus_dmamap_load(uh->uh_dmat, ifw->ifw_map, mtod(m, void *),
		    m->m_len, NULL, BUS_DMA_NOWAIT);
		ifw->ifw_mbuf = m;
		len = m->m_len;
	} else {
		/*
		 * Copy the whole chain into the preallocated, contiguous
		 * DMA buffer and free the mbufs.
		 */
		if (ifw->ifw_flags & IFRW_MBUF) {
			bus_dmamap_load(uh->uh_dmat, ifw->ifw_map,
			    ifw->ifw_vaddr, ifw->ifw_size, NULL, BUS_DMA_NOWAIT);
			ifw->ifw_flags &= ~IFRW_MBUF;
		}
		len = m->m_pkthdr.len;
		m_copydata(m, 0, m->m_pkthdr.len, ifw->ifw_vaddr);
		m_freem(m);
	}
	return len;
}
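
/*
 * Example (sketch only): on the transmit side, a driver's start routine
 * would copy each outgoing packet into a free transmit slot with
 * if_ubaput() and kick the device, and its transmit-complete interrupt
 * would release the slot with if_ubaend().  The names sc, sc_ifw, next,
 * done and xx_start_transmit() below are hypothetical:
 *
 *	IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
 *	if (m == NULL)
 *		return;
 *	len = if_ubaput(&sc->sc_ifuba, &sc->sc_ifw[next], m);
 *	xx_start_transmit(sc, next, len);
 *
 * and later, from the transmit-complete interrupt:
 *
 *	if_ubaend(&sc->sc_ifuba, &sc->sc_ifw[done]);
 */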