/*	$NetBSD: if_le_ioasic.c,v 1.17 2000/07/17 01:29:02 thorpej Exp $	*/

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * LANCE on DEC IOCTL ASIC.
 */

#include <sys/cdefs.h>			/* RCS ID & macro defns */
__KERNEL_RCSID(0, "$NetBSD: if_le_ioasic.c,v 1.17 2000/07/17 01:29:02 thorpej Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/ic/lancereg.h>
#include <dev/ic/lancevar.h>
#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicreg.h>
#include <dev/tc/ioasicvar.h>

struct le_ioasic_softc {
	struct am7990_softc sc_am7990;	/* glue to MI code */
	struct lereg1 *sc_r1;		/* LANCE registers */
	/* XXX must match with le_softc of if_levar.h XXX */

	bus_dma_tag_t sc_dmat;		/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dmamap */
};

static int le_ioasic_match __P((struct device *, struct cfdata *, void *));
static void le_ioasic_attach __P((struct device *, struct device *, void *));

struct cfattach le_ioasic_ca = {
	sizeof(struct le_softc), le_ioasic_match, le_ioasic_attach
};

static void le_ioasic_copytobuf_gap2 __P((struct lance_softc *, void *,
	    int, int));
static void le_ioasic_copyfrombuf_gap2 __P((struct lance_softc *, void *,
	    int, int));
static void le_ioasic_copytobuf_gap16 __P((struct lance_softc *, void *,
	    int, int));
static void le_ioasic_copyfrombuf_gap16 __P((struct lance_softc *, void *,
	    int, int));
static void le_ioasic_zerobuf_gap16 __P((struct lance_softc *, int, int));

static int
le_ioasic_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct ioasicdev_attach_args *d = aux;

	if (strncmp("PMAD-BA ", d->iada_modname, TC_ROM_LLEN) != 0)
		return 0;

	return 1;
}

/* IOASIC LANCE DMA needs 128KB boundary aligned 128KB chunk */
#define	LE_IOASIC_MEMSIZE	(128*1024)
#define	LE_IOASIC_MEMALIGN	(128*1024)

static void
le_ioasic_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct le_ioasic_softc *sc = (void *)self;
	struct ioasicdev_attach_args *d = aux;
	struct lance_softc *le = &sc->sc_am7990.lsc;
	bus_space_tag_t ioasic_bst;
	bus_space_handle_t ioasic_bsh;
	bus_dma_tag_t dmat;
	bus_dma_segment_t seg;
	tc_addr_t tca;
	u_int32_t ssr;
	int rseg;
	caddr_t le_iomem;

	ioasic_bst = ((struct ioasic_softc *)parent)->sc_bst;
	ioasic_bsh = ((struct ioasic_softc *)parent)->sc_bsh;
	dmat = sc->sc_dmat = ((struct ioasic_softc *)parent)->sc_dmat;
	/*
	 * Allocate a DMA area for the chip.
	 */
	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("can't allocate DMA area for LANCE\n");
		return;
	}
	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
	    &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		printf("can't map DMA area for LANCE\n");
		bus_dmamem_free(dmat, &seg, rseg);
		return;
	}
	/*
	 * Create and load the DMA map for the DMA area.
	 */
	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		printf("can't create DMA map\n");
		goto bad;
	}
	if (bus_dmamap_load(dmat, sc->sc_dmamap,
	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("can't load DMA map\n");
		goto bad;
	}
	/*
	 * Bind 128KB buffer with IOASIC DMA.
	 */
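	/*
	 * IOASIC_DMA_ADDR converts the buffer's bus address into the
	 * form the LANCE DMA pointer register expects; DMA is then
	 * enabled by setting the LANCE DMA enable bit in the CSR.
	 */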
	tca = IOASIC_DMA_ADDR(sc->sc_dmamap->dm_segs[0].ds_addr);
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_LANCE_DMAPTR, tca);
	ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);
	ssr |= IOASIC_CSR_DMAEN_LANCE;
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);

	sc->sc_r1 = (struct lereg1 *)
	    TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	le->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);
	le->sc_copytodesc = le_ioasic_copytobuf_gap2;
	le->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	le->sc_copytobuf = le_ioasic_copytobuf_gap16;
	le->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	le->sc_zerobuf = le_ioasic_zerobuf_gap16;

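	/*
	 * Hand off to the common DEC LANCE attach code; the second
	 * argument points at the Ethernet station address ROM in
	 * IOASIC slot 2.
	 */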
	dec_le_common_attach(&sc->sc_am7990,
	    (u_char *)((struct ioasic_softc *)parent)->sc_base
	    + IOASIC_SLOT_2_START);

	ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_NET,
	    am7990_intr, sc);
	return;

 bad:
	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
	bus_dmamem_free(dmat, &seg, rseg);
}

/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  The code doesn't worry about
 * doing an extra byte.
 */
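/*
 * Viewed as an array of 16-bit words, logical byte offset `boff'
 * (when even) lands at word index `boff': each 16-bit data word is
 * followed by a 16-bit pad word, so the word index advances by two
 * for every two logical bytes.
 */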

void
le_ioasic_copytobuf_gap2(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	int len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	volatile u_int16_t *bptr;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (u_int16_t)*from;
}

void
le_ioasic_copyfrombuf_gap2(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	volatile u_int16_t *bptr;
	u_int16_t tmp;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */
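/*
 * Each 16 logical bytes occupy 32 physical bytes (16 data, 16 pad),
 * so the chunk holding logical offset `boff' starts at physical
 * offset (boff / 16) * 32, i.e. (boff << 1) & ~0x1f, and `boff & 0xf'
 * is the offset into that chunk's 16 data bytes.
 */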

void
le_ioasic_copytobuf_gap16(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	int len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
	switch ((u_long)from & (sizeof(u_int32_t) -1)) {
	case 2:
		/* Ethernet headers make this the dominant case. */
		do {
			u_int32_t *dst = (u_int32_t*)bptr;
			u_int16_t t0;
			u_int32_t t1, t2, t3, t4;

			/* read from odd-16-bit-aligned, cached src */
			t0 = *(u_int16_t*)from;
			t1 = *(u_int32_t*)(from+2);
			t2 = *(u_int32_t*)(from+6);
			t3 = *(u_int32_t*)(from+10);
			t4 = *(u_int16_t*)(from+14);

			/* DMA buffer is uncached on mips */
			dst[0] = t0 | (t1 << 16);
			dst[1] = (t1 >> 16) | (t2 << 16);
			dst[2] = (t2 >> 16) | (t3 << 16);
			dst[3] = (t3 >> 16) | (t4 << 16);

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

	case 0:
		do {
			u_int32_t *src = (u_int32_t*)from;
			u_int32_t *dst = (u_int32_t*)bptr;
			u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

	default:
		/* Does odd-aligned case ever happen? */
		do {
			bcopy(from, bptr, 16);
			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;
	}
	if (len)
		bcopy(from, bptr, len);
}

void
le_ioasic_copyfrombuf_gap16(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/* Dispose of boff; source of copy is subsequently 16-byte aligned. */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(bptr+boff, to, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	if (len >= 16)
	switch ((u_long)to & (sizeof(u_int32_t) -1)) {
	case 2:
		/*
		 * to is aligned to an odd 16-bit boundary.  Ethernet headers
		 * make this the dominant case (98% or more).
		 */
		do {
			u_int32_t *src = (u_int32_t*)bptr;
			u_int32_t t0, t1, t2, t3;

			/* read from uncached aligned DMA buf */
			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

			/* write to odd-16-bit-word aligned dst */
			*(u_int16_t *) (to+0) = (u_short) t0;
			*(u_int32_t *) (to+2) = (t0 >> 16) | (t1 << 16);
			*(u_int32_t *) (to+6) = (t1 >> 16) | (t2 << 16);
			*(u_int32_t *) (to+10) = (t2 >> 16) | (t3 << 16);
			*(u_int16_t *) (to+14) = (t3 >> 16);
			bptr += 32;
			to += 16;
			len -= 16;
		} while (len > 16);
		break;
	case 0:
		/* 32-bit aligned copy.  Rare. */
		do {
			u_int32_t *src = (u_int32_t*)bptr;
			u_int32_t *dst = (u_int32_t*)to;
			u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;

	/* XXX Does odd-byte-aligned case ever happen? */
	default:
		do {
			bcopy(bptr, to, 16);
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;
	}
	if (len)
		bcopy(bptr, to, len);
}

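/*
 * Zero `len' logical bytes starting at logical offset `boff',
 * stepping 32 physical bytes forward for every 16 logical bytes
 * cleared.
 */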
void
le_ioasic_zerobuf_gap16(sc, boff, len)
	struct lance_softc *sc;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t bptr;
	int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		bzero(bptr + boff, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}