/*	$NetBSD: if_ni.c,v 1.35.4.2 2010/03/11 15:03:24 yamt Exp $ */
/*
 * Copyright (c) 2000 Ludd, University of Lule}, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Lule}, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEBNA/DEBNT/DEBNK ethernet cards.
 * Things that are still to do:
 *	Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ni.c,v 1.35.4.2 2010/03/11 15:03:24 yamt Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/sched.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/bus.h>
#ifdef __vax__
#include <machine/mtpr.h>
#include <machine/pte.h>
#endif

#include <dev/bi/bireg.h>
#include <dev/bi/bivar.h>

#include "ioconf.h"
#include "locators.h"

/*
 * Tunable buffer parameters. It is a good idea to keep them as a power
 * of 8; then they will fit into a logical VAX page.
 */
#define NMSGBUF		8	/* Message queue entries */
#define NTXBUF		16	/* Transmit queue entries */
#define NTXFRAGS	8	/* Number of transmit buffer fragments */
#define NRXBUF		24	/* Receive queue entries */
#define NBDESCS		(NTXBUF * NTXFRAGS + NRXBUF)
#define NQUEUES		3	/* RX + TX + MSG */
#define PKTHDR		18	/* Length of (control) packet header */
#define RXADD		18	/* Additional length of receive datagram */
#define TXADD		(10+NTXFRAGS*8)	/* "" transmit "" */
#define MSGADD		134	/* "" message "" */
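/*
 * With the values above the buffer descriptor table holds
 * NTXBUF * NTXFRAGS = 128 transmit fragment descriptors followed by
 * NRXBUF = 24 receive descriptors, 152 (NBDESCS) in total; niattach()
 * indexes the receive descriptors starting at NTXBUF * NTXFRAGS.
 */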

#include <dev/bi/if_nireg.h>	/* XXX include earlier */

/*
 * Macros for (most cases of) insqti/remqhi.
 * Retry NRETRIES times to do the operation; if it still fails, assume
 * a lost lock and panic.
 */
#define NRETRIES	100
#define INSQTI(e, h)	({					\
	int ret = 0, __i;					\
	for (__i = 0; __i < NRETRIES; __i++) {			\
		if ((ret = insqti(e, h)) != ILCK_FAILED)	\
			break;					\
	}							\
	if (__i == NRETRIES)					\
		panic("ni: insqti failed at %d", __LINE__);	\
	ret;							\
})
#define REMQHI(h)	({					\
	int __i; void *ret = NULL;				\
	for (__i = 0; __i < NRETRIES; __i++) {			\
		if ((ret = remqhi(h)) != (void *)ILCK_FAILED)	\
			break;					\
	}							\
	if (__i == NRETRIES)					\
		panic("ni: remqhi failed at %d", __LINE__);	\
	ret;							\
})
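/*
 * insqti()/remqhi() wrap the VAX interlocked queue operations that the
 * host and the port use to share the command and free queues.
 * ILCK_FAILED means the queue header's secondary interlock could not be
 * taken right now; after NRETRIES attempts the lock is presumed lost
 * for good and we panic rather than spin forever.
 */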


#define nipqb	(&sc->sc_gvppqb->nc_pqb)
#define gvp	sc->sc_gvppqb
#define fqb	sc->sc_fqb
#define bbd	sc->sc_bbd
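/*
 * Shorthand for the per-softc data structures; these macros expect a
 * local variable named "sc" to be in scope wherever they are used.
 */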

struct ni_softc {
	device_t sc_dev;		/* Configuration common part */
	struct evcnt sc_intrcnt;	/* Interrupt counting */
	struct ethercom sc_ec;		/* Ethernet common part */
#define sc_if	sc_ec.ec_if		/* network-visible interface */
	bus_space_tag_t sc_iot;
	bus_addr_t sc_ioh;
	bus_dma_tag_t sc_dmat;
	struct ni_gvppqb *sc_gvppqb;	/* Port queue block */
	struct ni_gvppqb *sc_pgvppqb;	/* Phys address of PQB */
	struct ni_fqb *sc_fqb;		/* Free Queue block */
	struct ni_bbd *sc_bbd;		/* Buffer descriptors */
	u_int8_t sc_enaddr[ETHER_ADDR_LEN];
};

static int nimatch(device_t, cfdata_t, void *);
static void niattach(device_t, device_t, void *);
static void niinit(struct ni_softc *);
static void nistart(struct ifnet *);
static void niintr(void *);
static int niioctl(struct ifnet *, u_long, void *);
static int ni_add_rxbuf(struct ni_softc *, struct ni_dg *, int);
static void ni_setup(struct ni_softc *);
static void nitimeout(struct ifnet *);
static void ni_shutdown(void *);
static void ni_getpgs(struct ni_softc *sc, int size, void **v, paddr_t *p);
static int failtest(struct ni_softc *, int, int, int, const char *);

volatile int endwait, retry;	/* Used during autoconfig */

CFATTACH_DECL_NEW(ni, sizeof(struct ni_softc),
    nimatch, niattach, NULL, NULL);

#define NI_WREG(csr, val) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
#define NI_RREG(csr) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)

#define WAITREG(csr,val) while (NI_RREG(csr) & val);
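/*
 * WAITREG() busy-waits until the given bit(s) in a port register read
 * back as clear; it is used below to wait for the port to take over
 * (and release) ownership of the PCR/PSR before issuing the next
 * command.
 */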
/*
 * Check for present device.
 */
static int
nimatch(device_t parent, cfdata_t cf, void *aux)
{
	struct bi_attach_args *ba = aux;
	u_short type;

	type = bus_space_read_2(ba->ba_iot, ba->ba_ioh, BIREG_DTYPE);
	if (type != BIDT_DEBNA && type != BIDT_DEBNT && type != BIDT_DEBNK)
		return 0;

	if (cf->cf_loc[BICF_NODE] != BICF_NODE_DEFAULT &&
	    cf->cf_loc[BICF_NODE] != ba->ba_nodenr)
		return 0;

	return 1;
}

/*
 * Allocate a bunch of descriptor-safe memory.
 * Each structure must start at the beginning of its own page(s).
 */
static void
ni_getpgs(struct ni_softc *sc, int size, void **v, paddr_t *p)
{
	bus_dma_segment_t seg;
	int nsegs, error;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
	    &nsegs, BUS_DMA_NOWAIT)) != 0)
		panic(" unable to allocate memory: error %d", error);

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nsegs, size, v,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0)
		panic(" unable to map memory: error %d", error);

	if (p)
		*p = seg.ds_addr;
	memset(*v, 0, size);
}
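/*
 * Note that bus_dmamem_alloc() is called with PAGE_SIZE alignment and a
 * single segment, so each allocation begins on a page boundary and is
 * physically contiguous; the physical address of that page is handed
 * back through *p when the caller asks for it, and the memory is zeroed
 * here.
 */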

static int
failtest(struct ni_softc *sc, int reg, int mask, int test, const char *str)
{
	int i = 100;

	do {
		DELAY(100000);
	} while (((NI_RREG(reg) & mask) != test) && --i);

	if (i == 0) {
		printf("%s: %s\n", device_xname(sc->sc_dev), str);
		return 1;
	}
	return 0;
}
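/*
 * failtest() polls the given register until (reg & mask) == test or the
 * retry count runs out: 100 iterations of DELAY(100000) is roughly a
 * ten second timeout, after which the supplied message is printed and
 * the caller aborts the attach.
 */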


/*
 * Interface exists: make available by filling in network interface
 * record. System will initialize the interface when it is ready
 * to accept packets.
 */
static void
niattach(device_t parent, device_t self, void *aux)
{
	struct bi_attach_args *ba = aux;
	struct ni_softc *sc = device_private(self);
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct ni_msg *msg;
	struct ni_ptdb *ptdb;
	void *va;
	int i, j, s, res;
	u_short type;

	sc->sc_dev = self;

	type = bus_space_read_2(ba->ba_iot, ba->ba_ioh, BIREG_DTYPE);
	printf(": DEBN%c\n", type == BIDT_DEBNA ? 'A' : type == BIDT_DEBNT ?
	    'T' : 'K');
	sc->sc_iot = ba->ba_iot;
	sc->sc_ioh = ba->ba_ioh;
	sc->sc_dmat = ba->ba_dmat;

	bi_intr_establish(ba->ba_icookie, ba->ba_ivec,
	    niintr, sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "intr");

	ni_getpgs(sc, sizeof(struct ni_gvppqb), (void **)&sc->sc_gvppqb,
	    (paddr_t *)&sc->sc_pgvppqb);
	ni_getpgs(sc, sizeof(struct ni_fqb), (void **)&sc->sc_fqb, 0);
	ni_getpgs(sc, NBDESCS * sizeof(struct ni_bbd),
	    (void **)&sc->sc_bbd, 0);
	/*
	 * Zero the newly allocated memory.
	 */

	nipqb->np_veclvl = (ba->ba_ivec << 2) + 2;
	nipqb->np_node = ba->ba_intcpu;
	nipqb->np_vpqb = (u_int32_t)gvp;
#ifdef __vax__
	nipqb->np_spt = nipqb->np_gpt = mfpr(PR_SBR);
	nipqb->np_sptlen = nipqb->np_gptlen = mfpr(PR_SLR);
#else
#error Must fix support for non-vax.
#endif
	nipqb->np_bvplvl = 1;
	nipqb->np_vfqb = (u_int32_t)fqb;
	nipqb->np_vbdt = (u_int32_t)bbd;
	nipqb->np_nbdr = NBDESCS;

	/* Free queue block */
	nipqb->np_freeq = NQUEUES;
	fqb->nf_mlen = PKTHDR+MSGADD;
	fqb->nf_dlen = PKTHDR+TXADD;
	fqb->nf_rlen = PKTHDR+RXADD;

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = nistart;
	ifp->if_ioctl = niioctl;
	ifp->if_watchdog = nitimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Start init sequence.
	 */

	/* Reset the node */
	NI_WREG(BIREG_VAXBICSR, NI_RREG(BIREG_VAXBICSR) | BICSR_NRST);
	DELAY(500000);
	i = 20;
	while ((NI_RREG(BIREG_VAXBICSR) & BICSR_BROKE) && --i)
		DELAY(500000);
	if (i == 0) {
		printf("%s: BROKE bit set after reset\n", device_xname(self));
		return;
	}

	/* Check state */
	if (failtest(sc, NI_PSR, PSR_STATE, PSR_UNDEF, "not undefined state"))
		return;

	/* Clear owner bits */
	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);
	NI_WREG(NI_PCR, NI_RREG(NI_PCR) & ~PCR_OWN);

	/* kick off init */
	NI_WREG(NI_PCR, (u_int32_t)sc->sc_pgvppqb | PCR_INIT | PCR_OWN);
	while (NI_RREG(NI_PCR) & PCR_OWN)
		DELAY(100000);

	/* Check state */
	if (failtest(sc, NI_PSR, PSR_INITED, PSR_INITED, "failed initialize"))
		return;

	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_OWN|PCR_ENABLE);
	WAITREG(NI_PCR, PCR_OWN);
	WAITREG(NI_PSR, PSR_OWN);

	/* Check state */
	if (failtest(sc, NI_PSR, PSR_STATE, PSR_ENABLED, "failed enable"))
		return;

	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);

	/*
	 * The message queue packets must be located at the beginning
	 * of a page. A VAX page is 512 bytes, but it clusters 8 pages.
	 * This knowledge is used here when allocating pages.
	 * !!! How should this be done on MIPS and Alpha??? !!!
	 */
#if NBPG < 4096
#error pagesize too small
#endif
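	/*
	 * Each buffer below is carved out of its own 512-byte slot;
	 * since ni_getpgs() returns page-aligned memory, every slot
	 * starts on a 512-byte (VAX page) boundary as required.
	 */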
	s = splvm();
	/* Set up message free queue */
	ni_getpgs(sc, NMSGBUF * 512, &va, 0);
	for (i = 0; i < NMSGBUF; i++) {
		msg = (void *)((char *)va + i * 512);
		res = INSQTI(msg, &fqb->nf_mforw);
	}
	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* Set up xmit queue */
	ni_getpgs(sc, NTXBUF * 512, &va, 0);
	for (i = 0; i < NTXBUF; i++) {
		struct ni_dg *data;

		data = (void *)((char *)va + i * 512);
		data->nd_status = 0;
		data->nd_len = TXADD;
		data->nd_ptdbidx = 1;
		data->nd_opcode = BVP_DGRAM;
		for (j = 0; j < NTXFRAGS; j++) {
			data->bufs[j]._offset = 0;
			data->bufs[j]._key = 1;
			bbd[i * NTXFRAGS + j].nb_key = 1;
			bbd[i * NTXFRAGS + j].nb_status = 0;
			data->bufs[j]._index = i * NTXFRAGS + j;
		}
		res = INSQTI(data, &fqb->nf_dforw);
	}
	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_FREEQNE|PCR_DFREEQ|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* recv buffers */
	ni_getpgs(sc, NRXBUF * 512, &va, 0);
	for (i = 0; i < NRXBUF; i++) {
		struct ni_dg *data;
		int idx;

		data = (void *)((char *)va + i * 512);
		data->nd_len = RXADD;
		data->nd_opcode = BVP_DGRAMRX;
		data->nd_ptdbidx = 2;
		data->bufs[0]._key = 1;

		idx = NTXBUF * NTXFRAGS + i;
		if (ni_add_rxbuf(sc, data, idx))
			panic("niattach: ni_add_rxbuf: out of mbufs");

		res = INSQTI(data, &fqb->nf_rforw);
	}
	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_FREEQNE|PCR_RFREEQ|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	splx(s);
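
	/*
	 * All three free queues (message, transmit datagram and receive
	 * datagram) are now primed, and the port has been told about
	 * each non-empty free queue through the PCR_FREEQNE commands
	 * above.
	 */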

	/* Set initial parameters */
	msg = REMQHI(&fqb->nf_mforw);

	msg->nm_opcode = BVP_MSG;
	msg->nm_status = 0;
	msg->nm_len = sizeof(struct ni_param) + 6;
	msg->nm_opcode2 = NI_WPARAM;
	((struct ni_param *)&msg->nm_text[0])->np_flags = NP_PAD;

	endwait = retry = 0;
	res = INSQTI(msg, &gvp->nc_forw0);

retry:	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);
	i = 1000;
	while (endwait == 0 && --i)
		DELAY(10000);

	if (endwait == 0) {
		if (++retry < 3)
			goto retry;
		printf("%s: no response to set params\n", device_xname(self));
		return;
	}
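	/*
	 * The NI_WPARAM response is handled in niintr(), which copies
	 * the station address out of the reply into sc_enaddr and sets
	 * endwait, terminating the polling loop above.
	 */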

	/* Clear counters */
	msg = REMQHI(&fqb->nf_mforw);
	msg->nm_opcode = BVP_MSG;
	msg->nm_status = 0;
	msg->nm_len = sizeof(struct ni_param) + 6;
	msg->nm_opcode2 = NI_RCCNTR;

	res = INSQTI(msg, &gvp->nc_forw0);

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* Enable transmit logic */
	msg = REMQHI(&fqb->nf_mforw);

	msg->nm_opcode = BVP_MSG;
	msg->nm_status = 0;
	msg->nm_len = 18;
	msg->nm_opcode2 = NI_STPTDB;
	ptdb = (struct ni_ptdb *)&msg->nm_text[0];
	memset(ptdb, 0, sizeof(struct ni_ptdb));
	ptdb->np_index = 1;
	ptdb->np_fque = 1;

	res = INSQTI(msg, &gvp->nc_forw0);

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* Wait for everything to finish */
	WAITREG(NI_PSR, PSR_OWN);

	printf("%s: hardware address %s\n", device_xname(self),
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
	if (shutdownhook_establish(ni_shutdown, sc) == 0)
		aprint_error_dev(self,
		    "WARNING: unable to establish shutdown hook\n");
}

/*
 * Initialization of interface.
 */
void
niinit(struct ni_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;

	/*
	 * Set flags (so that ni_setup() does the right thing).
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send setup messages so that the rx/tx logic starts.
	 */
	ni_setup(sc);

}

/*
 * Start output on interface.
 */
void
nistart(struct ifnet *ifp)
{
	struct ni_softc *sc = ifp->if_softc;
	struct ni_dg *data;
	struct ni_bbd *bdp;
	struct mbuf *m, *m0;
	int i, cnt, res, mlen;

	if (ifp->if_flags & IFF_OACTIVE)
		return;
#ifdef DEBUG
	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: nistart\n", device_xname(sc->sc_dev));
#endif

	while (fqb->nf_dforw) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == 0)
			break;

		data = REMQHI(&fqb->nf_dforw);
		if ((int)data == Q_EMPTY) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		for (m0 = m, cnt = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				cnt++;
		if (cnt > NTXFRAGS)
			panic("nistart");	/* XXX */
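		/*
		 * The chain is DMA'd directly, one buffer descriptor per
		 * non-empty mbuf, so it must fit in NTXFRAGS (8) fragments;
		 * the XXX above marks that a longer chain should presumably
		 * be coalesced into fewer mbufs instead of panicking.
		 */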

		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, m);
		bdp = &bbd[(data->bufs[0]._index & 0x7fff)];
		for (m0 = m, i = 0, mlen = 0; m0; m0 = m0->m_next) {
			if (m0->m_len == 0)
				continue;
			bdp->nb_status = (mtod(m0, u_int32_t) & NIBD_OFFSET) |
			    NIBD_VALID;
			bdp->nb_pte = (u_int32_t)kvtopte(mtod(m0, void *));
			bdp->nb_len = m0->m_len;
			data->bufs[i]._offset = 0;
			data->bufs[i]._len = bdp->nb_len;
			data->bufs[i]._index |= NIDG_CHAIN;
			mlen += bdp->nb_len;
			bdp++;
			i++;
		}
		data->nd_opcode = BVP_DGRAM;
		data->nd_pad3 = 1;
		data->nd_ptdbidx = 1;
		data->nd_len = 10 + i * 8;
		data->bufs[i - 1]._index &= ~NIDG_CHAIN;
		data->nd_cmdref = (u_int32_t)m;
#ifdef DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: sending %d bytes (%d segments)\n",
			    device_xname(sc->sc_dev), mlen, i);
#endif

		res = INSQTI(data, &gvp->nc_forw0);
		if (res == Q_EMPTY) {
			WAITREG(NI_PCR, PCR_OWN);
			NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
		}
	}
}

void
niintr(void *arg)
{
	struct ni_softc *sc = arg;
	struct ni_dg *data;
	struct ni_msg *msg;
	struct ifnet *ifp = &sc->sc_if;
	struct ni_bbd *bd;
	struct mbuf *m;
	int idx, res;

	if ((NI_RREG(NI_PSR) & PSR_STATE) != PSR_ENABLED)
		return;

	if ((NI_RREG(NI_PSR) & PSR_ERR))
		printf("%s: PSR %x\n", device_xname(sc->sc_dev),
		    NI_RREG(NI_PSR));

	KERNEL_LOCK(1, NULL);
	/* Got any response packets? */
	while ((NI_RREG(NI_PSR) & PSR_RSQ) && (data = REMQHI(&gvp->nc_forwr))) {

		switch (data->nd_opcode) {
		case BVP_DGRAMRX: /* Receive datagram */
			idx = data->bufs[0]._index;
			bd = &bbd[idx];
			m = (void *)data->nd_cmdref;
			m->m_pkthdr.len = m->m_len =
			    data->bufs[0]._len - ETHER_CRC_LEN;
			m->m_pkthdr.rcvif = ifp;
			if (ni_add_rxbuf(sc, data, idx)) {
				bd->nb_len = (m->m_ext.ext_size - 2);
				bd->nb_pte =
				    (long)kvtopte(m->m_ext.ext_buf);
				bd->nb_status = 2 | NIBD_VALID;
				bd->nb_key = 1;
			}
			data->nd_len = RXADD;
			data->nd_status = 0;
			res = INSQTI(data, &fqb->nf_rforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_RFREEQ|PCR_OWN);
			}
			if (m == (void *)data->nd_cmdref)
				break; /* Out of mbufs */
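			/*
			 * If ni_add_rxbuf() failed, nd_cmdref still points
			 * at the old mbuf: it has been recycled as the
			 * receive buffer above, so this packet is dropped
			 * rather than passed up the stack.
			 */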

			if (ifp->if_bpf)
				bpf_ops->bpf_mtap(ifp->if_bpf, m);
			(*ifp->if_input)(ifp, m);
			break;

		case BVP_DGRAM:
			m = (struct mbuf *)data->nd_cmdref;
			ifp->if_flags &= ~IFF_OACTIVE;
			m_freem(m);
			res = INSQTI(data, &fqb->nf_dforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_DFREEQ|PCR_OWN);
			}
			break;

		case BVP_MSGRX:
			msg = (struct ni_msg *)data;
			switch (msg->nm_opcode2) {
			case NI_WPARAM:
				memcpy(sc->sc_enaddr,
				    ((struct ni_param *)&msg->nm_text[0])->np_dpa,
				    ETHER_ADDR_LEN);
				endwait = 1;
				break;

			case NI_RCCNTR:
			case NI_CLPTDB:
			case NI_STPTDB:
				break;

			default:
				printf("Unkn resp %d\n",
				    msg->nm_opcode2);
				break;
			}
			res = INSQTI(data, &fqb->nf_mforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
			}
			break;

		default:
			printf("Unknown opcode %d\n", data->nd_opcode);
			res = INSQTI(data, &fqb->nf_mforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
			}
		}
	}

	/* Try to kick on the start routine again */
	nistart(ifp);

	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~(PSR_OWN|PSR_RSQ));
	KERNEL_UNLOCK_ONE(NULL);
}

/*
 * Process an ioctl request.
 */
int
niioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ni_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			niinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If the interface is marked down and it is running,
			 * stop it.
			 */
			ifp->if_flags &= ~IFF_RUNNING;
			ni_setup(sc);
			break;
		case IFF_UP:
			/*
			 * If the interface is marked up and it is stopped,
			 * then start it.
			 */
			niinit(sc);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * Send a new setup packet to match any new changes.
			 * (Like IFF_PROMISC etc)
			 */
			ni_setup(sc);
			break;
		default:
			break;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				ni_setup(sc);
			error = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
ni_add_rxbuf(struct ni_softc *sc, struct ni_dg *data, int idx)
{
	struct ni_bbd *bd = &bbd[idx];
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	m->m_data += 2;
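	/*
	 * Offsetting the data by two bytes leaves the 14-byte Ethernet
	 * header misaligned so that the IP header that follows it ends
	 * up longword aligned; the same two-byte offset appears to be
	 * what the "2 |" in nb_status below encodes (compare the
	 * NIBD_OFFSET handling in nistart()).
	 */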
	bd->nb_len = (m->m_ext.ext_size - 2);
	bd->nb_pte = (long)kvtopte(m->m_ext.ext_buf);
	bd->nb_status = 2 | NIBD_VALID;
	bd->nb_key = 1;

	data->bufs[0]._offset = 0;
	data->bufs[0]._len = bd->nb_len;
	data->bufs[0]._index = idx;
	data->nd_cmdref = (long)m;

	return (0);
}

/*
 * Create setup packet and put in queue for sending.
 */
void
ni_setup(struct ni_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct ni_msg *msg;
	struct ni_ptdb *ptdb;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, res;

	msg = REMQHI(&fqb->nf_mforw);
	if ((int)msg == Q_EMPTY)
		return; /* What to do? */

	ptdb = (struct ni_ptdb *)&msg->nm_text[0];
	memset(ptdb, 0, sizeof(struct ni_ptdb));

	msg->nm_opcode = BVP_MSG;
	msg->nm_len = 18;
	ptdb->np_index = 2;	/* definition type index */
	ptdb->np_fque = 2;	/* Free queue */
	if (ifp->if_flags & IFF_RUNNING) {
		msg->nm_opcode2 = NI_STPTDB;
		ptdb->np_type = ETHERTYPE_IP;
		ptdb->np_flags = PTDB_UNKN|PTDB_BDC;
		if (ifp->if_flags & IFF_PROMISC)
			ptdb->np_flags |= PTDB_PROMISC;
		memset(ptdb->np_mcast[0], 0xff, ETHER_ADDR_LEN); /* Broadcast */
		ptdb->np_adrlen = 1;
		msg->nm_len += 8;
		ifp->if_flags &= ~IFF_ALLMULTI;
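		/*
		 * Walk the multicast list: a range entry (addrlo != addrhi)
		 * cannot be expressed in the PTDB, so fall back to
		 * all-multicast; otherwise each address is copied in after
		 * the broadcast entry and np_adrlen/nm_len grow accordingly.
		 */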
		if ((ifp->if_flags & IFF_PROMISC) == 0) {
			ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
			i = 1;
			while (enm != NULL) {
				if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
					ifp->if_flags |= IFF_ALLMULTI;
					ptdb->np_flags |= PTDB_AMC;
					break;
				}
				msg->nm_len += 8;
				ptdb->np_adrlen++;
				memcpy(ptdb->np_mcast[i++], enm->enm_addrlo,
				    ETHER_ADDR_LEN);
				ETHER_NEXT_MULTI(step, enm);
			}
		}
	} else
		msg->nm_opcode2 = NI_CLPTDB;

	res = INSQTI(msg, &gvp->nc_forw0);
	if (res == Q_EMPTY) {
		WAITREG(NI_PCR, PCR_OWN);
		NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	}
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
nitimeout(struct ifnet *ifp)
{
#if 0
	struct ni_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	printf("%s: xmit logic died, resetting...\n",
	    device_xname(sc->sc_dev));
	/*
	 * Do a reset of the interface, to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	niinit(sc);
#endif
}

/*
 * Shutdown hook. Make sure the interface is stopped at reboot.
 */
void
ni_shutdown(void *arg)
{
	struct ni_softc *sc = arg;

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_OWN|PCR_SHUTDOWN);
	WAITREG(NI_PCR, PCR_OWN);
	WAITREG(NI_PSR, PSR_OWN);
}