/*	$NetBSD: if_ni.c,v 1.33.16.2 2008/06/02 13:23:13 mjf Exp $ */
/*
 * Copyright (c) 2000 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEBNA/DEBNT/DEBNK ethernet cards.
 * Things that are still to do:
 *	Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ni.c,v 1.33.16.2 2008/06/02 13:23:13 mjf Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/sched.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <sys/bus.h>
#ifdef __vax__
#include <machine/mtpr.h>
#include <machine/pte.h>
#endif

#include <dev/bi/bireg.h>
#include <dev/bi/bivar.h>

#include "ioconf.h"
#include "locators.h"

/*
 * Tunable buffer parameters. Good idea to have them as powers of 8; then
 * they will fit into a logical VAX page.
 */
#define NMSGBUF		8	/* Message queue entries */
#define NTXBUF		16	/* Transmit queue entries */
#define NTXFRAGS	8	/* Number of transmit buffer fragments */
#define NRXBUF		24	/* Receive queue entries */
#define NBDESCS		(NTXBUF * NTXFRAGS + NRXBUF)
#define NQUEUES		3	/* RX + TX + MSG */
#define PKTHDR		18	/* Length of (control) packet header */
#define RXADD		18	/* Additional length of receive datagram */
#define TXADD		(10+NTXFRAGS*8)	/*	""	transmit   ""	*/
#define MSGADD		134	/*	""	message    ""	*/

#include <dev/bi/if_nireg.h>	/* XXX include earlier */

/*
 * Macros for (most cases of) insqti/remqhi.
 * Retry NRETRIES times to do the operation, if it still fails assume
 * a lost lock and panic.
 */
#define	NRETRIES	100
#define	INSQTI(e, h)	({						\
	int ret = 0, __i;						\
	for (__i = 0; __i < NRETRIES; __i++) {				\
		if ((ret = insqti(e, h)) != ILCK_FAILED)		\
			break;						\
	}								\
	if (__i == NRETRIES)						\
		panic("ni: insqti failed at %d", __LINE__);		\
	ret;								\
})
#define	REMQHI(h)	({						\
	int __i; void *ret = NULL;					\
	for (__i = 0; __i < NRETRIES; __i++) {				\
		if ((ret = remqhi(h)) != (void *)ILCK_FAILED)		\
			break;						\
	}								\
	if (__i == NRETRIES)						\
		panic("ni: remqhi failed at %d", __LINE__);		\
	ret;								\
})


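/*
 * Shorthand for the shared control structures.  Note that these macros
 * expect a local variable named `sc' (struct ni_softc *) to be in scope
 * wherever they are used.
 */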
#define nipqb	(&sc->sc_gvppqb->nc_pqb)
#define gvp	sc->sc_gvppqb
#define fqb	sc->sc_fqb
#define bbd	sc->sc_bbd

struct ni_softc {
	struct device	sc_dev;		/* Configuration common part	*/
	struct evcnt	sc_intrcnt;	/* Interrupt counting		*/
	struct ethercom	sc_ec;		/* Ethernet common part		*/
#define sc_if	sc_ec.ec_if		/* network-visible interface	*/
	bus_space_tag_t	sc_iot;
	bus_addr_t	sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct ni_gvppqb *sc_gvppqb;	/* Port queue block		*/
	struct ni_gvppqb *sc_pgvppqb;	/* Phys address of PQB		*/
	struct ni_fqb	*sc_fqb;	/* Free Queue block		*/
	struct ni_bbd	*sc_bbd;	/* Buffer descriptors		*/
	u_int8_t	sc_enaddr[ETHER_ADDR_LEN];
};

static	int	nimatch(device_t, cfdata_t, void *);
static	void	niattach(device_t, device_t, void *);
static	void	niinit(struct ni_softc *);
static	void	nistart(struct ifnet *);
static	void	niintr(void *);
static	int	niioctl(struct ifnet *, u_long, void *);
static	int	ni_add_rxbuf(struct ni_softc *, struct ni_dg *, int);
static	void	ni_setup(struct ni_softc *);
static	void	nitimeout(struct ifnet *);
static	void	ni_shutdown(void *);
static	void	ni_getpgs(struct ni_softc *sc, int size, void **v, paddr_t *p);
static	int	failtest(struct ni_softc *, int, int, int, const char *);

volatile int endwait, retry;	/* Used during autoconfig */

CFATTACH_DECL(ni, sizeof(struct ni_softc),
    nimatch, niattach, NULL, NULL);

#define NI_WREG(csr, val) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
#define NI_RREG(csr) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)

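/*
 * WAITREG busy-waits until the given bit(s) in a port CSR read back as
 * clear; the driver uses it to wait out the PCR/PSR ownership (OWN bit)
 * handshake with the port.
 */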
#define WAITREG(csr,val) while (NI_RREG(csr) & val);
/*
 * Check for present device.
 */
int
nimatch(device_t parent, cfdata_t cf, void *aux)
{
	struct bi_attach_args *ba = aux;
	u_short type;

	type = bus_space_read_2(ba->ba_iot, ba->ba_ioh, BIREG_DTYPE);
	if (type != BIDT_DEBNA && type != BIDT_DEBNT && type != BIDT_DEBNK)
		return 0;

	if (cf->cf_loc[BICF_NODE] != BICF_NODE_DEFAULT &&
	    cf->cf_loc[BICF_NODE] != ba->ba_nodenr)
		return 0;

	return 1;
}

/*
 * Allocate a bunch of descriptor-safe memory.
 * We need the structures to start at the beginning of their own pages.
 */
static void
ni_getpgs(struct ni_softc *sc, int size, void **v, paddr_t *p)
{
	bus_dma_segment_t seg;
	int nsegs, error;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
	    &nsegs, BUS_DMA_NOWAIT)) != 0)
		panic(" unable to allocate memory: error %d", error);

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nsegs, size, v,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0)
		panic(" unable to map memory: error %d", error);

	if (p)
		*p = seg.ds_addr;
	memset(*v, 0, size);
}

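/*
 * Poll a CSR until the bits selected by `mask' equal `test'.  Gives up
 * after roughly ten seconds (100 polls, 100 ms apart), prints `str' and
 * returns nonzero on failure.
 */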
static int
failtest(struct ni_softc *sc, int reg, int mask, int test, const char *str)
{
	int i = 100;

	do {
		DELAY(100000);
	} while (((NI_RREG(reg) & mask) != test) && --i);

	if (i == 0) {
		printf("%s: %s\n", device_xname(&sc->sc_dev), str);
		return 1;
	}
	return 0;
}


/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
niattach(device_t parent, device_t self, void *aux)
{
	struct bi_attach_args *ba = aux;
	struct ni_softc *sc = (struct ni_softc *)self;
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct ni_msg *msg;
	struct ni_ptdb *ptdb;
	void *va;
	int i, j, s, res;
	u_short type;

	type = bus_space_read_2(ba->ba_iot, ba->ba_ioh, BIREG_DTYPE);
	printf(": DEBN%c\n", type == BIDT_DEBNA ? 'A' : type == BIDT_DEBNT ?
	    'T' : 'K');
	sc->sc_iot = ba->ba_iot;
	sc->sc_ioh = ba->ba_ioh;
	sc->sc_dmat = ba->ba_dmat;

	bi_intr_establish(ba->ba_icookie, ba->ba_ivec,
	    niintr, sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(&sc->sc_dev), "intr");

	ni_getpgs(sc, sizeof(struct ni_gvppqb), (void **)&sc->sc_gvppqb,
	    (paddr_t *)&sc->sc_pgvppqb);
	ni_getpgs(sc, sizeof(struct ni_fqb), (void **)&sc->sc_fqb, 0);
	ni_getpgs(sc, NBDESCS * sizeof(struct ni_bbd),
	    (void **)&sc->sc_bbd, 0);
	/*
	 * The newly allocated memory has already been zeroed by ni_getpgs().
	 */

	nipqb->np_veclvl = (ba->ba_ivec << 2) + 2;
	nipqb->np_node = ba->ba_intcpu;
	nipqb->np_vpqb = (u_int32_t)gvp;
#ifdef __vax__
	nipqb->np_spt = nipqb->np_gpt = mfpr(PR_SBR);
	nipqb->np_sptlen = nipqb->np_gptlen = mfpr(PR_SLR);
#else
#error Must fix support for non-vax.
#endif
	nipqb->np_bvplvl = 1;
	nipqb->np_vfqb = (u_int32_t)fqb;
	nipqb->np_vbdt = (u_int32_t)bbd;
	nipqb->np_nbdr = NBDESCS;

	/* Free queue block */
	nipqb->np_freeq = NQUEUES;
	fqb->nf_mlen = PKTHDR+MSGADD;
	fqb->nf_dlen = PKTHDR+TXADD;
	fqb->nf_rlen = PKTHDR+RXADD;

	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = nistart;
	ifp->if_ioctl = niioctl;
	ifp->if_watchdog = nitimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Start init sequence.
	 */

	/* Reset the node */
	NI_WREG(BIREG_VAXBICSR, NI_RREG(BIREG_VAXBICSR) | BICSR_NRST);
	DELAY(500000);
	i = 20;
	while ((NI_RREG(BIREG_VAXBICSR) & BICSR_BROKE) && --i)
		DELAY(500000);
	if (i == 0) {
		printf("%s: BROKE bit set after reset\n",
		    device_xname(&sc->sc_dev));
		return;
	}

	/* Check state */
	if (failtest(sc, NI_PSR, PSR_STATE, PSR_UNDEF, "not undefined state"))
		return;

	/* Clear owner bits */
	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);
	NI_WREG(NI_PCR, NI_RREG(NI_PCR) & ~PCR_OWN);

	/* kick off init */
	NI_WREG(NI_PCR, (u_int32_t)sc->sc_pgvppqb | PCR_INIT | PCR_OWN);
	while (NI_RREG(NI_PCR) & PCR_OWN)
		DELAY(100000);

	/* Check state */
	if (failtest(sc, NI_PSR, PSR_INITED, PSR_INITED, "failed initialize"))
		return;

	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_OWN|PCR_ENABLE);
	WAITREG(NI_PCR, PCR_OWN);
	WAITREG(NI_PSR, PSR_OWN);

	/* Check state */
	if (failtest(sc, NI_PSR, PSR_STATE, PSR_ENABLED, "failed enable"))
		return;

	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);

	/*
	 * The message queue packets must be located at the beginning
	 * of a page.  A VAX hardware page is 512 bytes, but the VM system
	 * clusters 8 of them.  This knowledge is used here when allocating
	 * pages.
	 * !!! How should this be done on MIPS and Alpha??? !!!
	 */
#if NBPG < 4096
#error pagesize too small
#endif
	s = splvm();
	/* Set up message free queue */
	ni_getpgs(sc, NMSGBUF * 512, &va, 0);
	for (i = 0; i < NMSGBUF; i++) {
		msg = (void *)((char *)va + i * 512);
		res = INSQTI(msg, &fqb->nf_mforw);
	}
	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* Set up xmit queue */
	ni_getpgs(sc, NTXBUF * 512, &va, 0);
	for (i = 0; i < NTXBUF; i++) {
		struct ni_dg *data;

		data = (void *)((char *)va + i * 512);
		data->nd_status = 0;
		data->nd_len = TXADD;
		data->nd_ptdbidx = 1;
		data->nd_opcode = BVP_DGRAM;
		for (j = 0; j < NTXFRAGS; j++) {
			data->bufs[j]._offset = 0;
			data->bufs[j]._key = 1;
			bbd[i * NTXFRAGS + j].nb_key = 1;
			bbd[i * NTXFRAGS + j].nb_status = 0;
			data->bufs[j]._index = i * NTXFRAGS + j;
		}
		res = INSQTI(data, &fqb->nf_dforw);
	}
	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_FREEQNE|PCR_DFREEQ|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* recv buffers */
	ni_getpgs(sc, NRXBUF * 512, &va, 0);
	for (i = 0; i < NRXBUF; i++) {
		struct ni_dg *data;
		int idx;

		data = (void *)((char *)va + i * 512);
		data->nd_len = RXADD;
		data->nd_opcode = BVP_DGRAMRX;
		data->nd_ptdbidx = 2;
		data->bufs[0]._key = 1;

		idx = NTXBUF * NTXFRAGS + i;
		if (ni_add_rxbuf(sc, data, idx))
			panic("niattach: ni_add_rxbuf: out of mbufs");

		res = INSQTI(data, &fqb->nf_rforw);
	}
	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_FREEQNE|PCR_RFREEQ|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	splx(s);

	/* Set initial parameters */
	msg = REMQHI(&fqb->nf_mforw);

	msg->nm_opcode = BVP_MSG;
	msg->nm_status = 0;
	msg->nm_len = sizeof(struct ni_param) + 6;
	msg->nm_opcode2 = NI_WPARAM;
	((struct ni_param *)&msg->nm_text[0])->np_flags = NP_PAD;

	endwait = retry = 0;
	res = INSQTI(msg, &gvp->nc_forw0);

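	/*
	 * Hand the WPARAM message to command queue 0 and busy-wait for the
	 * response; niintr() sets `endwait' when the parameter reply (which
	 * carries the hardware address) has arrived.
	 */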
retry:	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);
	i = 1000;
	while (endwait == 0 && --i)
		DELAY(10000);

	if (endwait == 0) {
		if (++retry < 3)
			goto retry;
		printf("%s: no response to set params\n",
		    device_xname(&sc->sc_dev));
		return;
	}

	/* Clear counters */
	msg = REMQHI(&fqb->nf_mforw);
	msg->nm_opcode = BVP_MSG;
	msg->nm_status = 0;
	msg->nm_len = sizeof(struct ni_param) + 6;
	msg->nm_opcode2 = NI_RCCNTR;

	res = INSQTI(msg, &gvp->nc_forw0);

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* Enable transmit logic */
	msg = REMQHI(&fqb->nf_mforw);

	msg->nm_opcode = BVP_MSG;
	msg->nm_status = 0;
	msg->nm_len = 18;
	msg->nm_opcode2 = NI_STPTDB;
	ptdb = (struct ni_ptdb *)&msg->nm_text[0];
	memset(ptdb, 0, sizeof(struct ni_ptdb));
	ptdb->np_index = 1;
	ptdb->np_fque = 1;

	res = INSQTI(msg, &gvp->nc_forw0);

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* Wait for everything to finish */
	WAITREG(NI_PSR, PSR_OWN);

	printf("%s: hardware address %s\n", device_xname(&sc->sc_dev),
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
	if (shutdownhook_establish(ni_shutdown, sc) == 0)
		aprint_error_dev(&sc->sc_dev,
		    "WARNING: unable to establish shutdown hook\n");
}

/*
 * Initialization of interface.
 */
void
niinit(struct ni_softc *sc)
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;

	/*
	 * Set flags (so that ni_setup() does the right thing).
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send setup messages so that the rx/tx logic starts.
	 */
	ni_setup(sc);

}

/*
 * Start output on interface.
 */
void
nistart(struct ifnet *ifp)
{
	struct ni_softc *sc = ifp->if_softc;
	struct ni_dg *data;
	struct ni_bbd *bdp;
	struct mbuf *m, *m0;
	int i, cnt, res, mlen;

	if (ifp->if_flags & IFF_OACTIVE)
		return;
#ifdef DEBUG
	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: nistart\n", device_xname(&sc->sc_dev));
#endif

	while (fqb->nf_dforw) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == 0)
			break;

		data = REMQHI(&fqb->nf_dforw);
		if ((int)data == Q_EMPTY) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		/*
		 * Count the number of mbufs in the chain.
		 * We always do DMA directly from the mbufs; therefore the
		 * transmit ring is really big.
		 */
		for (m0 = m, cnt = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				cnt++;
		if (cnt > NTXFRAGS)
			panic("nistart");	/* XXX */

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		bdp = &bbd[(data->bufs[0]._index & 0x7fff)];
		for (m0 = m, i = 0, mlen = 0; m0; m0 = m0->m_next) {
			if (m0->m_len == 0)
				continue;
			bdp->nb_status = (mtod(m0, u_int32_t) & NIBD_OFFSET) |
			    NIBD_VALID;
			bdp->nb_pte = (u_int32_t)kvtopte(mtod(m0, void *));
			bdp->nb_len = m0->m_len;
			data->bufs[i]._offset = 0;
			data->bufs[i]._len = bdp->nb_len;
			data->bufs[i]._index |= NIDG_CHAIN;
			mlen += bdp->nb_len;
			bdp++;
			i++;
		}
		data->nd_opcode = BVP_DGRAM;
		data->nd_pad3 = 1;
		data->nd_ptdbidx = 1;
		data->nd_len = 10 + i * 8;
		data->bufs[i - 1]._index &= ~NIDG_CHAIN;
		data->nd_cmdref = (u_int32_t)m;
#ifdef DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: sending %d bytes (%d segments)\n",
			    device_xname(&sc->sc_dev), mlen, i);
#endif

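		/*
		 * Queue the datagram on command queue 0; if the queue was
		 * empty, poke the port to tell it that the command queue
		 * is no longer empty.
		 */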
		res = INSQTI(data, &gvp->nc_forw0);
		if (res == Q_EMPTY) {
			WAITREG(NI_PCR, PCR_OWN);
			NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
		}
	}
}

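/*
 * Interrupt routine: drain the port's response queue, hand received frames
 * to the network stack, recycle receive and message buffers onto their free
 * queues, and free the mbufs of completed transmits.
 */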
void
niintr(void *arg)
{
	struct ni_softc *sc = arg;
	struct ni_dg *data;
	struct ni_msg *msg;
	struct ifnet *ifp = &sc->sc_if;
	struct ni_bbd *bd;
	struct mbuf *m;
	int idx, res;

	if ((NI_RREG(NI_PSR) & PSR_STATE) != PSR_ENABLED)
		return;

	if ((NI_RREG(NI_PSR) & PSR_ERR))
		printf("%s: PSR %x\n", device_xname(&sc->sc_dev),
		    NI_RREG(NI_PSR));

	KERNEL_LOCK(1, NULL);
	/* Got any response packets? */
	while ((NI_RREG(NI_PSR) & PSR_RSQ) && (data = REMQHI(&gvp->nc_forwr))) {

		switch (data->nd_opcode) {
		case BVP_DGRAMRX: /* Receive datagram */
			idx = data->bufs[0]._index;
			bd = &bbd[idx];
			m = (void *)data->nd_cmdref;
			m->m_pkthdr.len = m->m_len =
			    data->bufs[0]._len - ETHER_CRC_LEN;
			m->m_pkthdr.rcvif = ifp;
			if (ni_add_rxbuf(sc, data, idx)) {
				bd->nb_len = (m->m_ext.ext_size - 2);
				bd->nb_pte =
				    (long)kvtopte(m->m_ext.ext_buf);
				bd->nb_status = 2 | NIBD_VALID;
				bd->nb_key = 1;
			}
			data->nd_len = RXADD;
			data->nd_status = 0;
			res = INSQTI(data, &fqb->nf_rforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_RFREEQ|PCR_OWN);
			}
			if (m == (void *)data->nd_cmdref)
				break; /* Out of mbufs */

#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif
			(*ifp->if_input)(ifp, m);
			break;

		case BVP_DGRAM:
			m = (struct mbuf *)data->nd_cmdref;
			ifp->if_flags &= ~IFF_OACTIVE;
			m_freem(m);
			res = INSQTI(data, &fqb->nf_dforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_DFREEQ|PCR_OWN);
			}
			break;

		case BVP_MSGRX:
			msg = (struct ni_msg *)data;
			switch (msg->nm_opcode2) {
			case NI_WPARAM:
				memcpy(sc->sc_enaddr,
				    ((struct ni_param *)&msg->nm_text[0])->np_dpa,
				    ETHER_ADDR_LEN);
				endwait = 1;
				break;

			case NI_RCCNTR:
			case NI_CLPTDB:
			case NI_STPTDB:
				break;

			default:
				printf("Unkn resp %d\n",
				    msg->nm_opcode2);
				break;
			}
			res = INSQTI(data, &fqb->nf_mforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
			}
			break;

		default:
			printf("Unknown opcode %d\n", data->nd_opcode);
			res = INSQTI(data, &fqb->nf_mforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
			}
		}
	}

	/* Try to kick the start routine again */
	nistart(ifp);

	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~(PSR_OWN|PSR_RSQ));
	KERNEL_UNLOCK_ONE(NULL);
}

/*
 * Process an ioctl request.
 */
int
niioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ni_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			niinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If the interface is marked down and it is running,
			 * stop it.
			 */
			ifp->if_flags &= ~IFF_RUNNING;
			ni_setup(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If the interface is marked up and it is stopped,
			 * then start it.
			 */
			niinit(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Send a new setup packet to match any new changes.
			 * (Like IFF_PROMISC etc)
			 */
			ni_setup(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				ni_setup(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;

	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
ni_add_rxbuf(struct ni_softc *sc, struct ni_dg *data, int idx)
{
	struct ni_bbd *bd = &bbd[idx];
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

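	/*
	 * Offset the data area by two bytes, presumably so that the IP
	 * header following the 14-byte ethernet header ends up longword
	 * aligned; the same offset is recorded in the buffer descriptor.
	 */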
	m->m_data += 2;
	bd->nb_len = (m->m_ext.ext_size - 2);
	bd->nb_pte = (long)kvtopte(m->m_ext.ext_buf);
	bd->nb_status = 2 | NIBD_VALID;
	bd->nb_key = 1;

	data->bufs[0]._offset = 0;
	data->bufs[0]._len = bd->nb_len;
	data->bufs[0]._index = idx;
	data->nd_cmdref = (long)m;

	return (0);
}

/*
 * Create setup packet and put in queue for sending.
 */
void
ni_setup(struct ni_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct ni_msg *msg;
	struct ni_ptdb *ptdb;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, res;

	msg = REMQHI(&fqb->nf_mforw);
	if ((int)msg == Q_EMPTY)
		return; /* What to do? */

	ptdb = (struct ni_ptdb *)&msg->nm_text[0];
	memset(ptdb, 0, sizeof(struct ni_ptdb));

	msg->nm_opcode = BVP_MSG;
	msg->nm_len = 18;
	ptdb->np_index = 2;	/* definition type index */
	ptdb->np_fque = 2;	/* Free queue */
	if (ifp->if_flags & IFF_RUNNING) {
		msg->nm_opcode2 = NI_STPTDB;
		ptdb->np_type = ETHERTYPE_IP;
		ptdb->np_flags = PTDB_UNKN|PTDB_BDC;
		if (ifp->if_flags & IFF_PROMISC)
			ptdb->np_flags |= PTDB_PROMISC;
		memset(ptdb->np_mcast[0], 0xff, ETHER_ADDR_LEN); /* Broadcast */
		ptdb->np_adrlen = 1;
		msg->nm_len += 8;
		ifp->if_flags &= ~IFF_ALLMULTI;
		if ((ifp->if_flags & IFF_PROMISC) == 0) {
			ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
			i = 1;
			while (enm != NULL) {
				if (memcmp(enm->enm_addrlo,
				    enm->enm_addrhi, 6)) {
					ifp->if_flags |= IFF_ALLMULTI;
					ptdb->np_flags |= PTDB_AMC;
					break;
				}
				msg->nm_len += 8;
				ptdb->np_adrlen++;
				memcpy(ptdb->np_mcast[i++], enm->enm_addrlo,
				    ETHER_ADDR_LEN);
				ETHER_NEXT_MULTI(step, enm);
			}
		}
	} else
		msg->nm_opcode2 = NI_CLPTDB;

	res = INSQTI(msg, &gvp->nc_forw0);
	if (res == Q_EMPTY) {
		WAITREG(NI_PCR, PCR_OWN);
		NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	}
}

/*
 * Check for dead transmit logic.  Not uncommon.
 */
void
nitimeout(struct ifnet *ifp)
{
#if 0
	struct ni_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	printf("%s: xmit logic died, resetting...\n",
	    device_xname(&sc->sc_dev));
	/*
	 * Do a reset of the interface, to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	niinit(sc);
#endif
}

/*
 * Shutdown hook.  Make sure the interface is stopped at reboot.
 */
void
ni_shutdown(void *arg)
{
	struct ni_softc *sc = arg;

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_OWN|PCR_SHUTDOWN);
	WAITREG(NI_PCR, PCR_OWN);
	WAITREG(NI_PSR, PSR_OWN);

}