/* $NetBSD: if_ni.c,v 1.35.4.3 2010/08/11 22:53:18 yamt Exp $ */
/*
 * Copyright (c) 2000 Ludd, University of Lule}, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Lule}, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEBNA/DEBNT/DEBNK ethernet cards.
 * Things that are still to do:
 *      Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ni.c,v 1.35.4.3 2010/08/11 22:53:18 yamt Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/sched.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/bus.h>
#ifdef __vax__
#include <machine/mtpr.h>
#include <machine/pte.h>
#endif

#include <dev/bi/bireg.h>
#include <dev/bi/bivar.h>

#include "ioconf.h"
#include "locators.h"

/*
 * Tunable buffer parameters. Good idea to have them as multiples of 8;
 * then they will fit into a logical VAX page.
 */
#define NMSGBUF         8       /* Message queue entries */
#define NTXBUF          16      /* Transmit queue entries */
#define NTXFRAGS        8       /* Number of transmit buffer fragments */
#define NRXBUF          24      /* Receive queue entries */
#define NBDESCS         (NTXBUF * NTXFRAGS + NRXBUF)
#define NQUEUES         3       /* RX + TX + MSG */
#define PKTHDR          18      /* Length of (control) packet header */
#define RXADD           18      /* Additional length of receive datagram */
#define TXADD           (10+NTXFRAGS*8)  /* "" transmit "" */
#define MSGADD          134     /* "" message "" */
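/*
 * Note: niattach() carves the message, transmit and receive queue
 * entries out of 512-byte slots (one entry per slot), so the lengths
 * built from PKTHDR and the *ADD values above must stay below 512.
 */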

#include <dev/bi/if_nireg.h>    /* XXX include earlier */

/*
 * Macros for (most cases of) insqti/remqhi.
 * Retry NRETRIES times to do the operation; if it still fails, assume
 * a lost lock and panic.
 */
#define NRETRIES        100
#define INSQTI(e, h) ({ \
        int ret = 0, __i; \
        for (__i = 0; __i < NRETRIES; __i++) { \
                if ((ret = insqti(e, h)) != ILCK_FAILED) \
                        break; \
        } \
        if (__i == NRETRIES) \
                panic("ni: insqti failed at %d", __LINE__); \
        ret; \
})
#define REMQHI(h) ({ \
        int __i; void *ret = NULL; \
        for (__i = 0; __i < NRETRIES; __i++) { \
                if ((ret = remqhi(h)) != (void *)ILCK_FAILED) \
                        break; \
        } \
        if (__i == NRETRIES) \
                panic("ni: remqhi failed at %d", __LINE__); \
        ret; \
})
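/*
 * The callers below compare the INSQTI() result with Q_EMPTY to learn
 * whether the queue was empty before the insertion (in which case the
 * port is poked through the PCR), and compare the REMQHI() result with
 * Q_EMPTY to detect an exhausted queue.
 */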
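/*
 * Shorthand for the per-device control structures.  These expand
 * through a local 'sc' pointer and are therefore only usable inside
 * functions that have a struct ni_softc *sc in scope.
 */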
#define nipqb   (&sc->sc_gvppqb->nc_pqb)
#define gvp     sc->sc_gvppqb
#define fqb     sc->sc_fqb
#define bbd     sc->sc_bbd

struct ni_softc {
        device_t sc_dev;                /* Configuration common part */
        struct evcnt sc_intrcnt;        /* Interrupt counting */
        struct ethercom sc_ec;          /* Ethernet common part */
#define sc_if   sc_ec.ec_if             /* network-visible interface */
        bus_space_tag_t sc_iot;
        bus_addr_t sc_ioh;
        bus_dma_tag_t sc_dmat;
        struct ni_gvppqb *sc_gvppqb;    /* Port queue block */
        struct ni_gvppqb *sc_pgvppqb;   /* Phys address of PQB */
        struct ni_fqb *sc_fqb;          /* Free Queue block */
        struct ni_bbd *sc_bbd;          /* Buffer descriptors */
        u_int8_t sc_enaddr[ETHER_ADDR_LEN];
};

static int nimatch(device_t, cfdata_t, void *);
static void niattach(device_t, device_t, void *);
static void niinit(struct ni_softc *);
static void nistart(struct ifnet *);
static void niintr(void *);
static int niioctl(struct ifnet *, u_long, void *);
static int ni_add_rxbuf(struct ni_softc *, struct ni_dg *, int);
static void ni_setup(struct ni_softc *);
static void nitimeout(struct ifnet *);
static void ni_shutdown(void *);
static void ni_getpgs(struct ni_softc *sc, int size, void **v, paddr_t *p);
static int failtest(struct ni_softc *, int, int, int, const char *);

volatile int endwait, retry;    /* Used during autoconfig */

CFATTACH_DECL_NEW(ni, sizeof(struct ni_softc),
    nimatch, niattach, NULL, NULL);

#define NI_WREG(csr, val) \
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
#define NI_RREG(csr) \
        bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)

#define WAITREG(csr,val) while (NI_RREG(csr) & val);
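/* Busy-wait until the port clears the given CSR bit(s); no timeout. */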
/*
 * Check for present device.
 */
static int
nimatch(device_t parent, cfdata_t cf, void *aux)
{
        struct bi_attach_args *ba = aux;
        u_short type;

        type = bus_space_read_2(ba->ba_iot, ba->ba_ioh, BIREG_DTYPE);
        if (type != BIDT_DEBNA && type != BIDT_DEBNT && type != BIDT_DEBNK)
                return 0;

        if (cf->cf_loc[BICF_NODE] != BICF_NODE_DEFAULT &&
            cf->cf_loc[BICF_NODE] != ba->ba_nodenr)
                return 0;

        return 1;
}

/*
 * Allocate a bunch of descriptor-safe memory.
 * Each structure must start at the beginning of its own page(s).
 */
static void
ni_getpgs(struct ni_softc *sc, int size, void **v, paddr_t *p)
{
        bus_dma_segment_t seg;
        int nsegs, error;

        if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
            &nsegs, BUS_DMA_NOWAIT)) != 0)
                panic(" unable to allocate memory: error %d", error);

        if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nsegs, size, v,
            BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0)
                panic(" unable to map memory: error %d", error);

        if (p)
                *p = seg.ds_addr;
        memset(*v, 0, size);
}

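/*
 * Poll a CSR until (reg & mask) == test, for up to about ten seconds
 * (100 x 100ms).  Returns 0 on success; on timeout the given message
 * is printed and 1 is returned.
 */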
static int
failtest(struct ni_softc *sc, int reg, int mask, int test, const char *str)
{
        int i = 100;

        do {
                DELAY(100000);
        } while (((NI_RREG(reg) & mask) != test) && --i);

        if (i == 0) {
                printf("%s: %s\n", device_xname(sc->sc_dev), str);
                return 1;
        }
        return 0;
}


/*
 * Interface exists: make available by filling in network interface
 * record. System will initialize the interface when it is ready
 * to accept packets.
 */
static void
niattach(device_t parent, device_t self, void *aux)
{
        struct bi_attach_args *ba = aux;
        struct ni_softc *sc = device_private(self);
        struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
        struct ni_msg *msg;
        struct ni_ptdb *ptdb;
        void *va;
        int i, j, s, res;
        u_short type;

        sc->sc_dev = self;

        type = bus_space_read_2(ba->ba_iot, ba->ba_ioh, BIREG_DTYPE);
        printf(": DEBN%c\n", type == BIDT_DEBNA ? 'A' : type == BIDT_DEBNT ?
            'T' : 'K');
        sc->sc_iot = ba->ba_iot;
        sc->sc_ioh = ba->ba_ioh;
        sc->sc_dmat = ba->ba_dmat;

        bi_intr_establish(ba->ba_icookie, ba->ba_ivec,
            niintr, sc, &sc->sc_intrcnt);
        evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
            device_xname(self), "intr");

        ni_getpgs(sc, sizeof(struct ni_gvppqb), (void **)&sc->sc_gvppqb,
            (paddr_t *)&sc->sc_pgvppqb);
        ni_getpgs(sc, sizeof(struct ni_fqb), (void **)&sc->sc_fqb, 0);
        ni_getpgs(sc, NBDESCS * sizeof(struct ni_bbd),
            (void **)&sc->sc_bbd, 0);
        /*
         * Zero the newly allocated memory.
         */

        nipqb->np_veclvl = (ba->ba_ivec << 2) + 2;
        nipqb->np_node = ba->ba_intcpu;
        nipqb->np_vpqb = (u_int32_t)gvp;
#ifdef __vax__
        nipqb->np_spt = nipqb->np_gpt = mfpr(PR_SBR);
        nipqb->np_sptlen = nipqb->np_gptlen = mfpr(PR_SLR);
#else
#error Must fix support for non-vax.
#endif
        nipqb->np_bvplvl = 1;
        nipqb->np_vfqb = (u_int32_t)fqb;
        nipqb->np_vbdt = (u_int32_t)bbd;
        nipqb->np_nbdr = NBDESCS;

        /* Free queue block */
        nipqb->np_freeq = NQUEUES;
        fqb->nf_mlen = PKTHDR+MSGADD;
        fqb->nf_dlen = PKTHDR+TXADD;
        fqb->nf_rlen = PKTHDR+RXADD;

        strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_start = nistart;
        ifp->if_ioctl = niioctl;
        ifp->if_watchdog = nitimeout;
        IFQ_SET_READY(&ifp->if_snd);

        /*
         * Start init sequence.
         */

        /* Reset the node */
        NI_WREG(BIREG_VAXBICSR, NI_RREG(BIREG_VAXBICSR) | BICSR_NRST);
        DELAY(500000);
        i = 20;
        while ((NI_RREG(BIREG_VAXBICSR) & BICSR_BROKE) && --i)
                DELAY(500000);
        if (i == 0) {
                printf("%s: BROKE bit set after reset\n", device_xname(self));
                return;
        }

        /* Check state */
        if (failtest(sc, NI_PSR, PSR_STATE, PSR_UNDEF, "not undefined state"))
                return;

        /* Clear owner bits */
        NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);
        NI_WREG(NI_PCR, NI_RREG(NI_PCR) & ~PCR_OWN);

        /* kick off init */
        NI_WREG(NI_PCR, (u_int32_t)sc->sc_pgvppqb | PCR_INIT | PCR_OWN);
        while (NI_RREG(NI_PCR) & PCR_OWN)
                DELAY(100000);

        /* Check state */
        if (failtest(sc, NI_PSR, PSR_INITED, PSR_INITED, "failed initialize"))
                return;

        NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);

        WAITREG(NI_PCR, PCR_OWN);
        NI_WREG(NI_PCR, PCR_OWN|PCR_ENABLE);
        WAITREG(NI_PCR, PCR_OWN);
        WAITREG(NI_PSR, PSR_OWN);

        /* Check state */
        if (failtest(sc, NI_PSR, PSR_STATE, PSR_ENABLED, "failed enable"))
                return;

        NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);

        /*
         * The message queue packets must be located at the beginning
         * of a page. A VAX hardware page is 512 bytes, but the VM system
         * clusters 8 of them into one logical page.
         * This knowledge is used here when allocating pages.
         * !!! How should this be done on MIPS and Alpha??? !!!
         */
#if NBPG < 4096
#error pagesize too small
#endif
        s = splvm();
        /* Set up message free queue */
        ni_getpgs(sc, NMSGBUF * 512, &va, 0);
        for (i = 0; i < NMSGBUF; i++) {
                msg = (void *)((char *)va + i * 512);
                res = INSQTI(msg, &fqb->nf_mforw);
        }
        WAITREG(NI_PCR, PCR_OWN);
        NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
        WAITREG(NI_PCR, PCR_OWN);

        /* Set up xmit queue */
        ni_getpgs(sc, NTXBUF * 512, &va, 0);
        for (i = 0; i < NTXBUF; i++) {
                struct ni_dg *data;

                data = (void *)((char *)va + i * 512);
                data->nd_status = 0;
                data->nd_len = TXADD;
                data->nd_ptdbidx = 1;
                data->nd_opcode = BVP_DGRAM;
                for (j = 0; j < NTXFRAGS; j++) {
                        data->bufs[j]._offset = 0;
                        data->bufs[j]._key = 1;
                        bbd[i * NTXFRAGS + j].nb_key = 1;
                        bbd[i * NTXFRAGS + j].nb_status = 0;
                        data->bufs[j]._index = i * NTXFRAGS + j;
                }
                res = INSQTI(data, &fqb->nf_dforw);
        }
        WAITREG(NI_PCR, PCR_OWN);
        NI_WREG(NI_PCR, PCR_FREEQNE|PCR_DFREEQ|PCR_OWN);
        WAITREG(NI_PCR, PCR_OWN);

        /* recv buffers */
        ni_getpgs(sc, NRXBUF * 512, &va, 0);
        for (i = 0; i < NRXBUF; i++) {
                struct ni_dg *data;
                int idx;

                data = (void *)((char *)va + i * 512);
                data->nd_len = RXADD;
                data->nd_opcode = BVP_DGRAMRX;
                data->nd_ptdbidx = 2;
                data->bufs[0]._key = 1;

                idx = NTXBUF * NTXFRAGS + i;
                if (ni_add_rxbuf(sc, data, idx))
                        panic("niattach: ni_add_rxbuf: out of mbufs");

                res = INSQTI(data, &fqb->nf_rforw);
        }
        WAITREG(NI_PCR, PCR_OWN);
        NI_WREG(NI_PCR, PCR_FREEQNE|PCR_RFREEQ|PCR_OWN);
        WAITREG(NI_PCR, PCR_OWN);

        splx(s);

        /* Set initial parameters */
        msg = REMQHI(&fqb->nf_mforw);

        msg->nm_opcode = BVP_MSG;
        msg->nm_status = 0;
        msg->nm_len = sizeof(struct ni_param) + 6;
        msg->nm_opcode2 = NI_WPARAM;
        ((struct ni_param *)&msg->nm_text[0])->np_flags = NP_PAD;

        endwait = retry = 0;
        res = INSQTI(msg, &gvp->nc_forw0);

retry:  WAITREG(NI_PCR, PCR_OWN);
        NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
        WAITREG(NI_PCR, PCR_OWN);
        i = 1000;
        while (endwait == 0 && --i)
                DELAY(10000);

        if (endwait == 0) {
                if (++retry < 3)
                        goto retry;
                printf("%s: no response to set params\n", device_xname(self));
                return;
        }

        /* Clear counters */
        msg = REMQHI(&fqb->nf_mforw);
        msg->nm_opcode = BVP_MSG;
        msg->nm_status = 0;
        msg->nm_len = sizeof(struct ni_param) + 6;
        msg->nm_opcode2 = NI_RCCNTR;

        res = INSQTI(msg, &gvp->nc_forw0);

        WAITREG(NI_PCR, PCR_OWN);
        NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
        WAITREG(NI_PCR, PCR_OWN);

        /* Enable transmit logic */
        msg = REMQHI(&fqb->nf_mforw);

        msg->nm_opcode = BVP_MSG;
        msg->nm_status = 0;
        msg->nm_len = 18;
        msg->nm_opcode2 = NI_STPTDB;
        ptdb = (struct ni_ptdb *)&msg->nm_text[0];
        memset(ptdb, 0, sizeof(struct ni_ptdb));
        ptdb->np_index = 1;
        ptdb->np_fque = 1;

        res = INSQTI(msg, &gvp->nc_forw0);

        WAITREG(NI_PCR, PCR_OWN);
        NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
        WAITREG(NI_PCR, PCR_OWN);

        /* Wait for everything to finish */
        WAITREG(NI_PSR, PSR_OWN);

        printf("%s: hardware address %s\n", device_xname(self),
            ether_sprintf(sc->sc_enaddr));

        /*
         * Attach the interface.
         */
        if_attach(ifp);
        ether_ifattach(ifp, sc->sc_enaddr);
        if (shutdownhook_establish(ni_shutdown, sc) == 0)
                aprint_error_dev(self,
                    "WARNING: unable to establish shutdown hook\n");
}

/*
 * Initialization of interface.
 */
void
niinit(struct ni_softc *sc)
{
        struct ifnet *ifp = &sc->sc_if;

        /*
         * Set flags (so that ni_setup() does the right thing).
         */
        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

        /*
         * Send setup messages so that the rx/tx logic starts.
         */
        ni_setup(sc);

}

/*
 * Start output on interface.
 */
void
nistart(struct ifnet *ifp)
{
        struct ni_softc *sc = ifp->if_softc;
        struct ni_dg *data;
        struct ni_bbd *bdp;
        struct mbuf *m, *m0;
        int i, cnt, res, mlen;

        if (ifp->if_flags & IFF_OACTIVE)
                return;
#ifdef DEBUG
        if (ifp->if_flags & IFF_DEBUG)
                printf("%s: nistart\n", device_xname(sc->sc_dev));
#endif

        while (fqb->nf_dforw) {
                IFQ_POLL(&ifp->if_snd, m);
                if (m == 0)
                        break;

                data = REMQHI(&fqb->nf_dforw);
                if ((int)data == Q_EMPTY) {
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }

                IFQ_DEQUEUE(&ifp->if_snd, m);

                /*
                 * Count number of mbufs in chain.
                 * Always do DMA directly from mbufs, therefore the transmit
                 * ring is really big.
                 */
                for (m0 = m, cnt = 0; m0; m0 = m0->m_next)
                        if (m0->m_len)
                                cnt++;
                if (cnt > NTXFRAGS)
                        panic("nistart");       /* XXX */

                bpf_mtap(ifp, m);
                bdp = &bbd[(data->bufs[0]._index & 0x7fff)];
                for (m0 = m, i = 0, mlen = 0; m0; m0 = m0->m_next) {
                        if (m0->m_len == 0)
                                continue;
                        bdp->nb_status = (mtod(m0, u_int32_t) & NIBD_OFFSET) |
                            NIBD_VALID;
                        bdp->nb_pte = (u_int32_t)kvtopte(mtod(m0, void *));
                        bdp->nb_len = m0->m_len;
                        data->bufs[i]._offset = 0;
                        data->bufs[i]._len = bdp->nb_len;
                        data->bufs[i]._index |= NIDG_CHAIN;
                        mlen += bdp->nb_len;
                        bdp++;
                        i++;
                }
                data->nd_opcode = BVP_DGRAM;
                data->nd_pad3 = 1;
                data->nd_ptdbidx = 1;
                data->nd_len = 10 + i * 8;
                data->bufs[i - 1]._index &= ~NIDG_CHAIN;
                data->nd_cmdref = (u_int32_t)m;
#ifdef DEBUG
                if (ifp->if_flags & IFF_DEBUG)
                        printf("%s: sending %d bytes (%d segments)\n",
                            device_xname(sc->sc_dev), mlen, i);
#endif

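                /*
                 * Hand the datagram to the port.  If the command queue
                 * was empty before the insert, the port may be idle,
                 * so poke it through the PCR.
                 */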
                res = INSQTI(data, &gvp->nc_forw0);
                if (res == Q_EMPTY) {
                        WAITREG(NI_PCR, PCR_OWN);
                        NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
                }
        }
}

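/*
 * Interrupt routine.  Drain the response queue and dispatch on the
 * opcode: received datagrams are passed up the stack, transmit
 * completions free their mbufs, and port messages (such as the
 * NI_WPARAM reply carrying the station address) are handled inline.
 * Each entry is then returned to its free queue.
 */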
void
niintr(void *arg)
{
        struct ni_softc *sc = arg;
        struct ni_dg *data;
        struct ni_msg *msg;
        struct ifnet *ifp = &sc->sc_if;
        struct ni_bbd *bd;
        struct mbuf *m;
        int idx, res;

        if ((NI_RREG(NI_PSR) & PSR_STATE) != PSR_ENABLED)
                return;

        if ((NI_RREG(NI_PSR) & PSR_ERR))
                printf("%s: PSR %x\n", device_xname(sc->sc_dev), NI_RREG(NI_PSR));

        KERNEL_LOCK(1, NULL);
        /* Got any response packets? */
        while ((NI_RREG(NI_PSR) & PSR_RSQ) && (data = REMQHI(&gvp->nc_forwr))) {

                switch (data->nd_opcode) {
                case BVP_DGRAMRX: /* Receive datagram */
                        idx = data->bufs[0]._index;
                        bd = &bbd[idx];
                        m = (void *)data->nd_cmdref;
                        m->m_pkthdr.len = m->m_len =
                            data->bufs[0]._len - ETHER_CRC_LEN;
                        m->m_pkthdr.rcvif = ifp;
                        if (ni_add_rxbuf(sc, data, idx)) {
                                bd->nb_len = (m->m_ext.ext_size - 2);
                                bd->nb_pte =
                                    (long)kvtopte(m->m_ext.ext_buf);
                                bd->nb_status = 2 | NIBD_VALID;
                                bd->nb_key = 1;
                        }
                        data->nd_len = RXADD;
                        data->nd_status = 0;
                        res = INSQTI(data, &fqb->nf_rforw);
                        if (res == Q_EMPTY) {
                                WAITREG(NI_PCR, PCR_OWN);
                                NI_WREG(NI_PCR, PCR_FREEQNE|PCR_RFREEQ|PCR_OWN);
                        }
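                        /*
                         * If ni_add_rxbuf() failed above, the descriptor
                         * still points at the old mbuf; it has been
                         * recycled for reception, so do not pass it up.
                         */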
                        if (m == (void *)data->nd_cmdref)
                                break; /* Out of mbufs */

                        bpf_mtap(ifp, m);
                        (*ifp->if_input)(ifp, m);
                        break;

                case BVP_DGRAM:
                        m = (struct mbuf *)data->nd_cmdref;
                        ifp->if_flags &= ~IFF_OACTIVE;
                        m_freem(m);
                        res = INSQTI(data, &fqb->nf_dforw);
                        if (res == Q_EMPTY) {
                                WAITREG(NI_PCR, PCR_OWN);
                                NI_WREG(NI_PCR, PCR_FREEQNE|PCR_DFREEQ|PCR_OWN);
                        }
                        break;

                case BVP_MSGRX:
                        msg = (struct ni_msg *)data;
                        switch (msg->nm_opcode2) {
                        case NI_WPARAM:
                                memcpy(sc->sc_enaddr,
                                    ((struct ni_param *)&msg->nm_text[0])->np_dpa,
                                    ETHER_ADDR_LEN);
                                endwait = 1;
                                break;

                        case NI_RCCNTR:
                        case NI_CLPTDB:
                        case NI_STPTDB:
                                break;

                        default:
                                printf("Unkn resp %d\n",
                                    msg->nm_opcode2);
                                break;
                        }
                        res = INSQTI(data, &fqb->nf_mforw);
                        if (res == Q_EMPTY) {
                                WAITREG(NI_PCR, PCR_OWN);
                                NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
                        }
                        break;

                default:
                        printf("Unknown opcode %d\n", data->nd_opcode);
                        res = INSQTI(data, &fqb->nf_mforw);
                        if (res == Q_EMPTY) {
                                WAITREG(NI_PCR, PCR_OWN);
                                NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
                        }
                }
        }

        /* Try to kick on the start routine again */
        nistart(ifp);

        NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~(PSR_OWN|PSR_RSQ));
        KERNEL_UNLOCK_ONE(NULL);
}

/*
 * Process an ioctl request.
 */
int
niioctl(struct ifnet *ifp, u_long cmd, void *data)
{
        struct ni_softc *sc = ifp->if_softc;
        struct ifaddr *ifa = (struct ifaddr *)data;
        int s = splnet(), error = 0;

        switch (cmd) {

        case SIOCINITIFADDR:
                ifp->if_flags |= IFF_UP;
                switch (ifa->ifa_addr->sa_family) {
#ifdef INET
                case AF_INET:
                        niinit(sc);
                        arp_ifinit(ifp, ifa);
                        break;
#endif
                }
                break;

        case SIOCSIFFLAGS:
                if ((error = ifioctl_common(ifp, cmd, data)) != 0)
                        break;
                switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
                case IFF_RUNNING:
                        /*
                         * If interface is marked down and it is running,
                         * stop it.
                         */
                        ifp->if_flags &= ~IFF_RUNNING;
                        ni_setup(sc);
                        break;
                case IFF_UP:
                        /*
                         * If interface is marked up and it is stopped, then
                         * start it.
                         */
                        niinit(sc);
                        break;
                case IFF_UP|IFF_RUNNING:
                        /*
                         * Send a new setup packet to match any new changes.
                         * (Like IFF_PROMISC etc)
                         */
                        ni_setup(sc);
                        break;
                default:
                        break;
                }
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                /*
                 * Update our multicast list.
                 */
                if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
                        /*
                         * Multicast list has changed; set the hardware filter
                         * accordingly.
                         */
                        if (ifp->if_flags & IFF_RUNNING)
                                ni_setup(sc);
                        error = 0;
                }
                break;

        default:
                error = ether_ioctl(ifp, cmd, data);
                break;
        }
        splx(s);
        return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
ni_add_rxbuf(struct ni_softc *sc, struct ni_dg *data, int idx)
{
        struct ni_bbd *bd = &bbd[idx];
        struct mbuf *m;

        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == NULL)
                return (ENOBUFS);

        MCLGET(m, M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {
                m_freem(m);
                return (ENOBUFS);
        }

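        /*
         * Offset the payload by two bytes so that the IP header ends up
         * longword aligned behind the 14-byte Ethernet header.
         */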
        m->m_data += 2;
        bd->nb_len = (m->m_ext.ext_size - 2);
        bd->nb_pte = (long)kvtopte(m->m_ext.ext_buf);
        bd->nb_status = 2 | NIBD_VALID;
        bd->nb_key = 1;

        data->bufs[0]._offset = 0;
        data->bufs[0]._len = bd->nb_len;
        data->bufs[0]._index = idx;
        data->nd_cmdref = (long)m;

        return (0);
}

/*
 * Create setup packet and put in queue for sending.
 */
void
ni_setup(struct ni_softc *sc)
{
        struct ifnet *ifp = &sc->sc_if;
        struct ni_msg *msg;
        struct ni_ptdb *ptdb;
        struct ether_multi *enm;
        struct ether_multistep step;
        int i, res;

        msg = REMQHI(&fqb->nf_mforw);
        if ((int)msg == Q_EMPTY)
                return; /* What to do? */

        ptdb = (struct ni_ptdb *)&msg->nm_text[0];
        memset(ptdb, 0, sizeof(struct ni_ptdb));

        msg->nm_opcode = BVP_MSG;
        msg->nm_len = 18;
        ptdb->np_index = 2;     /* definition type index */
        ptdb->np_fque = 2;      /* Free queue */
        if (ifp->if_flags & IFF_RUNNING) {
                msg->nm_opcode2 = NI_STPTDB;
                ptdb->np_type = ETHERTYPE_IP;
                ptdb->np_flags = PTDB_UNKN|PTDB_BDC;
                if (ifp->if_flags & IFF_PROMISC)
                        ptdb->np_flags |= PTDB_PROMISC;
                memset(ptdb->np_mcast[0], 0xff, ETHER_ADDR_LEN); /* Broadcast */
                ptdb->np_adrlen = 1;
                msg->nm_len += 8;
                ifp->if_flags &= ~IFF_ALLMULTI;
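                /*
                 * Walk the multicast list and add each address to the
                 * PTDB.  Address ranges cannot be expressed here, so fall
                 * back to receiving all multicast if one is encountered.
                 */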
                if ((ifp->if_flags & IFF_PROMISC) == 0) {
                        ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
                        i = 1;
                        while (enm != NULL) {
                                if (memcmp(enm->enm_addrlo,
                                    enm->enm_addrhi, 6)) {
                                        ifp->if_flags |= IFF_ALLMULTI;
                                        ptdb->np_flags |= PTDB_AMC;
                                        break;
                                }
                                msg->nm_len += 8;
                                ptdb->np_adrlen++;
                                memcpy(ptdb->np_mcast[i++], enm->enm_addrlo,
                                    ETHER_ADDR_LEN);
                                ETHER_NEXT_MULTI(step, enm);
                        }
                }
        } else
                msg->nm_opcode2 = NI_CLPTDB;

        res = INSQTI(msg, &gvp->nc_forw0);
        if (res == Q_EMPTY) {
                WAITREG(NI_PCR, PCR_OWN);
                NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
        }
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
nitimeout(struct ifnet *ifp)
{
#if 0
        struct ni_softc *sc = ifp->if_softc;

        if (sc->sc_inq == 0)
                return;

        printf("%s: xmit logic died, resetting...\n", device_xname(sc->sc_dev));
        /*
         * Do a reset of the interface, to get it going again.
         * Would it work to just restart the transmit logic?
         */
        niinit(sc);
#endif
}

/*
 * Shutdown hook. Make sure the interface is stopped at reboot.
 */
void
ni_shutdown(void *arg)
{
        struct ni_softc *sc = arg;

        WAITREG(NI_PCR, PCR_OWN);
        NI_WREG(NI_PCR, PCR_OWN|PCR_SHUTDOWN);
        WAITREG(NI_PCR, PCR_OWN);
        WAITREG(NI_PSR, PSR_OWN);
}