1 /* $NetBSD: if_se.c,v 1.108 2020/06/29 23:04:57 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 1997 Ian W. Dall <ian.dall (at) dsto.defence.gov.au>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Ian W. Dall.
18 * 4. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Driver for Cabletron EA41x scsi ethernet adaptor.
35 *
36 * Written by Ian Dall <ian.dall (at) dsto.defence.gov.au> Feb 3, 1997
37 *
38 * Acknowledgement: Thanks are due to Philip L. Budne <budd (at) cs.bu.edu>
39 * who reverse-engineered the EA41x. In developing this code, Phil's
40 * userland daemon "etherd" was referred to extensively in lieu of
41 * accurate documentation for the device.
42 *
43 * This is a weird device! It hardly conforms to the scsi spec at
44 * all. About the only standard command supported is inquiry. Most
45 * commands are 6 bytes long, but the recv command is only 1 byte. Data
46 * must be received by periodically polling the device with the recv
47 * command.
48 *
49 * This driver is also a bit unusual. It must look like a network
50 * interface and it must also appear to be a scsi device to the scsi
51 * system. Hence there are cases where there are two entry points, e.g.
52 * sedone is called from the scsi subsystem and se_ifstart from
53 * the network interface subsystem. In addition, to facilitate scsi
54 * commands issued by userland programs, there are open, close and
55 * ioctl entry points. This allows a user program to, for example,
56 * display the ea41x stats and download new code into the adaptor ---
57 * functions which can't be performed through the ifconfig interface.
58 * Normal operation does not require any special userland program.
59 */
60
61 #include <sys/cdefs.h>
62 __KERNEL_RCSID(0, "$NetBSD: if_se.c,v 1.108 2020/06/29 23:04:57 riastradh Exp $");
63
64 #ifdef _KERNEL_OPT
65 #include "opt_inet.h"
66 #include "opt_net_mpsafe.h"
67 #include "opt_atalk.h"
68 #endif
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/callout.h>
73 #include <sys/syslog.h>
74 #include <sys/kernel.h>
75 #include <sys/file.h>
76 #include <sys/stat.h>
77 #include <sys/ioctl.h>
78 #include <sys/buf.h>
79 #include <sys/uio.h>
80 #include <sys/malloc.h>
81 #include <sys/errno.h>
82 #include <sys/device.h>
83 #include <sys/disklabel.h>
84 #include <sys/disk.h>
85 #include <sys/proc.h>
86 #include <sys/conf.h>
87 #include <sys/mutex.h>
88 #include <sys/pcq.h>
89 #include <sys/workqueue.h>
90
91 #include <dev/scsipi/scsipi_all.h>
92 #include <dev/scsipi/scsi_ctron_ether.h>
93 #include <dev/scsipi/scsiconf.h>
94
95 #include <sys/mbuf.h>
96
97 #include <sys/socket.h>
98 #include <net/if.h>
99 #include <net/if_dl.h>
100 #include <net/if_ether.h>
101 #include <net/if_media.h>
102 #include <net/bpf.h>
103
104 #ifdef INET
105 #include <netinet/in.h>
106 #include <netinet/if_inarp.h>
107 #endif
108
109
110 #ifdef NETATALK
111 #include <netatalk/at.h>
112 #endif
113
114
115 #define SETIMEOUT 1000
116 #define SEOUTSTANDING 4
117 #define SERETRIES 4
118 #define SE_PREFIX 4
119 #define ETHER_CRC 4
120 #define SEMINSIZE 60
121
122 /* Make this big enough for an ETHERMTU packet in promiscuous mode. */
123 #define MAX_SNAP (ETHERMTU + sizeof(struct ether_header) + \
124 SE_PREFIX + ETHER_CRC)
125
126 /* 10 full-length packets appear to be the most ever returned; 16k is enough. */
127 #define RBUF_LEN (16 * 1024)
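/*
 * For reference (editor's note, assuming the standard 1500-byte ETHERMTU):
 * MAX_SNAP = 1500 + 14 (ether header) + 4 (SE_PREFIX) + 4 (CRC) = 1522, so
 * RDATA_MAX (10) maximum-sized packets need at most 15220 bytes and fit
 * comfortably in the 16384-byte receive buffer.
 */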
128
129 /* Tuning parameters:
130 * The EA41x returns at most 10 packets per read (regardless of size).
131 * We attempt to adapt the polling interval so that each read returns
132 * about RDATA_GOAL packets.
133 */
134 #define RDATA_MAX 10
135 #define RDATA_GOAL 8
136
137 /* se_poll and se_poll0 are the normal and the minimum polling intervals
138 * respectively. se_poll0 should be chosen so that at maximum ethernet
139 * speed we will read nearly RDATA_MAX packets per poll. se_poll should
140 * be chosen for a reasonable maximum latency. In practice, if we are
141 * being saturated with minimum-length packets, we can't poll fast enough.
142 * Polling with zero delay actually worsens performance. se_poll0 is
143 * enforced to always be at least 1; see the worked example below.
144 */
145 #define SE_POLL 40 /* default in milliseconds */
146 #define SE_POLL0 10 /* default in milliseconds */
147 int se_poll = 0; /* Delay in ticks set at attach time */
148 int se_poll0 = 0;
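/*
 * Worked example of the adaptive interval computed in sedone() below
 * (editor's sketch, assuming hz = 100, so se_poll = 4 ticks and
 * se_poll0 = 1 tick): if the previous interval was 4 ticks and a read
 * returned n = 2 packets, the next interval is 4 * RDATA_GOAL / n =
 * 4 * 8 / 2 = 16 ticks, which the clamp to [se_poll0, se_poll] cuts
 * back to 4 ticks. A read of RDATA_MAX or more packets drops straight
 * to se_poll0, and an empty read backs off to se_poll.
 */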
149 #ifdef SE_DEBUG
150 int se_max_received = 0; /* Instrumentation */
151 #endif
152
153 #define PROTOCMD(p, d) \
154 ((d) = (p))
155
156 #define PROTOCMD_DECL(name) \
157 static const struct scsi_ctron_ether_generic name
158
159 #define PROTOCMD_DECL_SPECIAL(name) \
160 static const struct __CONCAT(scsi_, name) name
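/*
 * PROTOCMD() is a plain structure assignment: it copies one of the const
 * command templates declared below into a caller-provided command struct,
 * which the caller then completes (typically by filling in the length
 * field with _lto2b()) before passing it to se_scsipi_cmd().
 */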
161
162 /* Command initializers for commands using scsi_ctron_ether_generic */
163 PROTOCMD_DECL(ctron_ether_send) = {CTRON_ETHER_SEND, 0, {0,0}, 0};
164 PROTOCMD_DECL(ctron_ether_add_proto) = {CTRON_ETHER_ADD_PROTO, 0, {0,0}, 0};
165 PROTOCMD_DECL(ctron_ether_get_addr) = {CTRON_ETHER_GET_ADDR, 0, {0,0}, 0};
166 PROTOCMD_DECL(ctron_ether_set_media) = {CTRON_ETHER_SET_MEDIA, 0, {0,0}, 0};
167 PROTOCMD_DECL(ctron_ether_set_addr) = {CTRON_ETHER_SET_ADDR, 0, {0,0}, 0};
168 PROTOCMD_DECL(ctron_ether_set_multi) = {CTRON_ETHER_SET_MULTI, 0, {0,0}, 0};
169 PROTOCMD_DECL(ctron_ether_remove_multi) =
170 {CTRON_ETHER_REMOVE_MULTI, 0, {0,0}, 0};
171
172 /* Command initializers for commands using their own structures */
173 PROTOCMD_DECL_SPECIAL(ctron_ether_recv) = {CTRON_ETHER_RECV};
174 PROTOCMD_DECL_SPECIAL(ctron_ether_set_mode) =
175 {CTRON_ETHER_SET_MODE, 0, {0,0}, 0};
176
177 struct se_softc {
178 device_t sc_dev;
179 struct ethercom sc_ethercom; /* Ethernet common part */
180 struct scsipi_periph *sc_periph;/* contains our targ, lun, etc. */
181
182 struct callout sc_recv_ch;
183 struct kmutex sc_iflock;
184 struct if_percpuq *sc_ipq;
185 struct workqueue *sc_recv_wq, *sc_send_wq;
186 struct work sc_recv_work, sc_send_work;
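/*
 * Editor's note: outside of attach time, the *_pending flags below
 * appear to be read and written only under sc_iflock.
 */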
187 int sc_recv_work_pending, sc_send_work_pending;
188
189 char *sc_tbuf;
190 char *sc_rbuf;
191 int protos;
192 #define PROTO_IP 0x01
193 #define PROTO_ARP 0x02
194 #define PROTO_REVARP 0x04
195 #define PROTO_AT 0x08
196 #define PROTO_AARP 0x10
197 int sc_debug;
198 int sc_flags;
199 int sc_last_timeout;
200 int sc_enabled;
201 int sc_attach_state;
202 };
203
204 static int sematch(device_t, cfdata_t, void *);
205 static void seattach(device_t, device_t, void *);
206 static int sedetach(device_t, int);
207
208 static void se_ifstart(struct ifnet *);
209
210 static void sedone(struct scsipi_xfer *, int);
211 static int se_ioctl(struct ifnet *, u_long, void *);
212 static void sewatchdog(struct ifnet *);
213
214 #if 0
215 static inline uint16_t ether_cmp(void *, void *);
216 #endif
217 static void se_recv_callout(void *);
218 static void se_recv_worker(struct work *wk, void *cookie);
219 static void se_recv(struct se_softc *);
220 static struct mbuf *se_get(struct se_softc *, char *, int);
221 static int se_read(struct se_softc *, char *, int);
222 static void se_reset(struct se_softc *);
223 static int se_add_proto(struct se_softc *, int);
224 static int se_get_addr(struct se_softc *, uint8_t *);
225 static int se_set_media(struct se_softc *, int);
226 static int se_init(struct se_softc *);
227 static int se_set_multi(struct se_softc *, uint8_t *);
228 static int se_remove_multi(struct se_softc *, uint8_t *);
229 #if 0
230 static int sc_set_all_multi(struct se_softc *, int);
231 #endif
232 static void se_stop(struct se_softc *);
233 static inline int se_scsipi_cmd(struct scsipi_periph *periph,
234 struct scsipi_generic *scsipi_cmd,
235 int cmdlen, u_char *data_addr, int datalen,
236 int retries, int timeout, struct buf *bp,
237 int flags);
238 static void se_send_worker(struct work *wk, void *cookie);
239 static int se_set_mode(struct se_softc *, int, int);
240
241 int se_enable(struct se_softc *);
242 void se_disable(struct se_softc *);
243
244 CFATTACH_DECL_NEW(se, sizeof(struct se_softc),
245 sematch, seattach, sedetach, NULL);
246
247 extern struct cfdriver se_cd;
248
249 dev_type_open(seopen);
250 dev_type_close(seclose);
251 dev_type_ioctl(seioctl);
252
253 const struct cdevsw se_cdevsw = {
254 .d_open = seopen,
255 .d_close = seclose,
256 .d_read = noread,
257 .d_write = nowrite,
258 .d_ioctl = seioctl,
259 .d_stop = nostop,
260 .d_tty = notty,
261 .d_poll = nopoll,
262 .d_mmap = nommap,
263 .d_kqfilter = nokqfilter,
264 .d_discard = nodiscard,
265 .d_flag = D_OTHER | D_MPSAFE
266 };
267
268 const struct scsipi_periphsw se_switch = {
269 NULL, /* Use default error handler */
270 NULL, /* have no queue */
271 NULL, /* have no async handler */
272 sedone, /* deal with send/recv completion */
273 };
274
275 const struct scsipi_inquiry_pattern se_patterns[] = {
276 {T_PROCESSOR, T_FIXED,
277 "CABLETRN", "EA412", ""},
278 {T_PROCESSOR, T_FIXED,
279 "Cabletrn", "EA412", ""},
280 };
281
282 #if 0
283 /*
284 * Compare two Ether/802 addresses for equality, inlined and
285 * unrolled for speed.
286 * Note: use this like memcmp()
287 */
288 static inline uint16_t
289 ether_cmp(void *one, void *two)
290 {
291 uint16_t *a = (uint16_t *) one;
292 uint16_t *b = (uint16_t *) two;
293 uint16_t diff;
294
295 diff = (a[0] - b[0]) | (a[1] - b[1]) | (a[2] - b[2]);
296
297 return (diff);
298 }
299
300 #define ETHER_CMP ether_cmp
301 #endif
302
303 static int
304 sematch(device_t parent, cfdata_t match, void *aux)
305 {
306 struct scsipibus_attach_args *sa = aux;
307 int priority;
308
309 (void)scsipi_inqmatch(&sa->sa_inqbuf,
310 se_patterns, sizeof(se_patterns) / sizeof(se_patterns[0]),
311 sizeof(se_patterns[0]), &priority);
312 return (priority);
313 }
314
315 /*
316 * The routine called by the low level scsi routine when it discovers
317 * a device suitable for this driver.
318 */
319 static void
320 seattach(device_t parent, device_t self, void *aux)
321 {
322 struct se_softc *sc = device_private(self);
323 struct scsipibus_attach_args *sa = aux;
324 struct scsipi_periph *periph = sa->sa_periph;
325 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
326 uint8_t myaddr[ETHER_ADDR_LEN];
327 char wqname[MAXCOMLEN];
328 int rv;
329
330 sc->sc_dev = self;
331
332 printf("\n");
333 SC_DEBUG(periph, SCSIPI_DB2, ("seattach: "));
334
335 sc->sc_attach_state = 0;
336 callout_init(&sc->sc_recv_ch, CALLOUT_MPSAFE);
337 mutex_init(&sc->sc_iflock, MUTEX_DEFAULT, IPL_SOFTNET);
338
339 /*
340 * Store information needed to contact our base driver
341 */
342 sc->sc_periph = periph;
343 periph->periph_dev = sc->sc_dev;
344 periph->periph_switch = &se_switch;
345
346 se_poll = (SE_POLL * hz) / 1000;
347 se_poll = se_poll? se_poll: 1;
348 se_poll0 = (SE_POLL0 * hz) / 1000;
349 se_poll0 = se_poll0? se_poll0: 1;
350
351 /*
352 * Initialize and attach send and receive buffers
353 */
354 sc->sc_tbuf = malloc(ETHERMTU + sizeof(struct ether_header),
355 M_DEVBUF, M_WAITOK);
356 sc->sc_rbuf = malloc(RBUF_LEN, M_DEVBUF, M_WAITOK);
357
358 /* Initialize ifnet structure. */
359 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
360 ifp->if_softc = sc;
361 ifp->if_start = se_ifstart;
362 ifp->if_ioctl = se_ioctl;
363 ifp->if_watchdog = sewatchdog;
364 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
365 ifp->if_extflags = IFEF_MPSAFE;
366 IFQ_SET_READY(&ifp->if_snd);
367
368 se_get_addr(sc, myaddr);
369 sc->sc_attach_state = 1;
370
371 /* Attach the interface. */
372 rv = if_initialize(ifp);
373 if (rv != 0) {
374 sedetach(sc->sc_dev, 0);
375 return; /* Error */
376 }
377
378 snprintf(wqname, sizeof(wqname), "%sRx", device_xname(sc->sc_dev));
379 rv = workqueue_create(&sc->sc_recv_wq, wqname, se_recv_worker, sc,
380 PRI_SOFTNET, IPL_NET, WQ_MPSAFE);
381 if (rv != 0) {
382 aprint_error_dev(sc->sc_dev,
383 "unable to create recv Rx workqueue\n");
384 sedetach(sc->sc_dev, 0);
385 return; /* Error */
386 }
387 sc->sc_recv_work_pending = false;
388 sc->sc_attach_state = 2;
389
390 snprintf(wqname, sizeof(wqname), "%sTx", device_xname(sc->sc_dev));
391 rv = workqueue_create(&sc->sc_send_wq, wqname, se_send_worker, ifp,
392 PRI_SOFTNET, IPL_NET, WQ_MPSAFE);
393 if (rv != 0) {
394 aprint_error_dev(sc->sc_dev,
395 "unable to create send Tx workqueue\n");
396 sedetach(sc->sc_dev, 0);
397 return; /* Error */
398 }
399 sc->sc_send_work_pending = false;
400 sc->sc_attach_state = 3;
401
402 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
403 ether_ifattach(ifp, myaddr);
404 if_register(ifp);
405 sc->sc_attach_state = 4;
406 }
407
408 static int
409 sedetach(device_t self, int flags)
410 {
411 struct se_softc *sc = device_private(self);
412 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
413
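/*
 * Tear down only what seattach() managed to set up; sc_attach_state
 * records its progress: 1 = buffers allocated and address read,
 * 2 = Rx workqueue created, 3 = Tx workqueue created,
 * 4 = interface fully attached and registered.
 */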
414 switch (sc->sc_attach_state) {
415 case 4:
416 se_stop(sc);
417 mutex_enter(&sc->sc_iflock);
418 ifp->if_flags &= ~IFF_RUNNING;
419 se_disable(sc);
420 callout_halt(&sc->sc_recv_ch, NULL);
421 ether_ifdetach(ifp);
422 if_detach(ifp);
423 mutex_exit(&sc->sc_iflock);
424 if_percpuq_destroy(sc->sc_ipq);
425 /*FALLTHROUGH*/
426 case 3:
427 workqueue_destroy(sc->sc_send_wq);
428 /*FALLTHROUGH*/
429 case 2:
430 workqueue_destroy(sc->sc_recv_wq);
431 /*FALLTHROUGH*/
432 case 1:
433 free(sc->sc_rbuf, M_DEVBUF);
434 free(sc->sc_tbuf, M_DEVBUF);
435 callout_destroy(&sc->sc_recv_ch);
436 mutex_destroy(&sc->sc_iflock);
437 break;
438 default:
439 aprint_error_dev(sc->sc_dev, "detach failed (state %d)\n",
440 sc->sc_attach_state);
441 return 1;
442 break;
443 }
444 return 0;
445 }
446
447 /*
448 * Send a command to the device
449 */
450 static inline int
451 se_scsipi_cmd(struct scsipi_periph *periph, struct scsipi_generic *cmd,
452 int cmdlen, u_char *data_addr, int datalen, int retries, int timeout,
453 struct buf *bp, int flags)
454 {
455 int error;
456
457 error = scsipi_command(periph, cmd, cmdlen, data_addr,
458 datalen, retries, timeout, bp, flags);
459 return (error);
460 }
461
462 /*
463 * Start routine called from the network subsystem.
464 */
465 static void
466 se_ifstart(struct ifnet *ifp)
467 {
468 struct se_softc *sc = ifp->if_softc;
469 int i = 100;
470
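/*
 * Wait briefly (up to 100 * 10us = 1ms) for previously queued send work
 * to be picked up by the worker; the same struct work must not be
 * enqueued twice. If it is still pending after that, drop the attempt
 * and count an output error.
 */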
471 mutex_enter(&sc->sc_iflock);
472 while (i && sc->sc_send_work_pending == true) {
473 i--;
474 delay(10);
475 }
476 if (i) {
477 sc->sc_send_work_pending = true;
478 workqueue_enqueue(sc->sc_send_wq, &sc->sc_send_work, NULL);
479 } else
480 if_statinc(ifp, if_oerrors);
481 mutex_exit(&sc->sc_iflock);
482 }
483
484 /*
485 * Transmit worker: drain the interface send queue and pass each packet to the device.
486 */
487 static void
488 se_send_worker(struct work *wk, void *cookie)
489 {
490 struct ifnet *ifp = cookie;
491 struct se_softc *sc = ifp->if_softc;
492 struct scsi_ctron_ether_generic send_cmd;
493 struct mbuf *m, *m0;
494 int len, error;
495 u_char *cp;
496
497 mutex_enter(&sc->sc_iflock);
498 sc->sc_send_work_pending = false;
499 mutex_exit(&sc->sc_iflock);
500
501 KASSERT(if_is_mpsafe(ifp));
502
503 /* Don't transmit if interface is busy or not running */
504 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
505 return;
506
507 while (1) {
508 IFQ_DEQUEUE(&ifp->if_snd, m0);
509 if (m0 == 0)
510 break;
511
512 /* If BPF is listening on this interface, let it see the
513 * packet before we commit it to the wire.
514 */
515 bpf_mtap(ifp, m0, BPF_D_OUT);
516
517 /* We need to use m->m_pkthdr.len, so require the header */
518 if ((m0->m_flags & M_PKTHDR) == 0)
519 panic("ctscstart: no header mbuf");
520 len = m0->m_pkthdr.len;
521
522 /* Mark the interface busy. */
523 ifp->if_flags |= IFF_OACTIVE;
524
525 /* Chain; copy into linear buffer allocated at attach time. */
526 cp = sc->sc_tbuf;
527 for (m = m0; m != NULL; ) {
528 memcpy(cp, mtod(m, u_char *), m->m_len);
529 cp += m->m_len;
530 m = m0 = m_free(m);
531 }
532 if (len < SEMINSIZE) {
533 #ifdef SEDEBUG
534 if (sc->sc_debug)
535 printf("se: packet size %d (%zu) < %d\n", len,
536 cp - (u_char *)sc->sc_tbuf, SEMINSIZE);
537 #endif
538 memset(cp, 0, SEMINSIZE - len);
539 len = SEMINSIZE;
540 }
541
542 /* Fill out SCSI command. */
543 PROTOCMD(ctron_ether_send, send_cmd);
544 _lto2b(len, send_cmd.length);
545
546 /* Send command to device. */
547 error = se_scsipi_cmd(sc->sc_periph,
548 (void *)&send_cmd, sizeof(send_cmd),
549 sc->sc_tbuf, len, SERETRIES,
550 SETIMEOUT, NULL, XS_CTL_NOSLEEP | XS_CTL_DATA_OUT);
551 if (error) {
552 aprint_error_dev(sc->sc_dev,
553 "not queued, error %d\n", error);
554 if_statinc(ifp, if_oerrors);
555 ifp->if_flags &= ~IFF_OACTIVE;
556 } else
557 if_statinc(ifp, if_opackets);
558 }
559 }
560
561
562 /*
563 * Called from the scsibus layer via our scsi device switch.
564 */
565 static void
566 sedone(struct scsipi_xfer *xs, int error)
567 {
568 struct se_softc *sc = device_private(xs->xs_periph->periph_dev);
569 struct scsipi_generic *cmd = xs->cmd;
570 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
571
572 if (IS_SEND(cmd)) {
573 ifp->if_flags &= ~IFF_OACTIVE;
574 } else if (IS_RECV(cmd)) {
575 /* RECV complete */
576 /* pass data up. reschedule a recv */
577 /* scsipi_free_xs will call start. Harmless. */
578 if (error) {
579 /* Reschedule after a delay */
580 callout_reset(&sc->sc_recv_ch, se_poll,
581 se_recv_callout, (void *)sc);
582 } else {
583 int n, ntimeo;
584 n = se_read(sc, xs->data, xs->datalen - xs->resid);
585 #ifdef SE_DEBUG
586 if (n > se_max_received)
587 se_max_received = n;
588 #endif
589 if (n == 0)
590 ntimeo = se_poll;
591 else if (n >= RDATA_MAX)
592 ntimeo = se_poll0;
593 else {
594 ntimeo = sc->sc_last_timeout;
595 ntimeo = (ntimeo * RDATA_GOAL)/n;
596 ntimeo = (ntimeo < se_poll0?
597 se_poll0: ntimeo);
598 ntimeo = (ntimeo > se_poll?
599 se_poll: ntimeo);
600 }
601 sc->sc_last_timeout = ntimeo;
602 callout_reset(&sc->sc_recv_ch, ntimeo,
603 se_recv_callout, (void *)sc);
604 }
605 }
606 }
607
608 /*
609 * Set up a receive command by queuing the work.
610 * Usually called from a callout, but also from se_init().
611 */
612 static void
613 se_recv_callout(void *v)
614 {
615 /* do a recv command */
616 struct se_softc *sc = (struct se_softc *) v;
617
618 if (sc->sc_enabled == 0)
619 return;
620
621 mutex_enter(&sc->sc_iflock);
622 if (sc->sc_recv_work_pending == true) {
623 callout_reset(&sc->sc_recv_ch, se_poll, se_recv_callout, (void *)sc);
624 mutex_exit(&sc->sc_iflock);
625 return;
626 }
627
628 sc->sc_recv_work_pending = true;
629 workqueue_enqueue(sc->sc_recv_wq, &sc->sc_recv_work, NULL);
630 mutex_exit(&sc->sc_iflock);
631 }
632
633 /*
634 * Receive worker: clear the pending flag and perform the actual receive.
635 */
636 static void
637 se_recv_worker(struct work *wk, void *cookie)
638 {
639 struct se_softc *sc = (struct se_softc *) cookie;
640
641 mutex_enter(&sc->sc_iflock);
642 sc->sc_recv_work_pending = false;
643 mutex_exit(&sc->sc_iflock);
644 se_recv(sc);
645
646 }
647
648 /*
649 * Do the actual work of receiving data.
650 */
651 static void
652 se_recv(struct se_softc *sc)
653 {
654 struct scsi_ctron_ether_recv recv_cmd;
655 int error;
656
657 /* do a recv command */
658 PROTOCMD(ctron_ether_recv, recv_cmd);
659
660 error = se_scsipi_cmd(sc->sc_periph,
661 (void *)&recv_cmd, sizeof(recv_cmd),
662 sc->sc_rbuf, RBUF_LEN, SERETRIES, SETIMEOUT, NULL,
663 XS_CTL_NOSLEEP | XS_CTL_DATA_IN);
664 if (error)
665 callout_reset(&sc->sc_recv_ch, se_poll,
666 se_recv_callout, (void *)sc);
667 }
668
669 /*
670 * We copy the data into mbufs. When full cluster-sized units are present
671 * we copy into clusters.
672 */
673 static struct mbuf *
674 se_get(struct se_softc *sc, char *data, int totlen)
675 {
676 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
677 struct mbuf *m, *m0, *newm;
678 int len;
679
680 MGETHDR(m0, M_DONTWAIT, MT_DATA);
681 if (m0 == 0)
682 return (0);
683 m_set_rcvif(m0, ifp);
684 m0->m_pkthdr.len = totlen;
685 len = MHLEN;
686 m = m0;
687
688 while (totlen > 0) {
689 if (totlen >= MINCLSIZE) {
690 MCLGET(m, M_DONTWAIT);
691 if ((m->m_flags & M_EXT) == 0)
692 goto bad;
693 len = MCLBYTES;
694 }
695
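/*
 * On the first mbuf, nudge m_data forward so that the payload following
 * the 14-byte Ethernet header ends up ALIGN()ed for the protocol
 * headers above us.
 */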
696 if (m == m0) {
697 char *newdata = (char *)
698 ALIGN(m->m_data + sizeof(struct ether_header)) -
699 sizeof(struct ether_header);
700 len -= newdata - m->m_data;
701 m->m_data = newdata;
702 }
703
704 m->m_len = len = uimin(totlen, len);
705 memcpy(mtod(m, void *), data, len);
706 data += len;
707
708 totlen -= len;
709 if (totlen > 0) {
710 MGET(newm, M_DONTWAIT, MT_DATA);
711 if (newm == 0)
712 goto bad;
713 len = MLEN;
714 m = m->m_next = newm;
715 }
716 }
717
718 return (m0);
719
720 bad:
721 m_freem(m0);
722 return (0);
723 }
724
725 /*
726 * Pass packets to higher levels.
727 */
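/*
 * Layout of the receive buffer, as inferred from the parsing below: a
 * sequence of records, each a 2-byte big-endian length followed by that
 * many bytes of frame data (ending in the 4-byte CRC and, when the adapter
 * is in promiscuous mode, starting with a 4-byte device prefix); a zero
 * length terminates the list.
 */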
728 static int
729 se_read(struct se_softc *sc, char *data, int datalen)
730 {
731 struct mbuf *m;
732 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
733 int n;
734
735 n = 0;
736 while (datalen >= 2) {
737 int len = _2btol(data);
738 data += 2;
739 datalen -= 2;
740
741 if (len == 0)
742 break;
743 #ifdef SEDEBUG
744 if (sc->sc_debug) {
745 printf("se_read: datalen = %d, packetlen = %d, proto = 0x%04x\n", datalen, len,
746 ntohs(((struct ether_header *)data)->ether_type));
747 }
748 #endif
749 if (len <= sizeof(struct ether_header) ||
750 len > MAX_SNAP) {
751 #ifdef SEDEBUG
752 printf("%s: invalid packet size %d; dropping\n",
753 device_xname(sc->sc_dev), len);
754 #endif
755 if_statinc(ifp, if_ierrors);
756 goto next_packet;
757 }
758
759 /* Don't need crc. Must keep ether header for BPF */
760 m = se_get(sc, data, len - ETHER_CRC);
761 if (m == 0) {
762 #ifdef SEDEBUG
763 if (sc->sc_debug)
764 printf("se_read: se_get returned null\n");
765 #endif
766 if_statinc(ifp, if_ierrors);
767 goto next_packet;
768 }
769 if ((ifp->if_flags & IFF_PROMISC) != 0) {
770 m_adj(m, SE_PREFIX);
771 }
772
773 /* Pass the packet up. */
774 if_percpuq_enqueue(sc->sc_ipq, m);
775
776 next_packet:
777 data += len;
778 datalen -= len;
779 n++;
780 }
781 return (n);
782 }
783
784
785 static void
786 sewatchdog(struct ifnet *ifp)
787 {
788 struct se_softc *sc = ifp->if_softc;
789
790 log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
791 if_statinc(ifp, if_oerrors);
792
793 se_reset(sc);
794 }
795
796 static void
797 se_reset(struct se_softc *sc)
798 {
799 #if 0
800 /* Maybe we don't *really* want to reset the entire bus
801 * because the ctron isn't working. We would like to send a
802 * "BUS DEVICE RESET" message, but don't think the ctron
803 * understands it.
804 */
805 se_scsipi_cmd(sc->sc_periph, 0, 0, 0, 0, SERETRIES, 2000, NULL,
806 XS_CTL_RESET);
807 #endif
808 se_init(sc);
809 }
810
811 static int
812 se_add_proto(struct se_softc *sc, int proto)
813 {
814 int error;
815 struct scsi_ctron_ether_generic add_proto_cmd;
816 uint8_t data[2];
817 _lto2b(proto, data);
818 #ifdef SEDEBUG
819 if (sc->sc_debug)
820 printf("se: adding proto 0x%02x%02x\n", data[0], data[1]);
821 #endif
822
823 PROTOCMD(ctron_ether_add_proto, add_proto_cmd);
824 _lto2b(sizeof(data), add_proto_cmd.length);
825 error = se_scsipi_cmd(sc->sc_periph,
826 (void *)&add_proto_cmd, sizeof(add_proto_cmd),
827 data, sizeof(data), SERETRIES, SETIMEOUT, NULL,
828 XS_CTL_DATA_OUT);
829 return (error);
830 }
831
832 static int
833 se_get_addr(struct se_softc *sc, uint8_t *myaddr)
834 {
835 int error;
836 struct scsi_ctron_ether_generic get_addr_cmd;
837
838 PROTOCMD(ctron_ether_get_addr, get_addr_cmd);
839 _lto2b(ETHER_ADDR_LEN, get_addr_cmd.length);
840 error = se_scsipi_cmd(sc->sc_periph,
841 (void *)&get_addr_cmd, sizeof(get_addr_cmd),
842 myaddr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL,
843 XS_CTL_DATA_IN);
844 printf("%s: ethernet address %s\n", device_xname(sc->sc_dev),
845 ether_sprintf(myaddr));
846 return (error);
847 }
848
849
850 static int
851 se_set_media(struct se_softc *sc, int type)
852 {
853 int error;
854 struct scsi_ctron_ether_generic set_media_cmd;
855
856 PROTOCMD(ctron_ether_set_media, set_media_cmd);
857 set_media_cmd.byte3 = type;
858 error = se_scsipi_cmd(sc->sc_periph,
859 (void *)&set_media_cmd, sizeof(set_media_cmd),
860 0, 0, SERETRIES, SETIMEOUT, NULL, 0);
861 return (error);
862 }
863
864 static int
865 se_set_mode(struct se_softc *sc, int len, int mode)
866 {
867 int error;
868 struct scsi_ctron_ether_set_mode set_mode_cmd;
869
870 PROTOCMD(ctron_ether_set_mode, set_mode_cmd);
871 set_mode_cmd.mode = mode;
872 _lto2b(len, set_mode_cmd.length);
873 error = se_scsipi_cmd(sc->sc_periph,
874 (void *)&set_mode_cmd, sizeof(set_mode_cmd),
875 0, 0, SERETRIES, SETIMEOUT, NULL, 0);
876 return (error);
877 }
878
879
880 static int
881 se_init(struct se_softc *sc)
882 {
883 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
884 struct scsi_ctron_ether_generic set_addr_cmd;
885 uint8_t enaddr[ETHER_ADDR_LEN];
886 int error;
887
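/*
 * Editor's note: mode 1 with the larger MAX_SNAP length appears to put
 * the adapter into promiscuous operation, while mode 0 with a normal
 * frame length is the filtered default (inferred from usage here, not
 * from documentation).
 */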
888 if (ifp->if_flags & IFF_PROMISC) {
889 error = se_set_mode(sc, MAX_SNAP, 1);
890 } else {
891 error = se_set_mode(sc,
892 ETHERMTU + sizeof(struct ether_header), 0);
893 }
894 if (error != 0)
895 return (error);
896
897 PROTOCMD(ctron_ether_set_addr, set_addr_cmd);
898 _lto2b(ETHER_ADDR_LEN, set_addr_cmd.length);
899 memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
900 error = se_scsipi_cmd(sc->sc_periph,
901 (void *)&set_addr_cmd, sizeof(set_addr_cmd),
902 enaddr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL,
903 XS_CTL_DATA_OUT);
904 if (error != 0)
905 return (error);
906
907 if ((sc->protos & PROTO_IP) &&
908 (error = se_add_proto(sc, ETHERTYPE_IP)) != 0)
909 return (error);
910 if ((sc->protos & PROTO_ARP) &&
911 (error = se_add_proto(sc, ETHERTYPE_ARP)) != 0)
912 return (error);
913 if ((sc->protos & PROTO_REVARP) &&
914 (error = se_add_proto(sc, ETHERTYPE_REVARP)) != 0)
915 return (error);
916 #ifdef NETATALK
917 if ((sc->protos & PROTO_AT) &&
918 (error = se_add_proto(sc, ETHERTYPE_ATALK)) != 0)
919 return (error);
920 if ((sc->protos & PROTO_AARP) &&
921 (error = se_add_proto(sc, ETHERTYPE_AARP)) != 0)
922 return (error);
923 #endif
924
925 if ((ifp->if_flags & (IFF_RUNNING | IFF_UP)) == IFF_UP) {
926 ifp->if_flags |= IFF_RUNNING;
927 mutex_enter(&sc->sc_iflock);
928 sc->sc_recv_work_pending = true;
929 workqueue_enqueue(sc->sc_recv_wq, &sc->sc_recv_work, NULL);
930 mutex_exit(&sc->sc_iflock);
931 ifp->if_flags &= ~IFF_OACTIVE;
932 mutex_enter(&sc->sc_iflock);
933 workqueue_enqueue(sc->sc_send_wq, &sc->sc_send_work, NULL);
934 mutex_exit(&sc->sc_iflock);
935 }
936 return (error);
937 }
938
939 static int
940 se_set_multi(struct se_softc *sc, uint8_t *addr)
941 {
942 struct scsi_ctron_ether_generic set_multi_cmd;
943 int error;
944
945 if (sc->sc_debug)
946 printf("%s: set_set_multi: %s\n", device_xname(sc->sc_dev),
947 ether_sprintf(addr));
948
949 PROTOCMD(ctron_ether_set_multi, set_multi_cmd);
950 _lto2b(ETHER_ADDR_LEN, set_multi_cmd.length);
951 error = se_scsipi_cmd(sc->sc_periph,
952 (void *)&set_multi_cmd, sizeof(set_multi_cmd),
953 addr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL, XS_CTL_DATA_OUT);
954 return (error);
955 }
956
957 static int
958 se_remove_multi(struct se_softc *sc, uint8_t *addr)
959 {
960 struct scsi_ctron_ether_generic remove_multi_cmd;
961 int error;
962
963 if (sc->sc_debug)
964 printf("%s: se_remove_multi: %s\n", device_xname(sc->sc_dev),
965 ether_sprintf(addr));
966
967 PROTOCMD(ctron_ether_remove_multi, remove_multi_cmd);
968 _lto2b(ETHER_ADDR_LEN, remove_multi_cmd.length);
969 error = se_scsipi_cmd(sc->sc_periph,
970 (void *)&remove_multi_cmd, sizeof(remove_multi_cmd),
971 addr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL, XS_CTL_DATA_OUT);
972 return (error);
973 }
974
975 #if 0 /* not used --thorpej */
976 static int
977 sc_set_all_multi(struct se_softc *sc, int set)
978 {
979 int error = 0;
980 uint8_t *addr;
981 struct ethercom *ec = &sc->sc_ethercom;
982 struct ether_multi *enm;
983 struct ether_multistep step;
984
985 ETHER_LOCK(ec);
986 ETHER_FIRST_MULTI(step, ec, enm);
987 while (enm != NULL) {
988 if (ETHER_CMP(enm->enm_addrlo, enm->enm_addrhi)) {
989 /*
990 * We must listen to a range of multicast addresses.
991 * For now, just accept all multicasts, rather than
992 * trying to set only those filter bits needed to match
993 * the range. (At this time, the only use of address
994 * ranges is for IP multicast routing, for which the
995 * range is big enough to require all bits set.)
996 */
997 /* We have no way of adding a range to this device.
998 * Stepping through all addresses in the range is
999 * typically not possible. The only real alternative
1000 * is to go into promiscuous mode and filter by hand.
1001 */
1002 ETHER_UNLOCK(ec);
1003 return (ENODEV);
1004
1005 }
1006
1007 addr = enm->enm_addrlo;
1008 if ((error = set ? se_set_multi(sc, addr) :
1009 se_remove_multi(sc, addr)) != 0)
1010 return (error);
1011 ETHER_NEXT_MULTI(step, enm);
1012 }
1013 ETHER_UNLOCK(ec);
1014
1015 return (error);
1016 }
1017 #endif /* not used */
1018
1019 static void
1020 se_stop(struct se_softc *sc)
1021 {
1022
1023 /* Don't schedule any reads */
1024 callout_stop(&sc->sc_recv_ch);
1025
1026 /* Wait for the workqueues to finish */
1027 mutex_enter(&sc->sc_iflock);
1028 workqueue_wait(sc->sc_recv_wq, &sc->sc_recv_work);
1029 workqueue_wait(sc->sc_send_wq, &sc->sc_send_work);
1030 mutex_exit(&sc->sc_iflock);
1031
1032 /* Abort any scsi cmds in progress */
1033 mutex_enter(chan_mtx(sc->sc_periph->periph_channel));
1034 scsipi_kill_pending(sc->sc_periph);
1035 mutex_exit(chan_mtx(sc->sc_periph->periph_channel));
1036 }
1037
1038
1039 /*
1040 * Process an ioctl request.
1041 */
1042 static int
1043 se_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1044 {
1045 struct se_softc *sc = ifp->if_softc;
1046 struct ifaddr *ifa = (struct ifaddr *)data;
1047 struct ifreq *ifr = (struct ifreq *)data;
1048 struct sockaddr *sa;
1049 int error = 0;
1050
1051
1052 switch (cmd) {
1053
1054 case SIOCINITIFADDR:
1055 mutex_enter(&sc->sc_iflock);
1056 if ((error = se_enable(sc)) == 0)
1057 ifp->if_flags |= IFF_UP;
1058 mutex_exit(&sc->sc_iflock);
1059 if (error != 0) break;
1060
1061 if ((error = se_set_media(sc, CMEDIA_AUTOSENSE)) != 0)
1062 break;
1063
1064 switch (ifa->ifa_addr->sa_family) {
1065 #ifdef INET
1066 case AF_INET:
1067 sc->protos |= (PROTO_IP | PROTO_ARP | PROTO_REVARP);
1068 if ((error = se_init(sc)) != 0)
1069 break;
1070 arp_ifinit(ifp, ifa);
1071 break;
1072 #endif
1073 #ifdef NETATALK
1074 case AF_APPLETALK:
1075 sc->protos |= (PROTO_AT | PROTO_AARP);
1076 if ((error = se_init(sc)) != 0)
1077 break;
1078 break;
1079 #endif
1080 default:
1081 error = se_init(sc);
1082 break;
1083 }
1084 break;
1085
1086
1087 case SIOCSIFFLAGS:
1088 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1089 break;
1090 /* XXX re-use ether_ioctl() */
1091 switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
1092 case IFF_RUNNING:
1093 /*
1094 * If interface is marked down and it is running, then
1095 * stop it.
1096 */
1097 se_stop(sc);
1098 mutex_enter(&sc->sc_iflock);
1099 ifp->if_flags &= ~IFF_RUNNING;
1100 se_disable(sc);
1101 mutex_exit(&sc->sc_iflock);
1102 break;
1103 case IFF_UP:
1104 /*
1105 * If interface is marked up and it is stopped, then
1106 * start it.
1107 */
1108 mutex_enter(&sc->sc_iflock);
1109 error = se_enable(sc);
1110 mutex_exit(&sc->sc_iflock);
1111 if (error)
1112 break;
1113 error = se_init(sc);
1114 break;
1115 default:
1116 /*
1117 * Reset the interface to pick up changes in any other
1118 * flags that affect hardware registers.
1119 */
1120 if (sc->sc_enabled)
1121 error = se_init(sc);
1122 break;
1123 }
1124 #ifdef SEDEBUG
1125 if (ifp->if_flags & IFF_DEBUG)
1126 sc->sc_debug = 1;
1127 else
1128 sc->sc_debug = 0;
1129 #endif
1130 break;
1131
1132 case SIOCADDMULTI:
1133 case SIOCDELMULTI:
1134 mutex_enter(&sc->sc_iflock);
1135 sa = sockaddr_dup(ifreq_getaddr(cmd, ifr), M_WAITOK);
1136 mutex_exit(&sc->sc_iflock);
1137 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
1138 if (ifp->if_flags & IFF_RUNNING) {
1139 error = (cmd == SIOCADDMULTI) ?
1140 se_set_multi(sc, sa->sa_data) :
1141 se_remove_multi(sc, sa->sa_data);
1142 } else
1143 error = 0;
1144 }
1145 mutex_enter(&sc->sc_iflock);
1146 sockaddr_free(sa);
1147 mutex_exit(&sc->sc_iflock);
1148 break;
1149
1150 default:
1151
1152 error = ether_ioctl(ifp, cmd, data);
1153 break;
1154 }
1155
1156 return (error);
1157 }
1158
1159 /*
1160 * Enable the network interface.
1161 */
1162 int
1163 se_enable(struct se_softc *sc)
1164 {
1165 struct scsipi_periph *periph = sc->sc_periph;
1166 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
1167 int error = 0;
1168
1169 if (sc->sc_enabled == 0) {
1170 if ((error = scsipi_adapter_addref(adapt)) == 0)
1171 sc->sc_enabled = 1;
1172 else
1173 aprint_error_dev(sc->sc_dev, "device enable failed\n");
1174 }
1175 return (error);
1176 }
1177
1178 /*
1179 * Disable the network interface.
1180 */
1181 void
1182 se_disable(struct se_softc *sc)
1183 {
1184 struct scsipi_periph *periph = sc->sc_periph;
1185 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
1186
1187 if (sc->sc_enabled != 0) {
1188 scsipi_adapter_delref(adapt);
1189 sc->sc_enabled = 0;
1190 }
1191 }
1192
1193 #define SEUNIT(z) (minor(z))
1194 /*
1195 * open the device.
1196 */
1197 int
1198 seopen(dev_t dev, int flag, int fmt, struct lwp *l)
1199 {
1200 int unit, error;
1201 struct se_softc *sc;
1202 struct scsipi_periph *periph;
1203 struct scsipi_adapter *adapt;
1204
1205 unit = SEUNIT(dev);
1206 sc = device_lookup_private(&se_cd, unit);
1207 if (sc == NULL)
1208 return (ENXIO);
1209
1210 periph = sc->sc_periph;
1211 adapt = periph->periph_channel->chan_adapter;
1212
1213 if ((error = scsipi_adapter_addref(adapt)) != 0)
1214 return (error);
1215
1216 SC_DEBUG(periph, SCSIPI_DB1,
1217 ("scopen: dev=0x%"PRIx64" (unit %d (of %d))\n", dev, unit,
1218 se_cd.cd_ndevs));
1219
1220 periph->periph_flags |= PERIPH_OPEN;
1221
1222 SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
1223 return (0);
1224 }
1225
1226 /*
1227 * close the device. Only called if we are the LAST
1228 * occurrence of an open device.
1229 */
1230 int
1231 seclose(dev_t dev, int flag, int fmt, struct lwp *l)
1232 {
1233 struct se_softc *sc = device_lookup_private(&se_cd, SEUNIT(dev));
1234 struct scsipi_periph *periph = sc->sc_periph;
1235 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
1236
1237 SC_DEBUG(sc->sc_periph, SCSIPI_DB1, ("closing\n"));
1238
1239 scsipi_wait_drain(periph);
1240
1241 scsipi_adapter_delref(adapt);
1242 periph->periph_flags &= ~PERIPH_OPEN;
1243
1244 return (0);
1245 }
1246
1247 /*
1248 * Perform special action on behalf of the user.
1249 * Only does generic scsi ioctls; see the example sketch below.
1250 */
1251 int
1252 seioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1253 {
1254 struct se_softc *sc = device_lookup_private(&se_cd, SEUNIT(dev));
1255
1256 return (scsipi_do_ioctl(sc->sc_periph, dev, cmd, addr, flag, l));
1257 }
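/*
 * Editor's sketch (not part of the driver): a minimal userland program
 * illustrating the "special userland program" use mentioned at the top of
 * this file. It opens the se character device and issues a generic SCSI
 * command via SCIOCCOMMAND, which seioctl() hands to scsipi_do_ioctl().
 * The device node name /dev/se0 and the choice of INQUIRY (about the only
 * standard command the EA41x supports) are illustrative assumptions; on an
 * EA41x the output should show the CABLETRN/EA412 strings matched by
 * se_patterns above.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/scsiio.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	scsireq_t req;
	unsigned char inqbuf[96];
	int fd;

	if ((fd = open("/dev/se0", O_RDWR)) == -1)
		err(1, "open /dev/se0");

	memset(&req, 0, sizeof(req));
	req.cmd[0] = 0x12;		/* INQUIRY */
	req.cmd[4] = sizeof(inqbuf);	/* allocation length */
	req.cmdlen = 6;
	req.databuf = (caddr_t)inqbuf;
	req.datalen = sizeof(inqbuf);
	req.senselen = SENSEBUFLEN;
	req.timeout = 10000;		/* milliseconds */
	req.flags = SCCMD_READ;

	if (ioctl(fd, SCIOCCOMMAND, &req) == -1)
		err(1, "SCIOCCOMMAND");
	if (req.retsts != SCCMD_OK)
		errx(1, "command failed, retsts %d", req.retsts);

	/* Vendor and product identification live at bytes 8..31. */
	printf("inquiry: %.24s\n", &inqbuf[8]);
	close(fd);
	return 0;
}
#endif	/* editor's example */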
1258