if_se.c revision 1.112 1 /* $NetBSD: if_se.c,v 1.112 2020/09/29 02:58:52 msaitoh Exp $ */
2
3 /*
4 * Copyright (c) 1997 Ian W. Dall <ian.dall (at) dsto.defence.gov.au>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Ian W. Dall.
18 * 4. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Driver for Cabletron EA41x scsi ethernet adaptor.
35 *
36 * Written by Ian Dall <ian.dall (at) dsto.defence.gov.au> Feb 3, 1997
37 *
38 * Acknowledgement: Thanks are due to Philip L. Budne <budd (at) cs.bu.edu>
39 * who reverse engineered the EA41x. In developing this code,
40 * Phil's userland daemon "etherd", was refered to extensively in lieu
41 * of accurate documentation for the device.
42 *
43 * This is a weird device! It doesn't conform to the scsi spec in much
44 * at all. About the only standard command supported is inquiry. Most
45 * commands are 6 bytes long, but the recv data is only 1 byte. Data
46 * must be received by periodically polling the device with the recv
47 * command.
48 *
49 * This driver is also a bit unusual. It must look like a network
50 * interface and it must also appear to be a scsi device to the scsi
51 * system. Hence there are cases where there are two entry points. eg
52 * sedone is to be called from the scsi subsytem and se_ifstart from
53 * the network interface subsystem. In addition, to facilitate scsi
54 * commands issued by userland programs, there are open, close and
55 * ioctl entry points. This allows a user program to, for example,
56 * display the ea41x stats and download new code into the adaptor ---
57 * functions which can't be performed through the ifconfig interface.
58 * Normal operation does not require any special userland program.
59 */
60
61 #include <sys/cdefs.h>
62 __KERNEL_RCSID(0, "$NetBSD: if_se.c,v 1.112 2020/09/29 02:58:52 msaitoh Exp $");
63
64 #ifdef _KERNEL_OPT
65 #include "opt_inet.h"
66 #include "opt_net_mpsafe.h"
67 #include "opt_atalk.h"
68 #endif
69
70 #include <sys/param.h>
71 #include <sys/types.h>
72
73 #include <sys/buf.h>
74 #include <sys/callout.h>
75 #include <sys/conf.h>
76 #include <sys/device.h>
77 #include <sys/disk.h>
78 #include <sys/disklabel.h>
79 #include <sys/errno.h>
80 #include <sys/file.h>
81 #include <sys/ioctl.h>
82 #include <sys/kernel.h>
83 #include <sys/malloc.h>
84 #include <sys/mbuf.h>
85 #include <sys/mutex.h>
86 #include <sys/proc.h>
87 #include <sys/socket.h>
88 #include <sys/stat.h>
89 #include <sys/syslog.h>
90 #include <sys/systm.h>
91 #include <sys/uio.h>
92 #include <sys/workqueue.h>
93
94 #include <dev/scsipi/scsi_ctron_ether.h>
95 #include <dev/scsipi/scsiconf.h>
96 #include <dev/scsipi/scsipi_all.h>
97
98 #include <net/bpf.h>
99 #include <net/if.h>
100 #include <net/if_dl.h>
101 #include <net/if_ether.h>
102 #include <net/if_media.h>
103
104 #ifdef INET
105 #include <netinet/if_inarp.h>
106 #include <netinet/in.h>
107 #endif
108
109 #ifdef NETATALK
110 #include <netatalk/at.h>
111 #endif
112
113 #define SETIMEOUT 1000
114 #define SEOUTSTANDING 4
115 #define SERETRIES 4
116 #define SE_PREFIX 4
117 #define ETHER_CRC 4
118 #define SEMINSIZE 60
119
120 /* Make this big enough for an ETHERMTU packet in promiscuous mode. */
121 #define MAX_SNAP (ETHERMTU + sizeof(struct ether_header) + \
122 SE_PREFIX + ETHER_CRC)
123
124 /* 10 full length packets appears to be the max ever returned. 16k is OK */
125 #define RBUF_LEN (16 * 1024)
126
127 /* Tuning parameters:
128 * The EA41x only returns a maximum of 10 packets (regardless of size).
129 * We will attempt to adapt to polling fast enough to get RDATA_GOAL packets
130 * per read
131 */
132 #define RDATA_MAX 10
133 #define RDATA_GOAL 8
134
135 /* se_poll and se_poll0 are the normal polling rate and the minimum
136 * polling rate respectively. se_poll0 should be chosen so that at
137 * maximum ethernet speed, we will read nearly RDATA_MAX packets. se_poll
138 * should be chosen for reasonable maximum latency.
139 * In practice, if we are being saturated with min length packets, we
140 * can't poll fast enough. Polling with zero delay actually
141 * worsens performance. se_poll0 is enforced to be always at least 1
142 */
143 #define SE_POLL 40 /* default in milliseconds */
144 #define SE_POLL0 10 /* default in milliseconds */
145 int se_poll = 0; /* Delay in ticks set at attach time */
146 int se_poll0 = 0;
147 #ifdef SE_DEBUG
148 int se_max_received = 0; /* Instrumentation */
149 #endif
150
151 #define PROTOCMD(p, d) \
152 ((d) = (p))
153
154 #define PROTOCMD_DECL(name) \
155 static const struct scsi_ctron_ether_generic name
156
157 #define PROTOCMD_DECL_SPECIAL(name) \
158 static const struct __CONCAT(scsi_, name) name
159
160 /* Command initializers for commands using scsi_ctron_ether_generic */
161 PROTOCMD_DECL(ctron_ether_send) = {CTRON_ETHER_SEND, 0, {0,0}, 0};
162 PROTOCMD_DECL(ctron_ether_add_proto) = {CTRON_ETHER_ADD_PROTO, 0, {0,0}, 0};
163 PROTOCMD_DECL(ctron_ether_get_addr) = {CTRON_ETHER_GET_ADDR, 0, {0,0}, 0};
164 PROTOCMD_DECL(ctron_ether_set_media) = {CTRON_ETHER_SET_MEDIA, 0, {0,0}, 0};
165 PROTOCMD_DECL(ctron_ether_set_addr) = {CTRON_ETHER_SET_ADDR, 0, {0,0}, 0};
166 PROTOCMD_DECL(ctron_ether_set_multi) = {CTRON_ETHER_SET_MULTI, 0, {0,0}, 0};
167 PROTOCMD_DECL(ctron_ether_remove_multi) =
168 {CTRON_ETHER_REMOVE_MULTI, 0, {0,0}, 0};
169
170 /* Command initializers for commands using their own structures */
171 PROTOCMD_DECL_SPECIAL(ctron_ether_recv) = {CTRON_ETHER_RECV};
172 PROTOCMD_DECL_SPECIAL(ctron_ether_set_mode) =
173 {CTRON_ETHER_SET_MODE, 0, {0,0}, 0};
174
/*
 * Per-device software state.  Allocated by autoconf; one per attached
 * EA41x.  The driver is both a network interface (sc_ethercom) and a
 * SCSI periph client (sc_periph).
 */
struct se_softc {
	device_t sc_dev;
	struct ethercom sc_ethercom;	/* Ethernet common part */
	struct scsipi_periph *sc_periph;/* contains our targ, lun, etc. */

	struct callout sc_recv_ch;	/* schedules periodic receive polls */
	struct kmutex sc_iflock;	/* guards the *_work_pending flags */
	struct if_percpuq *sc_ipq;	/* per-cpu input queue to the stack */
	struct workqueue *sc_recv_wq, *sc_send_wq;	/* rx / tx workers */
	struct work sc_recv_work, sc_send_work;
	int sc_recv_work_pending, sc_send_work_pending;	/* work enqueued? */

	char *sc_tbuf;			/* linear transmit staging buffer */
	char *sc_rbuf;			/* receive buffer, RBUF_LEN bytes */
	int protos;			/* bitmask of protocols enabled below */
#define PROTO_IP 0x01
#define PROTO_ARP 0x02
#define PROTO_REVARP 0x04
#define PROTO_AT 0x08
#define PROTO_AARP 0x10
	int sc_debug;			/* nonzero enables debug printfs */
	int sc_flags;
	int sc_last_timeout;		/* previous adaptive poll interval */
	int sc_enabled;			/* nonzero while adapter ref is held */
	int sc_attach_state;		/* progress of seattach(), for detach */
};
201
202 static int sematch(device_t, cfdata_t, void *);
203 static void seattach(device_t, device_t, void *);
204 static int sedetach(device_t, int);
205
206 static void se_ifstart(struct ifnet *);
207
208 static void sedone(struct scsipi_xfer *, int);
209 static int se_ioctl(struct ifnet *, u_long, void *);
210 static void sewatchdog(struct ifnet *);
211
212 #if 0
213 static inline uint16_t ether_cmp(void *, void *);
214 #endif
215 static void se_recv_callout(void *);
216 static void se_recv_worker(struct work *wk, void *cookie);
217 static void se_recv(struct se_softc *);
218 static struct mbuf *se_get(struct se_softc *, char *, int);
219 static int se_read(struct se_softc *, char *, int);
220 static void se_reset(struct se_softc *);
221 static int se_add_proto(struct se_softc *, int);
222 static int se_get_addr(struct se_softc *, uint8_t *);
223 static int se_set_media(struct se_softc *, int);
224 static int se_init(struct se_softc *);
225 static int se_set_multi(struct se_softc *, uint8_t *);
226 static int se_remove_multi(struct se_softc *, uint8_t *);
227 #if 0
228 static int sc_set_all_multi(struct se_softc *, int);
229 #endif
230 static void se_stop(struct se_softc *);
231 static inline int se_scsipi_cmd(struct scsipi_periph *periph,
232 struct scsipi_generic *scsipi_cmd,
233 int cmdlen, u_char *data_addr, int datalen,
234 int retries, int timeout, struct buf *bp,
235 int flags);
236 static void se_send_worker(struct work *wk, void *cookie);
237 static int se_set_mode(struct se_softc *, int, int);
238
239 int se_enable(struct se_softc *);
240 void se_disable(struct se_softc *);
241
242 CFATTACH_DECL_NEW(se, sizeof(struct se_softc),
243 sematch, seattach, sedetach, NULL);
244
245 extern struct cfdriver se_cd;
246
247 dev_type_open(seopen);
248 dev_type_close(seclose);
249 dev_type_ioctl(seioctl);
250
251 const struct cdevsw se_cdevsw = {
252 .d_open = seopen,
253 .d_close = seclose,
254 .d_read = noread,
255 .d_write = nowrite,
256 .d_ioctl = seioctl,
257 .d_stop = nostop,
258 .d_tty = notty,
259 .d_poll = nopoll,
260 .d_mmap = nommap,
261 .d_kqfilter = nokqfilter,
262 .d_discard = nodiscard,
263 .d_flag = D_OTHER | D_MPSAFE
264 };
265
266 const struct scsipi_periphsw se_switch = {
267 NULL, /* Use default error handler */
268 NULL, /* have no queue */
269 NULL, /* have no async handler */
270 sedone, /* deal with send/recv completion */
271 };
272
273 const struct scsipi_inquiry_pattern se_patterns[] = {
274 {T_PROCESSOR, T_FIXED,
275 "CABLETRN", "EA412", ""},
276 {T_PROCESSOR, T_FIXED,
277 "Cabletrn", "EA412", ""},
278 };
279
280 #if 0
281 /*
282 * Compare two Ether/802 addresses for equality, inlined and
283 * unrolled for speed.
284 * Note: use this like memcmp()
285 */
static inline uint16_t
ether_cmp(void *one, void *two)
{
	uint16_t *a = one;
	uint16_t *b = two;

	/*
	 * OR together the per-halfword differences: the result is zero
	 * iff all six bytes match, mirroring memcmp()'s equality sense.
	 */
	return (a[0] - b[0]) | (a[1] - b[1]) | (a[2] - b[2]);
}
297
298 #define ETHER_CMP ether_cmp
299 #endif
300
301 static int
302 sematch(device_t parent, cfdata_t match, void *aux)
303 {
304 struct scsipibus_attach_args *sa = aux;
305 int priority;
306
307 (void)scsipi_inqmatch(&sa->sa_inqbuf,
308 se_patterns, sizeof(se_patterns) / sizeof(se_patterns[0]),
309 sizeof(se_patterns[0]), &priority);
310 return (priority);
311 }
312
313 /*
314 * The routine called by the low level scsi routine when it discovers
315 * a device suitable for this driver.
316 */
/*
 * The routine called by the low level scsi routine when it discovers
 * a device suitable for this driver.
 *
 * Attach proceeds in ordered stages recorded in sc_attach_state so that
 * sedetach() can unwind exactly as far as we got on an error path.
 */
static void
seattach(device_t parent, device_t self, void *aux)
{
	struct se_softc *sc = device_private(self);
	struct scsipibus_attach_args *sa = aux;
	struct scsipi_periph *periph = sa->sa_periph;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint8_t myaddr[ETHER_ADDR_LEN];
	char wqname[MAXCOMLEN];
	int rv;

	sc->sc_dev = self;

	printf("\n");
	SC_DEBUG(periph, SCSIPI_DB2, ("seattach: "));

	/* State 0: locks and callout exist, nothing else. */
	sc->sc_attach_state = 0;
	callout_init(&sc->sc_recv_ch, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_recv_ch, se_recv_callout, (void *)sc);
	mutex_init(&sc->sc_iflock, MUTEX_DEFAULT, IPL_SOFTNET);

	/*
	 * Store information needed to contact our base driver
	 */
	sc->sc_periph = periph;
	periph->periph_dev = sc->sc_dev;
	periph->periph_switch = &se_switch;

	/* Convert the poll intervals from ms to ticks; never allow 0. */
	se_poll = (SE_POLL * hz) / 1000;
	se_poll = se_poll? se_poll: 1;
	se_poll0 = (SE_POLL0 * hz) / 1000;
	se_poll0 = se_poll0? se_poll0: 1;

	/*
	 * Initialize and attach send and receive buffers
	 */
	sc->sc_tbuf = malloc(ETHERMTU + sizeof(struct ether_header),
	    M_DEVBUF, M_WAITOK);
	sc->sc_rbuf = malloc(RBUF_LEN, M_DEVBUF, M_WAITOK);

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_start = se_ifstart;
	ifp->if_ioctl = se_ioctl;
	ifp->if_watchdog = sewatchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_MPSAFE;
	IFQ_SET_READY(&ifp->if_snd);

	/* Ask the adaptor for its MAC address before registering. */
	se_get_addr(sc, myaddr);
	/* State 1: buffers allocated. */
	sc->sc_attach_state = 1;

	/* Attach the interface. */
	rv = if_initialize(ifp);
	if (rv != 0) {
		sedetach(sc->sc_dev, 0);
		return; /* Error */
	}

	snprintf(wqname, sizeof(wqname), "%sRx", device_xname(sc->sc_dev));
	rv = workqueue_create(&sc->sc_recv_wq, wqname, se_recv_worker, sc,
	    PRI_SOFTNET, IPL_NET, WQ_MPSAFE);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create recv Rx workqueue\n");
		sedetach(sc->sc_dev, 0);
		return; /* Error */
	}
	sc->sc_recv_work_pending = false;
	/* State 2: receive workqueue created. */
	sc->sc_attach_state = 2;

	snprintf(wqname, sizeof(wqname), "%sTx", device_xname(sc->sc_dev));
	rv = workqueue_create(&sc->sc_send_wq, wqname, se_send_worker, ifp,
	    PRI_SOFTNET, IPL_NET, WQ_MPSAFE);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create send Tx workqueue\n");
		sedetach(sc->sc_dev, 0);
		return; /* Error */
	}
	sc->sc_send_work_pending = false;
	/* State 3: send workqueue created. */
	sc->sc_attach_state = 3;

	/* State 4: interface fully registered with the network stack. */
	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
	ether_ifattach(ifp, myaddr);
	if_register(ifp);
	sc->sc_attach_state = 4;
}
406
407 static int
408 sedetach(device_t self, int flags)
409 {
410 struct se_softc *sc = device_private(self);
411 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
412
413 switch(sc->sc_attach_state) {
414 case 4:
415 se_stop(sc);
416 mutex_enter(&sc->sc_iflock);
417 ifp->if_flags &= ~IFF_RUNNING;
418 se_disable(sc);
419 ether_ifdetach(ifp);
420 if_detach(ifp);
421 mutex_exit(&sc->sc_iflock);
422 if_percpuq_destroy(sc->sc_ipq);
423 /*FALLTHROUGH*/
424 case 3:
425 workqueue_destroy(sc->sc_send_wq);
426 /*FALLTHROUGH*/
427 case 2:
428 workqueue_destroy(sc->sc_recv_wq);
429 /*FALLTHROUGH*/
430 case 1:
431 free(sc->sc_rbuf, M_DEVBUF);
432 free(sc->sc_tbuf, M_DEVBUF);
433 callout_destroy(&sc->sc_recv_ch);
434 mutex_destroy(&sc->sc_iflock);
435 break;
436 default:
437 aprint_error_dev(sc->sc_dev, "detach failed (state %d)\n",
438 sc->sc_attach_state);
439 return 1;
440 break;
441 }
442 return 0;
443 }
444
445 /*
446 * Send a command to the device
447 */
448 static inline int
449 se_scsipi_cmd(struct scsipi_periph *periph, struct scsipi_generic *cmd,
450 int cmdlen, u_char *data_addr, int datalen, int retries, int timeout,
451 struct buf *bp, int flags)
452 {
453 int error;
454
455 error = scsipi_command(periph, cmd, cmdlen, data_addr,
456 datalen, retries, timeout, bp, flags);
457 return (error);
458 }
459
460 /*
461 * Start routine for calling from network sub system
462 */
463 static void
464 se_ifstart(struct ifnet *ifp)
465 {
466 struct se_softc *sc = ifp->if_softc;
467
468 mutex_enter(&sc->sc_iflock);
469 if (!sc->sc_send_work_pending) {
470 sc->sc_send_work_pending = true;
471 workqueue_enqueue(sc->sc_send_wq, &sc->sc_send_work, NULL);
472 }
473 /* else: nothing to do - work is already queued */
474 mutex_exit(&sc->sc_iflock);
475 }
476
477 /*
478 * Invoke the transmit workqueue and transmission on the interface.
479 */
/*
 * Invoke the transmit workqueue and transmission on the interface.
 *
 * Drains ifp->if_snd: each packet is linearized into sc_tbuf, padded
 * to the device minimum, and handed to the adaptor with a SEND command.
 * IFF_OACTIVE is cleared by sedone() when the SEND completes.
 */
static void
se_send_worker(struct work *wk, void *cookie)
{
	struct ifnet *ifp = cookie;
	struct se_softc *sc = ifp->if_softc;
	struct scsi_ctron_ether_generic send_cmd;
	struct mbuf *m, *m0;
	int len, error;
	u_char *cp;

	/* Consume our pending flag so se_ifstart() can requeue us. */
	mutex_enter(&sc->sc_iflock);
	sc->sc_send_work_pending = false;
	mutex_exit(&sc->sc_iflock);

	KASSERT(if_is_mpsafe(ifp));

	/* Don't transmit if interface is busy or not running */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	while (1) {
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == 0)
			break;

		/* If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);

		/* We need to use m->m_pkthdr.len, so require the header */
		if ((m0->m_flags & M_PKTHDR) == 0)
			panic("ctscstart: no header mbuf");
		len = m0->m_pkthdr.len;

		/* Mark the interface busy. */
		ifp->if_flags |= IFF_OACTIVE;

		/* Chain; copy into linear buffer allocated at attach time. */
		cp = sc->sc_tbuf;
		for (m = m0; m != NULL; ) {
			memcpy(cp, mtod(m, u_char *), m->m_len);
			cp += m->m_len;
			/* m_free() returns the next mbuf in the chain */
			m = m0 = m_free(m);
		}
		/* Zero-pad short frames up to the device minimum. */
		if (len < SEMINSIZE) {
#ifdef SEDEBUG
			if (sc->sc_debug)
				printf("se: packet size %d (%zu) < %d\n", len,
				    cp - (u_char *)sc->sc_tbuf, SEMINSIZE);
#endif
			memset(cp, 0, SEMINSIZE - len);
			len = SEMINSIZE;
		}

		/* Fill out SCSI command. */
		PROTOCMD(ctron_ether_send, send_cmd);
		_lto2b(len, send_cmd.length);

		/* Send command to device. */
		error = se_scsipi_cmd(sc->sc_periph,
		    (void *)&send_cmd, sizeof(send_cmd),
		    sc->sc_tbuf, len, SERETRIES,
		    SETIMEOUT, NULL, XS_CTL_NOSLEEP | XS_CTL_DATA_OUT);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "not queued, error %d\n", error);
			if_statinc(ifp, if_oerrors);
			/* SEND never started; un-busy the interface. */
			ifp->if_flags &= ~IFF_OACTIVE;
		} else
			if_statinc(ifp, if_opackets);
	}
}
553
554
555 /*
556 * Called from the scsibus layer via our scsi device switch.
557 */
/*
 * Called from the scsibus layer via our scsi device switch.
 *
 * SEND completion just clears IFF_OACTIVE.  RECV completion pushes the
 * received data up via se_read() and then reschedules the next poll,
 * adapting the interval so that each poll yields about RDATA_GOAL
 * packets (clamped to [se_poll0, se_poll]).
 */
static void
sedone(struct scsipi_xfer *xs, int error)
{
	struct se_softc *sc = device_private(xs->xs_periph->periph_dev);
	struct scsipi_generic *cmd = xs->cmd;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (IS_SEND(cmd)) {
		ifp->if_flags &= ~IFF_OACTIVE;
	} else if (IS_RECV(cmd)) {
		/* RECV complete */
		/* pass data up. reschedule a recv */
		/* scsipi_free_xs will call start. Harmless. */
		if (error) {
			/* Reschedule after a delay */
			callout_schedule(&sc->sc_recv_ch, se_poll);
		} else {
			int n, ntimeo;
			n = se_read(sc, xs->data, xs->datalen - xs->resid);
#ifdef SE_DEBUG
			if (n > se_max_received)
				se_max_received = n;
#endif
			/*
			 * Adapt the poll interval: nothing received means
			 * back off to the slow rate; a full batch means
			 * poll at the fast rate; otherwise scale the last
			 * interval toward RDATA_GOAL packets per poll.
			 */
			if (n == 0)
				ntimeo = se_poll;
			else if (n >= RDATA_MAX)
				ntimeo = se_poll0;
			else {
				ntimeo = sc->sc_last_timeout;
				ntimeo = (ntimeo * RDATA_GOAL)/n;
				ntimeo = (ntimeo < se_poll0?
				    se_poll0: ntimeo);
				ntimeo = (ntimeo > se_poll?
				    se_poll: ntimeo);
			}
			sc->sc_last_timeout = ntimeo;
			callout_schedule(&sc->sc_recv_ch, ntimeo);
		}
	}
}
598
599 /*
600 * Setup a receive command by queuing the work.
601 * Usually called from a callout, but also from se_init().
602 */
603 static void
604 se_recv_callout(void *v)
605 {
606 /* do a recv command */
607 struct se_softc *sc = (struct se_softc *) v;
608
609 if (sc->sc_enabled == 0)
610 return;
611
612 mutex_enter(&sc->sc_iflock);
613 if (sc->sc_recv_work_pending == true) {
614 callout_schedule(&sc->sc_recv_ch, se_poll);
615 mutex_exit(&sc->sc_iflock);
616 return;
617 }
618
619 sc->sc_recv_work_pending = true;
620 workqueue_enqueue(sc->sc_recv_wq, &sc->sc_recv_work, NULL);
621 mutex_exit(&sc->sc_iflock);
622 }
623
624 /*
625 * Invoke the receive workqueue
626 */
627 static void
628 se_recv_worker(struct work *wk, void *cookie)
629 {
630 struct se_softc *sc = (struct se_softc *) cookie;
631
632 mutex_enter(&sc->sc_iflock);
633 sc->sc_recv_work_pending = false;
634 mutex_exit(&sc->sc_iflock);
635 se_recv(sc);
636
637 }
638
639 /*
640 * Do the actual work of receiving data.
641 */
642 static void
643 se_recv(struct se_softc *sc)
644 {
645 struct scsi_ctron_ether_recv recv_cmd;
646 int error;
647
648 /* do a recv command */
649 PROTOCMD(ctron_ether_recv, recv_cmd);
650
651 error = se_scsipi_cmd(sc->sc_periph,
652 (void *)&recv_cmd, sizeof(recv_cmd),
653 sc->sc_rbuf, RBUF_LEN, SERETRIES, SETIMEOUT, NULL,
654 XS_CTL_NOSLEEP | XS_CTL_DATA_IN);
655 if (error)
656 callout_schedule(&sc->sc_recv_ch, se_poll);
657 }
658
659 /*
660 * We copy the data into mbufs. When full cluster sized units are present
661 * we copy into clusters.
662 */
/*
 * We copy the data into mbufs. When full cluster sized units are present
 * we copy into clusters.
 *
 * Builds an mbuf chain of totlen bytes copied from `data'.  Returns the
 * head of the chain, or NULL on allocation failure (the partial chain
 * is freed).  The first mbuf's data pointer is adjusted so the payload
 * after the ether header is aligned.
 */
static struct mbuf *
se_get(struct se_softc *sc, char *data, int totlen)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	int len;

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == 0)
		return (0);
	m_set_rcvif(m0, ifp);
	m0->m_pkthdr.len = totlen;
	len = MHLEN;
	m = m0;

	while (totlen > 0) {
		/* Use a cluster when enough data remains to justify one. */
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0)
				goto bad;
			len = MCLBYTES;
		}

		if (m == m0) {
			/*
			 * Shift the start of the first mbuf so the data
			 * following the ether header lands ALIGN()ed.
			 */
			char *newdata = (char *)
			    ALIGN(m->m_data + sizeof(struct ether_header)) -
			    sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = uimin(totlen, len);
		memcpy(mtod(m, void *), data, len);
		data += len;

		totlen -= len;
		if (totlen > 0) {
			/* More data: link a fresh (non-header) mbuf. */
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == 0)
				goto bad;
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return (m0);

bad:
	m_freem(m0);
	return (0);
}
714
715 /*
716 * Pass packets to higher levels.
717 */
/*
 * Pass packets to higher levels.
 *
 * The receive buffer holds a sequence of frames, each preceded by a
 * 2-byte big-endian length; a zero length terminates the sequence.
 * Returns the number of frames processed (including dropped ones),
 * which sedone() uses to adapt the poll interval.
 */
static int
se_read(struct se_softc *sc, char *data, int datalen)
{
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int n;

	n = 0;
	while (datalen >= 2) {
		/* 2-byte frame length prefix, then the frame itself. */
		int len = _2btol(data);
		data += 2;
		datalen -= 2;

		if (len == 0)
			break;
#ifdef SEDEBUG
		if (sc->sc_debug) {
			printf("se_read: datalen = %d, packetlen = %d, proto = 0x%04x\n", datalen, len,
			    ntohs(((struct ether_header *)data)->ether_type));
		}
#endif
		/* Sanity-check the claimed length before trusting it. */
		if (len <= sizeof(struct ether_header) ||
		    len > MAX_SNAP) {
#ifdef SEDEBUG
			printf("%s: invalid packet size %d; dropping\n",
			    device_xname(sc->sc_dev), len);
#endif
			if_statinc(ifp, if_ierrors);
			goto next_packet;
		}

		/* Don't need crc. Must keep ether header for BPF */
		m = se_get(sc, data, len - ETHER_CRC);
		if (m == 0) {
#ifdef SEDEBUG
			if (sc->sc_debug)
				printf("se_read: se_get returned null\n");
#endif
			if_statinc(ifp, if_ierrors);
			goto next_packet;
		}
		/* In promiscuous mode frames carry an SE_PREFIX header. */
		if ((ifp->if_flags & IFF_PROMISC) != 0) {
			m_adj(m, SE_PREFIX);
		}

		/* Pass the packet up. */
		if_percpuq_enqueue(sc->sc_ipq, m);

next_packet:
		data += len;
		datalen -= len;
		n++;
	}
	return (n);
}
773
774
775 static void
776 sewatchdog(struct ifnet *ifp)
777 {
778 struct se_softc *sc = ifp->if_softc;
779
780 log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
781 if_statinc(ifp, if_oerrors);
782
783 se_reset(sc);
784 }
785
/*
 * Recover the device after a timeout by reinitializing it.  A SCSI bus
 * reset is deliberately not issued (see the disabled code below).
 */
static void
se_reset(struct se_softc *sc)
{
#if 0
	/* Maybe we don't *really* want to reset the entire bus
	 * because the ctron isn't working. We would like to send a
	 * "BUS DEVICE RESET" message, but don't think the ctron
	 * understands it.
	 */
	se_scsipi_cmd(sc->sc_periph, 0, 0, 0, 0, SERETRIES, 2000, NULL,
	    XS_CTL_RESET);
#endif
	se_init(sc);
}
800
801 static int
802 se_add_proto(struct se_softc *sc, int proto)
803 {
804 int error;
805 struct scsi_ctron_ether_generic add_proto_cmd;
806 uint8_t data[2];
807 _lto2b(proto, data);
808 #ifdef SEDEBUG
809 if (sc->sc_debug)
810 printf("se: adding proto 0x%02x%02x\n", data[0], data[1]);
811 #endif
812
813 PROTOCMD(ctron_ether_add_proto, add_proto_cmd);
814 _lto2b(sizeof(data), add_proto_cmd.length);
815 error = se_scsipi_cmd(sc->sc_periph,
816 (void *)&add_proto_cmd, sizeof(add_proto_cmd),
817 data, sizeof(data), SERETRIES, SETIMEOUT, NULL,
818 XS_CTL_DATA_OUT);
819 return (error);
820 }
821
822 static int
823 se_get_addr(struct se_softc *sc, uint8_t *myaddr)
824 {
825 int error;
826 struct scsi_ctron_ether_generic get_addr_cmd;
827
828 PROTOCMD(ctron_ether_get_addr, get_addr_cmd);
829 _lto2b(ETHER_ADDR_LEN, get_addr_cmd.length);
830 error = se_scsipi_cmd(sc->sc_periph,
831 (void *)&get_addr_cmd, sizeof(get_addr_cmd),
832 myaddr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL,
833 XS_CTL_DATA_IN);
834 printf("%s: ethernet address %s\n", device_xname(sc->sc_dev),
835 ether_sprintf(myaddr));
836 return (error);
837 }
838
839
840 static int
841 se_set_media(struct se_softc *sc, int type)
842 {
843 int error;
844 struct scsi_ctron_ether_generic set_media_cmd;
845
846 PROTOCMD(ctron_ether_set_media, set_media_cmd);
847 set_media_cmd.byte3 = type;
848 error = se_scsipi_cmd(sc->sc_periph,
849 (void *)&set_media_cmd, sizeof(set_media_cmd),
850 0, 0, SERETRIES, SETIMEOUT, NULL, 0);
851 return (error);
852 }
853
854 static int
855 se_set_mode(struct se_softc *sc, int len, int mode)
856 {
857 int error;
858 struct scsi_ctron_ether_set_mode set_mode_cmd;
859
860 PROTOCMD(ctron_ether_set_mode, set_mode_cmd);
861 set_mode_cmd.mode = mode;
862 _lto2b(len, set_mode_cmd.length);
863 error = se_scsipi_cmd(sc->sc_periph,
864 (void *)&set_mode_cmd, sizeof(set_mode_cmd),
865 0, 0, SERETRIES, SETIMEOUT, NULL, 0);
866 return (error);
867 }
868
869
/*
 * (Re)initialize the adaptor: program receive mode, station address and
 * protocol filters, then mark the interface running and kick both the
 * receive poll and the transmit worker.  Returns 0 or the first error.
 */
static int
se_init(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct scsi_ctron_ether_generic set_addr_cmd;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error;

	/* Promiscuous mode needs room for prefix + CRC (MAX_SNAP). */
	if (ifp->if_flags & IFF_PROMISC) {
		error = se_set_mode(sc, MAX_SNAP, 1);
	}
	else
		error = se_set_mode(sc, ETHERMTU + sizeof(struct ether_header),
		    0);
	if (error != 0)
		return (error);

	/* Program the station address from the interface's lladdr. */
	PROTOCMD(ctron_ether_set_addr, set_addr_cmd);
	_lto2b(ETHER_ADDR_LEN, set_addr_cmd.length);
	memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
	error = se_scsipi_cmd(sc->sc_periph,
	    (void *)&set_addr_cmd, sizeof(set_addr_cmd),
	    enaddr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL,
	    XS_CTL_DATA_OUT);
	if (error != 0)
		return (error);

	/* Enable each protocol family selected via sc->protos. */
	if ((sc->protos & PROTO_IP) &&
	    (error = se_add_proto(sc, ETHERTYPE_IP)) != 0)
		return (error);
	if ((sc->protos & PROTO_ARP) &&
	    (error = se_add_proto(sc, ETHERTYPE_ARP)) != 0)
		return (error);
	if ((sc->protos & PROTO_REVARP) &&
	    (error = se_add_proto(sc, ETHERTYPE_REVARP)) != 0)
		return (error);
#ifdef NETATALK
	if ((sc->protos & PROTO_AT) &&
	    (error = se_add_proto(sc, ETHERTYPE_ATALK)) != 0)
		return (error);
	if ((sc->protos & PROTO_AARP) &&
	    (error = se_add_proto(sc, ETHERTYPE_AARP)) != 0)
		return (error);
#endif

	/* First transition from UP to UP|RUNNING: start rx and tx. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP)) == IFF_UP) {
		ifp->if_flags |= IFF_RUNNING;
		mutex_enter(&sc->sc_iflock);
		if (!sc->sc_recv_work_pending) {
			sc->sc_recv_work_pending = true;
			workqueue_enqueue(sc->sc_recv_wq, &sc->sc_recv_work,
			    NULL);
		}
		mutex_exit(&sc->sc_iflock);
		ifp->if_flags &= ~IFF_OACTIVE;
		mutex_enter(&sc->sc_iflock);
		if (!sc->sc_send_work_pending) {
			sc->sc_send_work_pending = true;
			workqueue_enqueue(sc->sc_send_wq, &sc->sc_send_work,
			    NULL);
		}
		mutex_exit(&sc->sc_iflock);
	}
	return (error);
}
935
936 static int
937 se_set_multi(struct se_softc *sc, uint8_t *addr)
938 {
939 struct scsi_ctron_ether_generic set_multi_cmd;
940 int error;
941
942 if (sc->sc_debug)
943 printf("%s: set_set_multi: %s\n", device_xname(sc->sc_dev),
944 ether_sprintf(addr));
945
946 PROTOCMD(ctron_ether_set_multi, set_multi_cmd);
947 _lto2b(ETHER_ADDR_LEN, set_multi_cmd.length);
948 error = se_scsipi_cmd(sc->sc_periph,
949 (void *)&set_multi_cmd, sizeof(set_multi_cmd),
950 addr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL, XS_CTL_DATA_OUT);
951 return (error);
952 }
953
954 static int
955 se_remove_multi(struct se_softc *sc, uint8_t *addr)
956 {
957 struct scsi_ctron_ether_generic remove_multi_cmd;
958 int error;
959
960 if (sc->sc_debug)
961 printf("%s: se_remove_multi: %s\n", device_xname(sc->sc_dev),
962 ether_sprintf(addr));
963
964 PROTOCMD(ctron_ether_remove_multi, remove_multi_cmd);
965 _lto2b(ETHER_ADDR_LEN, remove_multi_cmd.length);
966 error = se_scsipi_cmd(sc->sc_periph,
967 (void *)&remove_multi_cmd, sizeof(remove_multi_cmd),
968 addr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL, XS_CTL_DATA_OUT);
969 return (error);
970 }
971
972 #if 0 /* not used --thorpej */
973 static int
974 sc_set_all_multi(struct se_softc *sc, int set)
975 {
976 int error = 0;
977 uint8_t *addr;
978 struct ethercom *ec = &sc->sc_ethercom;
979 struct ether_multi *enm;
980 struct ether_multistep step;
981
982 ETHER_LOCK(ec);
983 ETHER_FIRST_MULTI(step, ec, enm);
984 while (enm != NULL) {
985 if (ETHER_CMP(enm->enm_addrlo, enm->enm_addrhi)) {
986 /*
987 * We must listen to a range of multicast addresses.
988 * For now, just accept all multicasts, rather than
989 * trying to set only those filter bits needed to match
990 * the range. (At this time, the only use of address
991 * ranges is for IP multicast routing, for which the
992 * range is big enough to require all bits set.)
993 */
994 /* We have no way of adding a range to this device.
995 * stepping through all addresses in the range is
996 * typically not possible. The only real alternative
997 * is to go into promicuous mode and filter by hand.
998 */
999 ETHER_UNLOCK(ec);
1000 return (ENODEV);
1001
1002 }
1003
1004 addr = enm->enm_addrlo;
1005 if ((error = set ? se_set_multi(sc, addr) :
1006 se_remove_multi(sc, addr)) != 0)
1007 return (error);
1008 ETHER_NEXT_MULTI(step, enm);
1009 }
1010 ETHER_UNLOCK(ec);
1011
1012 return (error);
1013 }
1014 #endif /* not used */
1015
/*
 * Quiesce the device: stop the poll callout, wait for both workers to
 * drain, then kill any SCSI commands still in flight.  The ordering
 * matters — no new work may be generated once each stage is stopped.
 */
static void
se_stop(struct se_softc *sc)
{

	/* Don't schedule any reads */
	callout_halt(&sc->sc_recv_ch, &sc->sc_iflock);

	/* Wait for the workqueues to finish */
	mutex_enter(&sc->sc_iflock);
	workqueue_wait(sc->sc_recv_wq, &sc->sc_recv_work);
	workqueue_wait(sc->sc_send_wq, &sc->sc_send_work);
	mutex_exit(&sc->sc_iflock);

	/* Abort any scsi cmds in progress */
	mutex_enter(chan_mtx(sc->sc_periph->periph_channel));
	scsipi_kill_pending(sc->sc_periph);
	mutex_exit(chan_mtx(sc->sc_periph->periph_channel));
}
1034
1035
1036 /*
1037 * Process an ioctl request.
1038 */
1039 static int
1040 se_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1041 {
1042 struct se_softc *sc = ifp->if_softc;
1043 struct ifaddr *ifa = (struct ifaddr *)data;
1044 struct ifreq *ifr = (struct ifreq *)data;
1045 struct sockaddr *sa;
1046 int error = 0;
1047
1048
1049 switch (cmd) {
1050
1051 case SIOCINITIFADDR:
1052 mutex_enter(&sc->sc_iflock);
1053 if ((error = se_enable(sc)) != 0)
1054 break;
1055 ifp->if_flags |= IFF_UP;
1056 mutex_exit(&sc->sc_iflock);
1057
1058 if ((error = se_set_media(sc, CMEDIA_AUTOSENSE)) != 0)
1059 break;
1060
1061 switch (ifa->ifa_addr->sa_family) {
1062 #ifdef INET
1063 case AF_INET:
1064 sc->protos |= (PROTO_IP | PROTO_ARP | PROTO_REVARP);
1065 if ((error = se_init(sc)) != 0)
1066 break;
1067 arp_ifinit(ifp, ifa);
1068 break;
1069 #endif
1070 #ifdef NETATALK
1071 case AF_APPLETALK:
1072 sc->protos |= (PROTO_AT | PROTO_AARP);
1073 if ((error = se_init(sc)) != 0)
1074 break;
1075 break;
1076 #endif
1077 default:
1078 error = se_init(sc);
1079 break;
1080 }
1081 break;
1082
1083
1084 case SIOCSIFFLAGS:
1085 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1086 break;
1087 /* XXX re-use ether_ioctl() */
1088 switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
1089 case IFF_RUNNING:
1090 /*
1091 * If interface is marked down and it is running, then
1092 * stop it.
1093 */
1094 se_stop(sc);
1095 mutex_enter(&sc->sc_iflock);
1096 ifp->if_flags &= ~IFF_RUNNING;
1097 se_disable(sc);
1098 mutex_exit(&sc->sc_iflock);
1099 break;
1100 case IFF_UP:
1101 /*
1102 * If interface is marked up and it is stopped, then
1103 * start it.
1104 */
1105 mutex_enter(&sc->sc_iflock);
1106 error = se_enable(sc);
1107 mutex_exit(&sc->sc_iflock);
1108 if (error)
1109 break;
1110 error = se_init(sc);
1111 break;
1112 default:
1113 /*
1114 * Reset the interface to pick up changes in any other
1115 * flags that affect hardware registers.
1116 */
1117 if (sc->sc_enabled)
1118 error = se_init(sc);
1119 break;
1120 }
1121 #ifdef SEDEBUG
1122 if (ifp->if_flags & IFF_DEBUG)
1123 sc->sc_debug = 1;
1124 else
1125 sc->sc_debug = 0;
1126 #endif
1127 break;
1128
1129 case SIOCADDMULTI:
1130 case SIOCDELMULTI:
1131 mutex_enter(&sc->sc_iflock);
1132 sa = sockaddr_dup(ifreq_getaddr(cmd, ifr), M_WAITOK);
1133 mutex_exit(&sc->sc_iflock);
1134 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
1135 if (ifp->if_flags & IFF_RUNNING) {
1136 error = (cmd == SIOCADDMULTI) ?
1137 se_set_multi(sc, sa->sa_data) :
1138 se_remove_multi(sc, sa->sa_data);
1139 } else
1140 error = 0;
1141 }
1142 mutex_enter(&sc->sc_iflock);
1143 sockaddr_free(sa);
1144 mutex_exit(&sc->sc_iflock);
1145 break;
1146
1147 default:
1148
1149 error = ether_ioctl(ifp, cmd, data);
1150 break;
1151 }
1152
1153 return (error);
1154 }
1155
1156 /*
1157 * Enable the network interface.
1158 */
1159 int
1160 se_enable(struct se_softc *sc)
1161 {
1162 struct scsipi_periph *periph = sc->sc_periph;
1163 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
1164 int error = 0;
1165
1166 if (sc->sc_enabled == 0) {
1167 if ((error = scsipi_adapter_addref(adapt)) == 0)
1168 sc->sc_enabled = 1;
1169 else
1170 aprint_error_dev(sc->sc_dev, "device enable failed\n");
1171 }
1172 return (error);
1173 }
1174
1175 /*
1176 * Disable the network interface.
1177 */
1178 void
1179 se_disable(struct se_softc *sc)
1180 {
1181 struct scsipi_periph *periph = sc->sc_periph;
1182 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
1183
1184 if (sc->sc_enabled != 0) {
1185 scsipi_adapter_delref(adapt);
1186 sc->sc_enabled = 0;
1187 }
1188 }
1189
1190 #define SEUNIT(z) (minor(z))
1191 /*
1192 * open the device.
1193 */
1194 int
1195 seopen(dev_t dev, int flag, int fmt, struct lwp *l)
1196 {
1197 int unit, error;
1198 struct se_softc *sc;
1199 struct scsipi_periph *periph;
1200 struct scsipi_adapter *adapt;
1201
1202 unit = SEUNIT(dev);
1203 sc = device_lookup_private(&se_cd, unit);
1204 if (sc == NULL)
1205 return (ENXIO);
1206
1207 periph = sc->sc_periph;
1208 adapt = periph->periph_channel->chan_adapter;
1209
1210 if ((error = scsipi_adapter_addref(adapt)) != 0)
1211 return (error);
1212
1213 SC_DEBUG(periph, SCSIPI_DB1,
1214 ("scopen: dev=0x%"PRIx64" (unit %d (of %d))\n", dev, unit,
1215 se_cd.cd_ndevs));
1216
1217 periph->periph_flags |= PERIPH_OPEN;
1218
1219 SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
1220 return (0);
1221 }
1222
1223 /*
1224 * close the device.. only called if we are the LAST
1225 * occurrence of an open device
1226 */
1227 int
1228 seclose(dev_t dev, int flag, int fmt, struct lwp *l)
1229 {
1230 struct se_softc *sc = device_lookup_private(&se_cd, SEUNIT(dev));
1231 struct scsipi_periph *periph = sc->sc_periph;
1232 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
1233
1234 SC_DEBUG(sc->sc_periph, SCSIPI_DB1, ("closing\n"));
1235
1236 scsipi_wait_drain(periph);
1237
1238 scsipi_adapter_delref(adapt);
1239 periph->periph_flags &= ~PERIPH_OPEN;
1240
1241 return (0);
1242 }
1243
1244 /*
1245 * Perform special action on behalf of the user
1246 * Only does generic scsi ioctls.
1247 */
1248 int
1249 seioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1250 {
1251 struct se_softc *sc = device_lookup_private(&se_cd, SEUNIT(dev));
1252
1253 return (scsipi_do_ioctl(sc->sc_periph, dev, cmd, addr, flag, l));
1254 }
1255