if_se.c revision 1.113 1 /* $NetBSD: if_se.c,v 1.113 2021/06/16 00:21:19 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 1997 Ian W. Dall <ian.dall (at) dsto.defence.gov.au>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Ian W. Dall.
18 * 4. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Driver for Cabletron EA41x scsi ethernet adaptor.
35 *
36 * Written by Ian Dall <ian.dall (at) dsto.defence.gov.au> Feb 3, 1997
37 *
38 * Acknowledgement: Thanks are due to Philip L. Budne <budd (at) cs.bu.edu>
39 * who reverse engineered the EA41x. In developing this code,
 * Phil's userland daemon "etherd", was referred to extensively in lieu
41 * of accurate documentation for the device.
42 *
43 * This is a weird device! It doesn't conform to the scsi spec in much
44 * at all. About the only standard command supported is inquiry. Most
45 * commands are 6 bytes long, but the recv data is only 1 byte. Data
46 * must be received by periodically polling the device with the recv
47 * command.
48 *
49 * This driver is also a bit unusual. It must look like a network
50 * interface and it must also appear to be a scsi device to the scsi
51 * system. Hence there are cases where there are two entry points. eg
 * sedone is to be called from the scsi subsystem and se_ifstart from
53 * the network interface subsystem. In addition, to facilitate scsi
54 * commands issued by userland programs, there are open, close and
55 * ioctl entry points. This allows a user program to, for example,
56 * display the ea41x stats and download new code into the adaptor ---
57 * functions which can't be performed through the ifconfig interface.
58 * Normal operation does not require any special userland program.
59 */
60
61 #include <sys/cdefs.h>
62 __KERNEL_RCSID(0, "$NetBSD: if_se.c,v 1.113 2021/06/16 00:21:19 riastradh Exp $");
63
64 #ifdef _KERNEL_OPT
65 #include "opt_inet.h"
66 #include "opt_net_mpsafe.h"
67 #include "opt_atalk.h"
68 #endif
69
70 #include <sys/param.h>
71 #include <sys/types.h>
72
73 #include <sys/buf.h>
74 #include <sys/callout.h>
75 #include <sys/conf.h>
76 #include <sys/device.h>
77 #include <sys/disk.h>
78 #include <sys/disklabel.h>
79 #include <sys/errno.h>
80 #include <sys/file.h>
81 #include <sys/ioctl.h>
82 #include <sys/kernel.h>
83 #include <sys/malloc.h>
84 #include <sys/mbuf.h>
85 #include <sys/mutex.h>
86 #include <sys/proc.h>
87 #include <sys/socket.h>
88 #include <sys/stat.h>
89 #include <sys/syslog.h>
90 #include <sys/systm.h>
91 #include <sys/uio.h>
92 #include <sys/workqueue.h>
93
94 #include <dev/scsipi/scsi_ctron_ether.h>
95 #include <dev/scsipi/scsiconf.h>
96 #include <dev/scsipi/scsipi_all.h>
97
98 #include <net/bpf.h>
99 #include <net/if.h>
100 #include <net/if_dl.h>
101 #include <net/if_ether.h>
102 #include <net/if_media.h>
103
104 #ifdef INET
105 #include <netinet/if_inarp.h>
106 #include <netinet/in.h>
107 #endif
108
109 #ifdef NETATALK
110 #include <netatalk/at.h>
111 #endif
112
/* SCSI command timing/retry parameters. */
#define SETIMEOUT	1000	/* scsipi command timeout, milliseconds */
#define	SEOUTSTANDING	4
#define	SERETRIES	4	/* scsipi retry count */
#define SE_PREFIX	4	/* bytes prepended per frame in promiscuous mode */
#define ETHER_CRC	4	/* trailing FCS bytes included in recv data */
#define SEMINSIZE	60	/* minimum ethernet frame length; short sends padded */

/* Make this big enough for an ETHERMTU packet in promiscuous mode. */
#define MAX_SNAP	(ETHERMTU + sizeof(struct ether_header) + \
			SE_PREFIX + ETHER_CRC)

/* 10 full length packets appears to be the max ever returned. 16k is OK */
#define RBUF_LEN	(16 * 1024)

/* Tuning parameters:
 * The EA41x only returns a maximum of 10 packets (regardless of size).
 * We will attempt to adapt to polling fast enough to get RDATA_GOAL packets
 * per read
 */
#define RDATA_MAX 10
#define RDATA_GOAL 8

/* se_poll and se_poll0 are the normal polling rate and the minimum
 * polling rate respectively. se_poll0 should be chosen so that at
 * maximum ethernet speed, we will read nearly RDATA_MAX packets. se_poll
 * should be chosen for reasonable maximum latency.
 * In practice, if we are being saturated with min length packets, we
 * can't poll fast enough. Polling with zero delay actually
 * worsens performance. se_poll0 is enforced to be always at least 1
 */
#define SE_POLL 40		/* default in milliseconds */
#define SE_POLL0 10		/* default in milliseconds */
int se_poll = 0;		/* Delay in ticks set at attach time */
int se_poll0 = 0;		/* minimum poll interval, ticks (>= 1) */
#ifdef SE_DEBUG
int se_max_received = 0;	/* Instrumentation */
#endif
150
/* Copy a command template into a command buffer (struct assignment). */
#define PROTOCMD(p, d) \
	((d) = (p))

/* Declare a command template that uses the generic ctron-ether cdb layout. */
#define PROTOCMD_DECL(name) \
	static const struct scsi_ctron_ether_generic name

/* Declare a command template that has its own dedicated cdb structure. */
#define PROTOCMD_DECL_SPECIAL(name) \
	static const struct __CONCAT(scsi_, name) name

/* Command initializers for commands using scsi_ctron_ether_generic */
PROTOCMD_DECL(ctron_ether_send) = {CTRON_ETHER_SEND, 0, {0,0}, 0};
PROTOCMD_DECL(ctron_ether_add_proto) = {CTRON_ETHER_ADD_PROTO, 0, {0,0}, 0};
PROTOCMD_DECL(ctron_ether_get_addr) = {CTRON_ETHER_GET_ADDR, 0, {0,0}, 0};
PROTOCMD_DECL(ctron_ether_set_media) = {CTRON_ETHER_SET_MEDIA, 0, {0,0}, 0};
PROTOCMD_DECL(ctron_ether_set_addr) = {CTRON_ETHER_SET_ADDR, 0, {0,0}, 0};
PROTOCMD_DECL(ctron_ether_set_multi) = {CTRON_ETHER_SET_MULTI, 0, {0,0}, 0};
PROTOCMD_DECL(ctron_ether_remove_multi) =
    {CTRON_ETHER_REMOVE_MULTI, 0, {0,0}, 0};

/* Command initializers for commands using their own structures */
PROTOCMD_DECL_SPECIAL(ctron_ether_recv) = {CTRON_ETHER_RECV};
PROTOCMD_DECL_SPECIAL(ctron_ether_set_mode) =
    {CTRON_ETHER_SET_MODE, 0, {0,0}, 0};
174
/*
 * Per-instance state: the network-interface half (ethercom) and the
 * SCSI half (periph), plus the receive-poll callout and the two
 * workqueues that carry out the actual SCSI transfers.
 */
struct se_softc {
	device_t sc_dev;
	struct ethercom sc_ethercom;	/* Ethernet common part */
	struct scsipi_periph *sc_periph;/* contains our targ, lun, etc. */

	struct callout sc_recv_ch;	/* schedules periodic receive polls */
	struct kmutex sc_iflock;	/* protects the *_work_pending flags */
	struct if_percpuq *sc_ipq;	/* input queue for received packets */
	struct workqueue *sc_recv_wq, *sc_send_wq;
	struct work sc_recv_work, sc_send_work;
	int sc_recv_work_pending, sc_send_work_pending;

	char *sc_tbuf;		/* linear transmit staging buffer */
	char *sc_rbuf;		/* receive buffer, RBUF_LEN bytes */
	int protos;		/* mask of ethertypes registered with device */
#define PROTO_IP	0x01
#define PROTO_ARP	0x02
#define PROTO_REVARP	0x04
#define PROTO_AT	0x08
#define PROTO_AARP	0x10
	int sc_debug;		/* debug printfs enabled (tracks IFF_DEBUG) */
	int sc_flags;
	int sc_last_timeout;	/* previous receive-poll interval, in ticks */
	int sc_enabled;		/* nonzero while adapter reference is held */
	int sc_attach_state;	/* how far seattach() got, for sedetach() */
};
201
202 static int sematch(device_t, cfdata_t, void *);
203 static void seattach(device_t, device_t, void *);
204 static int sedetach(device_t, int);
205
206 static void se_ifstart(struct ifnet *);
207
208 static void sedone(struct scsipi_xfer *, int);
209 static int se_ioctl(struct ifnet *, u_long, void *);
210 static void sewatchdog(struct ifnet *);
211
212 #if 0
213 static inline uint16_t ether_cmp(void *, void *);
214 #endif
215 static void se_recv_callout(void *);
216 static void se_recv_worker(struct work *wk, void *cookie);
217 static void se_recv(struct se_softc *);
218 static struct mbuf *se_get(struct se_softc *, char *, int);
219 static int se_read(struct se_softc *, char *, int);
220 static void se_reset(struct se_softc *);
221 static int se_add_proto(struct se_softc *, int);
222 static int se_get_addr(struct se_softc *, uint8_t *);
223 static int se_set_media(struct se_softc *, int);
224 static int se_init(struct se_softc *);
225 static int se_set_multi(struct se_softc *, uint8_t *);
226 static int se_remove_multi(struct se_softc *, uint8_t *);
227 #if 0
228 static int sc_set_all_multi(struct se_softc *, int);
229 #endif
230 static void se_stop(struct se_softc *);
231 static inline int se_scsipi_cmd(struct scsipi_periph *periph,
232 struct scsipi_generic *scsipi_cmd,
233 int cmdlen, u_char *data_addr, int datalen,
234 int retries, int timeout, struct buf *bp,
235 int flags);
236 static void se_send_worker(struct work *wk, void *cookie);
237 static int se_set_mode(struct se_softc *, int, int);
238
239 int se_enable(struct se_softc *);
240 void se_disable(struct se_softc *);
241
/* Autoconf attachment glue. */
CFATTACH_DECL_NEW(se, sizeof(struct se_softc),
    sematch, seattach, sedetach, NULL);

extern struct cfdriver se_cd;

dev_type_open(seopen);
dev_type_close(seclose);
dev_type_ioctl(seioctl);

/*
 * Character-device entry points: only open/close/ioctl are supported,
 * for userland scsi ioctl access (stats, firmware download).
 */
const struct cdevsw se_cdevsw = {
	.d_open = seopen,
	.d_close = seclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = seioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

/* scsipi callbacks; only completion (sedone) is interesting. */
const struct scsipi_periphsw se_switch = {
	NULL,			/* Use default error handler */
	NULL,			/* have no queue */
	NULL,			/* have no async handler */
	sedone,			/* deal with send/recv completion */
};

/* INQUIRY identities we claim (both vendor-string casings seen in the wild). */
const struct scsipi_inquiry_pattern se_patterns[] = {
	{T_PROCESSOR, T_FIXED,
	 "CABLETRN", "EA412", ""},
	{T_PROCESSOR, T_FIXED,
	 "Cabletrn", "EA412", ""},
};
279
#if 0
/*
 * Compare two Ether/802 addresses for equality, inlined and
 * unrolled for speed.
 * Note: use this like memcmp()
 * (Compiled out; only referenced by the also-disabled sc_set_all_multi().)
 */
static inline uint16_t
ether_cmp(void *one, void *two)
{
	uint16_t *a = (uint16_t *) one;
	uint16_t *b = (uint16_t *) two;
	uint16_t diff;

	/* Nonzero iff any of the three 16-bit words differ. */
	diff = (a[0] - b[0]) | (a[1] - b[1]) | (a[2] - b[2]);

	return (diff);
}

#define ETHER_CMP	ether_cmp
#endif
300
301 static int
302 sematch(device_t parent, cfdata_t match, void *aux)
303 {
304 struct scsipibus_attach_args *sa = aux;
305 int priority;
306
307 (void)scsipi_inqmatch(&sa->sa_inqbuf,
308 se_patterns, sizeof(se_patterns) / sizeof(se_patterns[0]),
309 sizeof(se_patterns[0]), &priority);
310 return (priority);
311 }
312
/*
 * The routine called by the low level scsi routine when it discovers
 * a device suitable for this driver.
 *
 * sc_attach_state records how far attachment got so that sedetach()
 * can unwind exactly the completed work, even on mid-attach failure.
 */
static void
seattach(device_t parent, device_t self, void *aux)
{
	struct se_softc *sc = device_private(self);
	struct scsipibus_attach_args *sa = aux;
	struct scsipi_periph *periph = sa->sa_periph;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint8_t myaddr[ETHER_ADDR_LEN];
	char wqname[MAXCOMLEN];
	int rv;

	sc->sc_dev = self;

	printf("\n");
	SC_DEBUG(periph, SCSIPI_DB2, ("seattach: "));

	sc->sc_attach_state = 0;
	callout_init(&sc->sc_recv_ch, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_recv_ch, se_recv_callout, (void *)sc);
	mutex_init(&sc->sc_iflock, MUTEX_DEFAULT, IPL_SOFTNET);

	/*
	 * Store information needed to contact our base driver
	 */
	sc->sc_periph = periph;
	periph->periph_dev = sc->sc_dev;
	periph->periph_switch = &se_switch;

	/* Convert the default poll intervals from ms to ticks, minimum 1. */
	se_poll = (SE_POLL * hz) / 1000;
	se_poll = se_poll? se_poll: 1;
	se_poll0 = (SE_POLL0 * hz) / 1000;
	se_poll0 = se_poll0? se_poll0: 1;

	/*
	 * Initialize and attach send and receive buffers
	 */
	sc->sc_tbuf = malloc(ETHERMTU + sizeof(struct ether_header),
	    M_DEVBUF, M_WAITOK);
	sc->sc_rbuf = malloc(RBUF_LEN, M_DEVBUF, M_WAITOK);

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_start = se_ifstart;
	ifp->if_ioctl = se_ioctl;
	ifp->if_watchdog = sewatchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_MPSAFE;
	IFQ_SET_READY(&ifp->if_snd);

	/* Read the MAC address from the device before registering. */
	se_get_addr(sc, myaddr);
	sc->sc_attach_state = 1;

	/* Attach the interface. */
	if_initialize(ifp);

	/* Receive-side workqueue, fed by se_recv_callout(). */
	snprintf(wqname, sizeof(wqname), "%sRx", device_xname(sc->sc_dev));
	rv = workqueue_create(&sc->sc_recv_wq, wqname, se_recv_worker, sc,
	    PRI_SOFTNET, IPL_NET, WQ_MPSAFE);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create recv Rx workqueue\n");
		sedetach(sc->sc_dev, 0);
		return; /* Error */
	}
	sc->sc_recv_work_pending = false;
	sc->sc_attach_state = 2;

	/* Transmit-side workqueue, fed by se_ifstart(). */
	snprintf(wqname, sizeof(wqname), "%sTx", device_xname(sc->sc_dev));
	rv = workqueue_create(&sc->sc_send_wq, wqname, se_send_worker, ifp,
	    PRI_SOFTNET, IPL_NET, WQ_MPSAFE);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create send Tx workqueue\n");
		sedetach(sc->sc_dev, 0);
		return; /* Error */
	}
	sc->sc_send_work_pending = false;
	sc->sc_attach_state = 3;

	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
	ether_ifattach(ifp, myaddr);
	if_register(ifp);
	sc->sc_attach_state = 4;
}
402
403 static int
404 sedetach(device_t self, int flags)
405 {
406 struct se_softc *sc = device_private(self);
407 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
408
409 switch(sc->sc_attach_state) {
410 case 4:
411 se_stop(sc);
412 mutex_enter(&sc->sc_iflock);
413 ifp->if_flags &= ~IFF_RUNNING;
414 se_disable(sc);
415 ether_ifdetach(ifp);
416 if_detach(ifp);
417 mutex_exit(&sc->sc_iflock);
418 if_percpuq_destroy(sc->sc_ipq);
419 /*FALLTHROUGH*/
420 case 3:
421 workqueue_destroy(sc->sc_send_wq);
422 /*FALLTHROUGH*/
423 case 2:
424 workqueue_destroy(sc->sc_recv_wq);
425 /*FALLTHROUGH*/
426 case 1:
427 free(sc->sc_rbuf, M_DEVBUF);
428 free(sc->sc_tbuf, M_DEVBUF);
429 callout_destroy(&sc->sc_recv_ch);
430 mutex_destroy(&sc->sc_iflock);
431 break;
432 default:
433 aprint_error_dev(sc->sc_dev, "detach failed (state %d)\n",
434 sc->sc_attach_state);
435 return 1;
436 break;
437 }
438 return 0;
439 }
440
441 /*
442 * Send a command to the device
443 */
444 static inline int
445 se_scsipi_cmd(struct scsipi_periph *periph, struct scsipi_generic *cmd,
446 int cmdlen, u_char *data_addr, int datalen, int retries, int timeout,
447 struct buf *bp, int flags)
448 {
449 int error;
450
451 error = scsipi_command(periph, cmd, cmdlen, data_addr,
452 datalen, retries, timeout, bp, flags);
453 return (error);
454 }
455
456 /*
457 * Start routine for calling from network sub system
458 */
459 static void
460 se_ifstart(struct ifnet *ifp)
461 {
462 struct se_softc *sc = ifp->if_softc;
463
464 mutex_enter(&sc->sc_iflock);
465 if (!sc->sc_send_work_pending) {
466 sc->sc_send_work_pending = true;
467 workqueue_enqueue(sc->sc_send_wq, &sc->sc_send_work, NULL);
468 }
469 /* else: nothing to do - work is already queued */
470 mutex_exit(&sc->sc_iflock);
471 }
472
473 /*
474 * Invoke the transmit workqueue and transmission on the interface.
475 */
476 static void
477 se_send_worker(struct work *wk, void *cookie)
478 {
479 struct ifnet *ifp = cookie;
480 struct se_softc *sc = ifp->if_softc;
481 struct scsi_ctron_ether_generic send_cmd;
482 struct mbuf *m, *m0;
483 int len, error;
484 u_char *cp;
485
486 mutex_enter(&sc->sc_iflock);
487 sc->sc_send_work_pending = false;
488 mutex_exit(&sc->sc_iflock);
489
490 KASSERT(if_is_mpsafe(ifp));
491
492 /* Don't transmit if interface is busy or not running */
493 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
494 return;
495
496 while (1) {
497 IFQ_DEQUEUE(&ifp->if_snd, m0);
498 if (m0 == 0)
499 break;
500
501 /* If BPF is listening on this interface, let it see the
502 * packet before we commit it to the wire.
503 */
504 bpf_mtap(ifp, m0, BPF_D_OUT);
505
506 /* We need to use m->m_pkthdr.len, so require the header */
507 if ((m0->m_flags & M_PKTHDR) == 0)
508 panic("ctscstart: no header mbuf");
509 len = m0->m_pkthdr.len;
510
511 /* Mark the interface busy. */
512 ifp->if_flags |= IFF_OACTIVE;
513
514 /* Chain; copy into linear buffer allocated at attach time. */
515 cp = sc->sc_tbuf;
516 for (m = m0; m != NULL; ) {
517 memcpy(cp, mtod(m, u_char *), m->m_len);
518 cp += m->m_len;
519 m = m0 = m_free(m);
520 }
521 if (len < SEMINSIZE) {
522 #ifdef SEDEBUG
523 if (sc->sc_debug)
524 printf("se: packet size %d (%zu) < %d\n", len,
525 cp - (u_char *)sc->sc_tbuf, SEMINSIZE);
526 #endif
527 memset(cp, 0, SEMINSIZE - len);
528 len = SEMINSIZE;
529 }
530
531 /* Fill out SCSI command. */
532 PROTOCMD(ctron_ether_send, send_cmd);
533 _lto2b(len, send_cmd.length);
534
535 /* Send command to device. */
536 error = se_scsipi_cmd(sc->sc_periph,
537 (void *)&send_cmd, sizeof(send_cmd),
538 sc->sc_tbuf, len, SERETRIES,
539 SETIMEOUT, NULL, XS_CTL_NOSLEEP | XS_CTL_DATA_OUT);
540 if (error) {
541 aprint_error_dev(sc->sc_dev,
542 "not queued, error %d\n", error);
543 if_statinc(ifp, if_oerrors);
544 ifp->if_flags &= ~IFF_OACTIVE;
545 } else
546 if_statinc(ifp, if_opackets);
547 }
548 }
549
550
/*
 * Called from the scsibus layer via our scsi device switch.
 * Completion handler for both SEND and RECV commands.
 */
static void
sedone(struct scsipi_xfer *xs, int error)
{
	struct se_softc *sc = device_private(xs->xs_periph->periph_dev);
	struct scsipi_generic *cmd = xs->cmd;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (IS_SEND(cmd)) {
		/* Transmit finished; let se_send_worker() send again. */
		ifp->if_flags &= ~IFF_OACTIVE;
	} else if (IS_RECV(cmd)) {
		/* RECV complete */
		/* pass data up. reschedule a recv */
		/* scsipi_free_xs will call start. Harmless. */
		if (error) {
			/* Reschedule after a delay */
			callout_schedule(&sc->sc_recv_ch, se_poll);
		} else {
			int n, ntimeo;
			n = se_read(sc, xs->data, xs->datalen - xs->resid);
#ifdef SE_DEBUG
			if (n > se_max_received)
				se_max_received = n;
#endif
			/*
			 * Adapt the polling interval: scale the previous
			 * interval so the next read should yield about
			 * RDATA_GOAL packets, clamped to [se_poll0, se_poll].
			 */
			if (n == 0)
				ntimeo = se_poll;
			else if (n >= RDATA_MAX)
				ntimeo = se_poll0;
			else {
				ntimeo = sc->sc_last_timeout;
				ntimeo = (ntimeo * RDATA_GOAL)/n;
				ntimeo = (ntimeo < se_poll0?
				    se_poll0: ntimeo);
				ntimeo = (ntimeo > se_poll?
				    se_poll: ntimeo);
			}
			sc->sc_last_timeout = ntimeo;
			callout_schedule(&sc->sc_recv_ch, ntimeo);
		}
	}
}
594
595 /*
596 * Setup a receive command by queuing the work.
597 * Usually called from a callout, but also from se_init().
598 */
599 static void
600 se_recv_callout(void *v)
601 {
602 /* do a recv command */
603 struct se_softc *sc = (struct se_softc *) v;
604
605 if (sc->sc_enabled == 0)
606 return;
607
608 mutex_enter(&sc->sc_iflock);
609 if (sc->sc_recv_work_pending == true) {
610 callout_schedule(&sc->sc_recv_ch, se_poll);
611 mutex_exit(&sc->sc_iflock);
612 return;
613 }
614
615 sc->sc_recv_work_pending = true;
616 workqueue_enqueue(sc->sc_recv_wq, &sc->sc_recv_work, NULL);
617 mutex_exit(&sc->sc_iflock);
618 }
619
620 /*
621 * Invoke the receive workqueue
622 */
623 static void
624 se_recv_worker(struct work *wk, void *cookie)
625 {
626 struct se_softc *sc = (struct se_softc *) cookie;
627
628 mutex_enter(&sc->sc_iflock);
629 sc->sc_recv_work_pending = false;
630 mutex_exit(&sc->sc_iflock);
631 se_recv(sc);
632
633 }
634
635 /*
636 * Do the actual work of receiving data.
637 */
638 static void
639 se_recv(struct se_softc *sc)
640 {
641 struct scsi_ctron_ether_recv recv_cmd;
642 int error;
643
644 /* do a recv command */
645 PROTOCMD(ctron_ether_recv, recv_cmd);
646
647 error = se_scsipi_cmd(sc->sc_periph,
648 (void *)&recv_cmd, sizeof(recv_cmd),
649 sc->sc_rbuf, RBUF_LEN, SERETRIES, SETIMEOUT, NULL,
650 XS_CTL_NOSLEEP | XS_CTL_DATA_IN);
651 if (error)
652 callout_schedule(&sc->sc_recv_ch, se_poll);
653 }
654
/*
 * We copy the data into mbufs. When full cluster sized units are present
 * we copy into clusters.
 *
 * Builds an mbuf chain of totlen bytes from the linear buffer `data'.
 * Returns NULL on allocation failure; the caller counts an input error.
 */
static struct mbuf *
se_get(struct se_softc *sc, char *data, int totlen)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	int len;

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == 0)
		return (0);
	m_set_rcvif(m0, ifp);
	m0->m_pkthdr.len = totlen;
	len = MHLEN;
	m = m0;

	while (totlen > 0) {
		/* Use a cluster whenever a full cluster's worth remains. */
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0)
				goto bad;
			len = MCLBYTES;
		}

		if (m == m0) {
			/*
			 * Offset the payload in the first mbuf so that the
			 * data following the ethernet header ends up on an
			 * ALIGN()ed boundary.
			 */
			char *newdata = (char *)
			    ALIGN(m->m_data + sizeof(struct ether_header)) -
			    sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = uimin(totlen, len);
		memcpy(mtod(m, void *), data, len);
		data += len;

		totlen -= len;
		if (totlen > 0) {
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == 0)
				goto bad;
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return (m0);

bad:
	m_freem(m0);
	return (0);
}
710
711 /*
712 * Pass packets to higher levels.
713 */
714 static int
715 se_read(struct se_softc *sc, char *data, int datalen)
716 {
717 struct mbuf *m;
718 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
719 int n;
720
721 n = 0;
722 while (datalen >= 2) {
723 int len = _2btol(data);
724 data += 2;
725 datalen -= 2;
726
727 if (len == 0)
728 break;
729 #ifdef SEDEBUG
730 if (sc->sc_debug) {
731 printf("se_read: datalen = %d, packetlen = %d, proto = 0x%04x\n", datalen, len,
732 ntohs(((struct ether_header *)data)->ether_type));
733 }
734 #endif
735 if (len <= sizeof(struct ether_header) ||
736 len > MAX_SNAP) {
737 #ifdef SEDEBUG
738 printf("%s: invalid packet size %d; dropping\n",
739 device_xname(sc->sc_dev), len);
740 #endif
741 if_statinc(ifp, if_ierrors);
742 goto next_packet;
743 }
744
745 /* Don't need crc. Must keep ether header for BPF */
746 m = se_get(sc, data, len - ETHER_CRC);
747 if (m == 0) {
748 #ifdef SEDEBUG
749 if (sc->sc_debug)
750 printf("se_read: se_get returned null\n");
751 #endif
752 if_statinc(ifp, if_ierrors);
753 goto next_packet;
754 }
755 if ((ifp->if_flags & IFF_PROMISC) != 0) {
756 m_adj(m, SE_PREFIX);
757 }
758
759 /* Pass the packet up. */
760 if_percpuq_enqueue(sc->sc_ipq, m);
761
762 next_packet:
763 data += len;
764 datalen -= len;
765 n++;
766 }
767 return (n);
768 }
769
770
/*
 * Interface watchdog: the transmitter appears hung.  Log it, count an
 * output error, and reinitialize the device.
 */
static void
sewatchdog(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
	if_statinc(ifp, if_oerrors);

	se_reset(sc);
}
781
/*
 * Reset the device.  A real SCSI reset is compiled out (see below);
 * in practice we just reinitialize the adaptor.
 */
static void
se_reset(struct se_softc *sc)
{
#if 0
	/* Maybe we don't *really* want to reset the entire bus
	 * because the ctron isn't working. We would like to send a
	 * "BUS DEVICE RESET" message, but don't think the ctron
	 * understands it.
	 */
	se_scsipi_cmd(sc->sc_periph, 0, 0, 0, 0, SERETRIES, 2000, NULL,
	    XS_CTL_RESET);
#endif
	se_init(sc);
}
796
797 static int
798 se_add_proto(struct se_softc *sc, int proto)
799 {
800 int error;
801 struct scsi_ctron_ether_generic add_proto_cmd;
802 uint8_t data[2];
803 _lto2b(proto, data);
804 #ifdef SEDEBUG
805 if (sc->sc_debug)
806 printf("se: adding proto 0x%02x%02x\n", data[0], data[1]);
807 #endif
808
809 PROTOCMD(ctron_ether_add_proto, add_proto_cmd);
810 _lto2b(sizeof(data), add_proto_cmd.length);
811 error = se_scsipi_cmd(sc->sc_periph,
812 (void *)&add_proto_cmd, sizeof(add_proto_cmd),
813 data, sizeof(data), SERETRIES, SETIMEOUT, NULL,
814 XS_CTL_DATA_OUT);
815 return (error);
816 }
817
818 static int
819 se_get_addr(struct se_softc *sc, uint8_t *myaddr)
820 {
821 int error;
822 struct scsi_ctron_ether_generic get_addr_cmd;
823
824 PROTOCMD(ctron_ether_get_addr, get_addr_cmd);
825 _lto2b(ETHER_ADDR_LEN, get_addr_cmd.length);
826 error = se_scsipi_cmd(sc->sc_periph,
827 (void *)&get_addr_cmd, sizeof(get_addr_cmd),
828 myaddr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL,
829 XS_CTL_DATA_IN);
830 printf("%s: ethernet address %s\n", device_xname(sc->sc_dev),
831 ether_sprintf(myaddr));
832 return (error);
833 }
834
835
836 static int
837 se_set_media(struct se_softc *sc, int type)
838 {
839 int error;
840 struct scsi_ctron_ether_generic set_media_cmd;
841
842 PROTOCMD(ctron_ether_set_media, set_media_cmd);
843 set_media_cmd.byte3 = type;
844 error = se_scsipi_cmd(sc->sc_periph,
845 (void *)&set_media_cmd, sizeof(set_media_cmd),
846 0, 0, SERETRIES, SETIMEOUT, NULL, 0);
847 return (error);
848 }
849
850 static int
851 se_set_mode(struct se_softc *sc, int len, int mode)
852 {
853 int error;
854 struct scsi_ctron_ether_set_mode set_mode_cmd;
855
856 PROTOCMD(ctron_ether_set_mode, set_mode_cmd);
857 set_mode_cmd.mode = mode;
858 _lto2b(len, set_mode_cmd.length);
859 error = se_scsipi_cmd(sc->sc_periph,
860 (void *)&set_mode_cmd, sizeof(set_mode_cmd),
861 0, 0, SERETRIES, SETIMEOUT, NULL, 0);
862 return (error);
863 }
864
865
/*
 * (Re)initialize the adaptor: set the receive mode, program the
 * current station address, register the protocols selected in
 * sc->protos, and - if the interface is coming up - mark it RUNNING
 * and kick both workqueues.  Called from se_ioctl() and se_reset().
 */
static int
se_init(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct scsi_ctron_ether_generic set_addr_cmd;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error;

	/* mode 1 with the larger MAX_SNAP length when promiscuous. */
	if (ifp->if_flags & IFF_PROMISC) {
		error = se_set_mode(sc, MAX_SNAP, 1);
	}
	else
		error = se_set_mode(sc, ETHERMTU + sizeof(struct ether_header),
		    0);
	if (error != 0)
		return (error);

	/* Program the interface's link-level address into the device. */
	PROTOCMD(ctron_ether_set_addr, set_addr_cmd);
	_lto2b(ETHER_ADDR_LEN, set_addr_cmd.length);
	memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
	error = se_scsipi_cmd(sc->sc_periph,
	    (void *)&set_addr_cmd, sizeof(set_addr_cmd),
	    enaddr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL,
	    XS_CTL_DATA_OUT);
	if (error != 0)
		return (error);

	/* Register each selected ethertype with the adaptor. */
	if ((sc->protos & PROTO_IP) &&
	    (error = se_add_proto(sc, ETHERTYPE_IP)) != 0)
		return (error);
	if ((sc->protos & PROTO_ARP) &&
	    (error = se_add_proto(sc, ETHERTYPE_ARP)) != 0)
		return (error);
	if ((sc->protos & PROTO_REVARP) &&
	    (error = se_add_proto(sc, ETHERTYPE_REVARP)) != 0)
		return (error);
#ifdef NETATALK
	if ((sc->protos & PROTO_AT) &&
	    (error = se_add_proto(sc, ETHERTYPE_ATALK)) != 0)
		return (error);
	if ((sc->protos & PROTO_AARP) &&
	    (error = se_add_proto(sc, ETHERTYPE_AARP)) != 0)
		return (error);
#endif

	/* Interface is UP but not yet RUNNING: start receive and send. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP)) == IFF_UP) {
		ifp->if_flags |= IFF_RUNNING;
		mutex_enter(&sc->sc_iflock);
		if (!sc->sc_recv_work_pending) {
			sc->sc_recv_work_pending = true;
			workqueue_enqueue(sc->sc_recv_wq, &sc->sc_recv_work,
			    NULL);
		}
		mutex_exit(&sc->sc_iflock);
		ifp->if_flags &= ~IFF_OACTIVE;
		mutex_enter(&sc->sc_iflock);
		if (!sc->sc_send_work_pending) {
			sc->sc_send_work_pending = true;
			workqueue_enqueue(sc->sc_send_wq, &sc->sc_send_work,
			    NULL);
		}
		mutex_exit(&sc->sc_iflock);
	}
	return (error);
}
931
932 static int
933 se_set_multi(struct se_softc *sc, uint8_t *addr)
934 {
935 struct scsi_ctron_ether_generic set_multi_cmd;
936 int error;
937
938 if (sc->sc_debug)
939 printf("%s: set_set_multi: %s\n", device_xname(sc->sc_dev),
940 ether_sprintf(addr));
941
942 PROTOCMD(ctron_ether_set_multi, set_multi_cmd);
943 _lto2b(ETHER_ADDR_LEN, set_multi_cmd.length);
944 error = se_scsipi_cmd(sc->sc_periph,
945 (void *)&set_multi_cmd, sizeof(set_multi_cmd),
946 addr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL, XS_CTL_DATA_OUT);
947 return (error);
948 }
949
950 static int
951 se_remove_multi(struct se_softc *sc, uint8_t *addr)
952 {
953 struct scsi_ctron_ether_generic remove_multi_cmd;
954 int error;
955
956 if (sc->sc_debug)
957 printf("%s: se_remove_multi: %s\n", device_xname(sc->sc_dev),
958 ether_sprintf(addr));
959
960 PROTOCMD(ctron_ether_remove_multi, remove_multi_cmd);
961 _lto2b(ETHER_ADDR_LEN, remove_multi_cmd.length);
962 error = se_scsipi_cmd(sc->sc_periph,
963 (void *)&remove_multi_cmd, sizeof(remove_multi_cmd),
964 addr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL, XS_CTL_DATA_OUT);
965 return (error);
966 }
967
#if 0 /* not used --thorpej */
/*
 * Walk the interface's multicast list, adding or removing each entry
 * on the device.  Returns ENODEV for address ranges, which this
 * hardware cannot express.
 *
 * Fix: the error return inside the loop previously left ETHER_LOCK
 * held; it now unlocks on that path too.
 */
static int
sc_set_all_multi(struct se_softc *sc, int set)
{
	int error = 0;
	uint8_t *addr;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (ETHER_CMP(enm->enm_addrlo, enm->enm_addrhi)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			/* We have no way of adding a range to this device.
			 * stepping through all addresses in the range is
			 * typically not possible. The only real alternative
			 * is to go into promiscuous mode and filter by hand.
			 */
			ETHER_UNLOCK(ec);
			return (ENODEV);

		}

		addr = enm->enm_addrlo;
		if ((error = set ? se_set_multi(sc, addr) :
		    se_remove_multi(sc, addr)) != 0) {
			ETHER_UNLOCK(ec);
			return (error);
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	return (error);
}
#endif /* not used */
1011
/*
 * Quiesce the device: stop scheduling receive polls, wait for any
 * in-flight workqueue items to finish, then kill pending SCSI xfers.
 * Called when the interface goes down and from sedetach().
 */
static void
se_stop(struct se_softc *sc)
{

	/* Don't schedule any reads */
	callout_halt(&sc->sc_recv_ch, &sc->sc_iflock);

	/* Wait for the workqueues to finish */
	mutex_enter(&sc->sc_iflock);
	workqueue_wait(sc->sc_recv_wq, &sc->sc_recv_work);
	workqueue_wait(sc->sc_send_wq, &sc->sc_send_work);
	mutex_exit(&sc->sc_iflock);

	/* Abort any scsi cmds in progress */
	mutex_enter(chan_mtx(sc->sc_periph->periph_channel));
	scsipi_kill_pending(sc->sc_periph);
	mutex_exit(chan_mtx(sc->sc_periph->periph_channel));
}
1030
1031
1032 /*
1033 * Process an ioctl request.
1034 */
1035 static int
1036 se_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1037 {
1038 struct se_softc *sc = ifp->if_softc;
1039 struct ifaddr *ifa = (struct ifaddr *)data;
1040 struct ifreq *ifr = (struct ifreq *)data;
1041 struct sockaddr *sa;
1042 int error = 0;
1043
1044
1045 switch (cmd) {
1046
1047 case SIOCINITIFADDR:
1048 mutex_enter(&sc->sc_iflock);
1049 if ((error = se_enable(sc)) != 0)
1050 break;
1051 ifp->if_flags |= IFF_UP;
1052 mutex_exit(&sc->sc_iflock);
1053
1054 if ((error = se_set_media(sc, CMEDIA_AUTOSENSE)) != 0)
1055 break;
1056
1057 switch (ifa->ifa_addr->sa_family) {
1058 #ifdef INET
1059 case AF_INET:
1060 sc->protos |= (PROTO_IP | PROTO_ARP | PROTO_REVARP);
1061 if ((error = se_init(sc)) != 0)
1062 break;
1063 arp_ifinit(ifp, ifa);
1064 break;
1065 #endif
1066 #ifdef NETATALK
1067 case AF_APPLETALK:
1068 sc->protos |= (PROTO_AT | PROTO_AARP);
1069 if ((error = se_init(sc)) != 0)
1070 break;
1071 break;
1072 #endif
1073 default:
1074 error = se_init(sc);
1075 break;
1076 }
1077 break;
1078
1079
1080 case SIOCSIFFLAGS:
1081 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1082 break;
1083 /* XXX re-use ether_ioctl() */
1084 switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
1085 case IFF_RUNNING:
1086 /*
1087 * If interface is marked down and it is running, then
1088 * stop it.
1089 */
1090 se_stop(sc);
1091 mutex_enter(&sc->sc_iflock);
1092 ifp->if_flags &= ~IFF_RUNNING;
1093 se_disable(sc);
1094 mutex_exit(&sc->sc_iflock);
1095 break;
1096 case IFF_UP:
1097 /*
1098 * If interface is marked up and it is stopped, then
1099 * start it.
1100 */
1101 mutex_enter(&sc->sc_iflock);
1102 error = se_enable(sc);
1103 mutex_exit(&sc->sc_iflock);
1104 if (error)
1105 break;
1106 error = se_init(sc);
1107 break;
1108 default:
1109 /*
1110 * Reset the interface to pick up changes in any other
1111 * flags that affect hardware registers.
1112 */
1113 if (sc->sc_enabled)
1114 error = se_init(sc);
1115 break;
1116 }
1117 #ifdef SEDEBUG
1118 if (ifp->if_flags & IFF_DEBUG)
1119 sc->sc_debug = 1;
1120 else
1121 sc->sc_debug = 0;
1122 #endif
1123 break;
1124
1125 case SIOCADDMULTI:
1126 case SIOCDELMULTI:
1127 mutex_enter(&sc->sc_iflock);
1128 sa = sockaddr_dup(ifreq_getaddr(cmd, ifr), M_WAITOK);
1129 mutex_exit(&sc->sc_iflock);
1130 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
1131 if (ifp->if_flags & IFF_RUNNING) {
1132 error = (cmd == SIOCADDMULTI) ?
1133 se_set_multi(sc, sa->sa_data) :
1134 se_remove_multi(sc, sa->sa_data);
1135 } else
1136 error = 0;
1137 }
1138 mutex_enter(&sc->sc_iflock);
1139 sockaddr_free(sa);
1140 mutex_exit(&sc->sc_iflock);
1141 break;
1142
1143 default:
1144
1145 error = ether_ioctl(ifp, cmd, data);
1146 break;
1147 }
1148
1149 return (error);
1150 }
1151
1152 /*
1153 * Enable the network interface.
1154 */
1155 int
1156 se_enable(struct se_softc *sc)
1157 {
1158 struct scsipi_periph *periph = sc->sc_periph;
1159 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
1160 int error = 0;
1161
1162 if (sc->sc_enabled == 0) {
1163 if ((error = scsipi_adapter_addref(adapt)) == 0)
1164 sc->sc_enabled = 1;
1165 else
1166 aprint_error_dev(sc->sc_dev, "device enable failed\n");
1167 }
1168 return (error);
1169 }
1170
1171 /*
1172 * Disable the network interface.
1173 */
1174 void
1175 se_disable(struct se_softc *sc)
1176 {
1177 struct scsipi_periph *periph = sc->sc_periph;
1178 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
1179
1180 if (sc->sc_enabled != 0) {
1181 scsipi_adapter_delref(adapt);
1182 sc->sc_enabled = 0;
1183 }
1184 }
1185
#define SEUNIT(z) (minor(z))	/* unit number == minor number */
/*
 * open the device.
 *
 * Takes a reference on the underlying scsipi adapter and marks the
 * periph open so userland can issue generic scsi ioctls.
 */
int
seopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
	int unit, error;
	struct se_softc *sc;
	struct scsipi_periph *periph;
	struct scsipi_adapter *adapt;

	unit = SEUNIT(dev);
	sc = device_lookup_private(&se_cd, unit);
	if (sc == NULL)
		return (ENXIO);

	periph = sc->sc_periph;
	adapt = periph->periph_channel->chan_adapter;

	if ((error = scsipi_adapter_addref(adapt)) != 0)
		return (error);

	SC_DEBUG(periph, SCSIPI_DB1,
	    ("scopen: dev=0x%"PRIx64" (unit %d (of %d))\n", dev, unit,
	    se_cd.cd_ndevs));

	periph->periph_flags |= PERIPH_OPEN;

	SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
	return (0);
}
1218
/*
 * close the device.. only called if we are the LAST
 * occurrence of an open device
 *
 * NOTE(review): unlike seopen(), the device_lookup_private() result
 * is not NULL-checked here; this relies on close only ever following
 * a successful open.
 */
int
seclose(dev_t dev, int flag, int fmt, struct lwp *l)
{
	struct se_softc *sc = device_lookup_private(&se_cd, SEUNIT(dev));
	struct scsipi_periph *periph = sc->sc_periph;
	struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;

	SC_DEBUG(sc->sc_periph, SCSIPI_DB1, ("closing\n"));

	/* Let any queued commands finish before dropping our reference. */
	scsipi_wait_drain(periph);

	scsipi_adapter_delref(adapt);
	periph->periph_flags &= ~PERIPH_OPEN;

	return (0);
}
1239
1240 /*
1241 * Perform special action on behalf of the user
1242 * Only does generic scsi ioctls.
1243 */
1244 int
1245 seioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1246 {
1247 struct se_softc *sc = device_lookup_private(&se_cd, SEUNIT(dev));
1248
1249 return (scsipi_do_ioctl(sc->sc_periph, dev, cmd, addr, flag, l));
1250 }
1251