1 /* $NetBSD: if_se.c,v 1.109 2020/07/22 17:17:36 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 1997 Ian W. Dall <ian.dall@dsto.defence.gov.au>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Ian W. Dall.
18 * 4. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Driver for Cabletron EA41x scsi ethernet adaptor.
35 *
36 * Written by Ian Dall <ian.dall@dsto.defence.gov.au> Feb 3, 1997
37 *
38 * Acknowledgement: Thanks are due to Philip L. Budne <budd@cs.bu.edu>
39 * who reverse engineered the EA41x. In developing this code,
40 * Phil's userland daemon "etherd" was referred to extensively in lieu
41 * of accurate documentation for the device.
42 *
43 * This is a weird device! It hardly conforms to the scsi spec at
44 * all. About the only standard command supported is inquiry. Most
45 * commands are 6 bytes long, but the recv data is only 1 byte. Data
46 * must be received by periodically polling the device with the recv
47 * command.
48 *
49 * This driver is also a bit unusual. It must look like a network
50 * interface and it must also appear to be a scsi device to the scsi
51 * system. Hence there are cases where there are two entry points, e.g.
52 * sedone is called from the scsi subsystem and se_ifstart from
53 * the network interface subsystem. In addition, to facilitate scsi
54 * commands issued by userland programs, there are open, close and
55 * ioctl entry points. This allows a user program to, for example,
56 * display the ea41x stats and download new code into the adaptor ---
57 * functions which can't be performed through the ifconfig interface.
58 * Normal operation does not require any special userland program.
59 */
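/*
 * As an illustration of that userland path, a minimal sketch follows.
 * The SCIOCCOMMAND ioctl and scsireq_t come from the standard
 * <sys/scsiio.h> interface; the /dev/se0 node name and the use of a
 * plain INQUIRY (about the only standard command the device supports)
 * are merely illustrative assumptions:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/scsiio.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	scsireq_t req;
 *	char inq[36];
 *	int fd = open("/dev/se0", O_RDWR);
 *
 *	memset(&req, 0, sizeof(req));
 *	req.cmd[0] = 0x12;		// INQUIRY
 *	req.cmd[4] = sizeof(inq);	// allocation length
 *	req.cmdlen = 6;
 *	req.databuf = inq;
 *	req.datalen = sizeof(inq);
 *	req.flags = SCCMD_READ;
 *	req.timeout = 10000;		// milliseconds
 *	if (fd == -1 || ioctl(fd, SCIOCCOMMAND, &req) == -1)
 *		err(1, "SCIOCCOMMAND on /dev/se0");
 */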
60
61 #include <sys/cdefs.h>
62 __KERNEL_RCSID(0, "$NetBSD: if_se.c,v 1.109 2020/07/22 17:17:36 riastradh Exp $");
63
64 #ifdef _KERNEL_OPT
65 #include "opt_inet.h"
66 #include "opt_net_mpsafe.h"
67 #include "opt_atalk.h"
68 #endif
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/callout.h>
73 #include <sys/syslog.h>
74 #include <sys/kernel.h>
75 #include <sys/file.h>
76 #include <sys/stat.h>
77 #include <sys/ioctl.h>
78 #include <sys/buf.h>
79 #include <sys/uio.h>
80 #include <sys/malloc.h>
81 #include <sys/errno.h>
82 #include <sys/device.h>
83 #include <sys/disklabel.h>
84 #include <sys/disk.h>
85 #include <sys/proc.h>
86 #include <sys/conf.h>
87 #include <sys/mutex.h>
88 #include <sys/workqueue.h>
89
90 #include <dev/scsipi/scsipi_all.h>
91 #include <dev/scsipi/scsi_ctron_ether.h>
92 #include <dev/scsipi/scsiconf.h>
93
94 #include <sys/mbuf.h>
95
96 #include <sys/socket.h>
97 #include <net/if.h>
98 #include <net/if_dl.h>
99 #include <net/if_ether.h>
100 #include <net/if_media.h>
101 #include <net/bpf.h>
102
103 #ifdef INET
104 #include <netinet/in.h>
105 #include <netinet/if_inarp.h>
106 #endif
107
108
109 #ifdef NETATALK
110 #include <netatalk/at.h>
111 #endif
112
113
114 #define SETIMEOUT 1000
115 #define SEOUTSTANDING 4
116 #define SERETRIES 4
117 #define SE_PREFIX 4
118 #define ETHER_CRC 4
119 #define SEMINSIZE 60
120
121 /* Make this big enough for an ETHERMTU packet in promiscuous mode. */
122 #define MAX_SNAP (ETHERMTU + sizeof(struct ether_header) + \
123 SE_PREFIX + ETHER_CRC)
124
125 /* 10 full-length packets appear to be the most ever returned; 16k is enough. */
126 #define RBUF_LEN (16 * 1024)
127
128 /* Tuning parameters:
129 * The EA41x only returns a maximum of 10 packets (regardless of size).
130 * We attempt to adapt the polling interval so that each read returns
131 * about RDATA_GOAL packets.
132 */
133 #define RDATA_MAX 10
134 #define RDATA_GOAL 8
135
136 /* se_poll and se_poll0 are the normal polling interval and the minimum
137 * polling interval respectively. se_poll0 should be chosen so that at
138 * maximum ethernet speed, we will read nearly RDATA_MAX packets per poll.
139 * se_poll should be chosen for a reasonable maximum latency.
140 * In practice, if we are being saturated with minimum-length packets, we
141 * can't poll fast enough. Polling with zero delay actually
142 * worsens performance. se_poll0 is enforced to be always at least 1 tick.
143 */
144 #define SE_POLL 40 /* default in milliseconds */
145 #define SE_POLL0 10 /* default in milliseconds */
146 int se_poll = 0; /* Delay in ticks set at attach time */
147 int se_poll0 = 0;
148 #ifdef SE_DEBUG
149 int se_max_received = 0; /* Instrumentation */
150 #endif
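/*
 * The adaptation itself is done in sedone(): after each successful read
 * the next delay is computed as
 *
 *	ntimeo = (previous delay * RDATA_GOAL) / packets_read
 *
 * and clamped to the [se_poll0, se_poll] range; a read that returns no
 * packets falls back to se_poll, and one that returns RDATA_MAX or more
 * drops straight to se_poll0.  Worked example with illustrative numbers:
 * at hz = 100 the defaults give se_poll = 4 ticks and se_poll0 = 1 tick;
 * if the previous delay was 4 ticks and the read returned 2 packets, the
 * raw result is 4 * 8 / 2 = 16 ticks, which the clamp cuts back to
 * se_poll = 4.
 */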
151
152 #define PROTOCMD(p, d) \
153 ((d) = (p))
154
155 #define PROTOCMD_DECL(name) \
156 static const struct scsi_ctron_ether_generic name
157
158 #define PROTOCMD_DECL_SPECIAL(name) \
159 static const struct __CONCAT(scsi_, name) name
160
161 /* Command initializers for commands using scsi_ctron_ether_generic */
162 PROTOCMD_DECL(ctron_ether_send) = {CTRON_ETHER_SEND, 0, {0,0}, 0};
163 PROTOCMD_DECL(ctron_ether_add_proto) = {CTRON_ETHER_ADD_PROTO, 0, {0,0}, 0};
164 PROTOCMD_DECL(ctron_ether_get_addr) = {CTRON_ETHER_GET_ADDR, 0, {0,0}, 0};
165 PROTOCMD_DECL(ctron_ether_set_media) = {CTRON_ETHER_SET_MEDIA, 0, {0,0}, 0};
166 PROTOCMD_DECL(ctron_ether_set_addr) = {CTRON_ETHER_SET_ADDR, 0, {0,0}, 0};
167 PROTOCMD_DECL(ctron_ether_set_multi) = {CTRON_ETHER_SET_MULTI, 0, {0,0}, 0};
168 PROTOCMD_DECL(ctron_ether_remove_multi) =
169 {CTRON_ETHER_REMOVE_MULTI, 0, {0,0}, 0};
170
171 /* Command initializers for commands using their own structures */
172 PROTOCMD_DECL_SPECIAL(ctron_ether_recv) = {CTRON_ETHER_RECV};
173 PROTOCMD_DECL_SPECIAL(ctron_ether_set_mode) =
174 {CTRON_ETHER_SET_MODE, 0, {0,0}, 0};
175
176 struct se_softc {
177 device_t sc_dev;
178 struct ethercom sc_ethercom; /* Ethernet common part */
179 struct scsipi_periph *sc_periph;/* contains our targ, lun, etc. */
180
181 struct callout sc_recv_ch;
182 struct kmutex sc_iflock;
183 struct if_percpuq *sc_ipq;
184 struct workqueue *sc_recv_wq, *sc_send_wq;
185 struct work sc_recv_work, sc_send_work;
186 int sc_recv_work_pending, sc_send_work_pending;
187
188 char *sc_tbuf;
189 char *sc_rbuf;
190 int protos;
191 #define PROTO_IP 0x01
192 #define PROTO_ARP 0x02
193 #define PROTO_REVARP 0x04
194 #define PROTO_AT 0x08
195 #define PROTO_AARP 0x10
196 int sc_debug;
197 int sc_flags;
198 int sc_last_timeout;
199 int sc_enabled;
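/*
 * How far seattach() got, consulted by sedetach() so that only the
 * resources actually set up are torn down: 0 = attach just begun,
 * 1 = Tx/Rx buffers allocated, 2 = receive workqueue created,
 * 3 = send workqueue created, 4 = interface attached and registered.
 */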
200 int sc_attach_state;
201 };
202
203 static int sematch(device_t, cfdata_t, void *);
204 static void seattach(device_t, device_t, void *);
205 static int sedetach(device_t, int);
206
207 static void se_ifstart(struct ifnet *);
208
209 static void sedone(struct scsipi_xfer *, int);
210 static int se_ioctl(struct ifnet *, u_long, void *);
211 static void sewatchdog(struct ifnet *);
212
213 #if 0
214 static inline uint16_t ether_cmp(void *, void *);
215 #endif
216 static void se_recv_callout(void *);
217 static void se_recv_worker(struct work *wk, void *cookie);
218 static void se_recv(struct se_softc *);
219 static struct mbuf *se_get(struct se_softc *, char *, int);
220 static int se_read(struct se_softc *, char *, int);
221 static void se_reset(struct se_softc *);
222 static int se_add_proto(struct se_softc *, int);
223 static int se_get_addr(struct se_softc *, uint8_t *);
224 static int se_set_media(struct se_softc *, int);
225 static int se_init(struct se_softc *);
226 static int se_set_multi(struct se_softc *, uint8_t *);
227 static int se_remove_multi(struct se_softc *, uint8_t *);
228 #if 0
229 static int sc_set_all_multi(struct se_softc *, int);
230 #endif
231 static void se_stop(struct se_softc *);
232 static inline int se_scsipi_cmd(struct scsipi_periph *periph,
233 struct scsipi_generic *scsipi_cmd,
234 int cmdlen, u_char *data_addr, int datalen,
235 int retries, int timeout, struct buf *bp,
236 int flags);
237 static void se_send_worker(struct work *wk, void *cookie);
238 static int se_set_mode(struct se_softc *, int, int);
239
240 int se_enable(struct se_softc *);
241 void se_disable(struct se_softc *);
242
243 CFATTACH_DECL_NEW(se, sizeof(struct se_softc),
244 sematch, seattach, sedetach, NULL);
245
246 extern struct cfdriver se_cd;
247
248 dev_type_open(seopen);
249 dev_type_close(seclose);
250 dev_type_ioctl(seioctl);
251
252 const struct cdevsw se_cdevsw = {
253 .d_open = seopen,
254 .d_close = seclose,
255 .d_read = noread,
256 .d_write = nowrite,
257 .d_ioctl = seioctl,
258 .d_stop = nostop,
259 .d_tty = notty,
260 .d_poll = nopoll,
261 .d_mmap = nommap,
262 .d_kqfilter = nokqfilter,
263 .d_discard = nodiscard,
264 .d_flag = D_OTHER | D_MPSAFE
265 };
266
267 const struct scsipi_periphsw se_switch = {
268 NULL, /* Use default error handler */
269 NULL, /* have no queue */
270 NULL, /* have no async handler */
271 sedone, /* deal with send/recv completion */
272 };
273
274 const struct scsipi_inquiry_pattern se_patterns[] = {
275 {T_PROCESSOR, T_FIXED,
276 "CABLETRN", "EA412", ""},
277 {T_PROCESSOR, T_FIXED,
278 "Cabletrn", "EA412", ""},
279 };
280
281 #if 0
282 /*
283 * Compare two Ether/802 addresses for equality, inlined and
284 * unrolled for speed.
285 * Note: use this like memcmp()
286 */
287 static inline uint16_t
288 ether_cmp(void *one, void *two)
289 {
290 uint16_t *a = (uint16_t *) one;
291 uint16_t *b = (uint16_t *) two;
292 uint16_t diff;
293
294 diff = (a[0] - b[0]) | (a[1] - b[1]) | (a[2] - b[2]);
295
296 return (diff);
297 }
298
299 #define ETHER_CMP ether_cmp
300 #endif
301
302 static int
303 sematch(device_t parent, cfdata_t match, void *aux)
304 {
305 struct scsipibus_attach_args *sa = aux;
306 int priority;
307
308 (void)scsipi_inqmatch(&sa->sa_inqbuf,
309 se_patterns, sizeof(se_patterns) / sizeof(se_patterns[0]),
310 sizeof(se_patterns[0]), &priority);
311 return (priority);
312 }
313
314 /*
315 * The routine called by the low level scsi routine when it discovers
316 * a device suitable for this driver.
317 */
318 static void
319 seattach(device_t parent, device_t self, void *aux)
320 {
321 struct se_softc *sc = device_private(self);
322 struct scsipibus_attach_args *sa = aux;
323 struct scsipi_periph *periph = sa->sa_periph;
324 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
325 uint8_t myaddr[ETHER_ADDR_LEN];
326 char wqname[MAXCOMLEN];
327 int rv;
328
329 sc->sc_dev = self;
330
331 printf("\n");
332 SC_DEBUG(periph, SCSIPI_DB2, ("seattach: "));
333
334 sc->sc_attach_state = 0;
335 callout_init(&sc->sc_recv_ch, CALLOUT_MPSAFE);
336 mutex_init(&sc->sc_iflock, MUTEX_DEFAULT, IPL_SOFTNET);
337
338 /*
339 * Store information needed to contact our base driver
340 */
341 sc->sc_periph = periph;
342 periph->periph_dev = sc->sc_dev;
343 periph->periph_switch = &se_switch;
344
345 se_poll = (SE_POLL * hz) / 1000;
346 se_poll = se_poll? se_poll: 1;
347 se_poll0 = (SE_POLL0 * hz) / 1000;
348 se_poll0 = se_poll0? se_poll0: 1;
349
350 /*
351 * Allocate the send and receive buffers
352 */
353 sc->sc_tbuf = malloc(ETHERMTU + sizeof(struct ether_header),
354 M_DEVBUF, M_WAITOK);
355 sc->sc_rbuf = malloc(RBUF_LEN, M_DEVBUF, M_WAITOK);
356
357 /* Initialize ifnet structure. */
358 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
359 ifp->if_softc = sc;
360 ifp->if_start = se_ifstart;
361 ifp->if_ioctl = se_ioctl;
362 ifp->if_watchdog = sewatchdog;
363 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
364 ifp->if_extflags = IFEF_MPSAFE;
365 IFQ_SET_READY(&ifp->if_snd);
366
367 se_get_addr(sc, myaddr);
368 sc->sc_attach_state = 1;
369
370 /* Attach the interface. */
371 rv = if_initialize(ifp);
372 if (rv != 0) {
373 sedetach(sc->sc_dev, 0);
374 return; /* Error */
375 }
376
377 snprintf(wqname, sizeof(wqname), "%sRx", device_xname(sc->sc_dev));
378 rv = workqueue_create(&sc->sc_recv_wq, wqname, se_recv_worker, sc,
379 PRI_SOFTNET, IPL_NET, WQ_MPSAFE);
380 if (rv != 0) {
381 aprint_error_dev(sc->sc_dev,
382 "unable to create recv Rx workqueue\n");
383 sedetach(sc->sc_dev, 0);
384 return; /* Error */
385 }
386 sc->sc_recv_work_pending = false;
387 sc->sc_attach_state = 2;
388
389 snprintf(wqname, sizeof(wqname), "%sTx", device_xname(sc->sc_dev));
390 rv = workqueue_create(&sc->sc_send_wq, wqname, se_send_worker, ifp,
391 PRI_SOFTNET, IPL_NET, WQ_MPSAFE);
392 if (rv != 0) {
393 aprint_error_dev(sc->sc_dev,
394 "unable to create send Tx workqueue\n");
395 sedetach(sc->sc_dev, 0);
396 return; /* Error */
397 }
398 sc->sc_send_work_pending = false;
399 sc->sc_attach_state = 3;
400
401 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
402 ether_ifattach(ifp, myaddr);
403 if_register(ifp);
404 sc->sc_attach_state = 4;
405 }
406
407 static int
408 sedetach(device_t self, int flags)
409 {
410 struct se_softc *sc = device_private(self);
411 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
412
413 switch(sc->sc_attach_state) {
414 case 4:
415 se_stop(sc);
416 mutex_enter(&sc->sc_iflock);
417 ifp->if_flags &= ~IFF_RUNNING;
418 se_disable(sc);
419 callout_halt(&sc->sc_recv_ch, NULL);
420 ether_ifdetach(ifp);
421 if_detach(ifp);
422 mutex_exit(&sc->sc_iflock);
423 if_percpuq_destroy(sc->sc_ipq);
424 /*FALLTHROUGH*/
425 case 3:
426 workqueue_destroy(sc->sc_send_wq);
427 /*FALLTHROUGH*/
428 case 2:
429 workqueue_destroy(sc->sc_recv_wq);
430 /*FALLTHROUGH*/
431 case 1:
432 free(sc->sc_rbuf, M_DEVBUF);
433 free(sc->sc_tbuf, M_DEVBUF);
434 callout_destroy(&sc->sc_recv_ch);
435 mutex_destroy(&sc->sc_iflock);
436 break;
437 default:
438 aprint_error_dev(sc->sc_dev, "detach failed (state %d)\n",
439 sc->sc_attach_state);
440 return 1;
441 break;
442 }
443 return 0;
444 }
445
446 /*
447 * Send a command to the device
448 */
449 static inline int
450 se_scsipi_cmd(struct scsipi_periph *periph, struct scsipi_generic *cmd,
451 int cmdlen, u_char *data_addr, int datalen, int retries, int timeout,
452 struct buf *bp, int flags)
453 {
454 int error;
455
456 error = scsipi_command(periph, cmd, cmdlen, data_addr,
457 datalen, retries, timeout, bp, flags);
458 return (error);
459 }
460
461 /*
462 * Start routine, called from the network subsystem.
463 */
464 static void
465 se_ifstart(struct ifnet *ifp)
466 {
467 struct se_softc *sc = ifp->if_softc;
468 int i = 100;
469
470 mutex_enter(&sc->sc_iflock);
471 while (i && sc->sc_send_work_pending == true) {
472 i--;
473 delay(10);
474 }
475 if (i) {
476 sc->sc_send_work_pending = true;
477 workqueue_enqueue(sc->sc_send_wq, &sc->sc_send_work, NULL);
478 } else
479 if_statinc(ifp, if_oerrors);
480 mutex_exit(&sc->sc_iflock);
481 }
482
483 /*
484 * Transmit worker: drain the output queue and send each packet to the device.
485 */
486 static void
487 se_send_worker(struct work *wk, void *cookie)
488 {
489 struct ifnet *ifp = cookie;
490 struct se_softc *sc = ifp->if_softc;
491 struct scsi_ctron_ether_generic send_cmd;
492 struct mbuf *m, *m0;
493 int len, error;
494 u_char *cp;
495
496 mutex_enter(&sc->sc_iflock);
497 sc->sc_send_work_pending = false;
498 mutex_exit(&sc->sc_iflock);
499
500 KASSERT(if_is_mpsafe(ifp));
501
502 /* Don't transmit if interface is busy or not running */
503 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
504 return;
505
506 while (1) {
507 IFQ_DEQUEUE(&ifp->if_snd, m0);
508 if (m0 == 0)
509 break;
510
511 /* If BPF is listening on this interface, let it see the
512 * packet before we commit it to the wire.
513 */
514 bpf_mtap(ifp, m0, BPF_D_OUT);
515
516 /* We need to use m->m_pkthdr.len, so require the header */
517 if ((m0->m_flags & M_PKTHDR) == 0)
518 panic("se_send_worker: no header mbuf");
519 len = m0->m_pkthdr.len;
520
521 /* Mark the interface busy. */
522 ifp->if_flags |= IFF_OACTIVE;
523
524 /* Chain; copy into linear buffer allocated at attach time. */
525 cp = sc->sc_tbuf;
526 for (m = m0; m != NULL; ) {
527 memcpy(cp, mtod(m, u_char *), m->m_len);
528 cp += m->m_len;
529 m = m0 = m_free(m);
530 }
531 if (len < SEMINSIZE) {
532 #ifdef SEDEBUG
533 if (sc->sc_debug)
534 printf("se: packet size %d (%zu) < %d\n", len,
535 cp - (u_char *)sc->sc_tbuf, SEMINSIZE);
536 #endif
537 memset(cp, 0, SEMINSIZE - len);
538 len = SEMINSIZE;
539 }
540
541 /* Fill out SCSI command. */
542 PROTOCMD(ctron_ether_send, send_cmd);
543 _lto2b(len, send_cmd.length);
544
545 /* Send command to device. */
546 error = se_scsipi_cmd(sc->sc_periph,
547 (void *)&send_cmd, sizeof(send_cmd),
548 sc->sc_tbuf, len, SERETRIES,
549 SETIMEOUT, NULL, XS_CTL_NOSLEEP | XS_CTL_DATA_OUT);
550 if (error) {
551 aprint_error_dev(sc->sc_dev,
552 "not queued, error %d\n", error);
553 if_statinc(ifp, if_oerrors);
554 ifp->if_flags &= ~IFF_OACTIVE;
555 } else
556 if_statinc(ifp, if_opackets);
557 }
558 }
559
560
561 /*
562 * Called from the scsibus layer via our scsi device switch.
563 */
564 static void
565 sedone(struct scsipi_xfer *xs, int error)
566 {
567 struct se_softc *sc = device_private(xs->xs_periph->periph_dev);
568 struct scsipi_generic *cmd = xs->cmd;
569 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
570
571 if (IS_SEND(cmd)) {
572 ifp->if_flags &= ~IFF_OACTIVE;
573 } else if (IS_RECV(cmd)) {
574 /* RECV complete */
575 /* pass data up. reschedule a recv */
576 /* scsipi_free_xs will call start. Harmless. */
577 if (error) {
578 /* Reschedule after a delay */
579 callout_reset(&sc->sc_recv_ch, se_poll,
580 se_recv_callout, (void *)sc);
581 } else {
582 int n, ntimeo;
583 n = se_read(sc, xs->data, xs->datalen - xs->resid);
584 #ifdef SE_DEBUG
585 if (n > se_max_received)
586 se_max_received = n;
587 #endif
588 if (n == 0)
589 ntimeo = se_poll;
590 else if (n >= RDATA_MAX)
591 ntimeo = se_poll0;
592 else {
593 ntimeo = sc->sc_last_timeout;
594 ntimeo = (ntimeo * RDATA_GOAL)/n;
595 ntimeo = (ntimeo < se_poll0?
596 se_poll0: ntimeo);
597 ntimeo = (ntimeo > se_poll?
598 se_poll: ntimeo);
599 }
600 sc->sc_last_timeout = ntimeo;
601 callout_reset(&sc->sc_recv_ch, ntimeo,
602 se_recv_callout, (void *)sc);
603 }
604 }
605 }
606
607 /*
608 * Set up a receive command by queuing the work.
609 * Called via the receive callout; se_init() queues the first receive directly.
610 */
611 static void
612 se_recv_callout(void *v)
613 {
614 /* do a recv command */
615 struct se_softc *sc = (struct se_softc *) v;
616
617 if (sc->sc_enabled == 0)
618 return;
619
620 mutex_enter(&sc->sc_iflock);
621 if (sc->sc_recv_work_pending == true) {
622 callout_reset(&sc->sc_recv_ch, se_poll,
623 se_recv_callout, (void *)sc);
624 return;
625 }
626
627 sc->sc_recv_work_pending = true;
628 workqueue_enqueue(sc->sc_recv_wq, &sc->sc_recv_work, NULL);
629 mutex_exit(&sc->sc_iflock);
630 }
631
632 /*
633 * Receive workqueue worker: clear the pending flag and issue the receive.
634 */
635 static void
636 se_recv_worker(struct work *wk, void *cookie)
637 {
638 struct se_softc *sc = (struct se_softc *) cookie;
639
640 mutex_enter(&sc->sc_iflock);
641 sc->sc_recv_work_pending = false;
642 mutex_exit(&sc->sc_iflock);
643 se_recv(sc);
644
645 }
646
647 /*
648 * Do the actual work of receiving data.
649 */
650 static void
651 se_recv(struct se_softc *sc)
652 {
653 struct scsi_ctron_ether_recv recv_cmd;
654 int error;
655
656 /* do a recv command */
657 PROTOCMD(ctron_ether_recv, recv_cmd);
658
659 error = se_scsipi_cmd(sc->sc_periph,
660 (void *)&recv_cmd, sizeof(recv_cmd),
661 sc->sc_rbuf, RBUF_LEN, SERETRIES, SETIMEOUT, NULL,
662 XS_CTL_NOSLEEP | XS_CTL_DATA_IN);
663 if (error)
664 callout_reset(&sc->sc_recv_ch, se_poll,
665 se_recv_callout, (void *)sc);
666 }
667
668 /*
669 * Copy the received data into mbufs.  When enough data remains to make
670 * a cluster worthwhile, copy into a cluster.
671 */
672 static struct mbuf *
673 se_get(struct se_softc *sc, char *data, int totlen)
674 {
675 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
676 struct mbuf *m, *m0, *newm;
677 int len;
678
679 MGETHDR(m0, M_DONTWAIT, MT_DATA);
680 if (m0 == 0)
681 return (0);
682 m_set_rcvif(m0, ifp);
683 m0->m_pkthdr.len = totlen;
684 len = MHLEN;
685 m = m0;
686
687 while (totlen > 0) {
688 if (totlen >= MINCLSIZE) {
689 MCLGET(m, M_DONTWAIT);
690 if ((m->m_flags & M_EXT) == 0)
691 goto bad;
692 len = MCLBYTES;
693 }
694
695 if (m == m0) {
696 char *newdata = (char *)
697 ALIGN(m->m_data + sizeof(struct ether_header)) -
698 sizeof(struct ether_header);
699 len -= newdata - m->m_data;
700 m->m_data = newdata;
701 }
702
703 m->m_len = len = uimin(totlen, len);
704 memcpy(mtod(m, void *), data, len);
705 data += len;
706
707 totlen -= len;
708 if (totlen > 0) {
709 MGET(newm, M_DONTWAIT, MT_DATA);
710 if (newm == 0)
711 goto bad;
712 len = MLEN;
713 m = m->m_next = newm;
714 }
715 }
716
717 return (m0);
718
719 bad:
720 m_freem(m0);
721 return (0);
722 }
723
724 /*
725 * Pass packets to higher levels.
726 */
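/*
 * Each receive command fills sc_rbuf with a sequence of records; the
 * format, as parsed below (derived from this routine rather than from
 * device documentation), is a 2-byte big-endian length followed by that
 * many bytes of packet data (ethernet header, payload and trailing
 * ETHER_CRC; in promiscuous mode an SE_PREFIX header is also present and
 * is trimmed off below).  A zero length field, or fewer than two bytes
 * remaining, ends the list.
 */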
727 static int
728 se_read(struct se_softc *sc, char *data, int datalen)
729 {
730 struct mbuf *m;
731 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
732 int n;
733
734 n = 0;
735 while (datalen >= 2) {
736 int len = _2btol(data);
737 data += 2;
738 datalen -= 2;
739
740 if (len == 0)
741 break;
742 #ifdef SEDEBUG
743 if (sc->sc_debug) {
744 printf("se_read: datalen = %d, packetlen = %d, proto = 0x%04x\n", datalen, len,
745 ntohs(((struct ether_header *)data)->ether_type));
746 }
747 #endif
748 if (len <= sizeof(struct ether_header) ||
749 len > MAX_SNAP) {
750 #ifdef SEDEBUG
751 printf("%s: invalid packet size %d; dropping\n",
752 device_xname(sc->sc_dev), len);
753 #endif
754 if_statinc(ifp, if_ierrors);
755 goto next_packet;
756 }
757
758 /* Don't need crc. Must keep ether header for BPF */
759 m = se_get(sc, data, len - ETHER_CRC);
760 if (m == 0) {
761 #ifdef SEDEBUG
762 if (sc->sc_debug)
763 printf("se_read: se_get returned null\n");
764 #endif
765 if_statinc(ifp, if_ierrors);
766 goto next_packet;
767 }
768 if ((ifp->if_flags & IFF_PROMISC) != 0) {
769 m_adj(m, SE_PREFIX);
770 }
771
772 /* Pass the packet up. */
773 if_percpuq_enqueue(sc->sc_ipq, m);
774
775 next_packet:
776 data += len;
777 datalen -= len;
778 n++;
779 }
780 return (n);
781 }
782
783
784 static void
785 sewatchdog(struct ifnet *ifp)
786 {
787 struct se_softc *sc = ifp->if_softc;
788
789 log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
790 if_statinc(ifp, if_oerrors);
791
792 se_reset(sc);
793 }
794
795 static void
796 se_reset(struct se_softc *sc)
797 {
798 #if 0
799 /* Maybe we don't *really* want to reset the entire bus
800 * because the ctron isn't working. We would like to send a
801 * "BUS DEVICE RESET" message, but don't think the ctron
802 * understands it.
803 */
804 se_scsipi_cmd(sc->sc_periph, 0, 0, 0, 0, SERETRIES, 2000, NULL,
805 XS_CTL_RESET);
806 #endif
807 se_init(sc);
808 }
809
810 static int
811 se_add_proto(struct se_softc *sc, int proto)
812 {
813 int error;
814 struct scsi_ctron_ether_generic add_proto_cmd;
815 uint8_t data[2];
816 _lto2b(proto, data);
817 #ifdef SEDEBUG
818 if (sc->sc_debug)
819 printf("se: adding proto 0x%02x%02x\n", data[0], data[1]);
820 #endif
821
822 PROTOCMD(ctron_ether_add_proto, add_proto_cmd);
823 _lto2b(sizeof(data), add_proto_cmd.length);
824 error = se_scsipi_cmd(sc->sc_periph,
825 (void *)&add_proto_cmd, sizeof(add_proto_cmd),
826 data, sizeof(data), SERETRIES, SETIMEOUT, NULL,
827 XS_CTL_DATA_OUT);
828 return (error);
829 }
830
831 static int
832 se_get_addr(struct se_softc *sc, uint8_t *myaddr)
833 {
834 int error;
835 struct scsi_ctron_ether_generic get_addr_cmd;
836
837 PROTOCMD(ctron_ether_get_addr, get_addr_cmd);
838 _lto2b(ETHER_ADDR_LEN, get_addr_cmd.length);
839 error = se_scsipi_cmd(sc->sc_periph,
840 (void *)&get_addr_cmd, sizeof(get_addr_cmd),
841 myaddr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL,
842 XS_CTL_DATA_IN);
843 printf("%s: ethernet address %s\n", device_xname(sc->sc_dev),
844 ether_sprintf(myaddr));
845 return (error);
846 }
847
848
849 static int
850 se_set_media(struct se_softc *sc, int type)
851 {
852 int error;
853 struct scsi_ctron_ether_generic set_media_cmd;
854
855 PROTOCMD(ctron_ether_set_media, set_media_cmd);
856 set_media_cmd.byte3 = type;
857 error = se_scsipi_cmd(sc->sc_periph,
858 (void *)&set_media_cmd, sizeof(set_media_cmd),
859 0, 0, SERETRIES, SETIMEOUT, NULL, 0);
860 return (error);
861 }
862
863 static int
864 se_set_mode(struct se_softc *sc, int len, int mode)
865 {
866 int error;
867 struct scsi_ctron_ether_set_mode set_mode_cmd;
868
869 PROTOCMD(ctron_ether_set_mode, set_mode_cmd);
870 set_mode_cmd.mode = mode;
871 _lto2b(len, set_mode_cmd.length);
872 error = se_scsipi_cmd(sc->sc_periph,
873 (void *)&set_mode_cmd, sizeof(set_mode_cmd),
874 0, 0, SERETRIES, SETIMEOUT, NULL, 0);
875 return (error);
876 }
877
878
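/*
 * Bring the adapter up: select the receive mode (promiscuous snap length
 * or normal MTU), program the station address, enable the protocol
 * filters that have been requested, and kick off the receive poll and
 * any pending transmit work.
 */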
879 static int
880 se_init(struct se_softc *sc)
881 {
882 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
883 struct scsi_ctron_ether_generic set_addr_cmd;
884 uint8_t enaddr[ETHER_ADDR_LEN];
885 int error;
886
887 if (ifp->if_flags & IFF_PROMISC) {
888 error = se_set_mode(sc, MAX_SNAP, 1);
889 }
890 else
891 error = se_set_mode(sc, ETHERMTU + sizeof(struct ether_header),
892 0);
893 if (error != 0)
894 return (error);
895
896 PROTOCMD(ctron_ether_set_addr, set_addr_cmd);
897 _lto2b(ETHER_ADDR_LEN, set_addr_cmd.length);
898 memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
899 error = se_scsipi_cmd(sc->sc_periph,
900 (void *)&set_addr_cmd, sizeof(set_addr_cmd),
901 enaddr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL,
902 XS_CTL_DATA_OUT);
903 if (error != 0)
904 return (error);
905
906 if ((sc->protos & PROTO_IP) &&
907 (error = se_add_proto(sc, ETHERTYPE_IP)) != 0)
908 return (error);
909 if ((sc->protos & PROTO_ARP) &&
910 (error = se_add_proto(sc, ETHERTYPE_ARP)) != 0)
911 return (error);
912 if ((sc->protos & PROTO_REVARP) &&
913 (error = se_add_proto(sc, ETHERTYPE_REVARP)) != 0)
914 return (error);
915 #ifdef NETATALK
916 if ((sc->protos & PROTO_AT) &&
917 (error = se_add_proto(sc, ETHERTYPE_ATALK)) != 0)
918 return (error);
919 if ((sc->protos & PROTO_AARP) &&
920 (error = se_add_proto(sc, ETHERTYPE_AARP)) != 0)
921 return (error);
922 #endif
923
924 if ((ifp->if_flags & (IFF_RUNNING | IFF_UP)) == IFF_UP) {
925 ifp->if_flags |= IFF_RUNNING;
926 mutex_enter(&sc->sc_iflock);
927 sc->sc_recv_work_pending = true;
928 workqueue_enqueue(sc->sc_recv_wq, &sc->sc_recv_work, NULL);
929 mutex_exit(&sc->sc_iflock);
930 ifp->if_flags &= ~IFF_OACTIVE;
931 mutex_enter(&sc->sc_iflock);
932 workqueue_enqueue(sc->sc_send_wq, &sc->sc_send_work, NULL);
933 mutex_exit(&sc->sc_iflock);
934 }
935 return (error);
936 }
937
938 static int
939 se_set_multi(struct se_softc *sc, uint8_t *addr)
940 {
941 struct scsi_ctron_ether_generic set_multi_cmd;
942 int error;
943
944 if (sc->sc_debug)
945 printf("%s: se_set_multi: %s\n", device_xname(sc->sc_dev),
946 ether_sprintf(addr));
947
948 PROTOCMD(ctron_ether_set_multi, set_multi_cmd);
949 _lto2b(ETHER_ADDR_LEN, set_multi_cmd.length);
950 error = se_scsipi_cmd(sc->sc_periph,
951 (void *)&set_multi_cmd, sizeof(set_multi_cmd),
952 addr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL, XS_CTL_DATA_OUT);
953 return (error);
954 }
955
956 static int
957 se_remove_multi(struct se_softc *sc, uint8_t *addr)
958 {
959 struct scsi_ctron_ether_generic remove_multi_cmd;
960 int error;
961
962 if (sc->sc_debug)
963 printf("%s: se_remove_multi: %s\n", device_xname(sc->sc_dev),
964 ether_sprintf(addr));
965
966 PROTOCMD(ctron_ether_remove_multi, remove_multi_cmd);
967 _lto2b(ETHER_ADDR_LEN, remove_multi_cmd.length);
968 error = se_scsipi_cmd(sc->sc_periph,
969 (void *)&remove_multi_cmd, sizeof(remove_multi_cmd),
970 addr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL, XS_CTL_DATA_OUT);
971 return (error);
972 }
973
974 #if 0 /* not used --thorpej */
975 static int
976 sc_set_all_multi(struct se_softc *sc, int set)
977 {
978 int error = 0;
979 uint8_t *addr;
980 struct ethercom *ec = &sc->sc_ethercom;
981 struct ether_multi *enm;
982 struct ether_multistep step;
983
984 ETHER_LOCK(ec);
985 ETHER_FIRST_MULTI(step, ec, enm);
986 while (enm != NULL) {
987 if (ETHER_CMP(enm->enm_addrlo, enm->enm_addrhi)) {
988 /*
989 * We must listen to a range of multicast addresses.
990 * For now, just accept all multicasts, rather than
991 * trying to set only those filter bits needed to match
992 * the range. (At this time, the only use of address
993 * ranges is for IP multicast routing, for which the
994 * range is big enough to require all bits set.)
995 */
996 /* We have no way of adding a range to this device.
997 * Stepping through all addresses in the range is
998 * typically not possible. The only real alternative
999 * is to go into promiscuous mode and filter by hand.
1000 */
1001 ETHER_UNLOCK(ec);
1002 return (ENODEV);
1003
1004 }
1005
1006 addr = enm->enm_addrlo;
1007 if ((error = set ? se_set_multi(sc, addr) :
1008 se_remove_multi(sc, addr)) != 0)
1009 break;	/* drop ETHER_LOCK before returning */
1010 ETHER_NEXT_MULTI(step, enm);
1011 }
1012 ETHER_UNLOCK(ec);
1013
1014 return (error);
1015 }
1016 #endif /* not used */
1017
1018 static void
1019 se_stop(struct se_softc *sc)
1020 {
1021
1022 /* Don't schedule any reads */
1023 callout_stop(&sc->sc_recv_ch);
1024
1025 /* Wait for the workqueues to finish */
1026 mutex_enter(&sc->sc_iflock);
1027 workqueue_wait(sc->sc_recv_wq, &sc->sc_recv_work);
1028 workqueue_wait(sc->sc_send_wq, &sc->sc_send_work);
1029 mutex_exit(&sc->sc_iflock);
1030
1031 /* Abort any scsi cmds in progress */
1032 mutex_enter(chan_mtx(sc->sc_periph->periph_channel));
1033 scsipi_kill_pending(sc->sc_periph);
1034 mutex_exit(chan_mtx(sc->sc_periph->periph_channel));
1035 }
1036
1037
1038 /*
1039 * Process an ioctl request.
1040 */
1041 static int
1042 se_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1043 {
1044 struct se_softc *sc = ifp->if_softc;
1045 struct ifaddr *ifa = (struct ifaddr *)data;
1046 struct ifreq *ifr = (struct ifreq *)data;
1047 struct sockaddr *sa;
1048 int error = 0;
1049
1050
1051 switch (cmd) {
1052
1053 case SIOCINITIFADDR:
1054 mutex_enter(&sc->sc_iflock);
1055 error = se_enable(sc);
1056 if (error == 0)
1057 ifp->if_flags |= IFF_UP;
1058 mutex_exit(&sc->sc_iflock);
1059 if (error != 0 ||
1060     (error = se_set_media(sc, CMEDIA_AUTOSENSE)) != 0)
1061 break;
1062
1063 switch (ifa->ifa_addr->sa_family) {
1064 #ifdef INET
1065 case AF_INET:
1066 sc->protos |= (PROTO_IP | PROTO_ARP | PROTO_REVARP);
1067 if ((error = se_init(sc)) != 0)
1068 break;
1069 arp_ifinit(ifp, ifa);
1070 break;
1071 #endif
1072 #ifdef NETATALK
1073 case AF_APPLETALK:
1074 sc->protos |= (PROTO_AT | PROTO_AARP);
1075 if ((error = se_init(sc)) != 0)
1076 break;
1077 break;
1078 #endif
1079 default:
1080 error = se_init(sc);
1081 break;
1082 }
1083 break;
1084
1085
1086 case SIOCSIFFLAGS:
1087 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1088 break;
1089 /* XXX re-use ether_ioctl() */
1090 switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
1091 case IFF_RUNNING:
1092 /*
1093 * If interface is marked down and it is running, then
1094 * stop it.
1095 */
1096 se_stop(sc);
1097 mutex_enter(&sc->sc_iflock);
1098 ifp->if_flags &= ~IFF_RUNNING;
1099 se_disable(sc);
1100 mutex_exit(&sc->sc_iflock);
1101 break;
1102 case IFF_UP:
1103 /*
1104 * If interface is marked up and it is stopped, then
1105 * start it.
1106 */
1107 mutex_enter(&sc->sc_iflock);
1108 error = se_enable(sc);
1109 mutex_exit(&sc->sc_iflock);
1110 if (error)
1111 break;
1112 error = se_init(sc);
1113 break;
1114 default:
1115 /*
1116 * Reset the interface to pick up changes in any other
1117 * flags that affect hardware registers.
1118 */
1119 if (sc->sc_enabled)
1120 error = se_init(sc);
1121 break;
1122 }
1123 #ifdef SEDEBUG
1124 if (ifp->if_flags & IFF_DEBUG)
1125 sc->sc_debug = 1;
1126 else
1127 sc->sc_debug = 0;
1128 #endif
1129 break;
1130
1131 case SIOCADDMULTI:
1132 case SIOCDELMULTI:
1133 mutex_enter(&sc->sc_iflock);
1134 sa = sockaddr_dup(ifreq_getaddr(cmd, ifr), M_WAITOK);
1135 mutex_exit(&sc->sc_iflock);
1136 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
1137 if (ifp->if_flags & IFF_RUNNING) {
1138 error = (cmd == SIOCADDMULTI) ?
1139 se_set_multi(sc, sa->sa_data) :
1140 se_remove_multi(sc, sa->sa_data);
1141 } else
1142 error = 0;
1143 }
1144 mutex_enter(&sc->sc_iflock);
1145 sockaddr_free(sa);
1146 mutex_exit(&sc->sc_iflock);
1147 break;
1148
1149 default:
1150
1151 error = ether_ioctl(ifp, cmd, data);
1152 break;
1153 }
1154
1155 return (error);
1156 }
1157
1158 /*
1159 * Enable the network interface.
1160 */
1161 int
1162 se_enable(struct se_softc *sc)
1163 {
1164 struct scsipi_periph *periph = sc->sc_periph;
1165 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
1166 int error = 0;
1167
1168 if (sc->sc_enabled == 0) {
1169 if ((error = scsipi_adapter_addref(adapt)) == 0)
1170 sc->sc_enabled = 1;
1171 else
1172 aprint_error_dev(sc->sc_dev, "device enable failed\n");
1173 }
1174 return (error);
1175 }
1176
1177 /*
1178 * Disable the network interface.
1179 */
1180 void
1181 se_disable(struct se_softc *sc)
1182 {
1183 struct scsipi_periph *periph = sc->sc_periph;
1184 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
1185
1186 if (sc->sc_enabled != 0) {
1187 scsipi_adapter_delref(adapt);
1188 sc->sc_enabled = 0;
1189 }
1190 }
1191
1192 #define SEUNIT(z) (minor(z))
1193 /*
1194 * open the device.
1195 */
1196 int
1197 seopen(dev_t dev, int flag, int fmt, struct lwp *l)
1198 {
1199 int unit, error;
1200 struct se_softc *sc;
1201 struct scsipi_periph *periph;
1202 struct scsipi_adapter *adapt;
1203
1204 unit = SEUNIT(dev);
1205 sc = device_lookup_private(&se_cd, unit);
1206 if (sc == NULL)
1207 return (ENXIO);
1208
1209 periph = sc->sc_periph;
1210 adapt = periph->periph_channel->chan_adapter;
1211
1212 if ((error = scsipi_adapter_addref(adapt)) != 0)
1213 return (error);
1214
1215 SC_DEBUG(periph, SCSIPI_DB1,
1216 ("scopen: dev=0x%"PRIx64" (unit %d (of %d))\n", dev, unit,
1217 se_cd.cd_ndevs));
1218
1219 periph->periph_flags |= PERIPH_OPEN;
1220
1221 SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
1222 return (0);
1223 }
1224
1225 /*
1226 * Close the device.  Only called when the last
1227 * reference to the open device is dropped.
1228 */
1229 int
1230 seclose(dev_t dev, int flag, int fmt, struct lwp *l)
1231 {
1232 struct se_softc *sc = device_lookup_private(&se_cd, SEUNIT(dev));
1233 struct scsipi_periph *periph = sc->sc_periph;
1234 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
1235
1236 SC_DEBUG(sc->sc_periph, SCSIPI_DB1, ("closing\n"));
1237
1238 scsipi_wait_drain(periph);
1239
1240 scsipi_adapter_delref(adapt);
1241 periph->periph_flags &= ~PERIPH_OPEN;
1242
1243 return (0);
1244 }
1245
1246 /*
1247 * Perform special action on behalf of the user.
1248 * Only does generic scsi ioctls.
1249 */
1250 int
1251 seioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1252 {
1253 struct se_softc *sc = device_lookup_private(&se_cd, SEUNIT(dev));
1254
1255 return (scsipi_do_ioctl(sc->sc_periph, dev, cmd, addr, flag, l));
1256 }
1257