/*	$NetBSD: twe.c,v 1.39 2003/08/03 18:45:46 jdolecek Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: twe.c,v 1.1 2000/05/24 23:35:23 msmith Exp
 */

/*
 * Driver for the 3ware Escalade family of RAID controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: twe.c,v 1.39 2003/08/03 18:45:46 jdolecek Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/disk.h>

#include <uvm/uvm_extern.h>

#include <machine/bswap.h>
#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/twereg.h>
#include <dev/pci/twevar.h>
#include <dev/pci/tweio.h>

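/*
 * PCI configuration space register holding the controller's I/O space
 * base address.
 */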
#define	PCI_CBIO	0x10

static void	twe_aen_handler(struct twe_ccb *, int);
static void	twe_attach(struct device *, struct device *, void *);
static int	twe_init_connection(struct twe_softc *);
static int	twe_intr(void *);
static int	twe_match(struct device *, struct cfdata *, void *);
static int	twe_param_get(struct twe_softc *, int, int, size_t,
		    void (*)(struct twe_ccb *, int), struct twe_param **);
static int	twe_param_set(struct twe_softc *, int, int, size_t, void *);
static void	twe_poll(struct twe_softc *);
static int	twe_print(void *, const char *);
static int	twe_reset(struct twe_softc *);
static int	twe_submatch(struct device *, struct cfdata *, void *);
static int	twe_status_check(struct twe_softc *, u_int);
static int	twe_status_wait(struct twe_softc *, u_int, int);
static void	twe_describe_controller(struct twe_softc *);

static inline u_int32_t twe_inl(struct twe_softc *, int);
static inline void twe_outl(struct twe_softc *, int, u_int32_t);

dev_type_open(tweopen);
dev_type_close(tweclose);
dev_type_ioctl(tweioctl);

const struct cdevsw twe_cdevsw = {
	tweopen, tweclose, noread, nowrite, tweioctl,
	nostop, notty, nopoll, nommap,
};

extern struct cfdriver twe_cd;

CFATTACH_DECL(twe, sizeof(struct twe_softc),
    twe_match, twe_attach, NULL, NULL);

struct {
	const u_int	aen;	/* High byte indicates type of message */
	const char	*desc;
} static const twe_aen_names[] = {
	{ 0x0000, "queue empty" },
	{ 0x0001, "soft reset" },
	{ 0x0102, "degraded mirror" },
	{ 0x0003, "controller error" },
	{ 0x0104, "rebuild fail" },
	{ 0x0105, "rebuild done" },
	{ 0x0106, "incompatible unit" },
	{ 0x0107, "initialisation done" },
	{ 0x0108, "unclean shutdown detected" },
	{ 0x0109, "drive timeout" },
	{ 0x010a, "drive error" },
	{ 0x010b, "rebuild started" },
	{ 0x010c, "init started" },
	{ 0x010d, "logical unit deleted" },
	{ 0x020f, "SMART threshold exceeded" },
	{ 0x0015, "table undefined" },	/* XXX: Not in FreeBSD's table */
	{ 0x0221, "ATA UDMA downgrade" },
	{ 0x0222, "ATA UDMA upgrade" },
	{ 0x0223, "Sector repair occurred" },
	{ 0x0024, "SBUF integrity check failure" },
	{ 0x0225, "lost cached write" },
	{ 0x0226, "drive ECC error detected" },
	{ 0x0227, "DCB checksum error" },
	{ 0x0228, "DCB unsupported version" },
	{ 0x0129, "verify started" },
	{ 0x012a, "verify failed" },
	{ 0x012b, "verify complete" },
	{ 0x022c, "overwrote bad sector during rebuild" },
	{ 0x022d, "encountered bad sector during rebuild" },
	{ 0x00ff, "aen queue full" },
};

/*
 * The high byte of the message above determines the format.  Currently we
 * know about format 0 (no unit/port information), format 1 (unit specific
 * message) and format 2 (port specific message).
 */
static const char * const aenfmt[] = {
	"",		/* No message */
	"unit %d: ",	/* Unit message */
	"port %d: "	/* Port message */
};


static inline u_int32_t
twe_inl(struct twe_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
twe_outl(struct twe_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Match a supported board.
 */
static int
twe_match(struct device *parent, struct cfdata *cfdata, void *aux)
{
	struct pci_attach_args *pa;

	pa = aux;

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE ||
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE_ASIC));
}

/*
 * Attach a supported board.
 *
 * XXX This doesn't fail gracefully.
 */
static void
twe_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa;
	struct twe_softc *sc;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	pcireg_t csr;
	const char *intrstr;
	int size, i, rv, rseg;
	size_t max_segs, max_xfer;
	struct twe_param *dtp, *ctp;
	bus_dma_segment_t seg;
	struct twe_cmd *tc;
	struct twe_attach_args twea;
	struct twe_ccb *ccb;

	sc = (struct twe_softc *)self;
	pa = aux;
	pc = pa->pa_pc;
	sc->sc_dmat = pa->pa_dmat;
	SIMPLEQ_INIT(&sc->sc_ccb_queue);
	SLIST_INIT(&sc->sc_ccb_freelist);

	aprint_normal(": 3ware Escalade\n");

	ccb = malloc(sizeof(*ccb) * TWE_MAX_QUEUECNT, M_DEVBUF, M_NOWAIT);
	if (ccb == NULL) {
		aprint_error("%s: unable to allocate memory for ccbs\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, NULL)) {
		aprint_error("%s: can't map i/o space\n", sc->sc_dv.dv_xname);
		return;
	}

	/* Enable the device. */
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_MASTER_ENABLE);

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: can't map interrupt\n", sc->sc_dv.dv_xname);
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_BIO, twe_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: can't establish interrupt%s%s\n",
		    sc->sc_dv.dv_xname,
		    (intrstr) ? " at " : "",
		    (intrstr) ? intrstr : "");
		return;
	}

	if (intrstr != NULL)
		aprint_normal("%s: interrupting at %s\n",
		    sc->sc_dv.dv_xname, intrstr);

	/*
	 * Allocate and initialise the command blocks and CCBs.
	 */
	size = sizeof(struct twe_cmd) * TWE_MAX_QUEUECNT;

	if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
	    &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: unable to allocate commands, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    (caddr_t *)&sc->sc_cmds,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error("%s: unable to map commands, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamap_create(sc->sc_dmat, size, size, 1, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error("%s: unable to create command DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_cmds,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: unable to load command DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

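	/*
	 * The command blocks were allocated as a single segment, so the
	 * bus address of command block N is sc_cmds_paddr plus
	 * N * sizeof(struct twe_cmd) (see twe_ccb_submit()).
	 */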
	sc->sc_cmds_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
	memset(sc->sc_cmds, 0, size);

	sc->sc_ccbs = ccb;
	tc = (struct twe_cmd *)sc->sc_cmds;
	max_segs = twe_get_maxsegs();
	max_xfer = twe_get_maxxfer(max_segs);

	for (i = 0; i < TWE_MAX_QUEUECNT; i++, tc++, ccb++) {
		ccb->ccb_cmd = tc;
		ccb->ccb_cmdid = i;
		ccb->ccb_flags = 0;
		rv = bus_dmamap_create(sc->sc_dmat, max_xfer,
		    max_segs, PAGE_SIZE, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (rv != 0) {
			aprint_error("%s: can't create dmamap, rv = %d\n",
			    sc->sc_dv.dv_xname, rv);
			return;
		}
		/* Save one CCB for parameter retrieval. */
		if (i != 0)
			SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb,
			    ccb_chain.slist);
	}

	/* Wait for the controller to become ready. */
	if (twe_status_wait(sc, TWE_STS_MICROCONTROLLER_READY, 6)) {
		aprint_error("%s: microcontroller not ready\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	twe_outl(sc, TWE_REG_CTL, TWE_CTL_DISABLE_INTRS);

	/* Reset the controller. */
	if (twe_reset(sc)) {
		aprint_error("%s: reset failed\n", sc->sc_dv.dv_xname);
		return;
	}

	/* Find attached units. */
	rv = twe_param_get(sc, TWE_PARAM_UNITSUMMARY,
	    TWE_PARAM_UNITSUMMARY_Status, TWE_MAX_UNITS, NULL, &dtp);
	if (rv != 0) {
		aprint_error("%s: can't detect attached units (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	/* For each detected unit, collect size and store in an array. */
	for (i = 0, sc->sc_nunits = 0; i < TWE_MAX_UNITS; i++) {
		/* Unit present? */
		if ((dtp->tp_data[i] & TWE_PARAM_UNITSTATUS_Online) == 0) {
			sc->sc_dsize[i] = 0;
			continue;
		}

		rv = twe_param_get(sc, TWE_PARAM_UNITINFO + i,
		    TWE_PARAM_UNITINFO_Capacity, 4, NULL, &ctp);
		if (rv != 0) {
			aprint_error("%s: error %d fetching capacity for unit %d\n",
			    sc->sc_dv.dv_xname, rv, i);
			continue;
		}

		sc->sc_dsize[i] = le32toh(*(u_int32_t *)ctp->tp_data);
		free(ctp, M_DEVBUF);
		sc->sc_nunits++;
	}
	free(dtp, M_DEVBUF);

	/* Initialise connection with controller and enable interrupts. */
	twe_init_connection(sc);
	twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR |
	    TWE_CTL_UNMASK_RESP_INTR |
	    TWE_CTL_ENABLE_INTRS);

	twe_describe_controller(sc);

	/* Attach sub-devices. */
	for (i = 0; i < TWE_MAX_UNITS; i++) {
		if (sc->sc_dsize[i] == 0)
			continue;
		twea.twea_unit = i;
		config_found_sm(&sc->sc_dv, &twea, twe_print, twe_submatch);
	}
}

/*
 * Reset the controller.  Currently only useful at attach time; must be
 * called with interrupts blocked.
 */
static int
twe_reset(struct twe_softc *sc)
{
	struct twe_param *tp;
	u_int aen, status;
	volatile u_int32_t junk;
	int got, rv;

	/* Issue a soft reset. */
	twe_outl(sc, TWE_REG_CTL, TWE_CTL_ISSUE_SOFT_RESET |
	    TWE_CTL_CLEAR_HOST_INTR |
	    TWE_CTL_CLEAR_ATTN_INTR |
	    TWE_CTL_MASK_CMD_INTR |
	    TWE_CTL_MASK_RESP_INTR |
	    TWE_CTL_CLEAR_ERROR_STS |
	    TWE_CTL_DISABLE_INTRS);

	if (twe_status_wait(sc, TWE_STS_ATTN_INTR, 15)) {
		printf("%s: no attention interrupt\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Pull AENs out of the controller; look for a soft reset AEN. */
	for (got = 0;;) {
		rv = twe_param_get(sc, TWE_PARAM_AEN, TWE_PARAM_AEN_UnitCode,
		    2, NULL, &tp);
		if (rv != 0)
			printf("%s: error %d while draining response queue\n",
			    sc->sc_dv.dv_xname, rv);
		aen = TWE_AEN_CODE(le16toh(*(u_int16_t *)tp->tp_data));
		free(tp, M_DEVBUF);
		if (aen == TWE_AEN_QUEUE_EMPTY)
			break;
		if (aen == TWE_AEN_SOFT_RESET)
			got = 1;
	}
	if (!got) {
		printf("%s: reset not reported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Check controller status. */
	status = twe_inl(sc, TWE_REG_STS);
	if (twe_status_check(sc, status)) {
		printf("%s: controller errors detected\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Drain the response queue. */
	for (;;) {
		status = twe_inl(sc, TWE_REG_STS);
		if (twe_status_check(sc, status) != 0) {
			printf("%s: can't drain response queue\n",
			    sc->sc_dv.dv_xname);
			return (-1);
		}
		if ((status & TWE_STS_RESP_QUEUE_EMPTY) != 0)
			break;
		junk = twe_inl(sc, TWE_REG_RESP_QUEUE);
	}

	return (0);
}

/*
 * Print autoconfiguration message for a sub-device.
 */
static int
twe_print(void *aux, const char *pnp)
{
	struct twe_attach_args *twea;

	twea = aux;

	if (pnp != NULL)
		aprint_normal("block device at %s", pnp);
	aprint_normal(" unit %d", twea->twea_unit);
	return (UNCONF);
}

/*
 * Match a sub-device.
 */
static int
twe_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct twe_attach_args *twea;

	twea = aux;

	if (cf->tweacf_unit != TWECF_UNIT_DEFAULT &&
	    cf->tweacf_unit != twea->twea_unit)
		return (0);

	return (config_match(parent, cf, aux));
}

/*
 * Interrupt service routine.
 */
static int
twe_intr(void *arg)
{
	struct twe_softc *sc;
	u_int status;
	int caught, rv;

	sc = arg;
	caught = 0;
	status = twe_inl(sc, TWE_REG_STS);
	twe_status_check(sc, status);

	/* Host interrupts - purpose unknown. */
	if ((status & TWE_STS_HOST_INTR) != 0) {
#ifdef DEBUG
		printf("%s: host interrupt\n", sc->sc_dv.dv_xname);
#endif
		twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_HOST_INTR);
		caught = 1;
	}

	/*
	 * Attention interrupts, signalled when a controller or child device
	 * state change has occurred.
	 */
	if ((status & TWE_STS_ATTN_INTR) != 0) {
		if ((sc->sc_flags & TWEF_AEN) == 0) {
			rv = twe_param_get(sc, TWE_PARAM_AEN,
			    TWE_PARAM_AEN_UnitCode, 2, twe_aen_handler,
			    NULL);
			if (rv != 0) {
				printf("%s: unable to retrieve AEN (%d)\n",
				    sc->sc_dv.dv_xname, rv);
				twe_outl(sc, TWE_REG_CTL,
				    TWE_CTL_CLEAR_ATTN_INTR);
			} else
				sc->sc_flags |= TWEF_AEN;
		}
		caught = 1;
	}

	/*
	 * Command interrupts, signalled when the controller can accept more
	 * commands.  We don't use this; instead, we try to submit commands
	 * when we receive them, and when other commands have completed.
	 * Mask it so we don't get another one.
	 */
	if ((status & TWE_STS_CMD_INTR) != 0) {
#ifdef DEBUG
		printf("%s: command interrupt\n", sc->sc_dv.dv_xname);
#endif
		twe_outl(sc, TWE_REG_CTL, TWE_CTL_MASK_CMD_INTR);
		caught = 1;
	}

	if ((status & TWE_STS_RESP_INTR) != 0) {
		twe_poll(sc);
		caught = 1;
	}

	return (caught);
}

/*
 * Handle an AEN returned by the controller.
 */
static void
twe_aen_handler(struct twe_ccb *ccb, int error)
{
	struct twe_softc *sc;
	struct twe_param *tp;
	const char *str;
	u_int aen;
	int i, hu, rv;

	sc = (struct twe_softc *)ccb->ccb_tx.tx_dv;
	tp = ccb->ccb_tx.tx_context;
	twe_ccb_unmap(sc, ccb);

	if (error) {
		printf("%s: error retrieving AEN\n", sc->sc_dv.dv_xname);
		aen = TWE_AEN_QUEUE_EMPTY;
	} else
		aen = le16toh(*(u_int16_t *)tp->tp_data);
	free(tp, M_DEVBUF);
	twe_ccb_free(sc, ccb);

	if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY) {
		twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
		sc->sc_flags &= ~TWEF_AEN;
		return;
	}

	str = "<unknown>";
	i = 0;
	hu = 0;

	while (i < sizeof(twe_aen_names) / sizeof(twe_aen_names[0])) {
		if (TWE_AEN_CODE(twe_aen_names[i].aen) == TWE_AEN_CODE(aen)) {
			str = twe_aen_names[i].desc;
			hu = TWE_AEN_UNIT(twe_aen_names[i].aen);
			break;
		}
		i++;
	}
	printf("%s: ", sc->sc_dv.dv_xname);
	printf(aenfmt[hu], TWE_AEN_UNIT(aen));
	printf("AEN 0x%04x (%s) received\n", TWE_AEN_CODE(aen), str);

	/*
	 * Chain another retrieval in case interrupts have been
	 * coalesced.
	 */
	rv = twe_param_get(sc, TWE_PARAM_AEN, TWE_PARAM_AEN_UnitCode, 2,
	    twe_aen_handler, NULL);
	if (rv != 0)
		printf("%s: unable to retrieve AEN (%d)\n",
		    sc->sc_dv.dv_xname, rv);
}

/*
 * Execute a TWE_OP_GET_PARAM command.  If a callback function is provided,
 * it will be called with generated context when the command has completed.
 * If no callback is provided, the command will be executed synchronously
 * and a pointer to a buffer containing the returned data will be stored
 * in *pbuf.
 *
 * The caller or callback is responsible for freeing the buffer.
 */
static int
twe_param_get(struct twe_softc *sc, int table_id, int param_id, size_t size,
    void (*func)(struct twe_ccb *, int), struct twe_param **pbuf)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	struct twe_param *tp;
	int rv, s;

	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
	if (tp == NULL)
		return ENOMEM;

	rv = twe_ccb_alloc(sc, &ccb,
	    TWE_CCB_PARAM | TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
	if (rv != 0)
		goto done;

	ccb->ccb_data = tp;
	ccb->ccb_datasize = TWE_SECTOR_SIZE;
	ccb->ccb_tx.tx_handler = func;
	ccb->ccb_tx.tx_context = tp;
	ccb->ccb_tx.tx_dv = &sc->sc_dv;

	tc = ccb->ccb_cmd;
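	/*
	 * The top three bits of the opcode byte encode where the S/G list
	 * lives in the command block; twe_ccb_map() keys off this value.
	 */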
	tc->tc_size = 2;
	tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
	tc->tc_unit = 0;
	tc->tc_count = htole16(1);

	/* Fill in the outbound parameter data. */
	tp->tp_table_id = htole16(table_id);
	tp->tp_param_id = param_id;
	tp->tp_param_size = size;

	/* Map the transfer. */
	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
		twe_ccb_free(sc, ccb);
		goto done;
	}

	/* Submit the command and either wait or let the callback handle it. */
	if (func == NULL) {
		s = splbio();
		rv = twe_ccb_poll(sc, ccb, 5);
		twe_ccb_unmap(sc, ccb);
		twe_ccb_free(sc, ccb);
		splx(s);
	} else {
#ifdef DEBUG
		if (pbuf != NULL)
			panic("both func and pbuf defined");
#endif
		twe_ccb_enqueue(sc, ccb);
		return 0;
	}

done:
	if (pbuf == NULL || rv != 0)
		free(tp, M_DEVBUF);
	else if (pbuf != NULL && rv == 0)
		*pbuf = tp;
	return rv;
}

/*
 * Execute a TWE_OP_SET_PARAM command.
 */
static int
twe_param_set(struct twe_softc *sc, int table_id, int param_id, size_t size,
    void *buf)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	struct twe_param *tp;
	int rv, s;

	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
	if (tp == NULL)
		return ENOMEM;

	rv = twe_ccb_alloc(sc, &ccb,
	    TWE_CCB_PARAM | TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
	if (rv != 0)
		goto done;

	ccb->ccb_data = tp;
	ccb->ccb_datasize = TWE_SECTOR_SIZE;
	ccb->ccb_tx.tx_handler = 0;
	ccb->ccb_tx.tx_context = tp;
	ccb->ccb_tx.tx_dv = &sc->sc_dv;

	tc = ccb->ccb_cmd;
	tc->tc_size = 2;
	tc->tc_opcode = TWE_OP_SET_PARAM | (tc->tc_size << 5);
	tc->tc_unit = 0;
	tc->tc_count = htole16(1);

	/* Fill in the outbound parameter data. */
	tp->tp_table_id = htole16(table_id);
	tp->tp_param_id = param_id;
	tp->tp_param_size = size;
	memcpy(tp->tp_data, buf, size);

	/* Map the transfer. */
	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
		twe_ccb_free(sc, ccb);
		goto done;
	}

	/* Submit the command and wait. */
	s = splbio();
	rv = twe_ccb_poll(sc, ccb, 5);
	twe_ccb_unmap(sc, ccb);
	twe_ccb_free(sc, ccb);
	splx(s);
done:
	free(tp, M_DEVBUF);
	return (rv);
}

/*
 * Execute a TWE_OP_INIT_CONNECTION command.  Return non-zero on error.
 * Must be called with interrupts blocked.
 */
static int
twe_init_connection(struct twe_softc *sc)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	int rv;

	if ((rv = twe_ccb_alloc(sc, &ccb, 0)) != 0)
		return (rv);

	/* Build the command. */
	tc = ccb->ccb_cmd;
	tc->tc_size = 3;
	tc->tc_opcode = TWE_OP_INIT_CONNECTION;
	tc->tc_unit = 0;
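	/*
	 * The count here tells the controller how many commands the host
	 * may have outstanding at once.
	 */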
	tc->tc_count = htole16(TWE_MAX_CMDS);
	tc->tc_args.init_connection.response_queue_pointer = 0;

	/* Submit the command for immediate execution. */
	rv = twe_ccb_poll(sc, ccb, 5);
	twe_ccb_free(sc, ccb);
	return (rv);
}

/*
 * Poll the controller for completed commands.  Must be called with
 * interrupts blocked.
 */
static void
twe_poll(struct twe_softc *sc)
{
	struct twe_ccb *ccb;
	int found;
	u_int status, cmdid;

	found = 0;

	for (;;) {
		status = twe_inl(sc, TWE_REG_STS);
		twe_status_check(sc, status);

		if ((status & TWE_STS_RESP_QUEUE_EMPTY))
			break;

		found = 1;
		cmdid = twe_inl(sc, TWE_REG_RESP_QUEUE);
		cmdid = (cmdid & TWE_RESP_MASK) >> TWE_RESP_SHIFT;
		if (cmdid >= TWE_MAX_QUEUECNT) {
			printf("%s: bad completion\n", sc->sc_dv.dv_xname);
			continue;
		}

		ccb = sc->sc_ccbs + cmdid;
		if ((ccb->ccb_flags & TWE_CCB_ACTIVE) == 0) {
			printf("%s: bad completion (not active)\n",
			    sc->sc_dv.dv_xname);
			continue;
		}
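		/*
		 * The CCB is known to be active and not yet complete, so
		 * this XOR clears ACTIVE and sets COMPLETE in one step.
		 */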
		ccb->ccb_flags ^= TWE_CCB_COMPLETE | TWE_CCB_ACTIVE;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    (caddr_t)ccb->ccb_cmd - sc->sc_cmds,
		    sizeof(struct twe_cmd),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Pass notification to upper layers. */
		if (ccb->ccb_tx.tx_handler != NULL)
			(*ccb->ccb_tx.tx_handler)(ccb,
			    ccb->ccb_cmd->tc_status != 0 ? EIO : 0);
	}

	/* If any commands have completed, run the software queue. */
	if (found)
		twe_ccb_enqueue(sc, NULL);
}

/*
 * Wait for `status' to be set in the controller status register.  Return
 * zero if found, non-zero if the operation timed out.
 */
static int
twe_status_wait(struct twe_softc *sc, u_int32_t status, int timo)
{

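	/* `timo' is in seconds; poll the status register every 100ms. */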
	for (timo *= 10; timo != 0; timo--) {
		if ((twe_inl(sc, TWE_REG_STS) & status) == status)
			break;
		delay(100000);
	}

	return (timo == 0);
}

/*
 * Complain if the status bits aren't what we expect.
 */
static int
twe_status_check(struct twe_softc *sc, u_int status)
{
	int rv;

	rv = 0;

	if ((status & TWE_STS_EXPECTED_BITS) != TWE_STS_EXPECTED_BITS) {
		printf("%s: missing status bits: 0x%08x\n", sc->sc_dv.dv_xname,
		    status & ~TWE_STS_EXPECTED_BITS);
		rv = -1;
	}

	if ((status & TWE_STS_UNEXPECTED_BITS) != 0) {
		printf("%s: unexpected status bits: 0x%08x\n",
		    sc->sc_dv.dv_xname, status & TWE_STS_UNEXPECTED_BITS);
		rv = -1;
	}

	return (rv);
}

/*
 * Allocate and initialise a CCB.
 */
int
twe_ccb_alloc(struct twe_softc *sc, struct twe_ccb **ccbp, int flags)
{
	struct twe_cmd *tc;
	struct twe_ccb *ccb;
	int s;

	s = splbio();
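	/*
	 * CCB 0 is never placed on the free list and is reserved for
	 * parameter retrieval (see twe_attach()).
	 */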
	if ((flags & TWE_CCB_PARAM) != 0)
		ccb = sc->sc_ccbs;
	else {
		/* Allocate a CCB and command block. */
		if (SLIST_FIRST(&sc->sc_ccb_freelist) == NULL) {
			splx(s);
			return (EAGAIN);
		}
		ccb = SLIST_FIRST(&sc->sc_ccb_freelist);
		SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
	}
#ifdef DIAGNOSTIC
	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
		panic("twe_ccb_alloc: CCB already allocated");
	flags |= TWE_CCB_ALLOCED;
#endif
	splx(s);

	/* Initialise some fields and return. */
	ccb->ccb_tx.tx_handler = NULL;
	ccb->ccb_flags = flags;
	tc = ccb->ccb_cmd;
	tc->tc_status = 0;
	tc->tc_flags = 0;
	tc->tc_cmdid = ccb->ccb_cmdid;
	*ccbp = ccb;

	return (0);
}

/*
 * Free a CCB.
 */
void
twe_ccb_free(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int s;

	s = splbio();
	if ((ccb->ccb_flags & TWE_CCB_PARAM) == 0)
		SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, ccb_chain.slist);
	ccb->ccb_flags = 0;
	splx(s);
}

/*
 * Map the specified CCB's command block and data buffer (if any) into
 * controller visible space.  Perform DMA synchronisation.
 */
int
twe_ccb_map(struct twe_softc *sc, struct twe_ccb *ccb)
{
	struct twe_cmd *tc;
	int flags, nsegs, i, s, rv;
	void *data;

	/*
	 * The data as a whole must be 512-byte aligned.
	 */
	if (((u_long)ccb->ccb_data & (TWE_ALIGNMENT - 1)) != 0) {
		s = splvm();
		/* XXX */
		ccb->ccb_abuf = uvm_km_kmemalloc(kmem_map, NULL,
		    ccb->ccb_datasize, UVM_KMF_NOWAIT);
		splx(s);
		data = (void *)ccb->ccb_abuf;
		if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
			memcpy(data, ccb->ccb_data, ccb->ccb_datasize);
	} else {
		ccb->ccb_abuf = (vaddr_t)0;
		data = ccb->ccb_data;
	}

	/*
	 * Map the data buffer into bus space and build the S/G list.
	 */
	rv = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer, data,
	    ccb->ccb_datasize, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((ccb->ccb_flags & TWE_CCB_DATA_IN) ?
	    BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0) {
		if (ccb->ccb_abuf != (vaddr_t)0) {
			s = splvm();
			/* XXX */
			uvm_km_free(kmem_map, ccb->ccb_abuf,
			    ccb->ccb_datasize);
			splx(s);
		}
		return (rv);
	}

	nsegs = ccb->ccb_dmamap_xfer->dm_nsegs;
	tc = ccb->ccb_cmd;
	tc->tc_size += 2 * nsegs;

	/* The location of the S/G list is dependent upon command type. */
	switch (tc->tc_opcode >> 5) {
	case 2:
		for (i = 0; i < nsegs; i++) {
			tc->tc_args.param.sgl[i].tsg_address =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
			tc->tc_args.param.sgl[i].tsg_length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
		}
		/* XXX Needed? */
		for (; i < TWE_SG_SIZE; i++) {
			tc->tc_args.param.sgl[i].tsg_address = 0;
			tc->tc_args.param.sgl[i].tsg_length = 0;
		}
		break;
	case 3:
		for (i = 0; i < nsegs; i++) {
			tc->tc_args.io.sgl[i].tsg_address =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
			tc->tc_args.io.sgl[i].tsg_length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
		}
		/* XXX Needed? */
		for (; i < TWE_SG_SIZE; i++) {
			tc->tc_args.io.sgl[i].tsg_address = 0;
			tc->tc_args.io.sgl[i].tsg_length = 0;
		}
		break;
#ifdef DEBUG
	default:
		panic("twe_ccb_map: oops");
#endif
	}

	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
		flags = BUS_DMASYNC_PREREAD;
	else
		flags = 0;
	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
		flags |= BUS_DMASYNC_PREWRITE;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_datasize, flags);
	return (0);
}

/*
 * Unmap the specified CCB's command block and data buffer (if any) and
 * perform DMA synchronisation.
 */
void
twe_ccb_unmap(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int flags, s;

	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
		flags = BUS_DMASYNC_POSTREAD;
	else
		flags = 0;
	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
		flags |= BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_datasize, flags);
	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);

	if (ccb->ccb_abuf != (vaddr_t)0) {
		if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
			memcpy(ccb->ccb_data, (void *)ccb->ccb_abuf,
			    ccb->ccb_datasize);
		s = splvm();
		/* XXX */
		uvm_km_free(kmem_map, ccb->ccb_abuf, ccb->ccb_datasize);
		splx(s);
	}
}

/*
 * Submit a command to the controller and poll on completion.  Return
 * non-zero on timeout (but don't check status, as some command types don't
 * return status).  Must be called with interrupts blocked.
 */
int
twe_ccb_poll(struct twe_softc *sc, struct twe_ccb *ccb, int timo)
{
	int rv;

	if ((rv = twe_ccb_submit(sc, ccb)) != 0)
		return (rv);

	for (timo *= 1000; timo != 0; timo--) {
		twe_poll(sc);
		if ((ccb->ccb_flags & TWE_CCB_COMPLETE) != 0)
			break;
		DELAY(100);
	}

	return (timo == 0);
}

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
twe_ccb_enqueue(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int s;

	s = splbio();

	if (ccb != NULL)
		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);

	while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) {
		if (twe_ccb_submit(sc, ccb))
			break;
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb_chain.simpleq);
	}

	splx(s);
}

/*
 * Submit the command block associated with the specified CCB to the
 * controller for execution.  Must be called with interrupts blocked.
 */
int
twe_ccb_submit(struct twe_softc *sc, struct twe_ccb *ccb)
{
	bus_addr_t pa;
	int rv;
	u_int status;

	/* Check to see if we can post a command. */
	status = twe_inl(sc, TWE_REG_STS);
	twe_status_check(sc, status);

	if ((status & TWE_STS_CMD_QUEUE_FULL) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    (caddr_t)ccb->ccb_cmd - sc->sc_cmds, sizeof(struct twe_cmd),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
		ccb->ccb_flags |= TWE_CCB_ACTIVE;
		pa = sc->sc_cmds_paddr +
		    ccb->ccb_cmdid * sizeof(struct twe_cmd);
		twe_outl(sc, TWE_REG_CMD_QUEUE, (u_int32_t)pa);
		rv = 0;
	} else
		rv = EBUSY;

	return (rv);
}


/*
 * Accept an open operation on the control device.
 */
int
tweopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct twe_softc *twe;

	if ((twe = device_lookup(&twe_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((twe->sc_flags & TWEF_OPEN) != 0)
		return (EBUSY);

	twe->sc_flags |= TWEF_OPEN;
	return (0);
}

/*
 * Accept the last close on the control device.
 */
int
tweclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct twe_softc *twe;

	twe = device_lookup(&twe_cd, minor(dev));
	twe->sc_flags &= ~TWEF_OPEN;
	return (0);
}

/*
 * Handle control operations.
 */
int
tweioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct twe_softc *twe;
	struct twe_ccb *ccb;
	struct twe_param *param;
	struct twe_usercommand *tu;
	struct twe_paramcommand *tp;
	union twe_statrequest *ts;
	void *pdata = NULL;
	int rv, s, error = 0;
	u_int8_t cmdid;

	if (securelevel >= 2)
		return (EPERM);

	twe = device_lookup(&twe_cd, minor(dev));
	tu = (struct twe_usercommand *)data;
	tp = (struct twe_paramcommand *)data;
	ts = (union twe_statrequest *)data;

	/* Hmm, compatible with FreeBSD */
	switch (cmd) {
	case TWEIO_COMMAND:
		if (tu->tu_size > 0) {
			if (tu->tu_size > TWE_SECTOR_SIZE)
				return EINVAL;
			pdata = malloc(tu->tu_size, M_DEVBUF, M_WAITOK);
			error = copyin(tu->tu_data, pdata, tu->tu_size);
			if (error != 0)
				goto done;
			error = twe_ccb_alloc(twe, &ccb, TWE_CCB_PARAM |
			    TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
		} else {
			error = twe_ccb_alloc(twe, &ccb, 0);
		}
		if (error != 0)
			goto done;
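		/*
		 * Copy in the user's command block, but make sure it carries
		 * the command ID that belongs to this CCB.
		 */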
		cmdid = ccb->ccb_cmdid;
		memcpy(ccb->ccb_cmd, &tu->tu_cmd, sizeof(struct twe_cmd));
		ccb->ccb_cmd->tc_cmdid = cmdid;
		if (ccb->ccb_flags & TWE_CCB_PARAM) {
			ccb->ccb_data = pdata;
			ccb->ccb_datasize = TWE_SECTOR_SIZE;
			ccb->ccb_tx.tx_handler = 0;
			ccb->ccb_tx.tx_context = pdata;
			ccb->ccb_tx.tx_dv = &twe->sc_dv;
		}
		/* Map the transfer. */
		if ((error = twe_ccb_map(twe, ccb)) != 0) {
			twe_ccb_free(twe, ccb);
			goto done;
		}

		/* Submit the command and wait. */
		s = splbio();
		rv = twe_ccb_poll(twe, ccb, 5);
		twe_ccb_unmap(twe, ccb);
		twe_ccb_free(twe, ccb);
		splx(s);

		if (tu->tu_size > 0)
			error = copyout(pdata, tu->tu_data, tu->tu_size);
		goto done;

	case TWEIO_STATS:
		return (ENOENT);

	case TWEIO_AEN_POLL:
		if ((twe->sc_flags & TWEF_AEN) == 0)
			return (ENOENT);
		return (0);

	case TWEIO_AEN_WAIT:
		s = splbio();
		while ((twe->sc_flags & TWEF_AEN) == 0) {
			/* No wakeup is posted for AENs; poll once a second. */
			rv = tsleep(twe, PRIBIO | PCATCH, "tweaen", hz);
			if (rv != 0 && rv != EWOULDBLOCK)
				break;
		}
		splx(s);
		return (0);

	case TWEIO_GET_PARAM:
		error = twe_param_get(twe, tp->tp_table_id, tp->tp_param_id,
		    tp->tp_size, 0, &param);
		if (error != 0)
			return (error);
		if (param->tp_param_size > tp->tp_size) {
			error = EFAULT;
			goto done;
		}
		error = copyout(param->tp_data, tp->tp_data,
		    param->tp_param_size);
		goto done;

	case TWEIO_SET_PARAM:
		pdata = malloc(tp->tp_size, M_DEVBUF, M_WAITOK);
		if ((error = copyin(tp->tp_data, pdata, tp->tp_size)) != 0)
			goto done;
		error = twe_param_set(twe, tp->tp_table_id, tp->tp_param_id,
		    tp->tp_size, pdata);
		goto done;

	case TWEIO_RESET:
		twe_reset(twe);
		return (0);

	default:
		return EINVAL;
	}
done:
	if (pdata)
		free(pdata, M_DEVBUF);
	return error;
}

/*
 * Print some information about the controller
 */
static void
twe_describe_controller(struct twe_softc *sc)
{
	struct twe_param *p[7];
	int rv = 0;

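	/*
	 * Failures from the individual parameter fetches are OR'd together
	 * and checked once, below.
	 */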
	/* get the port count */
	rv |= twe_param_get(sc, TWE_PARAM_CONTROLLER,
	    TWE_PARAM_CONTROLLER_PortCount, 1, NULL, &p[0]);

	/* get version strings */
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_Mon,
	    16, NULL, &p[1]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_FW,
	    16, NULL, &p[2]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_BIOS,
	    16, NULL, &p[3]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCB,
	    8, NULL, &p[4]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_ATA,
	    8, NULL, &p[5]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCI,
	    8, NULL, &p[6]);

	if (rv) {
		/* some error occurred */
		aprint_error("%s: failed to fetch version information\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	aprint_normal("%s: %d ports, Firmware %.16s, BIOS %.16s\n",
	    sc->sc_dv.dv_xname,
	    p[0]->tp_data[0], p[2]->tp_data, p[3]->tp_data);

	aprint_verbose("%s: Monitor %.16s, PCB %.8s, Achip %.8s, Pchip %.8s\n",
	    sc->sc_dv.dv_xname,
	    p[1]->tp_data, p[4]->tp_data,
	    p[5]->tp_data, p[6]->tp_data);

	free(p[0], M_DEVBUF);
	free(p[1], M_DEVBUF);
	free(p[2], M_DEVBUF);
	free(p[3], M_DEVBUF);
	free(p[4], M_DEVBUF);
	free(p[5], M_DEVBUF);
	free(p[6], M_DEVBUF);
}