/*	$NetBSD: twe.c,v 1.108.10.1 2021/03/22 02:01:01 thorpej Exp $	*/
2
3 /*-
4 * Copyright (c) 2000, 2001, 2002, 2003, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran; and by Jason R. Thorpe of Wasabi Systems, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*-
33 * Copyright (c) 2000 Michael Smith
34 * Copyright (c) 2000 BSDi
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE.
57 *
58 * from FreeBSD: twe.c,v 1.1 2000/05/24 23:35:23 msmith Exp
59 */
60
61 /*
62 * Driver for the 3ware Escalade family of RAID controllers.
63 */
64
65 #include <sys/cdefs.h>
66 __KERNEL_RCSID(0, "$NetBSD: twe.c,v 1.108.10.1 2021/03/22 02:01:01 thorpej Exp $");
67
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/kernel.h>
71 #include <sys/device.h>
72 #include <sys/queue.h>
73 #include <sys/proc.h>
74 #include <sys/buf.h>
75 #include <sys/endian.h>
76 #include <sys/malloc.h>
77 #include <sys/conf.h>
78 #include <sys/disk.h>
79 #include <sys/sysctl.h>
80 #include <sys/syslog.h>
81 #include <sys/kauth.h>
82 #include <sys/module.h>
83 #include <sys/bswap.h>
84 #include <sys/bus.h>
85
86 #include <dev/pci/pcireg.h>
87 #include <dev/pci/pcivar.h>
88 #include <dev/pci/pcidevs.h>
89 #include <dev/pci/twereg.h>
90 #include <dev/pci/twevar.h>
91 #include <dev/pci/tweio.h>
92
93 #include "locators.h"
94 #include "ioconf.h"
95
96 #define PCI_CBIO 0x10
97
98 static int twe_aen_get(struct twe_softc *, uint16_t *);
99 static void twe_aen_handler(struct twe_ccb *, int);
100 static void twe_aen_enqueue(struct twe_softc *sc, uint16_t, int);
101 static uint16_t twe_aen_dequeue(struct twe_softc *);
102
103 static void twe_attach(device_t, device_t, void *);
104 static int twe_rescan(device_t, const char *, const int *);
105 static int twe_init_connection(struct twe_softc *);
106 static int twe_intr(void *);
107 static int twe_match(device_t, cfdata_t, void *);
108 static int twe_param_set(struct twe_softc *, int, int, size_t, void *);
109 static void twe_poll(struct twe_softc *);
110 static int twe_print(void *, const char *);
111 static int twe_reset(struct twe_softc *);
112 static int twe_status_check(struct twe_softc *, u_int);
113 static int twe_status_wait(struct twe_softc *, u_int, int);
114 static void twe_describe_controller(struct twe_softc *);
115 static void twe_clear_pci_abort(struct twe_softc *sc);
116 static void twe_clear_pci_parity_error(struct twe_softc *sc);
117
118 static int twe_add_unit(struct twe_softc *, int);
119 static int twe_del_unit(struct twe_softc *, int);
121
122 static inline u_int32_t twe_inl(struct twe_softc *, int);
123 static inline void twe_outl(struct twe_softc *, int, u_int32_t);
124
125 extern struct cfdriver twe_cd;
126
127 CFATTACH_DECL3_NEW(twe, sizeof(struct twe_softc),
128 twe_match, twe_attach, NULL, NULL, twe_rescan, NULL, 0);
129
130 /* FreeBSD driver revision for sysctl expected by the 3ware cli */
131 const char twever[] = "1.50.01.002";
132
133 /*
134 * Tables to convert numeric codes to strings.
135 */
136 const struct twe_code_table twe_table_status[] = {
137 { 0x00, "successful completion" },
138
139 /* info */
140 { 0x42, "command in progress" },
141 { 0x6c, "retrying interface CRC error from UDMA command" },
142
143 /* warning */
144 { 0x81, "redundant/inconsequential request ignored" },
145 { 0x8e, "failed to write zeroes to LBA 0" },
146 { 0x8f, "failed to profile TwinStor zones" },
147
148 /* fatal */
149 { 0xc1, "aborted due to system command or reconfiguration" },
150 { 0xc4, "aborted" },
151 { 0xc5, "access error" },
152 { 0xc6, "access violation" },
153 { 0xc7, "device failure" }, /* high byte may be port # */
154 { 0xc8, "controller error" },
155 { 0xc9, "timed out" },
156 { 0xcb, "invalid unit number" },
157 { 0xcf, "unit not available" },
158 { 0xd2, "undefined opcode" },
159 { 0xdb, "request incompatible with unit" },
160 { 0xdc, "invalid request" },
161 { 0xff, "firmware error, reset requested" },
162
163 { 0, NULL }
164 };
165
166 const struct twe_code_table twe_table_unitstate[] = {
167 { TWE_PARAM_UNITSTATUS_Normal, "Normal" },
168 { TWE_PARAM_UNITSTATUS_Initialising, "Initializing" },
169 { TWE_PARAM_UNITSTATUS_Degraded, "Degraded" },
170 { TWE_PARAM_UNITSTATUS_Rebuilding, "Rebuilding" },
171 { TWE_PARAM_UNITSTATUS_Verifying, "Verifying" },
172 { TWE_PARAM_UNITSTATUS_Corrupt, "Corrupt" },
173 { TWE_PARAM_UNITSTATUS_Missing, "Missing" },
174
175 { 0, NULL }
176 };
177
178 const struct twe_code_table twe_table_unittype[] = {
179 /* array descriptor configuration */
180 { TWE_AD_CONFIG_RAID0, "RAID0" },
181 { TWE_AD_CONFIG_RAID1, "RAID1" },
182 { TWE_AD_CONFIG_TwinStor, "TwinStor" },
183 { TWE_AD_CONFIG_RAID5, "RAID5" },
184 { TWE_AD_CONFIG_RAID10, "RAID10" },
185 { TWE_UD_CONFIG_JBOD, "JBOD" },
186
187 { 0, NULL }
188 };
189
190 const struct twe_code_table twe_table_stripedepth[] = {
191 { TWE_AD_STRIPE_4k, "4K" },
192 { TWE_AD_STRIPE_8k, "8K" },
193 { TWE_AD_STRIPE_16k, "16K" },
194 { TWE_AD_STRIPE_32k, "32K" },
195 { TWE_AD_STRIPE_64k, "64K" },
196 { TWE_AD_STRIPE_128k, "128K" },
197 { TWE_AD_STRIPE_256k, "256K" },
198 { TWE_AD_STRIPE_512k, "512K" },
199 { TWE_AD_STRIPE_1024k, "1024K" },
200
201 { 0, NULL }
202 };
203
204 /*
205 * Asynchronous event notification messages are qualified:
206 * a - not unit/port specific
207 * u - unit specific
208 * p - port specific
209 *
210 * They are further qualified with a severity:
211 * E - LOG_EMERG
212 * a - LOG_ALERT
213 * c - LOG_CRIT
214 * e - LOG_ERR
215 * w - LOG_WARNING
216 * n - LOG_NOTICE
217 * i - LOG_INFO
218 * d - LOG_DEBUG
219 * blank - just use printf
220 */
221 const struct twe_code_table twe_table_aen[] = {
	{ 0x00, "a  queue empty" },
	{ 0x01, "a  soft reset" },
224 { 0x02, "uc degraded mode" },
225 { 0x03, "aa controller error" },
226 { 0x04, "uE rebuild fail" },
227 { 0x05, "un rebuild done" },
228 { 0x06, "ue incomplete unit" },
229 { 0x07, "un initialization done" },
230 { 0x08, "uw unclean shutdown detected" },
231 { 0x09, "pe drive timeout" },
232 { 0x0a, "pc drive error" },
233 { 0x0b, "un rebuild started" },
234 { 0x0c, "un initialization started" },
235 { 0x0d, "ui logical unit deleted" },
236 { 0x0f, "pc SMART threshold exceeded" },
	{ 0x15, "a  table undefined" },	/* XXX: Not in FreeBSD's table */
238 { 0x21, "pe ATA UDMA downgrade" },
239 { 0x22, "pi ATA UDMA upgrade" },
240 { 0x23, "pw sector repair occurred" },
241 { 0x24, "aa SBUF integrity check failure" },
242 { 0x25, "pa lost cached write" },
243 { 0x26, "pa drive ECC error detected" },
244 { 0x27, "pe DCB checksum error" },
245 { 0x28, "pn DCB unsupported version" },
246 { 0x29, "ui verify started" },
247 { 0x2a, "ua verify failed" },
248 { 0x2b, "ui verify complete" },
249 { 0x2c, "pw overwrote bad sector during rebuild" },
250 { 0x2d, "pa encountered bad sector during rebuild" },
251 { 0x2e, "pe replacement drive too small" },
252 { 0x2f, "ue array not previously initialized" },
	{ 0x30, "p  drive not supported" },
	{ 0xff, "a  aen queue full" },
255
256 { 0, NULL },
257 };
258
259 const char *
260 twe_describe_code(const struct twe_code_table *table, uint32_t code)
261 {
262
263 for (; table->string != NULL; table++) {
264 if (table->code == code)
265 return (table->string);
266 }
267 return (NULL);
268 }
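
/*
 * Usage sketch (illustrative only): the tables above are looked up via
 * twe_describe_code().  For twe_table_aen, the first character of the
 * returned string is the qualifier ('a', 'u' or 'p'), the second is the
 * severity code (or blank), and the message text starts at offset 3,
 * which is what twe_aen_enqueue() below relies on:
 *
 *	const char *str;
 *
 *	str = twe_describe_code(twe_table_aen, TWE_AEN_CODE(aen));
 *	if (str != NULL)
 *		printf("qualifier '%c', severity '%c': %s\n",
 *		    str[0], str[1], str + 3);
 */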
269
270 static inline u_int32_t
271 twe_inl(struct twe_softc *sc, int off)
272 {
273
274 bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
275 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
276 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
277 }
278
279 static inline void
280 twe_outl(struct twe_softc *sc, int off, u_int32_t val)
281 {
282
283 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
284 bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
285 BUS_SPACE_BARRIER_WRITE);
286 }
287
288 /*
289 * Match a supported board.
290 */
291 static int
292 twe_match(device_t parent, cfdata_t cfdata, void *aux)
293 {
294 struct pci_attach_args *pa;
295
296 pa = aux;
297
298 return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE &&
299 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE ||
300 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE_ASIC));
301 }
302
303 /*
304 * Attach a supported board.
305 *
306 * XXX This doesn't fail gracefully.
307 */
308 static void
309 twe_attach(device_t parent, device_t self, void *aux)
310 {
311 struct pci_attach_args *pa;
312 struct twe_softc *sc;
313 pci_chipset_tag_t pc;
314 pci_intr_handle_t ih;
315 pcireg_t csr;
316 const char *intrstr;
317 int s, size, i, rv, rseg;
318 size_t max_segs, max_xfer;
319 bus_dma_segment_t seg;
320 const struct sysctlnode *node;
321 struct twe_cmd *tc;
322 struct twe_ccb *ccb;
323 char intrbuf[PCI_INTRSTR_LEN];
324
325 sc = device_private(self);
326 sc->sc_dev = self;
327 pa = aux;
328 pc = pa->pa_pc;
329 sc->sc_dmat = pa->pa_dmat;
330 SIMPLEQ_INIT(&sc->sc_ccb_queue);
331 SLIST_INIT(&sc->sc_ccb_freelist);
332
333 aprint_naive(": RAID controller\n");
334 aprint_normal(": 3ware Escalade\n");
335
336
337 if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
338 &sc->sc_iot, &sc->sc_ioh, NULL, NULL)) {
339 aprint_error_dev(self, "can't map i/o space\n");
340 return;
341 }
342
343 /* Enable the device. */
344 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
345 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
346 csr | PCI_COMMAND_MASTER_ENABLE);
347
348 /* Map and establish the interrupt. */
349 if (pci_intr_map(pa, &ih)) {
350 aprint_error_dev(self, "can't map interrupt\n");
351 return;
352 }
353
354 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
355 sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_BIO, twe_intr, sc,
356 device_xname(self));
357 if (sc->sc_ih == NULL) {
358 aprint_error_dev(self, "can't establish interrupt%s%s\n",
359 (intrstr) ? " at " : "",
360 (intrstr) ? intrstr : "");
361 return;
362 }
363
364 if (intrstr != NULL)
365 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
366
367 /*
368 * Allocate and initialise the command blocks and CCBs.
369 */
370 size = sizeof(struct twe_cmd) * TWE_MAX_QUEUECNT;
371
372 if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
373 &rseg, BUS_DMA_NOWAIT)) != 0) {
374 aprint_error_dev(self,
375 "unable to allocate commands, rv = %d\n", rv);
376 return;
377 }
378
379 if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
380 (void **)&sc->sc_cmds,
381 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
382 aprint_error_dev(self,
383 "unable to map commands, rv = %d\n", rv);
384 return;
385 }
386
387 if ((rv = bus_dmamap_create(sc->sc_dmat, size, size, 1, 0,
388 BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
389 aprint_error_dev(self,
390 "unable to create command DMA map, rv = %d\n", rv);
391 return;
392 }
393
394 if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_cmds,
395 size, NULL, BUS_DMA_NOWAIT)) != 0) {
396 aprint_error_dev(self,
397 "unable to load command DMA map, rv = %d\n", rv);
398 return;
399 }
400
401 sc->sc_cmds_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
402 memset(sc->sc_cmds, 0, size);
403
404 tc = (struct twe_cmd *)sc->sc_cmds;
405 max_segs = twe_get_maxsegs();
406 max_xfer = twe_get_maxxfer(max_segs);
407
408 ccb = malloc(sizeof(*ccb) * TWE_MAX_QUEUECNT, M_DEVBUF, M_WAITOK);
409 sc->sc_ccbs = ccb;
410
411 for (i = 0; i < TWE_MAX_QUEUECNT; i++, tc++, ccb++) {
412 ccb->ccb_cmd = tc;
413 ccb->ccb_cmdid = i;
414 ccb->ccb_flags = 0;
415 rv = bus_dmamap_create(sc->sc_dmat, max_xfer,
416 max_segs, PAGE_SIZE, 0,
417 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
418 &ccb->ccb_dmamap_xfer);
419 if (rv != 0) {
420 aprint_error_dev(self,
421 "can't create dmamap, rv = %d\n", rv);
422 return;
423 }
424
425 /* Save the first CCB for AEN retrieval. */
426 if (i != 0)
427 SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb,
428 ccb_chain.slist);
429 }
430
431 /* Wait for the controller to become ready. */
432 if (twe_status_wait(sc, TWE_STS_MICROCONTROLLER_READY, 6)) {
433 aprint_error_dev(self, "microcontroller not ready\n");
434 return;
435 }
436
437 twe_outl(sc, TWE_REG_CTL, TWE_CTL_DISABLE_INTRS);
438
439 /* Reset the controller. */
440 s = splbio();
441 rv = twe_reset(sc);
442 splx(s);
443 if (rv) {
444 aprint_error_dev(self, "reset failed\n");
445 return;
446 }
447
448 /* Initialise connection with controller. */
449 twe_init_connection(sc);
450
451 twe_describe_controller(sc);
452
453 /* Find and attach RAID array units. */
454 twe_rescan(self, "twe", 0);
455
456 /* ...and finally, enable interrupts. */
457 twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR |
458 TWE_CTL_UNMASK_RESP_INTR |
459 TWE_CTL_ENABLE_INTRS);
460
461 /* sysctl set-up for 3ware cli */
462 if (sysctl_createv(NULL, 0, NULL, &node,
463 0, CTLTYPE_NODE, device_xname(self),
464 SYSCTL_DESCR("twe driver information"),
465 NULL, 0, NULL, 0,
466 CTL_HW, CTL_CREATE, CTL_EOL) != 0) {
467 aprint_error_dev(self, "could not create %s.%s sysctl node\n",
468 "hw", device_xname(self));
469 return;
470 }
471 if ((i = sysctl_createv(NULL, 0, NULL, NULL,
472 0, CTLTYPE_STRING, "driver_version",
473 SYSCTL_DESCR("twe0 driver version"),
474 NULL, 0, __UNCONST(&twever), 0,
475 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
476 != 0) {
477 aprint_error_dev(self,
478 "could not create %s.%s.driver_version sysctl\n",
479 "hw", device_xname(self));
480 return;
481 }
482 }
483
484 static int
485 twe_rescan(device_t self, const char *attr, const int *flags)
486 {
487 struct twe_softc *sc;
488 int i;
489
490 sc = device_private(self);
491 sc->sc_nunits = 0;
492 for (i = 0; i < TWE_MAX_UNITS; i++)
493 (void) twe_add_unit(sc, i);
494 return 0;
495 }
496
497
498 void
499 twe_register_callbacks(struct twe_softc *sc, int unit,
500 const struct twe_callbacks *tcb)
501 {
502
503 sc->sc_units[unit].td_callbacks = tcb;
504 }
505
506 static void
507 twe_recompute_openings(struct twe_softc *sc)
508 {
509 struct twe_drive *td;
510 int unit, openings;
511
512 if (sc->sc_nunits != 0)
513 openings = (TWE_MAX_QUEUECNT - 1) / sc->sc_nunits;
514 else
515 openings = 0;
516 if (openings == sc->sc_openings)
517 return;
518 sc->sc_openings = openings;
519
520 #ifdef TWE_DEBUG
521 printf("%s: %d array%s, %d openings per array\n",
522 device_xname(sc->sc_dev), sc->sc_nunits,
523 sc->sc_nunits == 1 ? "" : "s", sc->sc_openings);
524 #endif
525
526 for (unit = 0; unit < TWE_MAX_UNITS; unit++) {
527 td = &sc->sc_units[unit];
528 if (td->td_dev != NULL)
529 (*td->td_callbacks->tcb_openings)(td->td_dev,
530 sc->sc_openings);
531 }
532 }
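
/*
 * Worked example (hypothetical numbers): one CCB is reserved for AEN
 * retrieval, so the remaining TWE_MAX_QUEUECNT - 1 command slots are
 * shared evenly among the attached arrays.  With, say, a 65-entry queue
 * and two arrays, each array unit would be handed (65 - 1) / 2 = 32
 * openings.
 */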
533
534 static int
535 twe_add_unit(struct twe_softc *sc, int unit)
536 {
537 struct twe_param *dtp, *atp;
538 struct twe_array_descriptor *ad;
539 struct twe_drive *td;
540 struct twe_attach_args twea;
541 uint32_t newsize;
542 int rv;
543 uint16_t dsize;
544 uint8_t newtype, newstripe;
545 int locs[TWECF_NLOCS];
546
547 if (unit < 0 || unit >= TWE_MAX_UNITS)
548 return (EINVAL);
549
550 /* Find attached units. */
551 rv = twe_param_get(sc, TWE_PARAM_UNITSUMMARY,
552 TWE_PARAM_UNITSUMMARY_Status, TWE_MAX_UNITS, NULL, &dtp);
553 if (rv != 0) {
554 aprint_error_dev(sc->sc_dev,
555 "error %d fetching unit summary\n", rv);
556 return (rv);
557 }
558
559 /* For each detected unit, collect size and store in an array. */
560 td = &sc->sc_units[unit];
561
562 /* Unit present? */
563 if ((dtp->tp_data[unit] & TWE_PARAM_UNITSTATUS_Online) == 0) {
564 /*
565 * XXX Should we check to see if a device has been
566 * XXX attached at this index and detach it if it
567 * XXX has? ("rescan" semantics)
568 */
569 rv = 0;
570 goto out;
571 }
572
573 rv = twe_param_get_2(sc, TWE_PARAM_UNITINFO + unit,
574 TWE_PARAM_UNITINFO_DescriptorSize, &dsize);
575 if (rv != 0) {
576 aprint_error_dev(sc->sc_dev,
577 "error %d fetching descriptor size for unit %d\n",
578 rv, unit);
579 goto out;
580 }
581
582 rv = twe_param_get(sc, TWE_PARAM_UNITINFO + unit,
583 TWE_PARAM_UNITINFO_Descriptor, dsize - 3, NULL, &atp);
584 if (rv != 0) {
585 aprint_error_dev(sc->sc_dev,
586 "error %d fetching array descriptor for unit %d\n",
587 rv, unit);
588 goto out;
589 }
590
591 ad = (struct twe_array_descriptor *)atp->tp_data;
592 newtype = ad->configuration;
593 newstripe = ad->stripe_size;
594 free(atp, M_DEVBUF);
595
596 rv = twe_param_get_4(sc, TWE_PARAM_UNITINFO + unit,
597 TWE_PARAM_UNITINFO_Capacity, &newsize);
598 if (rv != 0) {
599 aprint_error_dev(sc->sc_dev,
600 "error %d fetching capacity for unit %d\n",
601 rv, unit);
602 goto out;
603 }
604
605 /*
606 * Have a device, so we need to attach it. If there is currently
607 * something sitting at the slot, and the parameters are different,
608 * then we detach the old device before attaching the new one.
609 */
610 if (td->td_dev != NULL &&
611 td->td_size == newsize &&
612 td->td_type == newtype &&
613 td->td_stripe == newstripe) {
614 /* Same as the old device; just keep using it. */
615 rv = 0;
616 goto out;
617 } else if (td->td_dev != NULL) {
618 /* Detach the old device first. */
619 (void) config_detach(td->td_dev, DETACH_FORCE);
620 td->td_dev = NULL;
621 } else if (td->td_size == 0)
622 sc->sc_nunits++;
623
624 /*
625 * Committed to the new array unit; assign its parameters and
626 * recompute the number of available command openings.
627 */
628 td->td_size = newsize;
629 td->td_type = newtype;
630 td->td_stripe = newstripe;
631 twe_recompute_openings(sc);
632
633 twea.twea_unit = unit;
634
635 locs[TWECF_UNIT] = unit;
636
637 td->td_dev = config_found(sc->sc_dev, &twea, twe_print,
638 CFARG_SUBMATCH, config_stdsubmatch,
639 CFARG_IATTR, "twe",
640 CFARG_LOCATORS, locs,
641 CFARG_EOL);
642
643 rv = 0;
644 out:
645 free(dtp, M_DEVBUF);
646 return (rv);
647 }
648
649 static int
650 twe_del_unit(struct twe_softc *sc, int unit)
651 {
652 struct twe_drive *td;
653
654 if (unit < 0 || unit >= TWE_MAX_UNITS)
655 return (EINVAL);
656
657 td = &sc->sc_units[unit];
658 if (td->td_size != 0)
659 sc->sc_nunits--;
660 td->td_size = 0;
661 td->td_type = 0;
662 td->td_stripe = 0;
663 if (td->td_dev != NULL) {
664 (void) config_detach(td->td_dev, DETACH_FORCE);
665 td->td_dev = NULL;
666 }
667 twe_recompute_openings(sc);
668 return (0);
669 }
670
671 /*
672 * Reset the controller.
673 * MUST BE CALLED AT splbio()!
674 */
675 static int
676 twe_reset(struct twe_softc *sc)
677 {
678 uint16_t aen;
679 u_int status;
680 int got, rv;
681
682 /* Issue a soft reset. */
683 twe_outl(sc, TWE_REG_CTL, TWE_CTL_ISSUE_SOFT_RESET |
684 TWE_CTL_CLEAR_HOST_INTR |
685 TWE_CTL_CLEAR_ATTN_INTR |
686 TWE_CTL_MASK_CMD_INTR |
687 TWE_CTL_MASK_RESP_INTR |
688 TWE_CTL_CLEAR_ERROR_STS |
689 TWE_CTL_DISABLE_INTRS);
690
691 /* Wait for attention... */
692 if (twe_status_wait(sc, TWE_STS_ATTN_INTR, 30)) {
693 aprint_error_dev(sc->sc_dev,
694 "timeout waiting for attention interrupt\n");
695 return (-1);
696 }
697
698 /* ...and ACK it. */
699 twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
700
701 /*
702 * Pull AENs out of the controller; look for a soft reset AEN.
703 * Open code this, since we want to detect reset even if the
704 * queue for management tools is full.
705 *
706 * Note that since:
707 * - interrupts are blocked
708 * - we have reset the controller
709 * - acknowledged the pending ATTENTION
710 * that there is no way a pending asynchronous AEN fetch would
711 * finish, so clear the flag.
712 */
713 sc->sc_flags &= ~TWEF_AEN;
714 for (got = 0;;) {
715 rv = twe_aen_get(sc, &aen);
716 if (rv != 0)
717 printf("%s: error %d while draining event queue\n",
718 device_xname(sc->sc_dev), rv);
719 if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY)
720 break;
721 if (TWE_AEN_CODE(aen) == TWE_AEN_SOFT_RESET)
722 got = 1;
723 twe_aen_enqueue(sc, aen, 1);
724 }
725
726 if (!got) {
727 printf("%s: reset not reported\n", device_xname(sc->sc_dev));
728 return (-1);
729 }
730
731 /* Check controller status. */
732 status = twe_inl(sc, TWE_REG_STS);
733 if (twe_status_check(sc, status)) {
734 printf("%s: controller errors detected\n",
735 device_xname(sc->sc_dev));
736 return (-1);
737 }
738
739 /* Drain the response queue. */
740 for (;;) {
741 status = twe_inl(sc, TWE_REG_STS);
742 if (twe_status_check(sc, status) != 0) {
743 aprint_error_dev(sc->sc_dev,
744 "can't drain response queue\n");
745 return (-1);
746 }
747 if ((status & TWE_STS_RESP_QUEUE_EMPTY) != 0)
748 break;
749 (void)twe_inl(sc, TWE_REG_RESP_QUEUE);
750 }
751
752 return (0);
753 }
754
755 /*
756 * Print autoconfiguration message for a sub-device.
757 */
758 static int
759 twe_print(void *aux, const char *pnp)
760 {
761 struct twe_attach_args *twea;
762
763 twea = aux;
764
765 if (pnp != NULL)
766 aprint_normal("block device at %s", pnp);
767 aprint_normal(" unit %d", twea->twea_unit);
768 return (UNCONF);
769 }
770
771 /*
772 * Interrupt service routine.
773 */
774 static int
775 twe_intr(void *arg)
776 {
777 struct twe_softc *sc;
778 u_int status;
779 int caught, rv;
780
781 sc = arg;
782 caught = 0;
783 status = twe_inl(sc, TWE_REG_STS);
784 twe_status_check(sc, status);
785
786 /* Host interrupts - purpose unknown. */
787 if ((status & TWE_STS_HOST_INTR) != 0) {
788 #ifdef DEBUG
789 printf("%s: host interrupt\n", device_xname(sc->sc_dev));
790 #endif
791 twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_HOST_INTR);
792 caught = 1;
793 }
794
795 /*
796 * Attention interrupts, signalled when a controller or child device
797 * state change has occurred.
798 */
799 if ((status & TWE_STS_ATTN_INTR) != 0) {
800 rv = twe_aen_get(sc, NULL);
801 if (rv != 0)
802 aprint_error_dev(sc->sc_dev,
803 "unable to retrieve AEN (%d)\n", rv);
804 else
805 twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
806 caught = 1;
807 }
808
809 /*
810 * Command interrupts, signalled when the controller can accept more
811 * commands. We don't use this; instead, we try to submit commands
812 * when we receive them, and when other commands have completed.
813 * Mask it so we don't get another one.
814 */
815 if ((status & TWE_STS_CMD_INTR) != 0) {
816 #ifdef DEBUG
817 printf("%s: command interrupt\n", device_xname(sc->sc_dev));
818 #endif
819 twe_outl(sc, TWE_REG_CTL, TWE_CTL_MASK_CMD_INTR);
820 caught = 1;
821 }
822
823 if ((status & TWE_STS_RESP_INTR) != 0) {
824 twe_poll(sc);
825 caught = 1;
826 }
827
828 return (caught);
829 }
830
831 /*
832 * Fetch an AEN. Even though this is really like parameter
833 * retrieval, we handle this specially, because we issue this
834 * AEN retrieval command from interrupt context, and thus
835 * reserve a CCB for it to avoid resource shortage.
836 *
837 * XXX There are still potential resource shortages we could
838 * XXX encounter. Consider pre-allocating all AEN-related
839 * XXX resources.
840 *
841 * MUST BE CALLED AT splbio()!
842 */
843 static int
844 twe_aen_get(struct twe_softc *sc, uint16_t *aenp)
845 {
846 struct twe_ccb *ccb;
847 struct twe_cmd *tc;
848 struct twe_param *tp;
849 int rv;
850
851 /*
852 * If we're already retrieving an AEN, just wait; another
853 * retrieval will be chained after the current one completes.
854 */
855 if (sc->sc_flags & TWEF_AEN) {
856 /*
857 * It is a fatal software programming error to attempt
858 * to fetch an AEN synchronously when an AEN fetch is
859 * already pending.
860 */
861 KASSERT(aenp == NULL);
862 return (0);
863 }
864
865 tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
866 if (tp == NULL)
867 return (ENOMEM);
868
869 ccb = twe_ccb_alloc(sc,
870 TWE_CCB_AEN | TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
871 KASSERT(ccb != NULL);
872
873 ccb->ccb_data = tp;
874 ccb->ccb_datasize = TWE_SECTOR_SIZE;
875 ccb->ccb_tx.tx_handler = (aenp == NULL) ? twe_aen_handler : NULL;
876 ccb->ccb_tx.tx_context = tp;
877 ccb->ccb_tx.tx_dv = sc->sc_dev;
878
879 tc = ccb->ccb_cmd;
880 tc->tc_size = 2;
881 tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
882 tc->tc_unit = 0;
883 tc->tc_count = htole16(1);
884
885 /* Fill in the outbound parameter data. */
886 tp->tp_table_id = htole16(TWE_PARAM_AEN);
887 tp->tp_param_id = TWE_PARAM_AEN_UnitCode;
888 tp->tp_param_size = 2;
889
890 /* Map the transfer. */
891 if ((rv = twe_ccb_map(sc, ccb)) != 0) {
892 twe_ccb_free(sc, ccb);
893 goto done;
894 }
895
896 /* Enqueue the command and wait. */
897 if (aenp != NULL) {
898 rv = twe_ccb_poll(sc, ccb, 5);
899 twe_ccb_unmap(sc, ccb);
900 twe_ccb_free(sc, ccb);
901 if (rv == 0)
902 *aenp = le16toh(*(uint16_t *)tp->tp_data);
903 free(tp, M_DEVBUF);
904 } else {
905 sc->sc_flags |= TWEF_AEN;
906 twe_ccb_enqueue(sc, ccb);
907 rv = 0;
908 }
909
910 done:
911 return (rv);
912 }
913
914 /*
915 * Handle an AEN returned by the controller.
916 * MUST BE CALLED AT splbio()!
917 */
918 static void
919 twe_aen_handler(struct twe_ccb *ccb, int error)
920 {
921 struct twe_softc *sc;
922 struct twe_param *tp;
923 uint16_t aen;
924 int rv;
925
926 sc = device_private(ccb->ccb_tx.tx_dv);
927 tp = ccb->ccb_tx.tx_context;
928 twe_ccb_unmap(sc, ccb);
929
930 sc->sc_flags &= ~TWEF_AEN;
931
932 if (error) {
933 aprint_error_dev(sc->sc_dev, "error retrieving AEN\n");
934 aen = TWE_AEN_QUEUE_EMPTY;
935 } else
936 aen = le16toh(*(u_int16_t *)tp->tp_data);
937 free(tp, M_DEVBUF);
938 twe_ccb_free(sc, ccb);
939
940 if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY) {
941 twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
942 return;
943 }
944
945 twe_aen_enqueue(sc, aen, 0);
946
947 /*
948 * Chain another retrieval in case interrupts have been
949 * coalesced.
950 */
951 rv = twe_aen_get(sc, NULL);
952 if (rv != 0)
953 aprint_error_dev(sc->sc_dev,
954 "unable to retrieve AEN (%d)\n", rv);
955 }
956
957 static void
958 twe_aen_enqueue(struct twe_softc *sc, uint16_t aen, int quiet)
959 {
960 const char *str, *msg;
961 int s, next, nextnext, level;
962
963 /*
964 * First report the AEN on the console. Maybe.
965 */
966 if (! quiet) {
967 str = twe_describe_code(twe_table_aen, TWE_AEN_CODE(aen));
968 if (str == NULL) {
969 aprint_error_dev(sc->sc_dev,
970 "unknown AEN 0x%04x\n", aen);
971 } else {
972 msg = str + 3;
973 switch (str[1]) {
974 case 'E': level = LOG_EMERG; break;
975 case 'a': level = LOG_ALERT; break;
976 case 'c': level = LOG_CRIT; break;
977 case 'e': level = LOG_ERR; break;
978 case 'w': level = LOG_WARNING; break;
979 case 'n': level = LOG_NOTICE; break;
980 case 'i': level = LOG_INFO; break;
981 case 'd': level = LOG_DEBUG; break;
982 default:
983 /* Don't use syslog. */
984 level = -1;
985 }
986
987 if (level < 0) {
988 switch (str[0]) {
989 case 'u':
990 case 'p':
991 printf("%s: %s %d: %s\n",
992 device_xname(sc->sc_dev),
993 str[0] == 'u' ? "unit" : "port",
994 TWE_AEN_UNIT(aen), msg);
995 break;
996
997 default:
998 printf("%s: %s\n",
999 device_xname(sc->sc_dev), msg);
1000 }
1001 } else {
1002 switch (str[0]) {
1003 case 'u':
1004 case 'p':
1005 log(level, "%s: %s %d: %s\n",
1006 device_xname(sc->sc_dev),
1007 str[0] == 'u' ? "unit" : "port",
1008 TWE_AEN_UNIT(aen), msg);
1009 break;
1010
1011 default:
1012 log(level, "%s: %s\n",
1013 device_xname(sc->sc_dev), msg);
1014 }
1015 }
1016 }
1017 }
1018
	/* Now enqueue the AEN for management tools. */
1020 s = splbio();
1021
1022 next = (sc->sc_aen_head + 1) % TWE_AEN_Q_LENGTH;
1023 nextnext = (sc->sc_aen_head + 2) % TWE_AEN_Q_LENGTH;
1024
1025 /*
1026 * If this is the last free slot, then queue up a "queue
1027 * full" message.
1028 */
1029 if (nextnext == sc->sc_aen_tail)
1030 aen = TWE_AEN_QUEUE_FULL;
1031
1032 if (next != sc->sc_aen_tail) {
1033 sc->sc_aen_queue[sc->sc_aen_head] = aen;
1034 sc->sc_aen_head = next;
1035 }
1036
1037 if (sc->sc_flags & TWEF_AENQ_WAIT) {
1038 sc->sc_flags &= ~TWEF_AENQ_WAIT;
1039 wakeup(&sc->sc_aen_queue);
1040 }
1041
1042 splx(s);
1043 }
1044
1045 /* NOTE: Must be called at splbio(). */
1046 static uint16_t
1047 twe_aen_dequeue(struct twe_softc *sc)
1048 {
1049 uint16_t aen;
1050
1051 if (sc->sc_aen_tail == sc->sc_aen_head)
1052 aen = TWE_AEN_QUEUE_EMPTY;
1053 else {
1054 aen = sc->sc_aen_queue[sc->sc_aen_tail];
1055 sc->sc_aen_tail = (sc->sc_aen_tail + 1) % TWE_AEN_Q_LENGTH;
1056 }
1057
1058 return (aen);
1059 }
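
/*
 * Note on the AEN ring buffer above: sc_aen_head == sc_aen_tail means
 * "empty", so at most TWE_AEN_Q_LENGTH - 1 events can be queued.  When
 * an enqueue would take the last free slot, twe_aen_enqueue() stores
 * TWE_AEN_QUEUE_FULL there instead of the new event, so that management
 * tools can tell that events were dropped.
 */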
1060
1061 /*
1062 * These are short-hand functions that execute TWE_OP_GET_PARAM to
1063 * fetch 1, 2, and 4 byte parameter values, respectively.
1064 */
1065 int
1066 twe_param_get_1(struct twe_softc *sc, int table_id, int param_id,
1067 uint8_t *valp)
1068 {
1069 struct twe_param *tp;
1070 int rv;
1071
1072 rv = twe_param_get(sc, table_id, param_id, 1, NULL, &tp);
1073 if (rv != 0)
1074 return (rv);
1075 *valp = *(uint8_t *)tp->tp_data;
1076 free(tp, M_DEVBUF);
1077 return (0);
1078 }
1079
1080 int
1081 twe_param_get_2(struct twe_softc *sc, int table_id, int param_id,
1082 uint16_t *valp)
1083 {
1084 struct twe_param *tp;
1085 int rv;
1086
1087 rv = twe_param_get(sc, table_id, param_id, 2, NULL, &tp);
1088 if (rv != 0)
1089 return (rv);
1090 *valp = le16toh(*(uint16_t *)tp->tp_data);
1091 free(tp, M_DEVBUF);
1092 return (0);
1093 }
1094
1095 int
1096 twe_param_get_4(struct twe_softc *sc, int table_id, int param_id,
1097 uint32_t *valp)
1098 {
1099 struct twe_param *tp;
1100 int rv;
1101
1102 rv = twe_param_get(sc, table_id, param_id, 4, NULL, &tp);
1103 if (rv != 0)
1104 return (rv);
1105 *valp = le32toh(*(uint32_t *)tp->tp_data);
1106 free(tp, M_DEVBUF);
1107 return (0);
1108 }
1109
1110 /*
 * Execute a TWE_OP_GET_PARAM command.  If a callback function is provided,
 * it will be called with the generated context when the command has
 * completed.  If no callback is provided, the command is executed
 * synchronously and a pointer to the buffer containing the returned data
 * is stored in *pbuf.
1115 *
1116 * The caller or callback is responsible for freeing the buffer.
1117 *
1118 * NOTE: We assume we can sleep here to wait for a CCB to become available.
1119 */
1120 int
1121 twe_param_get(struct twe_softc *sc, int table_id, int param_id, size_t size,
1122 void (*func)(struct twe_ccb *, int), struct twe_param **pbuf)
1123 {
1124 struct twe_ccb *ccb;
1125 struct twe_cmd *tc;
1126 struct twe_param *tp;
1127 int rv, s;
1128
1129 tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
1130 if (tp == NULL)
1131 return ENOMEM;
1132
1133 ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
1134 KASSERT(ccb != NULL);
1135
1136 ccb->ccb_data = tp;
1137 ccb->ccb_datasize = TWE_SECTOR_SIZE;
1138 ccb->ccb_tx.tx_handler = func;
1139 ccb->ccb_tx.tx_context = tp;
1140 ccb->ccb_tx.tx_dv = sc->sc_dev;
1141
1142 tc = ccb->ccb_cmd;
1143 tc->tc_size = 2;
1144 tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
1145 tc->tc_unit = 0;
1146 tc->tc_count = htole16(1);
1147
1148 /* Fill in the outbound parameter data. */
1149 tp->tp_table_id = htole16(table_id);
1150 tp->tp_param_id = param_id;
1151 tp->tp_param_size = size;
1152
1153 /* Map the transfer. */
1154 if ((rv = twe_ccb_map(sc, ccb)) != 0) {
1155 twe_ccb_free(sc, ccb);
1156 goto done;
1157 }
1158
1159 /* Submit the command and either wait or let the callback handle it. */
1160 if (func == NULL) {
1161 s = splbio();
1162 rv = twe_ccb_poll(sc, ccb, 5);
1163 twe_ccb_unmap(sc, ccb);
1164 twe_ccb_free(sc, ccb);
1165 splx(s);
1166 } else {
1167 #ifdef DEBUG
1168 if (pbuf != NULL)
1169 panic("both func and pbuf defined");
1170 #endif
1171 twe_ccb_enqueue(sc, ccb);
1172 return 0;
1173 }
1174
1175 done:
1176 if (pbuf == NULL || rv != 0)
1177 free(tp, M_DEVBUF);
1178 else if (pbuf != NULL && rv == 0)
1179 *pbuf = tp;
1180 return rv;
1181 }
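
/*
 * Usage sketch (synchronous case, as the short-hand helpers above do):
 * fetch a single byte and release the one-sector result buffer, e.g. the
 * port count that twe_describe_controller() reports:
 *
 *	struct twe_param *tp;
 *	uint8_t ports;
 *
 *	if (twe_param_get(sc, TWE_PARAM_CONTROLLER,
 *	    TWE_PARAM_CONTROLLER_PortCount, 1, NULL, &tp) == 0) {
 *		ports = *(uint8_t *)tp->tp_data;
 *		free(tp, M_DEVBUF);
 *	}
 */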
1182
1183 /*
1184 * Execute a TWE_OP_SET_PARAM command.
1185 *
1186 * NOTE: We assume we can sleep here to wait for a CCB to become available.
1187 */
1188 static int
1189 twe_param_set(struct twe_softc *sc, int table_id, int param_id, size_t size,
1190 void *sbuf)
1191 {
1192 struct twe_ccb *ccb;
1193 struct twe_cmd *tc;
1194 struct twe_param *tp;
1195 int rv, s;
1196
1197 tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_WAITOK);
1198 ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
1199 ccb->ccb_data = tp;
1200 ccb->ccb_datasize = TWE_SECTOR_SIZE;
1201 ccb->ccb_tx.tx_handler = 0;
1202 ccb->ccb_tx.tx_context = tp;
1203 ccb->ccb_tx.tx_dv = sc->sc_dev;
1204
1205 tc = ccb->ccb_cmd;
1206 tc->tc_size = 2;
1207 tc->tc_opcode = TWE_OP_SET_PARAM | (tc->tc_size << 5);
1208 tc->tc_unit = 0;
1209 tc->tc_count = htole16(1);
1210
1211 /* Fill in the outbound parameter data. */
1212 tp->tp_table_id = htole16(table_id);
1213 tp->tp_param_id = param_id;
1214 tp->tp_param_size = size;
1215 memcpy(tp->tp_data, sbuf, size);
1216
1217 /* Map the transfer. */
1218 if ((rv = twe_ccb_map(sc, ccb)) != 0) {
1219 twe_ccb_free(sc, ccb);
1220 goto done;
1221 }
1222
1223 /* Submit the command and wait. */
1224 s = splbio();
1225 rv = twe_ccb_poll(sc, ccb, 5);
1226 twe_ccb_unmap(sc, ccb);
1227 twe_ccb_free(sc, ccb);
1228 splx(s);
1229 done:
1230 free(tp, M_DEVBUF);
1231 return (rv);
1232 }
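
/*
 * Usage sketch (hypothetical table/parameter IDs): the only caller here
 * is the TWEIO_SET_PARAM ioctl below, which passes a user-supplied
 * buffer straight through.  A direct call would look like:
 *
 *	uint8_t val = 1;
 *
 *	error = twe_param_set(sc, table_id, param_id, sizeof(val), &val);
 */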
1233
1234 /*
1235 * Execute a TWE_OP_INIT_CONNECTION command. Return non-zero on error.
1236 * Must be called with interrupts blocked.
1237 */
1238 static int
1239 twe_init_connection(struct twe_softc *sc)
1240 {
1241 struct twe_ccb *ccb;
1242 struct twe_cmd *tc;
1243 int rv;
1244
1245 if ((ccb = twe_ccb_alloc(sc, 0)) == NULL)
1246 return (EAGAIN);
1247
1248 /* Build the command. */
1249 tc = ccb->ccb_cmd;
1250 tc->tc_size = 3;
1251 tc->tc_opcode = TWE_OP_INIT_CONNECTION;
1252 tc->tc_unit = 0;
1253 tc->tc_count = htole16(TWE_MAX_CMDS);
1254 tc->tc_args.init_connection.response_queue_pointer = 0;
1255
1256 /* Submit the command for immediate execution. */
1257 rv = twe_ccb_poll(sc, ccb, 5);
1258 twe_ccb_free(sc, ccb);
1259 return (rv);
1260 }
1261
1262 /*
1263 * Poll the controller for completed commands. Must be called with
1264 * interrupts blocked.
1265 */
1266 static void
1267 twe_poll(struct twe_softc *sc)
1268 {
1269 struct twe_ccb *ccb;
1270 int found;
1271 u_int status, cmdid;
1272
1273 found = 0;
1274
1275 for (;;) {
1276 status = twe_inl(sc, TWE_REG_STS);
1277 twe_status_check(sc, status);
1278
1279 if ((status & TWE_STS_RESP_QUEUE_EMPTY))
1280 break;
1281
1282 found = 1;
1283 cmdid = twe_inl(sc, TWE_REG_RESP_QUEUE);
1284 cmdid = (cmdid & TWE_RESP_MASK) >> TWE_RESP_SHIFT;
1285 if (cmdid >= TWE_MAX_QUEUECNT) {
1286 aprint_error_dev(sc->sc_dev, "bad cmdid %d\n", cmdid);
1287 continue;
1288 }
1289
1290 ccb = sc->sc_ccbs + cmdid;
1291 if ((ccb->ccb_flags & TWE_CCB_ACTIVE) == 0) {
1292 printf("%s: CCB for cmdid %d not active\n",
1293 device_xname(sc->sc_dev), cmdid);
1294 continue;
1295 }
1296 ccb->ccb_flags ^= TWE_CCB_COMPLETE | TWE_CCB_ACTIVE;
1297
1298 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1299 (char *)ccb->ccb_cmd - (char *)sc->sc_cmds,
1300 sizeof(struct twe_cmd),
1301 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1302
1303 /* Pass notification to upper layers. */
1304 if (ccb->ccb_tx.tx_handler != NULL)
1305 (*ccb->ccb_tx.tx_handler)(ccb,
1306 ccb->ccb_cmd->tc_status != 0 ? EIO : 0);
1307 }
1308
1309 /* If any commands have completed, run the software queue. */
1310 if (found)
1311 twe_ccb_enqueue(sc, NULL);
1312 }
1313
1314 /*
1315 * Wait for `status' to be set in the controller status register. Return
1316 * zero if found, non-zero if the operation timed out.
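 * The `timo' argument is given in seconds; the status register is polled
 * every 100ms.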
1317 */
1318 static int
1319 twe_status_wait(struct twe_softc *sc, u_int32_t status, int timo)
1320 {
1321
1322 for (timo *= 10; timo != 0; timo--) {
1323 if ((twe_inl(sc, TWE_REG_STS) & status) == status)
1324 break;
1325 delay(100000);
1326 }
1327
1328 return (timo == 0);
1329 }
1330
1331 /*
1332 * Clear a PCI parity error.
1333 */
1334 static void
1335 twe_clear_pci_parity_error(struct twe_softc *sc)
1336 {
1337 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0x0,
1338 TWE_CTL_CLEAR_PARITY_ERROR);
1339
1340 //FreeBSD: pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PARITY_ERROR, 2);
1341 }
1342
1343
1344 /*
1345 * Clear a PCI abort.
1346 */
1347 static void
1348 twe_clear_pci_abort(struct twe_softc *sc)
1349 {
1350 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0x0, TWE_CTL_CLEAR_PCI_ABORT);
1351
1352 //FreeBSD: pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PCI_ABORT, 2);
1353 }
1354
1355 /*
1356 * Complain if the status bits aren't what we expect.
1357 */
1358 static int
1359 twe_status_check(struct twe_softc *sc, u_int status)
1360 {
1361 int rv;
1362
1363 rv = 0;
1364
1365 if ((status & TWE_STS_EXPECTED_BITS) != TWE_STS_EXPECTED_BITS) {
1366 aprint_error_dev(sc->sc_dev, "missing status bits: 0x%08x\n",
1367 status & ~TWE_STS_EXPECTED_BITS);
1368 rv = -1;
1369 }
1370
1371 if ((status & TWE_STS_UNEXPECTED_BITS) != 0) {
1372 aprint_error_dev(sc->sc_dev, "unexpected status bits: 0x%08x\n",
1373 status & TWE_STS_UNEXPECTED_BITS);
1374 rv = -1;
1375 if (status & TWE_STS_PCI_PARITY_ERROR) {
1376 aprint_error_dev(sc->sc_dev, "PCI parity error: Reseat"
1377 " card, move card or buggy device present.\n");
1378 twe_clear_pci_parity_error(sc);
1379 }
1380 if (status & TWE_STS_PCI_ABORT) {
1381 aprint_error_dev(sc->sc_dev, "PCI abort, clearing.\n");
1382 twe_clear_pci_abort(sc);
1383 }
1384 }
1385
1386 return (rv);
1387 }
1388
1389 /*
1390 * Allocate and initialise a CCB.
1391 */
1392 static inline void
1393 twe_ccb_init(struct twe_softc *sc, struct twe_ccb *ccb, int flags)
1394 {
1395 struct twe_cmd *tc;
1396
1397 ccb->ccb_tx.tx_handler = NULL;
1398 ccb->ccb_flags = flags;
1399 tc = ccb->ccb_cmd;
1400 tc->tc_status = 0;
1401 tc->tc_flags = 0;
1402 tc->tc_cmdid = ccb->ccb_cmdid;
1403 }
1404
1405 struct twe_ccb *
1406 twe_ccb_alloc(struct twe_softc *sc, int flags)
1407 {
1408 struct twe_ccb *ccb;
1409 int s;
1410
1411 s = splbio();
1412 if (__predict_false((flags & TWE_CCB_AEN) != 0)) {
1413 /* Use the reserved CCB. */
1414 ccb = sc->sc_ccbs;
1415 } else {
1416 /* Allocate a CCB and command block. */
1417 if (__predict_false((ccb =
1418 SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
1419 splx(s);
1420 return (NULL);
1421 }
1422 SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
1423 }
1424 #ifdef DIAGNOSTIC
1425 if ((long)(ccb - sc->sc_ccbs) == 0 && (flags & TWE_CCB_AEN) == 0)
1426 panic("twe_ccb_alloc: got reserved CCB for non-AEN");
1427 if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
1428 panic("twe_ccb_alloc: CCB %ld already allocated",
1429 (long)(ccb - sc->sc_ccbs));
1430 flags |= TWE_CCB_ALLOCED;
1431 #endif
1432 splx(s);
1433
1434 twe_ccb_init(sc, ccb, flags);
1435 return (ccb);
1436 }
1437
1438 struct twe_ccb *
1439 twe_ccb_alloc_wait(struct twe_softc *sc, int flags)
1440 {
1441 struct twe_ccb *ccb;
1442 int s;
1443
1444 KASSERT((flags & TWE_CCB_AEN) == 0);
1445
1446 s = splbio();
1447 while (__predict_false((ccb =
1448 SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
1449 sc->sc_flags |= TWEF_WAIT_CCB;
1450 (void) tsleep(&sc->sc_ccb_freelist, PRIBIO, "tweccb", 0);
1451 }
1452 SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
1453 #ifdef DIAGNOSTIC
1454 if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
1455 panic("twe_ccb_alloc_wait: CCB %ld already allocated",
1456 (long)(ccb - sc->sc_ccbs));
1457 flags |= TWE_CCB_ALLOCED;
1458 #endif
1459 splx(s);
1460
1461 twe_ccb_init(sc, ccb, flags);
1462 return (ccb);
1463 }
1464
1465 /*
1466 * Free a CCB.
1467 */
1468 void
1469 twe_ccb_free(struct twe_softc *sc, struct twe_ccb *ccb)
1470 {
1471 int s;
1472
1473 s = splbio();
1474 if ((ccb->ccb_flags & TWE_CCB_AEN) == 0) {
1475 SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, ccb_chain.slist);
1476 if (__predict_false((sc->sc_flags & TWEF_WAIT_CCB) != 0)) {
1477 sc->sc_flags &= ~TWEF_WAIT_CCB;
1478 wakeup(&sc->sc_ccb_freelist);
1479 }
1480 }
1481 ccb->ccb_flags = 0;
1482 splx(s);
1483 }
1484
1485 /*
1486 * Map the specified CCB's command block and data buffer (if any) into
1487 * controller visible space. Perform DMA synchronisation.
1488 */
1489 int
1490 twe_ccb_map(struct twe_softc *sc, struct twe_ccb *ccb)
1491 {
1492 struct twe_cmd *tc;
1493 int flags, nsegs, i, s, rv;
1494 void *data;
1495
1496 /*
1497 * The data as a whole must be 512-byte aligned.
1498 */
1499 if (((u_long)ccb->ccb_data & (TWE_ALIGNMENT - 1)) != 0) {
1500 s = splvm();
1501 /* XXX */
1502 rv = uvm_km_kmem_alloc(kmem_va_arena,
1503 ccb->ccb_datasize, (VM_NOSLEEP | VM_INSTANTFIT),
1504 (vmem_addr_t *)&ccb->ccb_abuf);
1505 splx(s);
1506 data = (void *)ccb->ccb_abuf;
1507 if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
1508 memcpy(data, ccb->ccb_data, ccb->ccb_datasize);
1509 } else {
1510 ccb->ccb_abuf = (vaddr_t)0;
1511 data = ccb->ccb_data;
1512 }
1513
1514 /*
1515 * Map the data buffer into bus space and build the S/G list.
1516 */
1517 rv = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer, data,
1518 ccb->ccb_datasize, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1519 ((ccb->ccb_flags & TWE_CCB_DATA_IN) ?
1520 BUS_DMA_READ : BUS_DMA_WRITE));
1521 if (rv != 0) {
1522 if (ccb->ccb_abuf != (vaddr_t)0) {
1523 s = splvm();
1524 /* XXX */
1525 uvm_km_kmem_free(kmem_va_arena, ccb->ccb_abuf,
1526 ccb->ccb_datasize);
1527 splx(s);
1528 }
1529 return (rv);
1530 }
1531
1532 nsegs = ccb->ccb_dmamap_xfer->dm_nsegs;
1533 tc = ccb->ccb_cmd;
1534 tc->tc_size += 2 * nsegs;
1535
1536 /* The location of the S/G list is dependent upon command type. */
1537 switch (tc->tc_opcode >> 5) {
1538 case 2:
1539 for (i = 0; i < nsegs; i++) {
1540 tc->tc_args.param.sgl[i].tsg_address =
1541 htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
1542 tc->tc_args.param.sgl[i].tsg_length =
1543 htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
1544 }
1545 /* XXX Needed? */
1546 for (; i < TWE_SG_SIZE; i++) {
1547 tc->tc_args.param.sgl[i].tsg_address = 0;
1548 tc->tc_args.param.sgl[i].tsg_length = 0;
1549 }
1550 break;
1551 case 3:
1552 for (i = 0; i < nsegs; i++) {
1553 tc->tc_args.io.sgl[i].tsg_address =
1554 htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
1555 tc->tc_args.io.sgl[i].tsg_length =
1556 htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
1557 }
1558 /* XXX Needed? */
1559 for (; i < TWE_SG_SIZE; i++) {
1560 tc->tc_args.io.sgl[i].tsg_address = 0;
1561 tc->tc_args.io.sgl[i].tsg_length = 0;
1562 }
1563 break;
1564 default:
1565 /*
1566 * In all likelihood, this is a command passed from
1567 * management tools in userspace where no S/G list is
1568 * necessary because no data is being passed.
1569 */
1570 break;
1571 }
1572
1573 if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
1574 flags = BUS_DMASYNC_PREREAD;
1575 else
1576 flags = 0;
1577 if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
1578 flags |= BUS_DMASYNC_PREWRITE;
1579
1580 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
1581 ccb->ccb_datasize, flags);
1582 return (0);
1583 }
1584
1585 /*
1586 * Unmap the specified CCB's command block and data buffer (if any) and
1587 * perform DMA synchronisation.
1588 */
1589 void
1590 twe_ccb_unmap(struct twe_softc *sc, struct twe_ccb *ccb)
1591 {
1592 int flags, s;
1593
1594 if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
1595 flags = BUS_DMASYNC_POSTREAD;
1596 else
1597 flags = 0;
1598 if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
1599 flags |= BUS_DMASYNC_POSTWRITE;
1600
1601 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
1602 ccb->ccb_datasize, flags);
1603 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
1604
1605 if (ccb->ccb_abuf != (vaddr_t)0) {
1606 if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
1607 memcpy(ccb->ccb_data, (void *)ccb->ccb_abuf,
1608 ccb->ccb_datasize);
1609 s = splvm();
1610 /* XXX */
1611 uvm_km_kmem_free(kmem_va_arena, ccb->ccb_abuf,
1612 ccb->ccb_datasize);
1613 splx(s);
1614 }
1615 }
1616
1617 /*
1618 * Submit a command to the controller and poll on completion. Return
1619 * non-zero on timeout (but don't check status, as some command types don't
1620 * return status). Must be called with interrupts blocked.
1621 */
1622 int
1623 twe_ccb_poll(struct twe_softc *sc, struct twe_ccb *ccb, int timo)
1624 {
1625 int rv;
1626
1627 if ((rv = twe_ccb_submit(sc, ccb)) != 0)
1628 return (rv);
1629
1630 for (timo *= 1000; timo != 0; timo--) {
1631 twe_poll(sc);
1632 if ((ccb->ccb_flags & TWE_CCB_COMPLETE) != 0)
1633 break;
1634 DELAY(100);
1635 }
1636
1637 return (timo == 0);
1638 }
1639
1640 /*
1641 * If a CCB is specified, enqueue it. Pull CCBs off the software queue in
1642 * the order that they were enqueued and try to submit their command blocks
1643 * to the controller for execution.
1644 */
1645 void
1646 twe_ccb_enqueue(struct twe_softc *sc, struct twe_ccb *ccb)
1647 {
1648 int s;
1649
1650 s = splbio();
1651
1652 if (ccb != NULL)
1653 SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);
1654
1655 while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) {
1656 if (twe_ccb_submit(sc, ccb))
1657 break;
1658 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb_chain.simpleq);
1659 }
1660
1661 splx(s);
1662 }
1663
1664 /*
1665 * Submit the command block associated with the specified CCB to the
1666 * controller for execution. Must be called with interrupts blocked.
1667 */
1668 int
1669 twe_ccb_submit(struct twe_softc *sc, struct twe_ccb *ccb)
1670 {
1671 bus_addr_t pa;
1672 int rv;
1673 u_int status;
1674
1675 /* Check to see if we can post a command. */
1676 status = twe_inl(sc, TWE_REG_STS);
1677 twe_status_check(sc, status);
1678
1679 if ((status & TWE_STS_CMD_QUEUE_FULL) == 0) {
1680 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1681 (char *)ccb->ccb_cmd - (char *)sc->sc_cmds,
1682 sizeof(struct twe_cmd),
1683 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1684 #ifdef DIAGNOSTIC
1685 if ((ccb->ccb_flags & TWE_CCB_ALLOCED) == 0)
1686 panic("%s: CCB %ld not ALLOCED\n",
1687 device_xname(sc->sc_dev), (long)(ccb - sc->sc_ccbs));
1688 #endif
1689 ccb->ccb_flags |= TWE_CCB_ACTIVE;
1690 pa = sc->sc_cmds_paddr +
1691 ccb->ccb_cmdid * sizeof(struct twe_cmd);
1692 twe_outl(sc, TWE_REG_CMD_QUEUE, (u_int32_t)pa);
1693 rv = 0;
1694 } else
1695 rv = EBUSY;
1696
1697 return (rv);
1698 }
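
/*
 * Putting the CCB primitives together -- a sketch of the synchronous
 * pattern used by twe_param_get() and twe_param_set() above:
 *
 *	ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
 *	ccb->ccb_data = buf;
 *	ccb->ccb_datasize = TWE_SECTOR_SIZE;
 *	... fill in ccb->ccb_cmd ...
 *	if ((rv = twe_ccb_map(sc, ccb)) == 0) {
 *		s = splbio();
 *		rv = twe_ccb_poll(sc, ccb, 5);
 *		twe_ccb_unmap(sc, ccb);
 *		splx(s);
 *	}
 *	twe_ccb_free(sc, ccb);
 *
 * Asynchronous callers instead set ccb_tx.tx_handler and hand the CCB to
 * twe_ccb_enqueue(); the completion handler then does the unmap/free, as
 * twe_aen_handler() does above.
 */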
1699
1700
1701 /*
1702 * Accept an open operation on the control device.
1703 */
1704 static int
1705 tweopen(dev_t dev, int flag, int mode, struct lwp *l)
1706 {
1707 struct twe_softc *twe;
1708
1709 if ((twe = device_lookup_private(&twe_cd, minor(dev))) == NULL)
1710 return (ENXIO);
1711 if ((twe->sc_flags & TWEF_OPEN) != 0)
1712 return (EBUSY);
1713
1714 twe->sc_flags |= TWEF_OPEN;
1715 return (0);
1716 }
1717
1718 /*
1719 * Accept the last close on the control device.
1720 */
1721 static int
1722 tweclose(dev_t dev, int flag, int mode,
1723 struct lwp *l)
1724 {
1725 struct twe_softc *twe;
1726
1727 twe = device_lookup_private(&twe_cd, minor(dev));
1728 twe->sc_flags &= ~TWEF_OPEN;
1729 return (0);
1730 }
1731
1732 void
1733 twe_ccb_wait_handler(struct twe_ccb *ccb, int error)
1734 {
1735
1736 /* Just wake up the sleeper. */
1737 wakeup(ccb);
1738 }
1739
1740 /*
1741 * Handle control operations.
1742 */
1743 static int
1744 tweioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
1745 {
1746 struct twe_softc *twe;
1747 struct twe_ccb *ccb;
1748 struct twe_param *param;
1749 struct twe_usercommand *tu;
1750 struct twe_paramcommand *tp;
1751 struct twe_drivecommand *td;
1752 void *pdata = NULL;
1753 int s, error = 0;
1754 u_int8_t cmdid;
1755
1756 twe = device_lookup_private(&twe_cd, minor(dev));
1757 tu = (struct twe_usercommand *)data;
1758 tp = (struct twe_paramcommand *)data;
1759 td = (struct twe_drivecommand *)data;
1760
1761 /* This is intended to be compatible with the FreeBSD interface. */
1762 switch (cmd) {
1763 case TWEIO_COMMAND:
1764 error = kauth_authorize_device_passthru(l->l_cred, dev,
1765 KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
1766 if (error)
1767 return (error);
1768
1769 /* XXX mutex */
1770 if (tu->tu_size > 0) {
1771 /*
1772 * XXX Handle > TWE_SECTOR_SIZE? Let's see if
1773 * it's really necessary, first.
1774 */
1775 if (tu->tu_size > TWE_SECTOR_SIZE) {
1776 #ifdef TWE_DEBUG
1777 printf("%s: TWEIO_COMMAND: tu_size = %zu\n",
1778 device_xname(twe->sc_dev), tu->tu_size);
1779 #endif
1780 return EINVAL;
1781 }
1782 pdata = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_WAITOK);
1783 error = copyin(tu->tu_data, pdata, tu->tu_size);
1784 if (error != 0)
1785 goto done;
1786 ccb = twe_ccb_alloc_wait(twe,
1787 TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
1788 KASSERT(ccb != NULL);
1789 ccb->ccb_data = pdata;
1790 ccb->ccb_datasize = TWE_SECTOR_SIZE;
1791 } else {
1792 ccb = twe_ccb_alloc_wait(twe, 0);
1793 KASSERT(ccb != NULL);
1794 }
1795
1796 ccb->ccb_tx.tx_handler = twe_ccb_wait_handler;
1797 ccb->ccb_tx.tx_context = NULL;
1798 ccb->ccb_tx.tx_dv = twe->sc_dev;
1799
1800 cmdid = ccb->ccb_cmdid;
1801 memcpy(ccb->ccb_cmd, &tu->tu_cmd, sizeof(struct twe_cmd));
1802 ccb->ccb_cmd->tc_cmdid = cmdid;
1803
1804 /* Map the transfer. */
1805 if ((error = twe_ccb_map(twe, ccb)) != 0) {
1806 twe_ccb_free(twe, ccb);
1807 goto done;
1808 }
1809
1810 /* Submit the command and wait up to 1 minute. */
1811 error = 0;
1812 twe_ccb_enqueue(twe, ccb);
1813 s = splbio();
1814 while ((ccb->ccb_flags & TWE_CCB_COMPLETE) == 0)
1815 if ((error = tsleep(ccb, PRIBIO, "tweioctl",
1816 60 * hz)) != 0)
1817 break;
1818 splx(s);
1819
1820 /* Copy the command back to the ioctl argument. */
1821 memcpy(&tu->tu_cmd, ccb->ccb_cmd, sizeof(struct twe_cmd));
1822 #ifdef TWE_DEBUG
1823 printf("%s: TWEIO_COMMAND: tc_opcode = 0x%02x, "
1824 "tc_status = 0x%02x\n", device_xname(twe->sc_dev),
1825 tu->tu_cmd.tc_opcode, tu->tu_cmd.tc_status);
1826 #endif
1827
1828 s = splbio();
1829 twe_ccb_free(twe, ccb);
1830 splx(s);
1831
1832 if (tu->tu_size > 0)
1833 error = copyout(pdata, tu->tu_data, tu->tu_size);
1834 goto done;
1835
1836 case TWEIO_STATS:
1837 return (ENOENT);
1838
1839 case TWEIO_AEN_POLL:
1840 s = splbio();
1841 *(u_int *)data = twe_aen_dequeue(twe);
1842 splx(s);
1843 return (0);
1844
1845 case TWEIO_AEN_WAIT:
1846 s = splbio();
1847 while ((*(u_int *)data =
1848 twe_aen_dequeue(twe)) == TWE_AEN_QUEUE_EMPTY) {
1849 twe->sc_flags |= TWEF_AENQ_WAIT;
1850 error = tsleep(&twe->sc_aen_queue, PRIBIO | PCATCH,
1851 "tweaen", 0);
1852 if (error == EINTR) {
1853 splx(s);
1854 return (error);
1855 }
1856 }
1857 splx(s);
1858 return (0);
1859
1860 case TWEIO_GET_PARAM:
1861 error = twe_param_get(twe, tp->tp_table_id, tp->tp_param_id,
		    tp->tp_size, 0, &param);
1863 if (error != 0)
1864 return (error);
1865 if (param->tp_param_size > tp->tp_size) {
1866 error = EFAULT;
1867 goto done;
1868 }
1869 error = copyout(param->tp_data, tp->tp_data,
1870 param->tp_param_size);
1871 free(param, M_DEVBUF);
1872 goto done;
1873
1874 case TWEIO_SET_PARAM:
1875 pdata = malloc(tp->tp_size, M_DEVBUF, M_WAITOK);
1876 if ((error = copyin(tp->tp_data, pdata, tp->tp_size)) != 0)
1877 goto done;
1878 error = twe_param_set(twe, tp->tp_table_id, tp->tp_param_id,
1879 tp->tp_size, pdata);
1880 goto done;
1881
1882 case TWEIO_RESET:
1883 s = splbio();
1884 twe_reset(twe);
1885 splx(s);
1886 return (0);
1887
1888 case TWEIO_ADD_UNIT:
1889 /* XXX mutex */
1890 return (twe_add_unit(twe, td->td_unit));
1891
1892 case TWEIO_DEL_UNIT:
1893 /* XXX mutex */
1894 return (twe_del_unit(twe, td->td_unit));
1895
1896 default:
1897 return EINVAL;
1898 }
1899 done:
1900 if (pdata)
1901 free(pdata, M_DEVBUF);
1902 return error;
1903 }
1904
1905 const struct cdevsw twe_cdevsw = {
1906 .d_open = tweopen,
1907 .d_close = tweclose,
1908 .d_read = noread,
1909 .d_write = nowrite,
1910 .d_ioctl = tweioctl,
1911 .d_stop = nostop,
1912 .d_tty = notty,
1913 .d_poll = nopoll,
1914 .d_mmap = nommap,
1915 .d_kqfilter = nokqfilter,
1916 .d_discard = nodiscard,
1917 .d_flag = D_OTHER
1918 };
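
/*
 * Userland sketch (hypothetical; not part of the driver): the control
 * device accepts the FreeBSD-compatible ioctls from tweio.h, so a
 * management tool could drain the pending AEN queue roughly like this,
 * assuming its control node is opened as e.g. /dev/twe0:
 *
 *	int fd = open("/dev/twe0", O_RDWR);
 *	u_int aen;
 *
 *	do {
 *		if (ioctl(fd, TWEIO_AEN_POLL, &aen) == -1)
 *			break;
 *	} while (aen != TWE_AEN_QUEUE_EMPTY);
 */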
1919
1920 /*
1921 * Print some information about the controller
1922 */
1923 static void
1924 twe_describe_controller(struct twe_softc *sc)
1925 {
1926 struct twe_param *p[6];
1927 int i, rv = 0;
1928 uint32_t dsize;
1929 uint8_t ports;
1930
1931 ports = 0;
1932
1933 /* get the port count */
1934 rv |= twe_param_get_1(sc, TWE_PARAM_CONTROLLER,
1935 TWE_PARAM_CONTROLLER_PortCount, &ports);
1936
1937 /* get version strings */
1938 rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_Mon,
1939 16, NULL, &p[0]);
1940 rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_FW,
1941 16, NULL, &p[1]);
1942 rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_BIOS,
1943 16, NULL, &p[2]);
1944 rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCB,
1945 8, NULL, &p[3]);
1946 rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_ATA,
1947 8, NULL, &p[4]);
1948 rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCI,
1949 8, NULL, &p[5]);
1950
1951 if (rv) {
1952 /* some error occurred */
1953 aprint_error_dev(sc->sc_dev,
1954 "failed to fetch version information\n");
1955 return;
1956 }
1957
1958 aprint_normal_dev(sc->sc_dev, "%d ports, Firmware %.16s, BIOS %.16s\n",
1959 ports, p[1]->tp_data, p[2]->tp_data);
1960
1961 aprint_verbose_dev(sc->sc_dev,
1962 "Monitor %.16s, PCB %.8s, Achip %.8s, Pchip %.8s\n",
1963 p[0]->tp_data, p[3]->tp_data,
1964 p[4]->tp_data, p[5]->tp_data);
1965
1966 free(p[0], M_DEVBUF);
1967 free(p[1], M_DEVBUF);
1968 free(p[2], M_DEVBUF);
1969 free(p[3], M_DEVBUF);
1970 free(p[4], M_DEVBUF);
1971 free(p[5], M_DEVBUF);
1972
1973 rv = twe_param_get(sc, TWE_PARAM_DRIVESUMMARY,
1974 TWE_PARAM_DRIVESUMMARY_Status, 16, NULL, &p[0]);
1975 if (rv) {
1976 aprint_error_dev(sc->sc_dev,
1977 "failed to get drive status summary\n");
1978 return;
1979 }
1980 for (i = 0; i < ports; i++) {
1981 if (p[0]->tp_data[i] != TWE_PARAM_DRIVESTATUS_Present)
1982 continue;
1983 rv = twe_param_get_4(sc, TWE_PARAM_DRIVEINFO + i,
1984 TWE_PARAM_DRIVEINFO_Size, &dsize);
1985 if (rv) {
1986 aprint_error_dev(sc->sc_dev,
1987 "unable to get drive size for port %d\n", i);
1988 continue;
1989 }
1990 rv = twe_param_get(sc, TWE_PARAM_DRIVEINFO + i,
1991 TWE_PARAM_DRIVEINFO_Model, 40, NULL, &p[1]);
1992 if (rv) {
1993 aprint_error_dev(sc->sc_dev,
1994 "unable to get drive model for port %d\n", i);
1995 continue;
1996 }
1997 aprint_verbose_dev(sc->sc_dev, "port %d: %.40s %d MB\n",
1998 i, p[1]->tp_data, dsize / 2048);
1999 free(p[1], M_DEVBUF);
2000 }
2001 free(p[0], M_DEVBUF);
2002 }
2003
2004 MODULE(MODULE_CLASS_DRIVER, twe, "pci");
2005
2006 #ifdef _MODULE
2007 #include "ioconf.c"
2008 #endif
2009
2010 static int
2011 twe_modcmd(modcmd_t cmd, void *opaque)
2012 {
2013 int error = 0;
2014
2015 #ifdef _MODULE
2016 switch (cmd) {
2017 case MODULE_CMD_INIT:
2018 error = config_init_component(cfdriver_ioconf_twe,
2019 cfattach_ioconf_twe, cfdata_ioconf_twe);
2020 break;
2021 case MODULE_CMD_FINI:
2022 error = config_fini_component(cfdriver_ioconf_twe,
2023 cfattach_ioconf_twe, cfdata_ioconf_twe);
2024 break;
2025 default:
2026 error = ENOTTY;
2027 break;
2028 }
2029 #endif
2030
2031 return error;
2032 }
2033