1 /*	$NetBSD: twe.c,v 1.55 2004/04/15 02:03:03 thorpej Exp $	*/
2
3 /*-
4 * Copyright (c) 2000, 2001, 2002, 2003, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran; and by Jason R. Thorpe of Wasabi Systems, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*-
40 * Copyright (c) 2000 Michael Smith
41 * Copyright (c) 2000 BSDi
42 * All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * from FreeBSD: twe.c,v 1.1 2000/05/24 23:35:23 msmith Exp
66 */
67
68 /*
69 * Driver for the 3ware Escalade family of RAID controllers.
70 */
71
72 #include <sys/cdefs.h>
73 __KERNEL_RCSID(0, "$NetBSD: twe.c,v 1.55 2004/04/15 02:03:03 thorpej Exp $");
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/kernel.h>
78 #include <sys/device.h>
79 #include <sys/queue.h>
80 #include <sys/proc.h>
81 #include <sys/buf.h>
82 #include <sys/endian.h>
83 #include <sys/malloc.h>
84 #include <sys/conf.h>
85 #include <sys/disk.h>
86 #include <sys/syslog.h>
87
88 #include <uvm/uvm_extern.h>
89
90 #include <machine/bswap.h>
91 #include <machine/bus.h>
92
93 #include <dev/pci/pcireg.h>
94 #include <dev/pci/pcivar.h>
95 #include <dev/pci/pcidevs.h>
96 #include <dev/pci/twereg.h>
97 #include <dev/pci/twevar.h>
98 #include <dev/pci/tweio.h>
99
100 #define PCI_CBIO 0x10
101
102 static int twe_aen_get(struct twe_softc *, uint16_t *);
103 static void twe_aen_handler(struct twe_ccb *, int);
104 static void twe_aen_enqueue(struct twe_softc *sc, uint16_t, int);
105 static uint16_t twe_aen_dequeue(struct twe_softc *);
106
107 static void twe_attach(struct device *, struct device *, void *);
108 static int twe_init_connection(struct twe_softc *);
109 static int twe_intr(void *);
110 static int twe_match(struct device *, struct cfdata *, void *);
111 static int twe_param_set(struct twe_softc *, int, int, size_t, void *);
112 static void twe_poll(struct twe_softc *);
113 static int twe_print(void *, const char *);
114 static int twe_reset(struct twe_softc *);
115 static int twe_submatch(struct device *, struct cfdata *, void *);
116 static int twe_status_check(struct twe_softc *, u_int);
117 static int twe_status_wait(struct twe_softc *, u_int, int);
118 static void twe_describe_controller(struct twe_softc *);
119
120 static int twe_add_unit(struct twe_softc *, int);
121 static int twe_del_unit(struct twe_softc *, int);
122
123 static inline u_int32_t twe_inl(struct twe_softc *, int);
124 static inline void twe_outl(struct twe_softc *, int, u_int32_t);
125
126 dev_type_open(tweopen);
127 dev_type_close(tweclose);
128 dev_type_ioctl(tweioctl);
129
130 const struct cdevsw twe_cdevsw = {
131 tweopen, tweclose, noread, nowrite, tweioctl,
132 	nostop, notty, nopoll, nommap, nokqfilter,
133 };
134
135 extern struct cfdriver twe_cd;
136
137 CFATTACH_DECL(twe, sizeof(struct twe_softc),
138 twe_match, twe_attach, NULL, NULL);
139
140 /*
141 * Tables to convert numeric codes to strings.
142 */
143 const struct twe_code_table twe_table_status[] = {
144 { 0x00, "successful completion" },
145
146 /* info */
147 { 0x42, "command in progress" },
148 { 0x6c, "retrying interface CRC error from UDMA command" },
149
150 /* warning */
151 { 0x81, "redundant/inconsequential request ignored" },
152 { 0x8e, "failed to write zeroes to LBA 0" },
153 { 0x8f, "failed to profile TwinStor zones" },
154
155 /* fatal */
156 { 0xc1, "aborted due to system command or reconfiguration" },
157 { 0xc4, "aborted" },
158 { 0xc5, "access error" },
159 { 0xc6, "access violation" },
160 { 0xc7, "device failure" }, /* high byte may be port # */
161 { 0xc8, "controller error" },
162 { 0xc9, "timed out" },
163 { 0xcb, "invalid unit number" },
164 { 0xcf, "unit not available" },
165 { 0xd2, "undefined opcode" },
166 { 0xdb, "request incompatible with unit" },
167 { 0xdc, "invalid request" },
168 { 0xff, "firmware error, reset requested" },
169
170 { 0, NULL }
171 };
172
173 const struct twe_code_table twe_table_unitstate[] = {
174 { TWE_PARAM_UNITSTATUS_Normal, "Normal" },
175 { TWE_PARAM_UNITSTATUS_Initialising, "Initializing" },
176 { TWE_PARAM_UNITSTATUS_Degraded, "Degraded" },
177 { TWE_PARAM_UNITSTATUS_Rebuilding, "Rebuilding" },
178 { TWE_PARAM_UNITSTATUS_Verifying, "Verifying" },
179 { TWE_PARAM_UNITSTATUS_Corrupt, "Corrupt" },
180 { TWE_PARAM_UNITSTATUS_Missing, "Missing" },
181
182 { 0, NULL }
183 };
184
185 const struct twe_code_table twe_table_unittype[] = {
186 /* array descriptor configuration */
187 { TWE_AD_CONFIG_RAID0, "RAID0" },
188 { TWE_AD_CONFIG_RAID1, "RAID1" },
189 { TWE_AD_CONFIG_TwinStor, "TwinStor" },
190 { TWE_AD_CONFIG_RAID5, "RAID5" },
191 { TWE_AD_CONFIG_RAID10, "RAID10" },
192
193 { 0, NULL }
194 };
195
196 const struct twe_code_table twe_table_stripedepth[] = {
197 { TWE_AD_STRIPE_4k, "4K" },
198 { TWE_AD_STRIPE_8k, "8K" },
199 { TWE_AD_STRIPE_16k, "16K" },
200 { TWE_AD_STRIPE_32k, "32K" },
201 { TWE_AD_STRIPE_64k, "64K" },
202
203 { 0, NULL }
204 };
205
206 /*
207 * Asynchronous event notification messages are qualified:
208 * a - not unit/port specific
209 * u - unit specific
210 * p - port specific
211 *
212 * They are further qualified with a severity:
213 * E - LOG_EMERG
214 * a - LOG_ALERT
215 * c - LOG_CRIT
216 * e - LOG_ERR
217 * w - LOG_WARNING
218 * n - LOG_NOTICE
219 * i - LOG_INFO
220 * d - LOG_DEBUG
221 * blank - just use printf
222 */
223 const struct twe_code_table twe_table_aen[] = {
224 	{ 0x00, "a  queue empty" },
225 	{ 0x01, "a  soft reset" },
226 { 0x02, "uc degraded mode" },
227 { 0x03, "aa controller error" },
228 { 0x04, "uE rebuild fail" },
229 { 0x05, "un rebuild done" },
230 { 0x06, "ue incomplete unit" },
231 { 0x07, "un initialization done" },
232 { 0x08, "uw unclean shutdown detected" },
233 { 0x09, "pe drive timeout" },
234 { 0x0a, "pc drive error" },
235 { 0x0b, "un rebuild started" },
236 { 0x0c, "un initialization started" },
237 { 0x0d, "ui logical unit deleted" },
238 { 0x0f, "pc SMART threshold exceeded" },
239 	{ 0x15, "a  table undefined" },	/* XXX: Not in FreeBSD's table */
240 { 0x21, "pe ATA UDMA downgrade" },
241 { 0x22, "pi ATA UDMA upgrade" },
242 { 0x23, "pw sector repair occurred" },
243 { 0x24, "aa SBUF integrity check failure" },
244 { 0x25, "pa lost cached write" },
245 { 0x26, "pa drive ECC error detected" },
246 { 0x27, "pe DCB checksum error" },
247 { 0x28, "pn DCB unsupported version" },
248 { 0x29, "ui verify started" },
249 { 0x2a, "ua verify failed" },
250 { 0x2b, "ui verify complete" },
251 { 0x2c, "pw overwrote bad sector during rebuild" },
252 { 0x2d, "pa encountered bad sector during rebuild" },
253 { 0x2e, "pe replacement drive too small" },
254 { 0x2f, "ue array not previously initialized" },
255 	{ 0x30, "p  drive not supported" },
256 	{ 0xff, "a  aen queue full" },
257
258 { 0, NULL },
259 };
260
261 const char *
262 twe_describe_code(const struct twe_code_table *table, uint32_t code)
263 {
264
265 for (; table->string != NULL; table++) {
266 if (table->code == code)
267 return (table->string);
268 }
269 return (NULL);
270 }
271
272 static inline u_int32_t
273 twe_inl(struct twe_softc *sc, int off)
274 {
275
276 bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
277 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
278 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
279 }
280
281 static inline void
282 twe_outl(struct twe_softc *sc, int off, u_int32_t val)
283 {
284
285 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
286 bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
287 BUS_SPACE_BARRIER_WRITE);
288 }
289
290 /*
291 * Match a supported board.
292 */
293 static int
294 twe_match(struct device *parent, struct cfdata *cfdata, void *aux)
295 {
296 struct pci_attach_args *pa;
297
298 pa = aux;
299
300 return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE &&
301 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE ||
302 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE_ASIC));
303 }
304
305 /*
306 * Attach a supported board.
307 *
308 * XXX This doesn't fail gracefully.
309 */
310 static void
311 twe_attach(struct device *parent, struct device *self, void *aux)
312 {
313 struct pci_attach_args *pa;
314 struct twe_softc *sc;
315 pci_chipset_tag_t pc;
316 pci_intr_handle_t ih;
317 pcireg_t csr;
318 const char *intrstr;
319 int s, size, i, rv, rseg;
320 size_t max_segs, max_xfer;
321 bus_dma_segment_t seg;
322 struct twe_cmd *tc;
323 struct twe_ccb *ccb;
324
325 sc = (struct twe_softc *)self;
326 pa = aux;
327 pc = pa->pa_pc;
328 sc->sc_dmat = pa->pa_dmat;
329 SIMPLEQ_INIT(&sc->sc_ccb_queue);
330 SLIST_INIT(&sc->sc_ccb_freelist);
331
332 aprint_naive(": RAID controller\n");
333 aprint_normal(": 3ware Escalade\n");
334
335 ccb = malloc(sizeof(*ccb) * TWE_MAX_QUEUECNT, M_DEVBUF, M_NOWAIT);
336 if (ccb == NULL) {
337 aprint_error("%s: unable to allocate memory for ccbs\n",
338 sc->sc_dv.dv_xname);
339 return;
340 }
341
342 if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
343 &sc->sc_iot, &sc->sc_ioh, NULL, NULL)) {
344 aprint_error("%s: can't map i/o space\n", sc->sc_dv.dv_xname);
345 return;
346 }
347
348 /* Enable the device. */
349 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
350 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
351 csr | PCI_COMMAND_MASTER_ENABLE);
352
353 /* Map and establish the interrupt. */
354 if (pci_intr_map(pa, &ih)) {
355 aprint_error("%s: can't map interrupt\n", sc->sc_dv.dv_xname);
356 return;
357 }
358
359 intrstr = pci_intr_string(pc, ih);
360 sc->sc_ih = pci_intr_establish(pc, ih, IPL_BIO, twe_intr, sc);
361 if (sc->sc_ih == NULL) {
362 aprint_error("%s: can't establish interrupt%s%s\n",
363 sc->sc_dv.dv_xname,
364 (intrstr) ? " at " : "",
365 (intrstr) ? intrstr : "");
366 return;
367 }
368
369 if (intrstr != NULL)
370 aprint_normal("%s: interrupting at %s\n",
371 sc->sc_dv.dv_xname, intrstr);
372
373 /*
374 * Allocate and initialise the command blocks and CCBs.
375 */
376 size = sizeof(struct twe_cmd) * TWE_MAX_QUEUECNT;
377
378 if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
379 &rseg, BUS_DMA_NOWAIT)) != 0) {
380 aprint_error("%s: unable to allocate commands, rv = %d\n",
381 sc->sc_dv.dv_xname, rv);
382 return;
383 }
384
385 if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
386 (caddr_t *)&sc->sc_cmds,
387 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
388 aprint_error("%s: unable to map commands, rv = %d\n",
389 sc->sc_dv.dv_xname, rv);
390 return;
391 }
392
393 if ((rv = bus_dmamap_create(sc->sc_dmat, size, size, 1, 0,
394 BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
395 aprint_error("%s: unable to create command DMA map, rv = %d\n",
396 sc->sc_dv.dv_xname, rv);
397 return;
398 }
399
400 if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_cmds,
401 size, NULL, BUS_DMA_NOWAIT)) != 0) {
402 aprint_error("%s: unable to load command DMA map, rv = %d\n",
403 sc->sc_dv.dv_xname, rv);
404 return;
405 }
406
407 sc->sc_cmds_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
408 memset(sc->sc_cmds, 0, size);
409
410 sc->sc_ccbs = ccb;
411 tc = (struct twe_cmd *)sc->sc_cmds;
412 max_segs = twe_get_maxsegs();
413 max_xfer = twe_get_maxxfer(max_segs);
414
415 for (i = 0; i < TWE_MAX_QUEUECNT; i++, tc++, ccb++) {
416 ccb->ccb_cmd = tc;
417 ccb->ccb_cmdid = i;
418 ccb->ccb_flags = 0;
419 rv = bus_dmamap_create(sc->sc_dmat, max_xfer,
420 max_segs, PAGE_SIZE, 0,
421 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
422 &ccb->ccb_dmamap_xfer);
423 if (rv != 0) {
424 aprint_error("%s: can't create dmamap, rv = %d\n",
425 sc->sc_dv.dv_xname, rv);
426 return;
427 }
428
429 /* Save the first CCB for AEN retrieval. */
430 if (i != 0)
431 SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb,
432 ccb_chain.slist);
433 }
434
435 /* Wait for the controller to become ready. */
436 if (twe_status_wait(sc, TWE_STS_MICROCONTROLLER_READY, 6)) {
437 aprint_error("%s: microcontroller not ready\n",
438 sc->sc_dv.dv_xname);
439 return;
440 }
441
442 twe_outl(sc, TWE_REG_CTL, TWE_CTL_DISABLE_INTRS);
443
444 /* Reset the controller. */
445 s = splbio();
446 rv = twe_reset(sc);
447 splx(s);
448 if (rv) {
449 aprint_error("%s: reset failed\n", sc->sc_dv.dv_xname);
450 return;
451 }
452
453 /* Initialise connection with controller. */
454 twe_init_connection(sc);
455
456 twe_describe_controller(sc);
457
458 /* Find and attach RAID array units. */
459 sc->sc_nunits = 0;
460 for (i = 0; i < TWE_MAX_UNITS; i++)
461 (void) twe_add_unit(sc, i);
462
463 /* ...and finally, enable interrupts. */
464 twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR |
465 TWE_CTL_UNMASK_RESP_INTR |
466 TWE_CTL_ENABLE_INTRS);
467 }
468
469 void
470 twe_register_callbacks(struct twe_softc *sc, int unit,
471 const struct twe_callbacks *tcb)
472 {
473
474 sc->sc_units[unit].td_callbacks = tcb;
475 }
476
477 static void
478 twe_recompute_openings(struct twe_softc *sc)
479 {
480 struct twe_drive *td;
481 int unit, openings;
482
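	/*
	 * One CCB is reserved for AEN retrieval (see twe_attach()), hence
	 * only TWE_MAX_QUEUECNT - 1 openings are shared among the units.
	 */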
483 if (sc->sc_nunits != 0)
484 openings = (TWE_MAX_QUEUECNT - 1) / sc->sc_nunits;
485 else
486 openings = 0;
487 if (openings == sc->sc_openings)
488 return;
489 sc->sc_openings = openings;
490
491 #ifdef TWE_DEBUG
492 printf("%s: %d array%s, %d openings per array\n",
493 sc->sc_dv.dv_xname, sc->sc_nunits,
494 sc->sc_nunits == 1 ? "" : "s", sc->sc_openings);
495 #endif
496
497 for (unit = 0; unit < TWE_MAX_UNITS; unit++) {
498 td = &sc->sc_units[unit];
499 if (td->td_dev != NULL)
500 (*td->td_callbacks->tcb_openings)(td->td_dev,
501 sc->sc_openings);
502 }
503 }
504
505 static int
506 twe_add_unit(struct twe_softc *sc, int unit)
507 {
508 struct twe_param *dtp, *atp;
509 struct twe_array_descriptor *ad;
510 struct twe_drive *td;
511 struct twe_attach_args twea;
512 uint32_t newsize;
513 int rv;
514 uint16_t dsize;
515 uint8_t newtype, newstripe;
516
517 if (unit < 0 || unit >= TWE_MAX_UNITS)
518 return (EINVAL);
519
520 /* Find attached units. */
521 rv = twe_param_get(sc, TWE_PARAM_UNITSUMMARY,
522 TWE_PARAM_UNITSUMMARY_Status, TWE_MAX_UNITS, NULL, &dtp);
523 if (rv != 0) {
524 aprint_error("%s: error %d fetching unit summary\n",
525 sc->sc_dv.dv_xname, rv);
526 return (rv);
527 }
528
529 /* For each detected unit, collect size and store in an array. */
530 td = &sc->sc_units[unit];
531
532 /* Unit present? */
533 if ((dtp->tp_data[unit] & TWE_PARAM_UNITSTATUS_Online) == 0) {
534 /*
535 * XXX Should we check to see if a device has been
536 * XXX attached at this index and detach it if it
537 * XXX has? ("rescan" semantics)
538 */
539 rv = 0;
540 goto out;
541 }
542
543 rv = twe_param_get_2(sc, TWE_PARAM_UNITINFO + unit,
544 TWE_PARAM_UNITINFO_DescriptorSize, &dsize);
545 if (rv != 0) {
546 aprint_error("%s: error %d fetching descriptor size "
547 "for unit %d\n", sc->sc_dv.dv_xname, rv, unit);
548 goto out;
549 }
550
551 rv = twe_param_get(sc, TWE_PARAM_UNITINFO + unit,
552 TWE_PARAM_UNITINFO_Descriptor, dsize - 3, NULL, &atp);
553 if (rv != 0) {
554 aprint_error("%s: error %d fetching array descriptor "
555 "for unit %d\n", sc->sc_dv.dv_xname, rv, unit);
556 goto out;
557 }
558
559 ad = (struct twe_array_descriptor *)atp->tp_data;
560 newtype = ad->configuration;
561 newstripe = ad->stripe_size;
562 free(atp, M_DEVBUF);
563
564 rv = twe_param_get_4(sc, TWE_PARAM_UNITINFO + unit,
565 TWE_PARAM_UNITINFO_Capacity, &newsize);
566 if (rv != 0) {
567 aprint_error(
568 "%s: error %d fetching capacity for unit %d\n",
569 sc->sc_dv.dv_xname, rv, unit);
570 goto out;
571 }
572
573 /*
574 * Have a device, so we need to attach it. If there is currently
575 * something sitting at the slot, and the parameters are different,
576 * then we detach the old device before attaching the new one.
577 */
578 if (td->td_dev != NULL &&
579 td->td_size == newsize &&
580 td->td_type == newtype &&
581 td->td_stripe == newstripe) {
582 /* Same as the old device; just keep using it. */
583 rv = 0;
584 goto out;
585 } else if (td->td_dev != NULL) {
586 /* Detach the old device first. */
587 (void) config_detach(td->td_dev, DETACH_FORCE);
588 td->td_dev = NULL;
589 } else if (td->td_size == 0)
590 sc->sc_nunits++;
591
592 /*
593 * Committed to the new array unit; assign its parameters and
594 * recompute the number of available command openings.
595 */
596 td->td_size = newsize;
597 td->td_type = newtype;
598 td->td_stripe = newstripe;
599 twe_recompute_openings(sc);
600
601 twea.twea_unit = unit;
602 td->td_dev = config_found_sm(&sc->sc_dv, &twea, twe_print,
603 twe_submatch);
604
605 rv = 0;
606 out:
607 free(dtp, M_DEVBUF);
608 return (rv);
609 }
610
611 static int
612 twe_del_unit(struct twe_softc *sc, int unit)
613 {
614 struct twe_drive *td;
615
616 if (unit < 0 || unit >= TWE_MAX_UNITS)
617 return (EINVAL);
618
619 td = &sc->sc_units[unit];
620 if (td->td_size != 0)
621 sc->sc_nunits--;
622 td->td_size = 0;
623 td->td_type = 0;
624 td->td_stripe = 0;
625 if (td->td_dev != NULL) {
626 (void) config_detach(td->td_dev, DETACH_FORCE);
627 td->td_dev = NULL;
628 }
629 twe_recompute_openings(sc);
630 return (0);
631 }
632
633 /*
634 * Reset the controller.
635 * MUST BE CALLED AT splbio()!
636 */
637 static int
638 twe_reset(struct twe_softc *sc)
639 {
640 uint16_t aen;
641 u_int status;
642 volatile u_int32_t junk;
643 int got, rv;
644
645 /* Issue a soft reset. */
646 twe_outl(sc, TWE_REG_CTL, TWE_CTL_ISSUE_SOFT_RESET |
647 TWE_CTL_CLEAR_HOST_INTR |
648 TWE_CTL_CLEAR_ATTN_INTR |
649 TWE_CTL_MASK_CMD_INTR |
650 TWE_CTL_MASK_RESP_INTR |
651 TWE_CTL_CLEAR_ERROR_STS |
652 TWE_CTL_DISABLE_INTRS);
653
654 /* Wait for attention... */
655 if (twe_status_wait(sc, TWE_STS_ATTN_INTR, 15)) {
656 printf("%s: no attention interrupt\n",
657 sc->sc_dv.dv_xname);
658 return (-1);
659 }
660
661 /* ...and ACK it. */
662 twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
663
664 /*
665 * Pull AENs out of the controller; look for a soft reset AEN.
666 * Open code this, since we want to detect reset even if the
667 * queue for management tools is full.
668 *
669 	 * Note that since:
670 	 *  - interrupts are blocked
671 	 *  - we have reset the controller
672 	 *  - we have acknowledged the pending ATTENTION
673 	 * there is no way a pending asynchronous AEN fetch can still
674 	 * complete, so clear the flag.
675 	 */
676 sc->sc_flags &= ~TWEF_AEN;
677 for (got = 0;;) {
678 rv = twe_aen_get(sc, &aen);
679 if (rv != 0)
680 printf("%s: error %d while draining event queue\n",
681 sc->sc_dv.dv_xname, rv);
682 if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY)
683 break;
684 if (TWE_AEN_CODE(aen) == TWE_AEN_SOFT_RESET)
685 got = 1;
686 twe_aen_enqueue(sc, aen, 1);
687 }
688
689 if (!got) {
690 printf("%s: reset not reported\n", sc->sc_dv.dv_xname);
691 return (-1);
692 }
693
694 /* Check controller status. */
695 status = twe_inl(sc, TWE_REG_STS);
696 if (twe_status_check(sc, status)) {
697 printf("%s: controller errors detected\n",
698 sc->sc_dv.dv_xname);
699 return (-1);
700 }
701
702 /* Drain the response queue. */
703 for (;;) {
704 status = twe_inl(sc, TWE_REG_STS);
705 if (twe_status_check(sc, status) != 0) {
706 printf("%s: can't drain response queue\n",
707 sc->sc_dv.dv_xname);
708 return (-1);
709 }
710 if ((status & TWE_STS_RESP_QUEUE_EMPTY) != 0)
711 break;
712 junk = twe_inl(sc, TWE_REG_RESP_QUEUE);
713 }
714
715 return (0);
716 }
717
718 /*
719 * Print autoconfiguration message for a sub-device.
720 */
721 static int
722 twe_print(void *aux, const char *pnp)
723 {
724 struct twe_attach_args *twea;
725
726 twea = aux;
727
728 if (pnp != NULL)
729 aprint_normal("block device at %s", pnp);
730 aprint_normal(" unit %d", twea->twea_unit);
731 return (UNCONF);
732 }
733
734 /*
735 * Match a sub-device.
736 */
737 static int
738 twe_submatch(struct device *parent, struct cfdata *cf, void *aux)
739 {
740 struct twe_attach_args *twea;
741
742 twea = aux;
743
744 if (cf->tweacf_unit != TWECF_UNIT_DEFAULT &&
745 cf->tweacf_unit != twea->twea_unit)
746 return (0);
747
748 return (config_match(parent, cf, aux));
749 }
750
751 /*
752 * Interrupt service routine.
753 */
754 static int
755 twe_intr(void *arg)
756 {
757 struct twe_softc *sc;
758 u_int status;
759 int caught, rv;
760
761 sc = arg;
762 caught = 0;
763 status = twe_inl(sc, TWE_REG_STS);
764 twe_status_check(sc, status);
765
766 /* Host interrupts - purpose unknown. */
767 if ((status & TWE_STS_HOST_INTR) != 0) {
768 #ifdef DEBUG
769 printf("%s: host interrupt\n", sc->sc_dv.dv_xname);
770 #endif
771 twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_HOST_INTR);
772 caught = 1;
773 }
774
775 /*
776 * Attention interrupts, signalled when a controller or child device
777 * state change has occurred.
778 */
779 if ((status & TWE_STS_ATTN_INTR) != 0) {
780 rv = twe_aen_get(sc, NULL);
781 if (rv != 0)
782 printf("%s: unable to retrieve AEN (%d)\n",
783 sc->sc_dv.dv_xname, rv);
784 else
785 twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
786 caught = 1;
787 }
788
789 /*
790 * Command interrupts, signalled when the controller can accept more
791 * commands. We don't use this; instead, we try to submit commands
792 * when we receive them, and when other commands have completed.
793 * Mask it so we don't get another one.
794 */
795 if ((status & TWE_STS_CMD_INTR) != 0) {
796 #ifdef DEBUG
797 printf("%s: command interrupt\n", sc->sc_dv.dv_xname);
798 #endif
799 twe_outl(sc, TWE_REG_CTL, TWE_CTL_MASK_CMD_INTR);
800 caught = 1;
801 }
802
803 if ((status & TWE_STS_RESP_INTR) != 0) {
804 twe_poll(sc);
805 caught = 1;
806 }
807
808 return (caught);
809 }
810
811 /*
812 * Fetch an AEN. Even though this is really like parameter
813 * retrieval, we handle this specially, because we issue this
814 * AEN retrieval command from interrupt context, and thus
815 * reserve a CCB for it to avoid resource shortage.
816 *
817 * XXX There are still potential resource shortages we could
818 * XXX encounter. Consider pre-allocating all AEN-related
819 * XXX resources.
820 *
821 * MUST BE CALLED AT splbio()!
822 */
823 static int
824 twe_aen_get(struct twe_softc *sc, uint16_t *aenp)
825 {
826 struct twe_ccb *ccb;
827 struct twe_cmd *tc;
828 struct twe_param *tp;
829 int rv;
830
831 /*
832 * If we're already retrieving an AEN, just wait; another
833 * retrieval will be chained after the current one completes.
834 */
835 if (sc->sc_flags & TWEF_AEN) {
836 /*
837 * It is a fatal software programming error to attempt
838 * to fetch an AEN synchronously when an AEN fetch is
839 * already pending.
840 */
841 KASSERT(aenp == NULL);
842 return (0);
843 }
844
845 tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
846 if (tp == NULL)
847 return (ENOMEM);
848
849 ccb = twe_ccb_alloc(sc,
850 TWE_CCB_AEN | TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
851 KASSERT(ccb != NULL);
852
853 ccb->ccb_data = tp;
854 ccb->ccb_datasize = TWE_SECTOR_SIZE;
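	/*
	 * If the caller wants the AEN code back (aenp != NULL), the command
	 * is polled synchronously below; otherwise it is queued and
	 * twe_aen_handler() deals with the result when it completes.
	 */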
855 ccb->ccb_tx.tx_handler = (aenp == NULL) ? twe_aen_handler : NULL;
856 ccb->ccb_tx.tx_context = tp;
857 ccb->ccb_tx.tx_dv = &sc->sc_dv;
858
859 tc = ccb->ccb_cmd;
860 tc->tc_size = 2;
861 tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
862 tc->tc_unit = 0;
863 tc->tc_count = htole16(1);
864
865 /* Fill in the outbound parameter data. */
866 tp->tp_table_id = htole16(TWE_PARAM_AEN);
867 tp->tp_param_id = TWE_PARAM_AEN_UnitCode;
868 tp->tp_param_size = 2;
869
870 /* Map the transfer. */
871 if ((rv = twe_ccb_map(sc, ccb)) != 0) {
872 twe_ccb_free(sc, ccb);
873 goto done;
874 }
875
876 /* Enqueue the command and wait. */
877 if (aenp != NULL) {
878 rv = twe_ccb_poll(sc, ccb, 5);
879 twe_ccb_unmap(sc, ccb);
880 twe_ccb_free(sc, ccb);
881 if (rv == 0)
882 *aenp = le16toh(*(uint16_t *)tp->tp_data);
883 free(tp, M_DEVBUF);
884 } else {
885 sc->sc_flags |= TWEF_AEN;
886 twe_ccb_enqueue(sc, ccb);
887 rv = 0;
888 }
889
890 done:
891 return (rv);
892 }
893
894 /*
895 * Handle an AEN returned by the controller.
896 * MUST BE CALLED AT splbio()!
897 */
898 static void
899 twe_aen_handler(struct twe_ccb *ccb, int error)
900 {
901 struct twe_softc *sc;
902 struct twe_param *tp;
903 uint16_t aen;
904 int rv;
905
906 sc = (struct twe_softc *)ccb->ccb_tx.tx_dv;
907 tp = ccb->ccb_tx.tx_context;
908 twe_ccb_unmap(sc, ccb);
909
910 sc->sc_flags &= ~TWEF_AEN;
911
912 if (error) {
913 printf("%s: error retrieving AEN\n", sc->sc_dv.dv_xname);
914 aen = TWE_AEN_QUEUE_EMPTY;
915 } else
916 aen = le16toh(*(u_int16_t *)tp->tp_data);
917 free(tp, M_DEVBUF);
918 twe_ccb_free(sc, ccb);
919
920 if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY) {
921 twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
922 return;
923 }
924
925 twe_aen_enqueue(sc, aen, 0);
926
927 /*
928 * Chain another retrieval in case interrupts have been
929 * coalesced.
930 */
931 rv = twe_aen_get(sc, NULL);
932 if (rv != 0)
933 printf("%s: unable to retrieve AEN (%d)\n",
934 sc->sc_dv.dv_xname, rv);
935 }
936
937 static void
938 twe_aen_enqueue(struct twe_softc *sc, uint16_t aen, int quiet)
939 {
940 const char *str, *msg;
941 int s, next, nextnext, level;
942
943 /*
944 * First report the AEN on the console. Maybe.
945 */
946 if (! quiet) {
947 str = twe_describe_code(twe_table_aen, TWE_AEN_CODE(aen));
948 if (str == NULL) {
949 printf("%s: unknown AEN 0x%04x\n",
950 sc->sc_dv.dv_xname, aen);
951 } else {
952 msg = str + 3;
953 switch (str[1]) {
954 case 'E': level = LOG_EMERG; break;
955 case 'a': level = LOG_ALERT; break;
956 case 'c': level = LOG_CRIT; break;
957 case 'e': level = LOG_ERR; break;
958 case 'w': level = LOG_WARNING; break;
959 case 'n': level = LOG_NOTICE; break;
960 case 'i': level = LOG_INFO; break;
961 case 'd': level = LOG_DEBUG; break;
962 default:
963 /* Don't use syslog. */
964 level = -1;
965 }
966
967 if (level < 0) {
968 switch (str[0]) {
969 case 'u':
970 case 'p':
971 printf("%s: %s %d: %s\n",
972 sc->sc_dv.dv_xname,
973 str[0] == 'u' ? "unit" : "port",
974 TWE_AEN_UNIT(aen), msg);
975 break;
976
977 default:
978 printf("%s: %s\n",
979 sc->sc_dv.dv_xname, msg);
980 }
981 } else {
982 switch (str[0]) {
983 case 'u':
984 case 'p':
985 log(level, "%s: %s %d: %s\n",
986 sc->sc_dv.dv_xname,
987 str[0] == 'u' ? "unit" : "port",
988 TWE_AEN_UNIT(aen), msg);
989 break;
990
991 default:
992 log(level, "%s: %s\n",
993 sc->sc_dv.dv_xname, msg);
994 }
995 }
996 }
997 }
998
999 	/* Now enqueue the AEN for management tools. */
1000 s = splbio();
1001
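	/*
	 * sc_aen_queue is a ring buffer: head == tail means empty, so one
	 * slot is always left unused to distinguish full from empty.
	 */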
1002 next = (sc->sc_aen_head + 1) % TWE_AEN_Q_LENGTH;
1003 nextnext = (sc->sc_aen_head + 2) % TWE_AEN_Q_LENGTH;
1004
1005 /*
1006 * If this is the last free slot, then queue up a "queue
1007 * full" message.
1008 */
1009 if (nextnext == sc->sc_aen_tail)
1010 aen = TWE_AEN_QUEUE_FULL;
1011
1012 if (next != sc->sc_aen_tail) {
1013 sc->sc_aen_queue[sc->sc_aen_head] = aen;
1014 sc->sc_aen_head = next;
1015 }
1016
1017 if (sc->sc_flags & TWEF_AENQ_WAIT) {
1018 sc->sc_flags &= ~TWEF_AENQ_WAIT;
1019 wakeup(&sc->sc_aen_queue);
1020 }
1021
1022 splx(s);
1023 }
1024
1025 /* NOTE: Must be called at splbio(). */
1026 static uint16_t
1027 twe_aen_dequeue(struct twe_softc *sc)
1028 {
1029 uint16_t aen;
1030
1031 if (sc->sc_aen_tail == sc->sc_aen_head)
1032 aen = TWE_AEN_QUEUE_EMPTY;
1033 else {
1034 aen = sc->sc_aen_queue[sc->sc_aen_tail];
1035 		sc->sc_aen_tail = (sc->sc_aen_tail + 1) % TWE_AEN_Q_LENGTH;
1036 }
1037
1038 return (aen);
1039 }
1040
1041 /*
1042 * These are short-hand functions that execute TWE_OP_GET_PARAM to
1043 * fetch 1, 2, and 4 byte parameter values, respectively.
1044 */
1045 int
1046 twe_param_get_1(struct twe_softc *sc, int table_id, int param_id,
1047 uint8_t *valp)
1048 {
1049 struct twe_param *tp;
1050 int rv;
1051
1052 rv = twe_param_get(sc, table_id, param_id, 1, NULL, &tp);
1053 if (rv != 0)
1054 return (rv);
1055 *valp = *(uint8_t *)tp->tp_data;
1056 free(tp, M_DEVBUF);
1057 return (0);
1058 }
1059
1060 int
1061 twe_param_get_2(struct twe_softc *sc, int table_id, int param_id,
1062 uint16_t *valp)
1063 {
1064 struct twe_param *tp;
1065 int rv;
1066
1067 rv = twe_param_get(sc, table_id, param_id, 2, NULL, &tp);
1068 if (rv != 0)
1069 return (rv);
1070 *valp = le16toh(*(uint16_t *)tp->tp_data);
1071 free(tp, M_DEVBUF);
1072 return (0);
1073 }
1074
1075 int
1076 twe_param_get_4(struct twe_softc *sc, int table_id, int param_id,
1077 uint32_t *valp)
1078 {
1079 struct twe_param *tp;
1080 int rv;
1081
1082 rv = twe_param_get(sc, table_id, param_id, 4, NULL, &tp);
1083 if (rv != 0)
1084 return (rv);
1085 *valp = le32toh(*(uint32_t *)tp->tp_data);
1086 free(tp, M_DEVBUF);
1087 return (0);
1088 }
1089
1090 /*
1091  * Execute a TWE_OP_GET_PARAM command.  If a callback function is provided,
1092  * it will be called with the CCB (whose tx_context points at the returned
1093  * data) when the command completes.  If no callback is provided, the command
1094  * is executed synchronously and a pointer to the returned data is stored in *pbuf.
1095 *
1096 * The caller or callback is responsible for freeing the buffer.
1097 *
1098 * NOTE: We assume we can sleep here to wait for a CCB to become available.
1099 */
1100 int
1101 twe_param_get(struct twe_softc *sc, int table_id, int param_id, size_t size,
1102 void (*func)(struct twe_ccb *, int), struct twe_param **pbuf)
1103 {
1104 struct twe_ccb *ccb;
1105 struct twe_cmd *tc;
1106 struct twe_param *tp;
1107 int rv, s;
1108
1109 tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
1110 if (tp == NULL)
1111 return ENOMEM;
1112
1113 ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
1114 KASSERT(ccb != NULL);
1115
1116 ccb->ccb_data = tp;
1117 ccb->ccb_datasize = TWE_SECTOR_SIZE;
1118 ccb->ccb_tx.tx_handler = func;
1119 ccb->ccb_tx.tx_context = tp;
1120 ccb->ccb_tx.tx_dv = &sc->sc_dv;
1121
1122 tc = ccb->ccb_cmd;
1123 tc->tc_size = 2;
1124 tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
1125 tc->tc_unit = 0;
1126 tc->tc_count = htole16(1);
1127
1128 /* Fill in the outbound parameter data. */
1129 tp->tp_table_id = htole16(table_id);
1130 tp->tp_param_id = param_id;
1131 tp->tp_param_size = size;
1132
1133 /* Map the transfer. */
1134 if ((rv = twe_ccb_map(sc, ccb)) != 0) {
1135 twe_ccb_free(sc, ccb);
1136 goto done;
1137 }
1138
1139 /* Submit the command and either wait or let the callback handle it. */
1140 if (func == NULL) {
1141 s = splbio();
1142 rv = twe_ccb_poll(sc, ccb, 5);
1143 twe_ccb_unmap(sc, ccb);
1144 twe_ccb_free(sc, ccb);
1145 splx(s);
1146 } else {
1147 #ifdef DEBUG
1148 if (pbuf != NULL)
1149 panic("both func and pbuf defined");
1150 #endif
1151 twe_ccb_enqueue(sc, ccb);
1152 return 0;
1153 }
1154
1155 done:
1156 if (pbuf == NULL || rv != 0)
1157 free(tp, M_DEVBUF);
1158 else if (pbuf != NULL && rv == 0)
1159 *pbuf = tp;
1160 return rv;
1161 }
1162
1163 /*
1164 * Execute a TWE_OP_SET_PARAM command.
1165 *
1166 * NOTE: We assume we can sleep here to wait for a CCB to become available.
1167 */
1168 static int
1169 twe_param_set(struct twe_softc *sc, int table_id, int param_id, size_t size,
1170 void *buf)
1171 {
1172 struct twe_ccb *ccb;
1173 struct twe_cmd *tc;
1174 struct twe_param *tp;
1175 int rv, s;
1176
1177 tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
1178 if (tp == NULL)
1179 return ENOMEM;
1180
1181 ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
1182 KASSERT(ccb != NULL);
1183
1184 ccb->ccb_data = tp;
1185 ccb->ccb_datasize = TWE_SECTOR_SIZE;
1186 ccb->ccb_tx.tx_handler = 0;
1187 ccb->ccb_tx.tx_context = tp;
1188 ccb->ccb_tx.tx_dv = &sc->sc_dv;
1189
1190 tc = ccb->ccb_cmd;
1191 tc->tc_size = 2;
1192 tc->tc_opcode = TWE_OP_SET_PARAM | (tc->tc_size << 5);
1193 tc->tc_unit = 0;
1194 tc->tc_count = htole16(1);
1195
1196 /* Fill in the outbound parameter data. */
1197 tp->tp_table_id = htole16(table_id);
1198 tp->tp_param_id = param_id;
1199 tp->tp_param_size = size;
1200 memcpy(tp->tp_data, buf, size);
1201
1202 /* Map the transfer. */
1203 if ((rv = twe_ccb_map(sc, ccb)) != 0) {
1204 twe_ccb_free(sc, ccb);
1205 goto done;
1206 }
1207
1208 /* Submit the command and wait. */
1209 s = splbio();
1210 rv = twe_ccb_poll(sc, ccb, 5);
1211 twe_ccb_unmap(sc, ccb);
1212 twe_ccb_free(sc, ccb);
1213 splx(s);
1214 done:
1215 free(tp, M_DEVBUF);
1216 return (rv);
1217 }
1218
1219 /*
1220 * Execute a TWE_OP_INIT_CONNECTION command. Return non-zero on error.
1221 * Must be called with interrupts blocked.
1222 */
1223 static int
1224 twe_init_connection(struct twe_softc *sc)
1227 {
1228 struct twe_ccb *ccb;
1229 struct twe_cmd *tc;
1230 int rv;
1231
1232 if ((ccb = twe_ccb_alloc(sc, 0)) == NULL)
1233 return (EAGAIN);
1234
1235 /* Build the command. */
1236 tc = ccb->ccb_cmd;
1237 tc->tc_size = 3;
1238 tc->tc_opcode = TWE_OP_INIT_CONNECTION;
1239 tc->tc_unit = 0;
1240 tc->tc_count = htole16(TWE_MAX_CMDS);
1241 tc->tc_args.init_connection.response_queue_pointer = 0;
1242
1243 /* Submit the command for immediate execution. */
1244 rv = twe_ccb_poll(sc, ccb, 5);
1245 twe_ccb_free(sc, ccb);
1246 return (rv);
1247 }
1248
1249 /*
1250 * Poll the controller for completed commands. Must be called with
1251 * interrupts blocked.
1252 */
1253 static void
1254 twe_poll(struct twe_softc *sc)
1255 {
1256 struct twe_ccb *ccb;
1257 int found;
1258 u_int status, cmdid;
1259
1260 found = 0;
1261
1262 for (;;) {
1263 status = twe_inl(sc, TWE_REG_STS);
1264 twe_status_check(sc, status);
1265
1266 if ((status & TWE_STS_RESP_QUEUE_EMPTY))
1267 break;
1268
1269 found = 1;
1270 cmdid = twe_inl(sc, TWE_REG_RESP_QUEUE);
1271 cmdid = (cmdid & TWE_RESP_MASK) >> TWE_RESP_SHIFT;
1272 if (cmdid >= TWE_MAX_QUEUECNT) {
1273 printf("%s: bad cmdid %d\n", sc->sc_dv.dv_xname, cmdid);
1274 continue;
1275 }
1276
1277 ccb = sc->sc_ccbs + cmdid;
1278 if ((ccb->ccb_flags & TWE_CCB_ACTIVE) == 0) {
1279 printf("%s: CCB for cmdid %d not active\n",
1280 sc->sc_dv.dv_xname, cmdid);
1281 continue;
1282 }
1283 ccb->ccb_flags ^= TWE_CCB_COMPLETE | TWE_CCB_ACTIVE;
1284
1285 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1286 (caddr_t)ccb->ccb_cmd - sc->sc_cmds,
1287 sizeof(struct twe_cmd),
1288 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1289
1290 /* Pass notification to upper layers. */
1291 if (ccb->ccb_tx.tx_handler != NULL)
1292 (*ccb->ccb_tx.tx_handler)(ccb,
1293 ccb->ccb_cmd->tc_status != 0 ? EIO : 0);
1294 }
1295
1296 /* If any commands have completed, run the software queue. */
1297 if (found)
1298 twe_ccb_enqueue(sc, NULL);
1299 }
1300
1301 /*
1302 * Wait for `status' to be set in the controller status register. Return
1303 * zero if found, non-zero if the operation timed out.
1304 */
1305 static int
1306 twe_status_wait(struct twe_softc *sc, u_int32_t status, int timo)
1307 {
1308
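	/* `timo' is in seconds; poll the status register every 100ms. */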
1309 for (timo *= 10; timo != 0; timo--) {
1310 if ((twe_inl(sc, TWE_REG_STS) & status) == status)
1311 break;
1312 delay(100000);
1313 }
1314
1315 return (timo == 0);
1316 }
1317
1318 /*
1319 * Complain if the status bits aren't what we expect.
1320 */
1321 static int
1322 twe_status_check(struct twe_softc *sc, u_int status)
1323 {
1324 int rv;
1325
1326 rv = 0;
1327
1328 if ((status & TWE_STS_EXPECTED_BITS) != TWE_STS_EXPECTED_BITS) {
1329 printf("%s: missing status bits: 0x%08x\n", sc->sc_dv.dv_xname,
1330 status & ~TWE_STS_EXPECTED_BITS);
1331 rv = -1;
1332 }
1333
1334 if ((status & TWE_STS_UNEXPECTED_BITS) != 0) {
1335 printf("%s: unexpected status bits: 0x%08x\n",
1336 sc->sc_dv.dv_xname, status & TWE_STS_UNEXPECTED_BITS);
1337 rv = -1;
1338 }
1339
1340 return (rv);
1341 }
1342
1343 /*
1344 * Allocate and initialise a CCB.
1345 */
1346 static __inline void
1347 twe_ccb_init(struct twe_softc *sc, struct twe_ccb *ccb, int flags)
1348 {
1349 struct twe_cmd *tc;
1350
1351 ccb->ccb_tx.tx_handler = NULL;
1352 ccb->ccb_flags = flags;
1353 tc = ccb->ccb_cmd;
1354 tc->tc_status = 0;
1355 tc->tc_flags = 0;
1356 tc->tc_cmdid = ccb->ccb_cmdid;
1357 }
1358
1359 struct twe_ccb *
1360 twe_ccb_alloc(struct twe_softc *sc, int flags)
1361 {
1362 struct twe_ccb *ccb;
1363 int s;
1364
1365 s = splbio();
1366 if (__predict_false((flags & TWE_CCB_AEN) != 0)) {
1367 /* Use the reserved CCB. */
1368 ccb = sc->sc_ccbs;
1369 } else {
1370 /* Allocate a CCB and command block. */
1371 if (__predict_false((ccb =
1372 SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
1373 splx(s);
1374 return (NULL);
1375 }
1376 SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
1377 }
1378 #ifdef DIAGNOSTIC
1379 if ((long)(ccb - sc->sc_ccbs) == 0 && (flags & TWE_CCB_AEN) == 0)
1380 panic("twe_ccb_alloc: got reserved CCB for non-AEN");
1381 if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
1382 panic("twe_ccb_alloc: CCB %ld already allocated",
1383 (long)(ccb - sc->sc_ccbs));
1384 flags |= TWE_CCB_ALLOCED;
1385 #endif
1386 splx(s);
1387
1388 twe_ccb_init(sc, ccb, flags);
1389 return (ccb);
1390 }
1391
1392 struct twe_ccb *
1393 twe_ccb_alloc_wait(struct twe_softc *sc, int flags)
1394 {
1395 struct twe_ccb *ccb;
1396 int s;
1397
1398 KASSERT((flags & TWE_CCB_AEN) == 0);
1399
1400 s = splbio();
1401 while (__predict_false((ccb =
1402 SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
1403 sc->sc_flags |= TWEF_WAIT_CCB;
1404 (void) tsleep(&sc->sc_ccb_freelist, PRIBIO, "tweccb", 0);
1405 }
1406 SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
1407 #ifdef DIAGNOSTIC
1408 if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
1409 panic("twe_ccb_alloc_wait: CCB %ld already allocated",
1410 (long)(ccb - sc->sc_ccbs));
1411 flags |= TWE_CCB_ALLOCED;
1412 #endif
1413 splx(s);
1414
1415 twe_ccb_init(sc, ccb, flags);
1416 return (ccb);
1417 }
1418
1419 /*
1420 * Free a CCB.
1421 */
1422 void
1423 twe_ccb_free(struct twe_softc *sc, struct twe_ccb *ccb)
1424 {
1425 int s;
1426
1427 s = splbio();
1428 if ((ccb->ccb_flags & TWE_CCB_AEN) == 0) {
1429 SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, ccb_chain.slist);
1430 if (__predict_false((sc->sc_flags & TWEF_WAIT_CCB) != 0)) {
1431 sc->sc_flags &= ~TWEF_WAIT_CCB;
1432 wakeup(&sc->sc_ccb_freelist);
1433 }
1434 }
1435 ccb->ccb_flags = 0;
1436 splx(s);
1437 }
1438
1439 /*
1440 * Map the specified CCB's command block and data buffer (if any) into
1441 * controller visible space. Perform DMA synchronisation.
1442 */
1443 int
1444 twe_ccb_map(struct twe_softc *sc, struct twe_ccb *ccb)
1445 {
1446 struct twe_cmd *tc;
1447 int flags, nsegs, i, s, rv;
1448 void *data;
1449
1450 /*
1451 * The data as a whole must be 512-byte aligned.
1452 */
1453 if (((u_long)ccb->ccb_data & (TWE_ALIGNMENT - 1)) != 0) {
1454 s = splvm();
1455 /* XXX */
1456 ccb->ccb_abuf = uvm_km_kmemalloc(kmem_map, NULL,
1457 ccb->ccb_datasize, UVM_KMF_NOWAIT);
1458 splx(s);
1459 data = (void *)ccb->ccb_abuf;
1460 if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
1461 memcpy(data, ccb->ccb_data, ccb->ccb_datasize);
1462 } else {
1463 ccb->ccb_abuf = (vaddr_t)0;
1464 data = ccb->ccb_data;
1465 }
1466
1467 /*
1468 * Map the data buffer into bus space and build the S/G list.
1469 */
1470 rv = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer, data,
1471 ccb->ccb_datasize, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1472 ((ccb->ccb_flags & TWE_CCB_DATA_IN) ?
1473 BUS_DMA_READ : BUS_DMA_WRITE));
1474 if (rv != 0) {
1475 if (ccb->ccb_abuf != (vaddr_t)0) {
1476 s = splvm();
1477 /* XXX */
1478 uvm_km_free(kmem_map, ccb->ccb_abuf,
1479 ccb->ccb_datasize);
1480 splx(s);
1481 }
1482 return (rv);
1483 }
1484
1485 nsegs = ccb->ccb_dmamap_xfer->dm_nsegs;
1486 tc = ccb->ccb_cmd;
1487 tc->tc_size += 2 * nsegs;
1488
1489 	/* The location of the S/G list is dependent upon command type. */
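	/*
	 * tc_opcode >> 5 recovers the base command size that was OR'd into
	 * the opcode and selects the matching S/G list layout below.
	 */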
1490 switch (tc->tc_opcode >> 5) {
1491 case 2:
1492 for (i = 0; i < nsegs; i++) {
1493 tc->tc_args.param.sgl[i].tsg_address =
1494 htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
1495 tc->tc_args.param.sgl[i].tsg_length =
1496 htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
1497 }
1498 /* XXX Needed? */
1499 for (; i < TWE_SG_SIZE; i++) {
1500 tc->tc_args.param.sgl[i].tsg_address = 0;
1501 tc->tc_args.param.sgl[i].tsg_length = 0;
1502 }
1503 break;
1504 case 3:
1505 for (i = 0; i < nsegs; i++) {
1506 tc->tc_args.io.sgl[i].tsg_address =
1507 htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
1508 tc->tc_args.io.sgl[i].tsg_length =
1509 htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
1510 }
1511 /* XXX Needed? */
1512 for (; i < TWE_SG_SIZE; i++) {
1513 tc->tc_args.io.sgl[i].tsg_address = 0;
1514 tc->tc_args.io.sgl[i].tsg_length = 0;
1515 }
1516 break;
1517 #ifdef DEBUG
1518 default:
1519 panic("twe_ccb_map: oops");
1520 #endif
1521 }
1522
1523 if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
1524 flags = BUS_DMASYNC_PREREAD;
1525 else
1526 flags = 0;
1527 if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
1528 flags |= BUS_DMASYNC_PREWRITE;
1529
1530 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
1531 ccb->ccb_datasize, flags);
1532 return (0);
1533 }
1534
1535 /*
1536 * Unmap the specified CCB's command block and data buffer (if any) and
1537 * perform DMA synchronisation.
1538 */
1539 void
1540 twe_ccb_unmap(struct twe_softc *sc, struct twe_ccb *ccb)
1541 {
1542 int flags, s;
1543
1544 if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
1545 flags = BUS_DMASYNC_POSTREAD;
1546 else
1547 flags = 0;
1548 if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
1549 flags |= BUS_DMASYNC_POSTWRITE;
1550
1551 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
1552 ccb->ccb_datasize, flags);
1553 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
1554
1555 if (ccb->ccb_abuf != (vaddr_t)0) {
1556 if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
1557 memcpy(ccb->ccb_data, (void *)ccb->ccb_abuf,
1558 ccb->ccb_datasize);
1559 s = splvm();
1560 /* XXX */
1561 uvm_km_free(kmem_map, ccb->ccb_abuf, ccb->ccb_datasize);
1562 splx(s);
1563 }
1564 }
1565
1566 /*
1567 * Submit a command to the controller and poll on completion. Return
1568 * non-zero on timeout (but don't check status, as some command types don't
1569 * return status). Must be called with interrupts blocked.
1570 */
1571 int
1572 twe_ccb_poll(struct twe_softc *sc, struct twe_ccb *ccb, int timo)
1573 {
1574 int rv;
1575
1576 if ((rv = twe_ccb_submit(sc, ccb)) != 0)
1577 return (rv);
1578
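	/* Poll for completion every 100us, for at most timo * 1000 polls. */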
1579 for (timo *= 1000; timo != 0; timo--) {
1580 twe_poll(sc);
1581 if ((ccb->ccb_flags & TWE_CCB_COMPLETE) != 0)
1582 break;
1583 DELAY(100);
1584 }
1585
1586 return (timo == 0);
1587 }
1588
1589 /*
1590 * If a CCB is specified, enqueue it. Pull CCBs off the software queue in
1591 * the order that they were enqueued and try to submit their command blocks
1592 * to the controller for execution.
1593 */
1594 void
1595 twe_ccb_enqueue(struct twe_softc *sc, struct twe_ccb *ccb)
1596 {
1597 int s;
1598
1599 s = splbio();
1600
1601 if (ccb != NULL)
1602 SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);
1603
1604 while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) {
1605 if (twe_ccb_submit(sc, ccb))
1606 break;
1607 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb_chain.simpleq);
1608 }
1609
1610 splx(s);
1611 }
1612
1613 /*
1614 * Submit the command block associated with the specified CCB to the
1615 * controller for execution. Must be called with interrupts blocked.
1616 */
1617 int
1618 twe_ccb_submit(struct twe_softc *sc, struct twe_ccb *ccb)
1619 {
1620 bus_addr_t pa;
1621 int rv;
1622 u_int status;
1623
1624 /* Check to see if we can post a command. */
1625 status = twe_inl(sc, TWE_REG_STS);
1626 twe_status_check(sc, status);
1627
1628 if ((status & TWE_STS_CMD_QUEUE_FULL) == 0) {
1629 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1630 (caddr_t)ccb->ccb_cmd - sc->sc_cmds, sizeof(struct twe_cmd),
1631 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1632 #ifdef DIAGNOSTIC
1633 if ((ccb->ccb_flags & TWE_CCB_ALLOCED) == 0)
1634 panic("%s: CCB %ld not ALLOCED\n",
1635 sc->sc_dv.dv_xname, (long)(ccb - sc->sc_ccbs));
1636 #endif
1637 ccb->ccb_flags |= TWE_CCB_ACTIVE;
1638 pa = sc->sc_cmds_paddr +
1639 ccb->ccb_cmdid * sizeof(struct twe_cmd);
1640 twe_outl(sc, TWE_REG_CMD_QUEUE, (u_int32_t)pa);
1641 rv = 0;
1642 } else
1643 rv = EBUSY;
1644
1645 return (rv);
1646 }
1647
1648
1649 /*
1650 * Accept an open operation on the control device.
1651 */
1652 int
1653 tweopen(dev_t dev, int flag, int mode, struct proc *p)
1654 {
1655 struct twe_softc *twe;
1656
1657 if ((twe = device_lookup(&twe_cd, minor(dev))) == NULL)
1658 return (ENXIO);
1659 if ((twe->sc_flags & TWEF_OPEN) != 0)
1660 return (EBUSY);
1661
1662 twe->sc_flags |= TWEF_OPEN;
1663 return (0);
1664 }
1665
1666 /*
1667 * Accept the last close on the control device.
1668 */
1669 int
1670 tweclose(dev_t dev, int flag, int mode, struct proc *p)
1671 {
1672 struct twe_softc *twe;
1673
1674 twe = device_lookup(&twe_cd, minor(dev));
1675 twe->sc_flags &= ~TWEF_OPEN;
1676 return (0);
1677 }
1678
1679 static void
1680 twe_tweio_command_handler(struct twe_ccb *ccb, int error)
1681 {
1682
1683 /* Just wake up the sleeper. */
1684 wakeup(ccb);
1685 }
1686
1687 /*
1688 * Handle control operations.
1689 */
1690 int
1691 tweioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
1692 {
1693 struct twe_softc *twe;
1694 struct twe_ccb *ccb;
1695 struct twe_param *param;
1696 struct twe_usercommand *tu;
1697 struct twe_paramcommand *tp;
1698 struct twe_drivecommand *td;
1699 void *pdata = NULL;
1700 int s, error = 0;
1701 u_int8_t cmdid;
1702
1703 if (securelevel >= 2)
1704 return (EPERM);
1705
1706 twe = device_lookup(&twe_cd, minor(dev));
1707 tu = (struct twe_usercommand *)data;
1708 tp = (struct twe_paramcommand *)data;
1709 td = (struct twe_drivecommand *)data;
1710
1711 /* This is intended to be compatible with the FreeBSD interface. */
1712 switch (cmd) {
1713 case TWEIO_COMMAND:
1714 /* XXX mutex */
1715 if (tu->tu_size > 0) {
1716 /*
1717 * XXX Handle > TWE_SECTOR_SIZE? Let's see if
1718 * it's really necessary, first.
1719 */
1720 if (tu->tu_size > TWE_SECTOR_SIZE) {
1721 #ifdef TWE_DEBUG
1722 printf("%s: TWEIO_COMMAND: tu_size = %d\n",
1723 twe->sc_dv.dv_xname, tu->tu_size);
1724 #endif
1725 return EINVAL;
1726 }
1727 pdata = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_WAITOK);
1728 error = copyin(tu->tu_data, pdata, tu->tu_size);
1729 if (error != 0)
1730 goto done;
1731 ccb = twe_ccb_alloc_wait(twe,
1732 TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
1733 KASSERT(ccb != NULL);
1734 ccb->ccb_data = pdata;
1735 ccb->ccb_datasize = TWE_SECTOR_SIZE;
1736 } else {
1737 ccb = twe_ccb_alloc_wait(twe, 0);
1738 KASSERT(ccb != NULL);
1739 }
1740
1741 ccb->ccb_tx.tx_handler = twe_tweio_command_handler;
1742 ccb->ccb_tx.tx_context = NULL;
1743 ccb->ccb_tx.tx_dv = &twe->sc_dv;
1744
1745 cmdid = ccb->ccb_cmdid;
1746 memcpy(ccb->ccb_cmd, &tu->tu_cmd, sizeof(struct twe_cmd));
1747 ccb->ccb_cmd->tc_cmdid = cmdid;
1748
1749 /* Map the transfer. */
1750 if ((error = twe_ccb_map(twe, ccb)) != 0) {
1751 twe_ccb_free(twe, ccb);
1752 goto done;
1753 }
1754
1755 /* Submit the command and wait up to 1 minute. */
1756 error = 0;
1757 twe_ccb_enqueue(twe, ccb);
1758 s = splbio();
1759 while ((ccb->ccb_flags & TWE_CCB_COMPLETE) == 0)
1760 if ((error = tsleep(ccb, PRIBIO, "tweioctl",
1761 60 * hz)) != 0)
1762 break;
1763 splx(s);
1764
1765 /* Copy the command back to the ioctl argument. */
1766 memcpy(&tu->tu_cmd, ccb->ccb_cmd, sizeof(struct twe_cmd));
1767 #ifdef TWE_DEBUG
1768 printf("%s: TWEIO_COMMAND: tc_opcode = 0x%02x, "
1769 "tc_status = 0x%02x\n", twe->sc_dv.dv_xname,
1770 tu->tu_cmd.tc_opcode, tu->tu_cmd.tc_status);
1771 #endif
1772
1773 s = splbio();
1774 twe_ccb_free(twe, ccb);
1775 splx(s);
1776
1777 if (tu->tu_size > 0)
1778 error = copyout(pdata, tu->tu_data, tu->tu_size);
1779 goto done;
1780
1781 case TWEIO_STATS:
1782 return (ENOENT);
1783
1784 case TWEIO_AEN_POLL:
1785 s = splbio();
1786 *(u_int *)data = twe_aen_dequeue(twe);
1787 splx(s);
1788 return (0);
1789
1790 case TWEIO_AEN_WAIT:
1791 s = splbio();
1792 while ((*(u_int *)data =
1793 twe_aen_dequeue(twe)) == TWE_AEN_QUEUE_EMPTY) {
1794 twe->sc_flags |= TWEF_AENQ_WAIT;
1795 error = tsleep(&twe->sc_aen_queue, PRIBIO | PCATCH,
1796 "tweaen", 0);
1797 if (error == EINTR) {
1798 splx(s);
1799 return (error);
1800 }
1801 }
1802 splx(s);
1803 return (0);
1804
1805 case TWEIO_GET_PARAM:
1806 error = twe_param_get(twe, tp->tp_table_id, tp->tp_param_id,
1807 		    tp->tp_size, 0, &param);
1808 if (error != 0)
1809 return (error);
1810 if (param->tp_param_size > tp->tp_size) {
1811 error = EFAULT;
1812 goto done;
1813 }
1814 error = copyout(param->tp_data, tp->tp_data,
1815 param->tp_param_size);
1816 goto done;
1817
1818 case TWEIO_SET_PARAM:
1819 pdata = malloc(tp->tp_size, M_DEVBUF, M_WAITOK);
1820 if ((error = copyin(tp->tp_data, pdata, tp->tp_size)) != 0)
1821 goto done;
1822 error = twe_param_set(twe, tp->tp_table_id, tp->tp_param_id,
1823 tp->tp_size, pdata);
1824 goto done;
1825
1826 case TWEIO_RESET:
1827 s = splbio();
1828 twe_reset(twe);
1829 splx(s);
1830 return (0);
1831
1832 case TWEIO_ADD_UNIT:
1833 /* XXX mutex */
1834 return (twe_add_unit(twe, td->td_unit));
1835
1836 case TWEIO_DEL_UNIT:
1837 /* XXX mutex */
1838 return (twe_del_unit(twe, td->td_unit));
1839
1840 default:
1841 return EINVAL;
1842 }
1843 done:
1844 if (pdata)
1845 free(pdata, M_DEVBUF);
1846 return error;
1847 }
1848
1849 /*
1850 * Print some information about the controller
1851 */
1852 static void
1853 twe_describe_controller(struct twe_softc *sc)
1854 {
1855 struct twe_param *p[6];
1856 int i, rv = 0;
1857 uint32_t dsize;
1858 uint8_t ports;
1859
1860 /* get the port count */
1861 rv |= twe_param_get_1(sc, TWE_PARAM_CONTROLLER,
1862 TWE_PARAM_CONTROLLER_PortCount, &ports);
1863
1864 /* get version strings */
1865 rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_Mon,
1866 16, NULL, &p[0]);
1867 rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_FW,
1868 16, NULL, &p[1]);
1869 rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_BIOS,
1870 16, NULL, &p[2]);
1871 rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCB,
1872 8, NULL, &p[3]);
1873 rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_ATA,
1874 8, NULL, &p[4]);
1875 rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCI,
1876 8, NULL, &p[5]);
1877
1878 if (rv) {
1879 /* some error occurred */
1880 aprint_error("%s: failed to fetch version information\n",
1881 sc->sc_dv.dv_xname);
1882 return;
1883 }
1884
1885 aprint_normal("%s: %d ports, Firmware %.16s, BIOS %.16s\n",
1886 sc->sc_dv.dv_xname, ports,
1887 p[1]->tp_data, p[2]->tp_data);
1888
1889 aprint_verbose("%s: Monitor %.16s, PCB %.8s, Achip %.8s, Pchip %.8s\n",
1890 sc->sc_dv.dv_xname,
1891 p[0]->tp_data, p[3]->tp_data,
1892 p[4]->tp_data, p[5]->tp_data);
1893
1894 free(p[0], M_DEVBUF);
1895 free(p[1], M_DEVBUF);
1896 free(p[2], M_DEVBUF);
1897 free(p[3], M_DEVBUF);
1898 free(p[4], M_DEVBUF);
1899 free(p[5], M_DEVBUF);
1900
1901 rv = twe_param_get(sc, TWE_PARAM_DRIVESUMMARY,
1902 TWE_PARAM_DRIVESUMMARY_Status, 16, NULL, &p[0]);
1903 if (rv) {
1904 aprint_error("%s: failed to get drive status summary\n",
1905 sc->sc_dv.dv_xname);
1906 return;
1907 }
1908 for (i = 0; i < ports; i++) {
1909 if (p[0]->tp_data[i] != TWE_PARAM_DRIVESTATUS_Present)
1910 continue;
1911 rv = twe_param_get_4(sc, TWE_PARAM_DRIVEINFO + i,
1912 TWE_PARAM_DRIVEINFO_Size, &dsize);
1913 if (rv) {
1914 aprint_error(
1915 "%s: unable to get drive size for port %d\n",
1916 sc->sc_dv.dv_xname, i);
1917 continue;
1918 }
1919 rv = twe_param_get(sc, TWE_PARAM_DRIVEINFO + i,
1920 TWE_PARAM_DRIVEINFO_Model, 40, NULL, &p[1]);
1921 if (rv) {
1922 aprint_error(
1923 "%s: unable to get drive model for port %d\n",
1924 sc->sc_dv.dv_xname, i);
1925 continue;
1926 }
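		/* The reported size is in 512-byte sectors; 2048 sectors = 1MB. */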
1927 aprint_verbose("%s: port %d: %.40s %d MB\n", sc->sc_dv.dv_xname,
1928 i, p[1]->tp_data, dsize / 2048);
1929 free(p[1], M_DEVBUF);
1930 }
1931 free(p[0], M_DEVBUF);
1932 }
1933