/* $NetBSD: twe.c,v 1.4 2000/11/14 18:42:58 thorpej Exp $ */

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: twe.c,v 1.1 2000/05/24 23:35:23 msmith Exp
 */

/*
 * Driver for the 3ware Escalade family of RAID controllers.
 */

#include "opt_twe.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/disk.h>

#include <uvm/uvm_extern.h>

#include <machine/bswap.h>
#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/twereg.h>
#include <dev/pci/twevar.h>

#define	TWE_INL(sc, port) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, port)
#define	TWE_OUTL(sc, port, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, port, val)

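/*
 * Total number of command blocks and CCBs allocated; CCB 0 is reserved
 * for parameter retrieval (see twe_attach() and twe_ccb_alloc()).
 */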
#if TWE_MAX_QUEUECNT == TWE_MAX_CMDS
#define	TWE_REAL_MAX_QUEUECNT	TWE_MAX_CMDS
#else
#define	TWE_REAL_MAX_QUEUECNT	(TWE_MAX_CMDS + 1)
#endif

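/* Offset of the PCI base address register mapping the controller's I/O space. */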
#define	PCI_CBIO	0x10

static void	twe_aen_handler(struct twe_ccb *, int);
static void	twe_attach(struct device *, struct device *, void *);
static int	twe_init_connection(struct twe_softc *);
static int	twe_intr(void *);
static int	twe_match(struct device *, struct cfdata *, void *);
static void	*twe_param_get(struct twe_softc *, int, int, size_t,
		    void (*)(struct twe_ccb *, int));
static void	twe_poll(struct twe_softc *);
static int	twe_print(void *, const char *);
static int	twe_reset(struct twe_softc *);
static int	twe_submatch(struct device *, struct cfdata *, void *);
static int	twe_status_check(struct twe_softc *, u_int);
static int	twe_status_wait(struct twe_softc *, u_int, int);

struct cfattach twe_ca = {
	sizeof(struct twe_softc), twe_match, twe_attach
};

struct {
	const u_int	aen;	/* High byte non-zero if w/unit */
	const char	*desc;
} static const twe_aen_names[] = {
	{ 0x0000, "queue empty" },
	{ 0x0001, "soft reset" },
	{ 0x0102, "degraded mirror" },
	{ 0x0003, "controller error" },
	{ 0x0104, "rebuild fail" },
	{ 0x0105, "rebuild done" },
	{ 0x0106, "incompatible unit" },
	{ 0x0107, "init done" },
	{ 0x0108, "unclean shutdown" },
	{ 0x0109, "aport timeout" },
	{ 0x010a, "drive error" },
	{ 0x010b, "rebuild started" },
	{ 0x0015, "table undefined" },
	{ 0x00ff, "aen queue full" },
};

/*
 * Match a supported board.
 */
static int
twe_match(struct device *parent, struct cfdata *cfdata, void *aux)
{
	struct pci_attach_args *pa;

	pa = aux;

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE);
}

/*
 * Attach a supported board.
 *
 * XXX This doesn't fail gracefully.
 */
static void
twe_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa;
	struct twe_softc *sc;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	pcireg_t csr;
	const char *intrstr;
	int size, i, rv, rseg;
	struct twe_param *dtp, *ctp;
	bus_dma_segment_t seg;
	struct twe_cmd *tc;
	struct twe_attach_args twea;
	struct twe_ccb *ccb;

	sc = (struct twe_softc *)self;
	pa = aux;
	pc = pa->pa_pc;
	sc->sc_dmat = pa->pa_dmat;
	SIMPLEQ_INIT(&sc->sc_ccb_queue);
	SLIST_INIT(&sc->sc_ccb_freelist);

	printf(": 3ware Escalade\n");

	if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, NULL)) {
		printf("%s: can't map i/o space\n", sc->sc_dv.dv_xname);
		return;
	}

	/* Enable the device. */
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_MASTER_ENABLE);

	/* Map and establish the interrupt. */
	if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf("%s: can't map interrupt\n", sc->sc_dv.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_BIO, twe_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: can't establish interrupt", sc->sc_dv.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr);

	/*
	 * Allocate and initialise the command blocks and CCBs.
	 */
	size = sizeof(struct twe_cmd) * TWE_REAL_MAX_QUEUECNT;

	if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
	    &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate commands, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    (caddr_t *)&sc->sc_cmds,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map commands, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamap_create(sc->sc_dmat, size, size, 1, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("%s: unable to create command DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_cmds,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load command DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

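	/*
	 * Commands are posted to the controller by bus address; record the
	 * base address of the command block array (see twe_ccb_submit()).
	 */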
	sc->sc_cmds_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
	memset(sc->sc_cmds, 0, size);

	ccb = malloc(sizeof(*ccb) * TWE_REAL_MAX_QUEUECNT, M_DEVBUF, M_WAITOK);
	if (ccb == NULL) {
		printf("%s: unable to allocate CCBs\n", sc->sc_dv.dv_xname);
		return;
	}

	sc->sc_ccbs = ccb;
	tc = (struct twe_cmd *)sc->sc_cmds;

	for (i = 0; i < TWE_REAL_MAX_QUEUECNT; i++, tc++, ccb++) {
		ccb->ccb_cmd = tc;
		ccb->ccb_cmdid = i;
		ccb->ccb_flags = 0;
		rv = bus_dmamap_create(sc->sc_dmat, TWE_MAX_XFER,
		    TWE_MAX_SEGS, PAGE_SIZE, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (rv != 0)
			break;
		/* Save one CCB for parameter retrieval. */
		if (i != 0)
			SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb,
			    ccb_chain.slist);
	}
	if ((sc->sc_nccbs = i) <= TWE_MIN_QUEUECNT) {
		printf("%s: too few CCBs available\n", sc->sc_dv.dv_xname);
		return;
	}
	if (sc->sc_nccbs != TWE_REAL_MAX_QUEUECNT)
		printf("%s: %d/%d CCBs usable\n", sc->sc_dv.dv_xname,
		    sc->sc_nccbs, TWE_REAL_MAX_QUEUECNT);

	/* Wait for the controller to become ready. */
	if (twe_status_wait(sc, TWE_STS_MICROCONTROLLER_READY, 6)) {
		printf("%s: microcontroller not ready\n", sc->sc_dv.dv_xname);
		return;
	}

	TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_DISABLE_INTRS);

	/* Reset the controller. */
	if (twe_reset(sc)) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return;
	}

	/* Find attached units. */
	dtp = twe_param_get(sc, TWE_PARAM_UNITSUMMARY,
	    TWE_PARAM_UNITSUMMARY_Status, TWE_MAX_UNITS, NULL);
	if (dtp == NULL) {
		printf("%s: can't detect attached units\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/* For each detected unit, collect size and store in an array. */
	for (i = 0, sc->sc_nunits = 0; i < TWE_MAX_UNITS; i++) {
		/* Unit present? */
		if ((dtp->tp_data[i] & TWE_PARAM_UNITSTATUS_Online) == 0) {
			sc->sc_dsize[i] = 0;
			continue;
		}

		ctp = twe_param_get(sc, TWE_PARAM_UNITINFO + i,
		    TWE_PARAM_UNITINFO_Capacity, 4, NULL);
		if (ctp == NULL) {
			printf("%s: error fetching capacity for unit %d\n",
			    sc->sc_dv.dv_xname, i);
			continue;
		}

		sc->sc_dsize[i] = le32toh(*(u_int32_t *)ctp->tp_data);
		free(ctp, M_DEVBUF);
		sc->sc_nunits++;
	}
	free(dtp, M_DEVBUF);

	/* Initialise connection with controller and enable interrupts. */
	twe_init_connection(sc);
	TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR |
	    TWE_CTL_UNMASK_RESP_INTR |
	    TWE_CTL_ENABLE_INTRS);

	/* Attach sub-devices. */
	for (i = 0; i < TWE_MAX_UNITS; i++) {
		if (sc->sc_dsize[i] == 0)
			continue;
		twea.twea_unit = i;
		config_found_sm(&sc->sc_dv, &twea, twe_print, twe_submatch);
	}
}

/*
 * Reset the controller. Currently only useful at attach time; must be
 * called with interrupts blocked.
 */
static int
twe_reset(struct twe_softc *sc)
{
	struct twe_param *tp;
	u_int aen, status;
	volatile u_int32_t junk;
	int got;

	/* Issue a soft reset. */
	TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_ISSUE_SOFT_RESET |
	    TWE_CTL_CLEAR_HOST_INTR |
	    TWE_CTL_CLEAR_ATTN_INTR |
	    TWE_CTL_MASK_CMD_INTR |
	    TWE_CTL_MASK_RESP_INTR |
	    TWE_CTL_CLEAR_ERROR_STS |
	    TWE_CTL_DISABLE_INTRS);

	if (twe_status_wait(sc, TWE_STS_ATTN_INTR, 15)) {
		printf("%s: no attention interrupt\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Pull AENs out of the controller; look for a soft reset AEN. */
	for (got = 0;;) {
		tp = twe_param_get(sc, TWE_PARAM_AEN, TWE_PARAM_AEN_UnitCode,
		    2, NULL);
		if (tp == NULL)
			return (-1);
		aen = TWE_AEN_CODE(le16toh(*(u_int16_t *)tp->tp_data));
		free(tp, M_DEVBUF);
		if (aen == TWE_AEN_QUEUE_EMPTY)
			break;
		if (aen == TWE_AEN_SOFT_RESET)
			got = 1;
	}
	if (!got) {
		printf("%s: reset not reported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Check controller status. */
	status = TWE_INL(sc, TWE_REG_STS);
	if (twe_status_check(sc, status)) {
		printf("%s: controller errors detected\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Drain the response queue. */
	for (;;) {
		status = TWE_INL(sc, TWE_REG_STS);
		if (twe_status_check(sc, status) != 0) {
			printf("%s: can't drain response queue\n",
			    sc->sc_dv.dv_xname);
			return (-1);
		}
		if ((status & TWE_STS_RESP_QUEUE_EMPTY) != 0)
			break;
		junk = TWE_INL(sc, TWE_REG_RESP_QUEUE);
	}

	return (0);
}

/*
 * Print autoconfiguration message for a sub-device.
 */
static int
twe_print(void *aux, const char *pnp)
{
	struct twe_attach_args *twea;

	twea = aux;

	if (pnp != NULL)
		printf("block device at %s", pnp);
	printf(" unit %d", twea->twea_unit);
	return (UNCONF);
}

/*
 * Match a sub-device.
 */
static int
twe_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct twe_attach_args *twea;

	twea = aux;

	if (cf->tweacf_unit != TWECF_UNIT_DEFAULT &&
	    cf->tweacf_unit != twea->twea_unit)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Interrupt service routine.
 */
static int
twe_intr(void *arg)
{
	struct twe_softc *sc;
	u_int status;
	int caught;

	sc = arg;
	caught = 0;
	status = TWE_INL(sc, TWE_REG_STS);
	twe_status_check(sc, status);

	/* Host interrupts - purpose unknown. */
	if ((status & TWE_STS_HOST_INTR) != 0) {
#ifdef DIAGNOSTIC
		printf("%s: host interrupt\n", sc->sc_dv.dv_xname);
#endif
		TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_CLEAR_HOST_INTR);
		caught = 1;
	}

	/*
	 * Attention interrupts, signalled when a controller or child device
	 * state change has occurred.
	 */
	if ((status & TWE_STS_ATTN_INTR) != 0) {
		twe_param_get(sc, TWE_PARAM_AEN, TWE_PARAM_AEN_UnitCode, 2,
		    twe_aen_handler);
		TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
		caught = 1;
	}

	/*
	 * Command interrupts, signalled when the controller can accept more
	 * commands. We don't use this; instead, we try to submit commands
	 * when we receive them, and when other commands have completed.
	 * Mask it so we don't get another one.
	 */
	if ((status & TWE_STS_CMD_INTR) != 0) {
#ifdef DIAGNOSTIC
		printf("%s: command interrupt\n", sc->sc_dv.dv_xname);
#endif
		TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_MASK_CMD_INTR);
		caught = 1;
	}

	if ((status & TWE_STS_RESP_INTR) != 0) {
		twe_poll(sc);
		caught = 1;
	}

	return (caught);
}

/*
 * Handle an AEN returned by the controller.
 */
static void
twe_aen_handler(struct twe_ccb *ccb, int error)
{
	struct twe_softc *sc;
	struct twe_param *tp;
	const char *str;
	u_int aen;
	int i, hu;

	sc = (struct twe_softc *)ccb->ccb_tx.tx_dv;
	tp = ccb->ccb_tx.tx_context;
	twe_ccb_unmap(sc, ccb);

	if (error) {
		printf("%s: error retrieving AEN\n", sc->sc_dv.dv_xname);
		aen = TWE_AEN_QUEUE_EMPTY;
	} else
		aen = le16toh(*(u_int16_t *)tp->tp_data);
	free(tp, M_DEVBUF);
	twe_ccb_free(sc, ccb);

	if (TWE_AEN_CODE(aen) != TWE_AEN_QUEUE_EMPTY) {
		str = "<unknown>";
		i = 0;
		hu = 0;

		while (i < sizeof(twe_aen_names) / sizeof(twe_aen_names[0])) {
			if (TWE_AEN_CODE(twe_aen_names[i].aen) ==
			    TWE_AEN_CODE(aen)) {
				str = twe_aen_names[i].desc;
				hu = (TWE_AEN_UNIT(twe_aen_names[i].aen) != 0);
				break;
			}
			i++;
		}
		printf("%s: AEN 0x%04x (%s) received", sc->sc_dv.dv_xname,
		    TWE_AEN_CODE(aen), str);
		if (hu != 0)
			printf(" for unit %d", TWE_AEN_UNIT(aen));
		printf("\n");

		/*
		 * Chain another retrieval in case interrupts have been
		 * coalesced.
		 */
		twe_param_get(sc, TWE_PARAM_AEN, TWE_PARAM_AEN_UnitCode, 2,
		    twe_aen_handler);
	}
}

/*
 * Execute a TWE_OP_GET_PARAM command. If a callback function is provided,
 * it will be called with the generated context when the command has
 * completed. If no callback is provided, the command is executed
 * synchronously and a pointer to a buffer containing the data is returned.
 *
 * The caller or callback is responsible for freeing the buffer.
 */
static void *
twe_param_get(struct twe_softc *sc, int table_id, int param_id, size_t size,
    void (*func)(struct twe_ccb *, int))
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	struct twe_param *tp;
	int rv, s;

	if (twe_ccb_alloc(sc, &ccb, TWE_CCB_PARAM | TWE_CCB_DATA_IN |
	    TWE_CCB_DATA_OUT) != 0)
		return (NULL);
	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
	if (tp == NULL) {
		twe_ccb_free(sc, ccb);
		return (NULL);
	}

	ccb->ccb_data = tp;
	ccb->ccb_datasize = TWE_SECTOR_SIZE;
	ccb->ccb_tx.tx_handler = func;
	ccb->ccb_tx.tx_context = tp;
	ccb->ccb_tx.tx_dv = &sc->sc_dv;

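	/*
	 * Build the GET_PARAM command. The value folded into the upper
	 * bits of the opcode is used by twe_ccb_map() to locate the S/G
	 * list within the command block.
	 */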
	tc = ccb->ccb_cmd;
	tc->tc_size = 2;
	tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
	tc->tc_unit = 0;
	tc->tc_count = htole16(1);

	/* Fill in the outbound parameter data. */
	tp->tp_table_id = htole16(table_id);
	tp->tp_param_id = param_id;
	tp->tp_param_size = size;

	/* Map the transfer. */
	if (twe_ccb_map(sc, ccb) != 0) {
		twe_ccb_free(sc, ccb);
		free(tp, M_DEVBUF);
		return (NULL);
	}

	/* Submit the command and either wait or let the callback handle it. */
	if (func == NULL) {
		s = splbio();
		if ((rv = twe_ccb_submit(sc, ccb)) == 0)
			rv = twe_ccb_poll(sc, ccb, 5);
		twe_ccb_unmap(sc, ccb);
		twe_ccb_free(sc, ccb);
		splx(s);
		if (rv != 0) {
			free(tp, M_DEVBUF);
			tp = NULL;
		}
	} else {
		twe_ccb_enqueue(sc, ccb);
		tp = NULL;
	}

	return (tp);
}

/*
 * Execute a TWE_OP_INIT_CONNECTION command. Return non-zero on error.
 * Must be called with interrupts blocked.
 */
static int
twe_init_connection(struct twe_softc *sc)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	int rv;

	if ((rv = twe_ccb_alloc(sc, &ccb, 0)) != 0)
		return (rv);

	/* Build the command. */
	tc = ccb->ccb_cmd;
	tc->tc_size = 3;
	tc->tc_opcode = TWE_OP_INIT_CONNECTION;
	tc->tc_unit = 0;
	tc->tc_count = htole16(TWE_MAX_CMDS);
	tc->tc_args.init_connection.response_queue_pointer = 0;

	/* Submit the command for immediate execution. */
	if ((rv = twe_ccb_submit(sc, ccb)) == 0)
		rv = twe_ccb_poll(sc, ccb, 5);
	twe_ccb_free(sc, ccb);
	return (rv);
}

/*
 * Poll the controller for completed commands. Must be called with
 * interrupts blocked.
 */
static void
twe_poll(struct twe_softc *sc)
{
	struct twe_ccb *ccb;
	int found;
	u_int status, cmdid;

	found = 0;

	for (;;) {
		status = TWE_INL(sc, TWE_REG_STS);
		twe_status_check(sc, status);

		if ((status & TWE_STS_RESP_QUEUE_EMPTY))
			break;

		found = 1;
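		/*
		 * The response queue register encodes the ID of the command
		 * that has just completed.
		 */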
		cmdid = TWE_INL(sc, TWE_REG_RESP_QUEUE);
		cmdid = (cmdid & TWE_RESP_MASK) >> TWE_RESP_SHIFT;
		if (cmdid >= TWE_REAL_MAX_QUEUECNT) {
			printf("%s: bad completion\n", sc->sc_dv.dv_xname);
			continue;
		}

		ccb = sc->sc_ccbs + cmdid;
		if ((ccb->ccb_flags & TWE_CCB_ACTIVE) == 0) {
			printf("%s: bad completion (not active)\n",
			    sc->sc_dv.dv_xname);
			continue;
		}
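		/* Clears ACTIVE (known to be set) and sets COMPLETE in one step. */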
		ccb->ccb_flags ^= TWE_CCB_COMPLETE | TWE_CCB_ACTIVE;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    (caddr_t)ccb->ccb_cmd - sc->sc_cmds,
		    sizeof(struct twe_cmd),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Pass notification to upper layers. */
		if (ccb->ccb_tx.tx_handler != NULL)
			(*ccb->ccb_tx.tx_handler)(ccb,
			    ccb->ccb_cmd->tc_status != 0 ? EIO : 0);
	}

	/* If any commands have completed, run the software queue. */
	if (found)
		twe_ccb_enqueue(sc, NULL);
}

/*
 * Wait for `status' to be set in the controller status register. Return
 * zero if found, non-zero if the operation timed out.
 */
static int
twe_status_wait(struct twe_softc *sc, u_int32_t status, int timo)
{

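	/* Poll at 100ms intervals; `timo' counts polls, not seconds. */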
	for (; timo != 0; timo--) {
		if ((TWE_INL(sc, TWE_REG_STS) & status) == status)
			break;
		delay(100000);
	}

	return (timo == 0);
}

/*
 * Complain if the status bits aren't what we expect.
 */
static int
twe_status_check(struct twe_softc *sc, u_int status)
{
	int rv;

	rv = 0;

	if ((status & TWE_STS_EXPECTED_BITS) != TWE_STS_EXPECTED_BITS) {
		printf("%s: missing status bits: 0x%08x\n", sc->sc_dv.dv_xname,
		    ~status & TWE_STS_EXPECTED_BITS);
		rv = -1;
	}

	if ((status & TWE_STS_UNEXPECTED_BITS) != 0) {
		printf("%s: unexpected status bits: 0x%08x\n",
		    sc->sc_dv.dv_xname, status & TWE_STS_UNEXPECTED_BITS);
		rv = -1;
	}

	return (rv);
}

/*
 * Allocate and initialise a CCB.
 */
int
twe_ccb_alloc(struct twe_softc *sc, struct twe_ccb **ccbp, int flags)
{
	struct twe_cmd *tc;
	struct twe_ccb *ccb;
	int s;

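	/*
	 * CCB 0 is permanently reserved for parameter retrieval; all other
	 * requests are drawn from the freelist.
	 */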
	if ((flags & TWE_CCB_PARAM) != 0)
		ccb = sc->sc_ccbs;
	else {
		s = splbio();
		/* Allocate a CCB and command block. */
		if (SLIST_FIRST(&sc->sc_ccb_freelist) == NULL) {
			splx(s);
			return (EAGAIN);
		}
		ccb = SLIST_FIRST(&sc->sc_ccb_freelist);
		SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
		splx(s);
	}

#ifdef DIAGNOSTIC
	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
		panic("twe_ccb_alloc: CCB already allocated");
	flags |= TWE_CCB_ALLOCED;
#endif

	/* Initialise some fields and return. */
	ccb->ccb_tx.tx_handler = NULL;
	ccb->ccb_flags = flags;
	tc = ccb->ccb_cmd;
	tc->tc_status = 0;
	tc->tc_flags = 0;
	tc->tc_cmdid = ccb->ccb_cmdid;
	*ccbp = ccb;

	return (0);
}

/*
 * Free a CCB.
 */
void
twe_ccb_free(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int s;

	s = splbio();
	if ((ccb->ccb_flags & TWE_CCB_PARAM) == 0)
		SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, ccb_chain.slist);
	ccb->ccb_flags = 0;
	splx(s);
}

/*
 * Map the specified CCB's command block and data buffer (if any) into
 * controller visible space. Perform DMA synchronisation.
 */
int
twe_ccb_map(struct twe_softc *sc, struct twe_ccb *ccb)
{
	struct twe_cmd *tc;
	int flags, nsegs, i, s;
	void *data;

	/* The data as a whole must be 512-byte aligned. */
	if (((u_long)ccb->ccb_data & (TWE_ALIGNMENT - 1)) != 0) {
		s = splimp();
		/* XXX */
		ccb->ccb_abuf = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
		    ccb->ccb_datasize, UVM_KMF_NOWAIT);
		splx(s);
		data = (void *)ccb->ccb_abuf;
		if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
			memcpy(data, ccb->ccb_data, ccb->ccb_datasize);
	} else {
		ccb->ccb_abuf = (vaddr_t)0;
		data = ccb->ccb_data;
	}

	/* Map the data buffer into bus space and build the S/G list. */
	bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer, data,
	    ccb->ccb_datasize, NULL, BUS_DMA_NOWAIT);

	nsegs = ccb->ccb_dmamap_xfer->dm_nsegs;
	tc = ccb->ccb_cmd;
	tc->tc_size += 2 * nsegs;

	/* The location of the S/G list is dependent upon command type. */
	switch (tc->tc_opcode >> 5) {
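	/* 2: parameter-style commands; 3: I/O-style commands. */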
	case 2:
		for (i = 0; i < nsegs; i++) {
			tc->tc_args.param.sgl[i].tsg_address =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
			tc->tc_args.param.sgl[i].tsg_length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
		}
		/* XXX Needed? */
		for (; i < TWE_SG_SIZE; i++) {
			tc->tc_args.param.sgl[i].tsg_address = 0;
			tc->tc_args.param.sgl[i].tsg_length = 0;
		}
		break;
	case 3:
		for (i = 0; i < nsegs; i++) {
			tc->tc_args.io.sgl[i].tsg_address =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
			tc->tc_args.io.sgl[i].tsg_length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
		}
		/* XXX Needed? */
		for (; i < TWE_SG_SIZE; i++) {
			tc->tc_args.io.sgl[i].tsg_address = 0;
			tc->tc_args.io.sgl[i].tsg_length = 0;
		}
		break;
#ifdef DEBUG
	default:
		panic("twe_ccb_map: oops");
#endif
	}

	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
		flags = BUS_DMASYNC_PREREAD;
	else
		flags = 0;
	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
		flags |= BUS_DMASYNC_PREWRITE;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_datasize, flags);
	return (0);
}

/*
 * Unmap the specified CCB's command block and data buffer (if any) and
 * perform DMA synchronisation.
 */
void
twe_ccb_unmap(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int flags, s;

	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
		flags = BUS_DMASYNC_POSTREAD;
	else
		flags = 0;
	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
		flags |= BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_datasize, flags);
	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);

	if (ccb->ccb_abuf != (vaddr_t)0) {
		if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
			memcpy(ccb->ccb_data, (void *)ccb->ccb_abuf,
			    ccb->ccb_datasize);
		s = splimp();
		/* XXX */
		uvm_km_free(kmem_map, ccb->ccb_abuf, ccb->ccb_datasize);
		splx(s);
	}
}

/*
 * Wait for the specified CCB to complete. Return non-zero on timeout (but
 * don't check status, as some command types don't return status). Must be
 * called with interrupts blocked.
 */
int
twe_ccb_poll(struct twe_softc *sc, struct twe_ccb *ccb, int timo)
{

	for (; timo != 0; timo--) {
		twe_poll(sc);
		if ((ccb->ccb_flags & TWE_CCB_COMPLETE) != 0)
			break;
		DELAY(100000);
	}

	return (timo == 0);
}

/*
 * If a CCB is specified, enqueue it. Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
twe_ccb_enqueue(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int s;

	s = splbio();

	if (ccb != NULL)
		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);

	while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) {
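		/*
		 * If the controller won't accept the command, leave the CCB
		 * at the head of the queue; the queue is re-run from
		 * twe_poll() as commands complete.
		 */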
		if (twe_ccb_submit(sc, ccb))
			break;
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);
	}

	splx(s);
}

/*
 * Submit the command block associated with the specified CCB to the
 * controller for execution. Must be called with interrupts blocked.
 */
int
twe_ccb_submit(struct twe_softc *sc, struct twe_ccb *ccb)
{
	bus_addr_t pa;
	int rv;
	u_int status;

	/* Check to see if we can post a command. */
	status = TWE_INL(sc, TWE_REG_STS);
	twe_status_check(sc, status);

	if ((status & TWE_STS_CMD_QUEUE_FULL) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    (caddr_t)ccb->ccb_cmd - sc->sc_cmds, sizeof(struct twe_cmd),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
		ccb->ccb_flags |= TWE_CCB_ACTIVE;
		pa = sc->sc_cmds_paddr +
		    ccb->ccb_cmdid * sizeof(struct twe_cmd);
		TWE_OUTL(sc, TWE_REG_CMD_QUEUE, (u_int32_t)pa);
		rv = 0;
	} else
		rv = EBUSY;

	return (rv);
}