/* $NetBSD: twe.c,v 1.2 2000/10/20 15:14:25 ad Exp $ */

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: twe.c,v 1.1 2000/05/24 23:35:23 msmith Exp
 */

/*
 * Driver for the 3ware Escalade family of RAID controllers.
 */

#include "opt_twe.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/disk.h>

#include <uvm/uvm_extern.h>

#include <machine/bswap.h>
#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/twereg.h>
#include <dev/pci/twevar.h>

#define TWE_INL(sc, port) \
        bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, port)
#define TWE_OUTL(sc, port, val) \
        bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, port, val)

#define PCI_CBIO 0x10

#if TWE_MAX_QUEUECNT > TWE_MAX_CMDS
#error TWE_MAX_QUEUECNT > TWE_MAX_CMDS
#endif

static void twe_aen_handler(struct twe_ccb *, int);
static void twe_attach(struct device *, struct device *, void *);
static int twe_init_connection(struct twe_softc *);
static int twe_intr(void *);
static int twe_match(struct device *, struct cfdata *, void *);
static void *twe_param_get(struct twe_softc *, int, int, size_t,
    void (*)(struct twe_ccb *, int));
static void twe_poll(struct twe_softc *);
static int twe_print(void *, const char *);
static int twe_reset(struct twe_softc *);
static int twe_submatch(struct device *, struct cfdata *, void *);
static int twe_status_check(struct twe_softc *, u_int);
static int twe_status_wait(struct twe_softc *, u_int, int);

struct cfattach twe_ca = {
        sizeof(struct twe_softc), twe_match, twe_attach
};

struct {
        const u_int aen;
        const char *desc;
} static const twe_aen_names[] = {
        { 0x0000, "queue empty" },
        { 0x0001, "soft reset" },
        { 0x0002, "degraded mirror" },
        { 0x0003, "controller error" },
        { 0x0004, "rebuild fail" },
        { 0x0005, "rebuild done" },
        { 0x00ff, "aen queue full" },
};

/*
 * Match a supported board.
 */
static int
twe_match(struct device *parent, struct cfdata *cfdata, void *aux)
{
        struct pci_attach_args *pa;

        pa = aux;

        return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE &&
            PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE);
}

/*
 * Attach a supported board.
 *
 * XXX This doesn't fail gracefully.
 */
static void
twe_attach(struct device *parent, struct device *self, void *aux)
{
        struct pci_attach_args *pa;
        struct twe_softc *sc;
        pci_chipset_tag_t pc;
        pci_intr_handle_t ih;
        pcireg_t csr;
        const char *intrstr;
        int size, i, rv, rseg;
        struct twe_param *dtp, *ctp;
        bus_dma_segment_t seg;
        struct twe_cmd *tc;
        struct twe_attach_args twea;
        struct twe_ccb *ccb;

        sc = (struct twe_softc *)self;
        pa = aux;
        pc = pa->pa_pc;
        sc->sc_dmat = pa->pa_dmat;
        SIMPLEQ_INIT(&sc->sc_ccb_queue);
        SLIST_INIT(&sc->sc_ccb_freelist);

        printf(": 3ware Escalade RAID controller\n");

        if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
            &sc->sc_iot, &sc->sc_ioh, NULL, NULL)) {
                printf("%s: can't map i/o space\n", sc->sc_dv.dv_xname);
                return;
        }

        /* Enable the device. */
        csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
        pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
            csr | PCI_COMMAND_MASTER_ENABLE);

        /* Map and establish the interrupt. */
        if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin,
            pa->pa_intrline, &ih)) {
                printf("%s: can't map interrupt\n", sc->sc_dv.dv_xname);
                return;
        }
        intrstr = pci_intr_string(pc, ih);
        sc->sc_ih = pci_intr_establish(pc, ih, IPL_BIO, twe_intr, sc);
        if (sc->sc_ih == NULL) {
                printf("%s: can't establish interrupt", sc->sc_dv.dv_xname);
                if (intrstr != NULL)
                        printf(" at %s", intrstr);
                printf("\n");
                return;
        }
        if (intrstr != NULL)
                printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr);

        /*
         * Allocate and initialise the command blocks and CCBs.
         */
        size = sizeof(struct twe_cmd) * TWE_MAX_QUEUECNT;

        if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, NBPG, 0, &seg, 1,
            &rseg, BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to allocate commands, rv = %d\n",
                    sc->sc_dv.dv_xname, rv);
                return;
        }

        if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
            (caddr_t *)&sc->sc_cmds,
            BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
                printf("%s: unable to map commands, rv = %d\n",
                    sc->sc_dv.dv_xname, rv);
                return;
        }

        if ((rv = bus_dmamap_create(sc->sc_dmat, size, size, 1, 0,
            BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
                printf("%s: unable to create command DMA map, rv = %d\n",
                    sc->sc_dv.dv_xname, rv);
                return;
        }

        if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_cmds,
            size, NULL, BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to load command DMA map, rv = %d\n",
                    sc->sc_dv.dv_xname, rv);
                return;
        }

        sc->sc_cmds_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
        memset(sc->sc_cmds, 0, size);

        ccb = malloc(sizeof(*ccb) * TWE_MAX_QUEUECNT, M_DEVBUF, M_WAITOK);
        if (ccb == NULL) {
                printf("%s: unable to allocate CCBs\n", sc->sc_dv.dv_xname);
                return;
        }

        sc->sc_ccbs = ccb;
        tc = (struct twe_cmd *)sc->sc_cmds;

        for (i = 0; i < TWE_MAX_QUEUECNT; i++, tc++, ccb++) {
                ccb->ccb_cmd = tc;
                ccb->ccb_cmdid = i;
                ccb->ccb_flags = 0;
                rv = bus_dmamap_create(sc->sc_dmat, TWE_MAX_XFER,
                    TWE_MAX_SEGS, NBPG, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
                    &ccb->ccb_dmamap_xfer);
                if (rv != 0)
                        break;
                SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, ccb_chain.slist);
        }
        if (i != TWE_MAX_QUEUECNT)
                printf("%s: %d/%d CCBs usable\n", sc->sc_dv.dv_xname, i,
                    TWE_MAX_QUEUECNT);

        /* Wait for the controller to become ready. */
        if (twe_status_wait(sc, TWE_STS_MICROCONTROLLER_READY, 6)) {
                printf("%s: microcontroller not ready\n", sc->sc_dv.dv_xname);
                return;
        }

        TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_DISABLE_INTRS);

        /* Reset the controller. */
        if (twe_reset(sc)) {
                printf("%s: reset failed\n", sc->sc_dv.dv_xname);
                return;
        }

        /* Find attached drives.  XXX Magic numbers. */
        if ((dtp = twe_param_get(sc, 3, 3, TWE_MAX_UNITS, NULL)) == NULL) {
                printf("%s: can't detect attached units\n",
                    sc->sc_dv.dv_xname);
                return;
        }

        /* For each detected unit, collect size and store in an array. */
        for (i = 0; i < TWE_MAX_UNITS; i++) {
                /* Unit present? */
                if (dtp->tp_data[i] == 0) {
                        sc->sc_dsize[i] = 0;
                        continue;
                }

                ctp = twe_param_get(sc, TWE_UNIT_INFORMATION_TABLE_BASE + i,
                    4, 4, NULL);
                if (ctp == NULL) {
                        printf("%s: error fetching capacity for unit %d\n",
                            sc->sc_dv.dv_xname, i);
                        continue;
                }

                sc->sc_dsize[i] = le32toh(*(u_int32_t *)ctp->tp_data);
                free(ctp, M_DEVBUF);
        }
        free(dtp, M_DEVBUF);

        /* Initialise connection with controller and enable interrupts. */
        twe_init_connection(sc);
        TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR |
            TWE_CTL_UNMASK_RESP_INTR |
            TWE_CTL_ENABLE_INTRS);

        /* Attach sub-devices. */
        for (i = 0; i < TWE_MAX_UNITS; i++) {
                if (sc->sc_dsize[i] == 0)
                        continue;
                twea.twea_unit = i;
                config_found_sm(&sc->sc_dv, &twea, twe_print, twe_submatch);
        }
}

/*
 * Reset the controller.  Currently only useful at attach time; must be
 * called with interrupts blocked.
 */
static int
twe_reset(struct twe_softc *sc)
{
        struct twe_param *tp;
        u_int aen, status;
        volatile u_int32_t junk;
        int got;

        /* Issue a soft reset. */
        TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_ISSUE_SOFT_RESET |
            TWE_CTL_CLEAR_HOST_INTR |
            TWE_CTL_CLEAR_ATTN_INTR |
            TWE_CTL_MASK_CMD_INTR |
            TWE_CTL_MASK_RESP_INTR |
            TWE_CTL_CLEAR_ERROR_STS |
            TWE_CTL_DISABLE_INTRS);

        if (twe_status_wait(sc, TWE_STS_ATTN_INTR, 15)) {
                printf("%s: no attention interrupt\n",
                    sc->sc_dv.dv_xname);
                return (-1);
        }

        /* Pull AENs out of the controller; look for a soft reset AEN. */
        for (got = 0;;) {
                /* XXX Magic numbers. */
                if ((tp = twe_param_get(sc, 0x401, 2, 2, NULL)) == NULL)
                        return (-1);
                aen = le16toh(*(u_int16_t *)tp->tp_data);
                free(tp, M_DEVBUF);
                if (aen == TWE_AEN_QUEUE_EMPTY)
                        break;
                if (aen == TWE_AEN_SOFT_RESET)
                        got = 1;
        }
        if (!got) {
                printf("%s: reset not reported\n", sc->sc_dv.dv_xname);
                return (-1);
        }

        /* Check controller status. */
        status = TWE_INL(sc, TWE_REG_STS);
        if (twe_status_check(sc, status)) {
                printf("%s: controller errors detected\n",
                    sc->sc_dv.dv_xname);
                return (-1);
        }

        /* Drain the response queue. */
        for (;;) {
                status = TWE_INL(sc, TWE_REG_STS);
                if (twe_status_check(sc, status) != 0) {
                        printf("%s: can't drain response queue\n",
                            sc->sc_dv.dv_xname);
                        return (-1);
                }
                if ((status & TWE_STS_RESP_QUEUE_EMPTY) != 0)
                        break;
                junk = TWE_INL(sc, TWE_REG_RESP_QUEUE);
        }

        return (0);
}

/*
 * Print autoconfiguration message for a sub-device.
 */
static int
twe_print(void *aux, const char *pnp)
{
        struct twe_attach_args *twea;

        twea = aux;

        if (pnp != NULL)
                printf("block device at %s", pnp);
        printf(" unit %d", twea->twea_unit);
        return (UNCONF);
}

/*
 * Match a sub-device.
 */
static int
twe_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
        struct twe_attach_args *twea;

        twea = aux;

        if (cf->tweacf_unit != TWECF_UNIT_DEFAULT &&
            cf->tweacf_unit != twea->twea_unit)
                return (0);

        return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Interrupt service routine.
 */
static int
twe_intr(void *arg)
{
        struct twe_softc *sc;
        u_int status;
        int caught;

        sc = arg;
        caught = 0;
        status = TWE_INL(sc, TWE_REG_STS);
        twe_status_check(sc, status);

        /* Host interrupts - purpose unknown. */
        if ((status & TWE_STS_HOST_INTR) != 0) {
#ifdef DIAGNOSTIC
                printf("%s: host interrupt\n", sc->sc_dv.dv_xname);
#endif
                TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_CLEAR_HOST_INTR);
                caught = 1;
        }

        /*
         * Attention interrupts, signalled when a controller or child device
         * state change has occurred.
         */
        if ((status & TWE_STS_ATTN_INTR) != 0) {
                /* XXX Magic numbers. */
                twe_param_get(sc, 0x401, 2, 2, twe_aen_handler);
                TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
                caught = 1;
        }

        /*
         * Command interrupts, signalled when the controller can accept more
         * commands.  We don't use this; instead, we try to submit commands
         * when we receive them, and when other commands have completed.
         * Mask it so we don't get another one.
         */
        if ((status & TWE_STS_CMD_INTR) != 0) {
#ifdef DIAGNOSTIC
                printf("%s: command interrupt\n", sc->sc_dv.dv_xname);
#endif
                TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_MASK_CMD_INTR);
                caught = 1;
        }

        if ((status & TWE_STS_RESP_INTR) != 0) {
                twe_poll(sc);
                caught = 1;
        }

        return (caught);
}

/*
 * Handle an AEN returned by the controller.
 */
static void
twe_aen_handler(struct twe_ccb *ccb, int error)
{
        struct twe_softc *sc;
        struct twe_param *tp;
        const char *str;
        u_int aen;
        int i;

        sc = (struct twe_softc *)ccb->ccb_tx.tx_dv;
        tp = ccb->ccb_tx.tx_context;
        twe_ccb_unmap(sc, ccb);

        if (error)
                printf("%s: error retrieving AEN\n", sc->sc_dv.dv_xname);
        else {
                aen = le16toh(*(u_int16_t *)tp->tp_data);
                str = "<unknown>";
                i = 0;
                while (i < sizeof(twe_aen_names) / sizeof(twe_aen_names[0])) {
                        if (twe_aen_names[i].aen == aen) {
                                str = twe_aen_names[i].desc;
                                break;
                        }
                        i++;
                }
                printf("%s: AEN 0x%04x (%s) received\n", sc->sc_dv.dv_xname,
                    aen, str);
        }

        free(tp, M_DEVBUF);
        twe_ccb_free(sc, ccb);
}

/*
 * Execute a TWE_OP_GET_PARAM command.  If a callback function is provided,
 * it will be called with generated context when the command has completed.
 * If no callback is provided, the command will be executed synchronously
 * and the data returned.
 *
 * The caller or callback is responsible for freeing the data.
 */
static void *
twe_param_get(struct twe_softc *sc, int table_id, int param_id, size_t size,
    void (*func)(struct twe_ccb *, int))
{
        struct twe_ccb *ccb;
        struct twe_cmd *tc;
        struct twe_param *tp;
        int rv, s;

        if (twe_ccb_alloc(sc, &ccb, 1) != 0) {
                /* XXX */
                return (NULL);
        }

        /* M_NOWAIT allocation can fail; don't dereference a NULL buffer. */
        if ((tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT)) == NULL) {
                twe_ccb_free(sc, ccb);
                return (NULL);
        }

        ccb->ccb_data = tp;
        ccb->ccb_datasize = TWE_SECTOR_SIZE;
        ccb->ccb_flags = TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT;
        ccb->ccb_tx.tx_handler = func;
        ccb->ccb_tx.tx_context = tp;
        ccb->ccb_tx.tx_dv = &sc->sc_dv;

        tc = ccb->ccb_cmd;
        tc->tc_size = 2;
        tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
        tc->tc_unit = 0;
        tc->tc_count = htole16(1);

        /* Fill in the outbound parameter data. */
        tp->tp_table_id = htole16(table_id);
        tp->tp_param_id = param_id;
        tp->tp_param_size = size;

        /* Map the transfer. */
        if (twe_ccb_map(sc, ccb) != 0) {
                twe_ccb_free(sc, ccb);
                free(tp, M_DEVBUF);
                return (NULL);
        }

        /* Submit the command and either wait or let the callback handle it. */
        if (func == NULL) {
                s = splbio();
                if ((rv = twe_ccb_submit(sc, ccb)) == 0)
                        rv = twe_ccb_poll(sc, ccb, 5);
                twe_ccb_unmap(sc, ccb);
                twe_ccb_free(sc, ccb);
                splx(s);
                if (rv != 0) {
                        free(tp, M_DEVBUF);
                        tp = NULL;
                }
        } else {
                twe_ccb_enqueue(sc, ccb);
                tp = NULL;
        }

        return (tp);
}
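
/*
 * Example (sketch only, not part of the driver): typical uses of
 * twe_param_get().  The synchronous form mirrors the attach-time code
 * above; the asynchronous form mirrors the AEN path in twe_intr().
 * `table', `param', `size' and `my_handler' are illustrative placeholders.
 *
 *      struct twe_param *tp;
 *
 *      Synchronous - no callback, data returned directly:
 *
 *              if ((tp = twe_param_get(sc, table, param, size, NULL)) != NULL) {
 *                      ... interpret tp->tp_data ...
 *                      free(tp, M_DEVBUF);
 *              }
 *
 *      Asynchronous - the callback unmaps the CCB and consumes and frees
 *      the data when the command completes:
 *
 *              twe_param_get(sc, table, param, size, my_handler);
 */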

/*
 * Execute a TWE_OP_INIT_CONNECTION command.  Return non-zero on error.
 * Must be called with interrupts blocked.
 */
static int
twe_init_connection(struct twe_softc *sc)
{
        struct twe_ccb *ccb;
        struct twe_cmd *tc;
        int rv;

        if ((rv = twe_ccb_alloc(sc, &ccb, 1)) != 0)
                return (rv);

        /* Build the command. */
        tc = ccb->ccb_cmd;
        tc->tc_size = 3;
        tc->tc_opcode = TWE_OP_INIT_CONNECTION;
        tc->tc_unit = 0;
        tc->tc_count = htole16(TWE_INIT_MESSAGE_CREDITS);
        tc->tc_args.init_connection.response_queue_pointer = 0;

        /* Submit the command for immediate execution. */
        if ((rv = twe_ccb_submit(sc, ccb)) == 0)
                rv = twe_ccb_poll(sc, ccb, 5);
        twe_ccb_free(sc, ccb);
        return (rv);
}

/*
 * Poll the controller for completed commands.  Must be called with
 * interrupts blocked.
 */
static void
twe_poll(struct twe_softc *sc)
{
        struct twe_ccb *ccb;
        int found;
        u_int status, cmdid;

        found = 0;

        for (;;) {
                status = TWE_INL(sc, TWE_REG_STS);
                twe_status_check(sc, status);

                if ((status & TWE_STS_RESP_QUEUE_EMPTY))
                        break;

                found = 1;
                cmdid = TWE_INL(sc, TWE_REG_RESP_QUEUE);
                cmdid = (cmdid & TWE_RESP_MASK) >> TWE_RESP_SHIFT;
                if (cmdid >= TWE_MAX_QUEUECNT) {
                        printf("%s: bad completion\n", sc->sc_dv.dv_xname);
                        continue;
                }

                ccb = sc->sc_ccbs + cmdid;
                if ((ccb->ccb_flags & TWE_CCB_ACTIVE) == 0) {
                        printf("%s: bad completion (not active)\n",
                            sc->sc_dv.dv_xname);
                        continue;
                }
                ccb->ccb_flags ^= TWE_CCB_COMPLETE | TWE_CCB_ACTIVE;

                bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
                    (caddr_t)ccb->ccb_cmd - sc->sc_cmds,
                    sizeof(struct twe_cmd),
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

                /* Pass notification to upper layers. */
                if (ccb->ccb_tx.tx_handler != NULL)
                        (*ccb->ccb_tx.tx_handler)(ccb,
                            ccb->ccb_cmd->tc_status != 0 ? EIO : 0);
        }

        /* If any commands have completed, run the software queue. */
        if (found)
                twe_ccb_enqueue(sc, NULL);
}

/*
 * Wait for `status' to be set in the controller status register.  Return
 * zero if found, non-zero if the operation timed out.
 */
static int
twe_status_wait(struct twe_softc *sc, u_int32_t status, int timo)
{

        for (; timo != 0; timo--) {
                if ((TWE_INL(sc, TWE_REG_STS) & status) == status)
                        break;
                delay(100000);
        }

        return (timo == 0);
}

/*
 * Complain if the status bits aren't what we expect.
 */
static int
twe_status_check(struct twe_softc *sc, u_int status)
{
        int rv;

        rv = 0;

        if ((status & TWE_STS_EXPECTED_BITS) != TWE_STS_EXPECTED_BITS) {
                printf("%s: missing status bits: 0x%08x\n", sc->sc_dv.dv_xname,
                    status & ~TWE_STS_EXPECTED_BITS);
                rv = -1;
        }

        if ((status & TWE_STS_UNEXPECTED_BITS) != 0) {
                printf("%s: unexpected status bits: 0x%08x\n",
                    sc->sc_dv.dv_xname, status & TWE_STS_UNEXPECTED_BITS);
                rv = -1;
        }

        return (rv);
}
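
/*
 * Overview (descriptive only): the CCB helpers below implement the command
 * life cycle used throughout this driver, roughly:
 *
 *      twe_ccb_alloc()   - take a CCB and its command block off the free list
 *      twe_ccb_map()     - map the data buffer and build the S/G list
 *      twe_ccb_submit()  - post the command block to the controller, or
 *      twe_ccb_enqueue() - defer it on the software queue if the controller
 *                          is busy
 *      twe_ccb_poll()    - busy-wait for completion (synchronous callers);
 *                          asynchronous callers have their tx_handler called
 *                          from twe_poll() instead
 *      twe_ccb_unmap()   - undo the mapping and copy back any bounce data
 *      twe_ccb_free()    - return the CCB to the free list
 */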

/*
 * Allocate and initialise a CCB.
 */
int
twe_ccb_alloc(struct twe_softc *sc, struct twe_ccb **ccbp, int nowait)
{
        struct twe_cmd *tc;
        struct twe_ccb *ccb;
        int s;

        s = splbio();

        /* Allocate a CCB and command block. */
        if (SLIST_FIRST(&sc->sc_ccb_freelist) == NULL) {
                if (nowait) {
                        splx(s);
                        return (EAGAIN);
                }
                sc->sc_ccb_waitcnt++;
                tsleep(&sc->sc_ccb_waitcnt, PRIBIO, "twepaqc", 0);
        }
        ccb = SLIST_FIRST(&sc->sc_ccb_freelist);
        SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);

        /* Initialise some fields and return. */
        ccb->ccb_tx.tx_handler = NULL;
        tc = ccb->ccb_cmd;
        tc->tc_status = 0;
        tc->tc_flags = 0;
        tc->tc_cmdid = ccb->ccb_cmdid;

        splx(s);
        *ccbp = ccb;
        return (0);
}

/*
 * Free a CCB.  Wake one process that's waiting for a free CCB, if any.
 */
void
twe_ccb_free(struct twe_softc *sc, struct twe_ccb *ccb)
{
        int s;

        ccb->ccb_flags = 0;

        s = splbio();
        SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, ccb_chain.slist);
        if (sc->sc_ccb_waitcnt != 0) {
                sc->sc_ccb_waitcnt--;
                wakeup_one(&sc->sc_ccb_waitcnt);
        }
        splx(s);
}

/*
 * Map the specified CCB's command block and data buffer (if any) into
 * controller-visible space.  Perform DMA synchronisation.
 */
int
twe_ccb_map(struct twe_softc *sc, struct twe_ccb *ccb)
{
        struct twe_cmd *tc;
        int flags, nsegs, i, s;
        void *data;

        /* The data as a whole must be 512-byte aligned. */
        if (((u_long)ccb->ccb_data & (TWE_ALIGNMENT - 1)) != 0) {
                s = splimp();
                /* XXX */
                ccb->ccb_abuf = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
                    ccb->ccb_datasize, UVM_KMF_NOWAIT);
                splx(s);
                data = (void *)ccb->ccb_abuf;
                if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
                        memcpy(data, ccb->ccb_data, ccb->ccb_datasize);
        } else {
                ccb->ccb_abuf = (vaddr_t)0;
                data = ccb->ccb_data;
        }

        /* Map the data buffer into bus space and build the S/G list. */
        bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer, data,
            ccb->ccb_datasize, NULL, BUS_DMA_NOWAIT);

        nsegs = ccb->ccb_dmamap_xfer->dm_nsegs;
        tc = ccb->ccb_cmd;
        tc->tc_size += 2 * nsegs;

        /* The location of the S/G list is dependent upon command type. */
        switch (tc->tc_opcode >> 5) {
        case 2:
                for (i = 0; i < nsegs; i++) {
                        tc->tc_args.param.sgl[i].tsg_address =
                            htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
                        tc->tc_args.param.sgl[i].tsg_length =
                            htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
                }
                /* XXX Needed? */
                for (; i < TWE_SG_SIZE; i++) {
                        tc->tc_args.param.sgl[i].tsg_address = 0;
                        tc->tc_args.param.sgl[i].tsg_length = 0;
                }
                break;
        case 3:
                for (i = 0; i < nsegs; i++) {
                        tc->tc_args.io.sgl[i].tsg_address =
                            htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
                        tc->tc_args.io.sgl[i].tsg_length =
                            htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
                }
                /* XXX Needed? */
                for (; i < TWE_SG_SIZE; i++) {
                        tc->tc_args.io.sgl[i].tsg_address = 0;
                        tc->tc_args.io.sgl[i].tsg_length = 0;
                }
                break;
#ifdef DEBUG
        default:
                panic("twe_ccb_map: oops");
#endif
        }

        if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
                flags = BUS_DMASYNC_PREREAD;
        else
                flags = 0;
        if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
                flags |= BUS_DMASYNC_PREWRITE;

        bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
            ccb->ccb_datasize, flags);
        return (0);
}

/*
 * Unmap the specified CCB's command block and data buffer (if any) and
 * perform DMA synchronisation.
 */
void
twe_ccb_unmap(struct twe_softc *sc, struct twe_ccb *ccb)
{
        int flags, s;

        if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
                flags = BUS_DMASYNC_POSTREAD;
        else
                flags = 0;
        if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
                flags |= BUS_DMASYNC_POSTWRITE;

        bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
            ccb->ccb_datasize, flags);
        bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);

        if (ccb->ccb_abuf != (vaddr_t)0) {
                if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
                        memcpy(ccb->ccb_data, (void *)ccb->ccb_abuf,
                            ccb->ccb_datasize);
                s = splimp();
                /* XXX */
                uvm_km_free(kmem_map, ccb->ccb_abuf, ccb->ccb_datasize);
                splx(s);
        }
}

/*
 * Wait for the specified CCB to complete.  Return non-zero on timeout (but
 * don't check status, as some command types don't return status).  Must be
 * called with interrupts blocked.
 */
int
twe_ccb_poll(struct twe_softc *sc, struct twe_ccb *ccb, int timo)
{

        for (; timo != 0; timo--) {
                twe_poll(sc);
                if ((ccb->ccb_flags & TWE_CCB_COMPLETE) != 0)
                        break;
                DELAY(100000);
        }

        return (timo == 0);
}

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
twe_ccb_enqueue(struct twe_softc *sc, struct twe_ccb *ccb)
{
        int s;

        s = splbio();

        if (ccb != NULL)
                SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);

        while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) {
                if (twe_ccb_submit(sc, ccb))
                        break;
                SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);
        }

        splx(s);
}

/*
 * Submit the command block associated with the specified CCB to the
 * controller for execution.  Must be called with interrupts blocked.
 */
int
twe_ccb_submit(struct twe_softc *sc, struct twe_ccb *ccb)
{
        bus_addr_t pa;
        int rv;
        u_int status;

        /* Check to see if we can post a command. */
        status = TWE_INL(sc, TWE_REG_STS);
        twe_status_check(sc, status);

        if ((status & TWE_STS_CMD_QUEUE_FULL) == 0) {
                bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
                    (caddr_t)ccb->ccb_cmd - sc->sc_cmds, sizeof(struct twe_cmd),
                    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
                ccb->ccb_flags |= TWE_CCB_ACTIVE;
                pa = sc->sc_cmds_paddr +
                    ccb->ccb_cmdid * sizeof(struct twe_cmd);
                TWE_OUTL(sc, TWE_REG_CMD_QUEUE, (u_int32_t)pa);
                rv = 0;
        } else
                rv = EBUSY;

        return (rv);
}