/*	$NetBSD: dpt.c,v 1.13 1999/11/29 15:04:23 ad Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andy Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
 * Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Portions of this code fall under the following copyright:
 *
 * Originally written by Julian Elischer (julian@tfs.com)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 */

/*
 * Driver for DPT EATA SCSI adapters.
 *
 * TODO:
 *
 * o Need a front-end for (newer) ISA boards.
 * o Handle older firmware better.
 * o Find a bunch of different firmware EEPROMs and try them out.
 * o Test with a bunch of different boards.
 * o dpt_readcfg() should not be using CP_PIO_GETCFG.
 * o An interface to userland applications.
 * o Some sysctls or a utility (eg dptctl(8)) to control parameters.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.13 1999/11/29 15:04:23 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>

#include <machine/bswap.h>
#include <machine/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/dptreg.h>
#include <dev/ic/dptvar.h>

/* A default for our link struct */
static struct scsipi_device dpt_dev = {
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};

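/*
 * Table of model number prefixes and their marketing names, used when
 * identifying the board at attach time.  The list is terminated by a
 * NULL prefix whose partner string covers unrecognized boards.
 */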
static char *dpt_cname[] = {
	"PM3334", "SmartRAID IV",
	"PM3332", "SmartRAID IV",
	"PM2144", "SmartCache IV",
	"PM2044", "SmartCache IV",
	"PM2142", "SmartCache IV",
	"PM2042", "SmartCache IV",
	"PM2041", "SmartCache IV",
	"PM3224", "SmartRAID III",
	"PM3222", "SmartRAID III",
	"PM3021", "SmartRAID III",
	"PM2124", "SmartCache III",
	"PM2024", "SmartCache III",
	"PM2122", "SmartCache III",
	"PM2022", "SmartCache III",
	"PM2021", "SmartCache III",
	"SK2012", "SmartCache Plus",
	"SK2011", "SmartCache Plus",
	NULL, "unknown adapter, please report using send-pr(1)",
};

/*
 * Handle an interrupt from the HBA.
 */
int
dpt_intr(xxx_sc)
	void *xxx_sc;
{
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sp *sp;
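	/* Static so the HA_ST_MORE back-off persists across interrupts */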
	static int moretimo;
	int more;

	sc = xxx_sc;
	sp = sc->sc_statpack;
	more = 0;

#ifdef DEBUG
	if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
		printf("%s: spurious intr\n", sc->sc_dv.dv_xname);
#endif

	/* Don't get stalled by HA_ST_MORE */
	if (moretimo < DPT_MORE_TIMEOUT / 100)
		moretimo = 0;

	for (;;) {
		/*
		 * HBA might have interrupted while we were dealing with the
		 * last completed command, since we ACK before we deal; keep
		 * polling.  If no interrupt is signalled, but the HBA has
		 * indicated that more data will be available soon, hang
		 * around.
		 */
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0) {
			if (more != 0 && moretimo++ < DPT_MORE_TIMEOUT / 100) {
				DELAY(10);
				continue;
			}
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_spoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);

		/* Might have looped before the HBA could reset HA_AUX_INTR */
		if (sp->sp_ccbid == -1) {
			DELAY(50);
#ifdef DIAGNOSTIC
			printf("%s: slow reset of HA_AUX_STATUS?",
			    sc->sc_dv.dv_xname);
#endif
			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
				return (0);
#ifdef DIAGNOSTIC
			printf("%s: was a slow reset of HA_AUX_STATUS",
			    sc->sc_dv.dv_xname);
#endif
			/* Re-sync DMA map */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb,
			    sc->sc_spoff, sizeof(struct eata_sp),
			    BUS_DMASYNC_POSTREAD);
		}

		/* Make sure CCB ID from status packet is realistic */
		if (sp->sp_ccbid >= 0 && sp->sp_ccbid < sc->sc_nccbs) {
			/* Sync up DMA map and cache cmd status */
			ccb = sc->sc_ccbs + sp->sp_ccbid;

			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb,
			    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
			    BUS_DMASYNC_POSTWRITE);

			ccb->ccb_hba_status = sp->sp_hba_status & 0x7F;
			ccb->ccb_scsi_status = sp->sp_scsi_status;

			/*
			 * Ack the interrupt and process the CCB.  If this
			 * is a private CCB it's up to dpt_poll() to notice.
			 */
			sp->sp_ccbid = -1;
			ccb->ccb_flg |= CCB_INTR;
			more = dpt_inb(sc, HA_STATUS) & HA_ST_MORE;
			if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
				dpt_done_ccb(sc, ccb);
		} else {
			printf("%s: bogus status (returned CCB id %d)\n",
			    sc->sc_dv.dv_xname, sp->sp_ccbid);

			/* Ack the interrupt */
			sp->sp_ccbid = -1;
			more = dpt_inb(sc, HA_STATUS) & HA_ST_MORE;
		}

		/* Don't get stalled by HA_ST_MORE */
		if (moretimo < DPT_MORE_TIMEOUT / 100)
			moretimo = 0;
	}

	return (0);
}

/*
 * Initialize and attach the HBA.  This is the entry point from bus
 * specific probe-and-attach code.
 */
void
dpt_init(sc, intrstr)
	struct dpt_softc *sc;
	const char *intrstr;
{
	struct eata_inquiry_data *ei;
	int i, j, error, rseg, mapsize;
	bus_dma_segment_t seg;
	struct eata_cfg *ec;
	char model[16];

	ec = &sc->sc_ec;

	/* Allocate the CCB/status packet/scratch DMA map and load */
	sc->sc_nccbs = min(be16toh(*(int16_t *)ec->ec_queuedepth),
	    DPT_MAX_CCBS);
	sc->sc_spoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
	sc->sc_scroff = sc->sc_spoff + sizeof(struct eata_sp);
	sc->sc_scrlen = 256; /* XXX */
	mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) + sc->sc_scrlen +
	    sizeof(struct eata_sp);
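	/*
	 * The DMA area is laid out as the CCB array, followed by one EATA
	 * status packet, followed by the scratch area; sc_spoff and
	 * sc_scroff are the byte offsets of the latter two.
	 */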

	if ((error = bus_dmamem_alloc(sc->sc_dmat, mapsize, NBPG, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate CCBs, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
	    (caddr_t *)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map CCBs, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, mapsize, 1, mapsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap_ccb)) != 0) {
		printf("%s: unable to create CCB DMA map, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_ccb,
	    sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load CCB DMA map, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	sc->sc_statpack = (struct eata_sp *)((caddr_t)sc->sc_ccbs +
	    sc->sc_spoff);
	sc->sc_sppa = sc->sc_dmamap_ccb->dm_segs[0].ds_addr + sc->sc_spoff;
	sc->sc_scr = (caddr_t)sc->sc_ccbs + sc->sc_scroff;
	sc->sc_scrpa = sc->sc_dmamap_ccb->dm_segs[0].ds_addr + sc->sc_scroff;
	sc->sc_statpack->sp_ccbid = -1;

	/* Initialize the CCBs */
	TAILQ_INIT(&sc->sc_free_ccb);
	i = dpt_create_ccbs(sc, sc->sc_ccbs, sc->sc_nccbs);

	if (i == 0) {
		printf("%s: unable to create CCBs\n", sc->sc_dv.dv_xname);
		return;
	} else if (i != sc->sc_nccbs) {
		printf("%s: %d/%d CCBs created!\n", sc->sc_dv.dv_xname, i,
		    sc->sc_nccbs);
		sc->sc_nccbs = i;
	}

	/* Set shutdownhook before we start any device activity */
	sc->sc_sdh = shutdownhook_establish(dpt_shutdown, sc);

	/* Get the page 0 inquiry data from the HBA */
	dpt_hba_inquire(sc, &ei);

	/*
	 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
	 * dpt0: interrupting at irq 10
	 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
	 */
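
	/* Inquiry fields are space padded: trim the vendor, join model and suffix */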
	for (i = 0; ei->ei_vendor[i] != ' ' && i < 8; i++)
		;
	ei->ei_vendor[i] = '\0';

	for (i = 0; ei->ei_model[i] != ' ' && i < 7; i++)
		model[i] = ei->ei_model[i];
	for (j = 0; ei->ei_suffix[j] != ' ' && j < 7; j++)
		model[i++] = ei->ei_suffix[j];
	model[i] = '\0';

	/* Find the canonical name for the board */
	for (i = 0; dpt_cname[i] != NULL; i += 2)
		if (memcmp(ei->ei_model, dpt_cname[i], 6) == 0)
			break;

	printf("%s %s (%s)\n", ei->ei_vendor, dpt_cname[i + 1], model);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr);

	printf("%s: %d queued commands, %d channel(s), adapter on ID(s)",
	    sc->sc_dv.dv_xname, sc->sc_nccbs, ec->ec_maxchannel + 1);

	for (i = 0; i <= ec->ec_maxchannel; i++)
		printf(" %d", ec->ec_hba[3 - i]);
	printf("\n");

	/* Reset the SCSI bus */
	if (dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_BUS_RESET))
		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
	DELAY(20000);
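	/* Give devices a moment to settle after the bus reset */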

	/* Fill in the adapter, each link and attach in turn */
	sc->sc_adapter.scsipi_cmd = dpt_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = dpt_minphys;

	for (i = 0; i <= ec->ec_maxchannel; i++) {
		struct scsipi_link *link;

		sc->sc_hbaid[i] = ec->ec_hba[3 - i];
		link = &sc->sc_link[i];
		link->scsipi_scsi.channel = i;
		link->scsipi_scsi.adapter_target = sc->sc_hbaid[i];
		link->scsipi_scsi.max_lun = ec->ec_maxlun;
		link->scsipi_scsi.max_target = ec->ec_maxtarget;
		link->type = BUS_SCSI;
		link->device = &dpt_dev;
		link->adapter = &sc->sc_adapter;
		link->adapter_softc = sc;
		link->openings = sc->sc_nccbs;
		config_found(&sc->sc_dv, link, scsiprint);
	}
}

/*
 * Our 'shutdownhook' to cleanly shut down the HBA.  The HBA must flush
 * all data from its cache and mark array groups as clean.
 */
void
dpt_shutdown(xxx_sc)
	void *xxx_sc;
{
	struct dpt_softc *sc;

	sc = xxx_sc;
	printf("shutting down %s...", sc->sc_dv.dv_xname);
	dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_POWEROFF_WARN);
	DELAY(5000*1000);
	printf(" done\n");
}

/*
 * Send an EATA command to the HBA.
 */
int
dpt_cmd(sc, cp, addr, eatacmd, icmd)
	struct dpt_softc *sc;
	struct eata_cp *cp;
	u_int32_t addr;
	int eatacmd, icmd;
{
	int i;

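	/* Wait up to ~1 second (20000 * 50us) for the HBA to go non-busy */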
	for (i = 20000; i; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
			break;
		DELAY(50);
	}

	/* Not the most graceful way to handle this */
	if (i == 0) {
		printf("%s: HBA timeout on EATA command issue; aborting\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	if (cp == NULL)
		addr = 0;

	dpt_outl(sc, HA_DMA_BASE, (u_int32_t)addr);

	if (eatacmd == CP_IMMEDIATE) {
		if (cp == NULL) {
			/* XXX should really pass meaningful values */
			dpt_outb(sc, HA_ICMD_CODE2, 0);
			dpt_outb(sc, HA_ICMD_CODE1, 0);
		}
		dpt_outb(sc, HA_ICMD, icmd);
	}

	dpt_outb(sc, HA_COMMAND, eatacmd);
	return (0);
}

/*
 * Wait for the HBA to reach an arbitrary state.
 */
int
dpt_wait(sc, mask, state, ms)
	struct dpt_softc *sc;
	u_int8_t mask, state;
	int ms;
{

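	/* Convert ms to 100us polling ticks */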
	for (ms *= 10; ms; ms--) {
		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
			return (0);
		DELAY(100);
	}
	return (-1);
}

/*
 * Wait for the specified CCB to finish.  This is used when we may not be
 * able to sleep and/or interrupts are disabled (eg autoconfiguration).
 * The timeout value from the CCB is used.  This should only be used for
 * CCB_PRIVATE requests; otherwise the CCB will get recycled before we get
 * a look at it.
 */
int
dpt_poll(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int i;

#ifdef DEBUG
	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
		panic("dpt_poll: called for non-CCB_PRIVATE request\n");
#endif

	if ((ccb->ccb_flg & CCB_INTR) != 0)
		return (0);

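	/* ccb_timeout is in milliseconds; we poll every 50us, hence the * 20 */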
	for (i = ccb->ccb_timeout * 20; i; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
			dpt_intr(sc);
		if ((ccb->ccb_flg & CCB_INTR) != 0)
			return (0);
		DELAY(50);
	}
	return (-1);
}

/*
 * Read the EATA configuration from the HBA and perform some sanity checks.
 */
int
dpt_readcfg(sc)
	struct dpt_softc *sc;
{
	struct eata_cfg *ec;
	int i, j, stat;
	u_int16_t *p;

	ec = &sc->sc_ec;

	/* Older firmware may puke if we talk to it too soon after reset */
	dpt_outb(sc, HA_COMMAND, CP_RESET);
	DELAY(750000);

	for (i = 1000; i; i--) {
		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}

	if (i == 0) {
		printf("%s: HBA not ready after reset: %02x\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	while ((((stat = dpt_inb(sc, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
		/* RAID drives still spinning up? */
		if ((dpt_inb(sc, HA_ERROR) != 'D')
		    || (dpt_inb(sc, HA_ERROR + 1) != 'P')
		    || (dpt_inb(sc, HA_ERROR + 2) != 'T')) {
			printf("%s: HBA not ready\n", sc->sc_dv.dv_xname);
			return (-1);
		}
	}

	/*
	 * Issue the read-config command and wait for the data to appear.
	 * XXX we shouldn't be doing this with PIO, but it makes it a lot
	 * easier as no DMA setup is required.
	 */
	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
	memset(ec, 0, sizeof(*ec));
	i = ((int)&((struct eata_cfg *)0)->ec_cfglen +
	    sizeof(ec->ec_cfglen)) >> 1;
	p = (u_int16_t *)ec;
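	/* i now counts the 16-bit words up to and including ec_cfglen */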

	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
		printf("%s: cfg data didn't appear (status:%02x)\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/* Begin reading */
	while (i--)
		*p++ = dpt_inw(sc, HA_DATA);

	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
	    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
	    - sizeof(ec->ec_cfglen)))
		i = sizeof(struct eata_cfg)
		    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
		    - sizeof(ec->ec_cfglen);

	j = i + (int)(&(((struct eata_cfg *)0L)->ec_cfglen)) +
	    sizeof(ec->ec_cfglen);
	i >>= 1;

	while (i--)
		*p++ = dpt_inw(sc, HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		dpt_inw(sc, HA_DATA);

	/* Defaults for older Firmware */
	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;

	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
		printf("%s: HBA error\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (!ec->ec_hbavalid) {
		printf("%s: ec_hba field invalid\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
		printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (!ec->ec_dmasupported) {
		printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	return (0);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
void
dpt_minphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > DPT_MAX_XFER)
		bp->b_bcount = DPT_MAX_XFER;
	minphys(bp);
}

/*
 * Put a CCB onto the freelist.
 */
void
dpt_free_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int s;

	s = splbio();
	ccb->ccb_flg = 0;
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, ccb_chain);

	/* Wake anybody waiting for a free ccb */
	if (ccb->ccb_chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);
	splx(s);
}

/*
 * Initialize the specified CCB.
 */
int
dpt_init_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int error;

	/* Create the DMA map for this CCB's data */
	error = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER, DPT_SG_SIZE,
	    DPT_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &ccb->ccb_dmamap_xfer);

	if (error) {
		printf("%s: can't create ccb dmamap (%d)\n",
		    sc->sc_dv.dv_xname, error);
		return (error);
	}

	ccb->ccb_flg = 0;
	ccb->ccb_ccbpa = sc->sc_dmamap_ccb->dm_segs[0].ds_addr +
	    CCB_OFF(sc, ccb);
	return (0);
}

/*
 * Create a set of CCBs and add them to the free list.
 */
int
dpt_create_ccbs(sc, ccbstore, count)
	struct dpt_softc *sc;
	struct dpt_ccb *ccbstore;
	int count;
{
	struct dpt_ccb *ccb;
	int i, error;

	memset(ccbstore, 0, sizeof(struct dpt_ccb) * count);

	for (i = 0, ccb = ccbstore; i < count; i++, ccb++) {
		if ((error = dpt_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to init ccb, error = %d\n",
			    sc->sc_dv.dv_xname, error);
			break;
		}
		ccb->ccb_id = i;
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_chain);
	}

	return (i);
}

/*
 * Get a free CCB.  If none are available right now and we are permitted
 * to sleep, wait until one is returned to the free list; otherwise
 * return NULL.
 */
struct dpt_ccb *
dpt_alloc_ccb(sc, flg)
	struct dpt_softc *sc;
	int flg;
{
	struct dpt_ccb *ccb;
	int s;

	s = splbio();

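	/*
	 * Take the first free CCB.  If none are free and we may sleep,
	 * wait here until dpt_free_ccb() hands one back.
	 */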
	for (;;) {
		ccb = TAILQ_FIRST(&sc->sc_free_ccb);
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_chain);
			break;
		}
		if ((flg & XS_CTL_NOSLEEP) != 0) {
			splx(s);
			return (NULL);
		}
		tsleep(&sc->sc_free_ccb, PRIBIO, "dptccb", 0);
	}

	ccb->ccb_flg |= CCB_ALLOC;
	splx(s);
	return (ccb);
}

/*
 * We have a CCB which has been processed by the HBA, now we look to see how
 * the operation went.  CCBs marked with CCB_PRIVATE are not automatically
 * passed here by dpt_intr().
 */
void
dpt_done_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	struct scsipi_sense_data *s1, *s2;
	struct scsipi_xfer *xs;
	bus_dma_tag_t dmat;

	dmat = sc->sc_dmat;
	xs = ccb->ccb_xs;

	SC_DEBUG(xs->sc_link, SDEV_DB2, ("dpt_done_ccb\n"));

	/*
	 * If we were a data transfer, unload the map that described the
	 * data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->ccb_dmamap_xfer, 0,
		    ccb->ccb_dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->ccb_dmamap_xfer);
	}

	/*
	 * Otherwise, put the results of the operation into the xfer and
	 * call whoever started it.
	 */
#ifdef DIAGNOSTIC
	if ((ccb->ccb_flg & CCB_ALLOC) == 0) {
		panic("%s: done ccb not allocated!\n", sc->sc_dv.dv_xname);
		return;
	}
#endif

	if (xs->error == XS_NOERROR) {
		if (ccb->ccb_hba_status != HA_NO_ERROR) {
			switch (ccb->ccb_hba_status) {
			case HA_ERROR_SEL_TO:
				xs->error = XS_SELTIMEOUT;
				break;
			case HA_ERROR_RESET:
				xs->error = XS_RESET;
				break;
			default:	/* Other scsi protocol messes */
				printf("%s: HBA status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_hba_status);
				xs->error = XS_DRIVER_STUFFUP;
			}
		} else if (ccb->ccb_scsi_status != SCSI_OK) {
			switch (ccb->ccb_scsi_status) {
			case SCSI_CHECK:
				s1 = &ccb->ccb_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				printf("%s: SCSI status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_scsi_status);
				xs->error = XS_DRIVER_STUFFUP;
			}
		} else
			xs->resid = 0;

		xs->status = ccb->ccb_scsi_status;
	}

	/* Free up the CCB and mark the command as done */
	dpt_free_ccb(sc, ccb);
	xs->xs_status |= XS_STS_DONE;
	scsipi_done(xs);

	/*
	 * If there are entries in the software queue, try to run the first
	 * one.  We should be more or less guaranteed to succeed, since we
	 * just freed a CCB.  NOTE: dpt_scsi_cmd() relies on our calling it
	 * with the first entry in the queue.
	 */
	if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
		dpt_scsi_cmd(xs);
}

/*
 * Start a SCSI command.
 */
int
dpt_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	int error, i, flags, s, fromqueue, dontqueue;
	struct scsipi_link *sc_link;
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sg *sg;
	struct eata_cp *cp;
	bus_dma_tag_t dmat;
	bus_dmamap_t xfer;

	sc_link = xs->sc_link;
	flags = xs->xs_control;
	sc = sc_link->adapter_softc;
	dmat = sc->sc_dmat;
	fromqueue = 0;
	dontqueue = 0;

	SC_DEBUG(sc_link, SDEV_DB2, ("dpt_scsi_cmd\n"));

	/* Protect the queue */
	s = splbio();

	/*
	 * If we're running the queue from dpt_done_ccb(), we've been called
	 * with the first queue entry as our argument.
	 */
	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
		fromqueue = 1;
	} else {
		/* Cmds must be no more than 12 bytes for us */
		if (xs->cmdlen > 12) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (COMPLETE);
		}

		/* XXX we can't reset devices just yet */
		if ((flags & XS_CTL_RESET) != 0) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (COMPLETE);
		}

		/* Polled requests can't be queued for later */
		dontqueue = flags & XS_CTL_POLL;

		/* If there are jobs in the queue, run them first */
		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
			/*
			 * If we can't queue we abort, since we must
			 * preserve the queue order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}

			/* Swap with the first queue entry. */
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
			xs = TAILQ_FIRST(&sc->sc_queue);
			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
			fromqueue = 1;
		}
	}

	/* Get a CCB */
	if ((ccb = dpt_alloc_ccb(sc, flags)) == NULL) {
		/* If we can't queue, we lose */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}

		/*
		 * Stuff request into the queue, in front if we came off
		 * it in the first place.
		 */
		if (fromqueue)
			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
		else
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}

	splx(s);

	ccb->ccb_xs = xs;
	ccb->ccb_timeout = xs->timeout;

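	/* Fill in the EATA command packet (CP) for this request */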
	cp = &ccb->ccb_eata_cp;
	memcpy(&cp->cp_scsi_cmd, xs->cmd, xs->cmdlen);
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_id = sc_link->scsipi_scsi.target;
	cp->cp_lun = sc_link->scsipi_scsi.lun;
	cp->cp_channel = sc_link->scsipi_scsi.channel;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_stataddr = htobe32(sc->sc_sppa);
	cp->cp_dispri = 1;
	cp->cp_identify = 1;
	cp->cp_autosense = 1;
	cp->cp_datain = ((flags & XS_CTL_DATA_IN) != 0);
	cp->cp_dataout = ((flags & XS_CTL_DATA_OUT) != 0);
	cp->cp_interpret = (sc->sc_hbaid[sc_link->scsipi_scsi.channel] ==
	    sc_link->scsipi_scsi.target);

	/* Synchronous xfers mustn't write back through the cache */
	if (xs->bp != NULL && (xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
		cp->cp_nocache = 1;
	else
		cp->cp_nocache = 0;

	cp->cp_senseaddr = htobe32(sc->sc_dmamap_ccb->dm_segs[0].ds_addr +
	    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));

	if (xs->datalen != 0) {
		xfer = ccb->ccb_dmamap_xfer;
#ifdef TFS
		if ((flags & XS_CTL_DATA_UIO) != 0) {
			error = bus_dmamap_load_uio(dmat, xfer,
			    (struct uio *)xs->data, (flags & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat, xfer, xs->data,
			    xs->datalen, NULL, (flags & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		}

		if (error) {
			printf("%s: dpt_scsi_cmd: ", sc->sc_dv.dv_xname);
			if (error == EFBIG)
				printf("more than %d dma segs\n", DPT_SG_SIZE);
			else
				printf("error %d loading dma map\n", error);

			xs->error = XS_DRIVER_STUFFUP;
			dpt_free_ccb(sc, ccb);
			return (COMPLETE);
		}

		bus_dmamap_sync(dmat, xfer, 0, xfer->dm_mapsize,
		    (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);

		/* Don't bother using scatter/gather for just 1 segment */
		if (xfer->dm_nsegs == 1) {
			cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
			cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
			cp->cp_scatter = 0;
		} else {
			/*
			 * Load the hardware scatter/gather map with the
			 * contents of the DMA map.
			 */
			sg = ccb->ccb_sg;
			for (i = 0; i < xfer->dm_nsegs; i++, sg++) {
				sg->sg_addr = htobe32(xfer->dm_segs[i].ds_addr);
				sg->sg_len = htobe32(xfer->dm_segs[i].ds_len);
			}
			cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
			    sc->sc_dmamap_ccb->dm_segs[0].ds_addr +
			    offsetof(struct dpt_ccb, ccb_sg));
			cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
			cp->cp_scatter = 1;
		}
	} else {
		cp->cp_dataaddr = 0;
		cp->cp_datalen = 0;
		cp->cp_scatter = 0;
	}

	/* Sync up CCB and status packet */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_spoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);

	/*
	 * Start the command.  If we are polling on completion, mark it
	 * private so that dpt_intr/dpt_done_ccb don't recycle the CCB
	 * without us noticing.
	 */
	if (dontqueue != 0)
		ccb->ccb_flg |= CCB_PRIVATE;

	if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0)) {
		printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
		xs->error = XS_DRIVER_STUFFUP;
		dpt_free_ccb(sc, ccb);
		return (TRY_AGAIN_LATER);
	}

	if (dontqueue == 0)
		return (SUCCESSFULLY_QUEUED);

	/* Don't wait longer than this single command wants to wait */
	if (dpt_poll(sc, ccb)) {
		dpt_timeout(ccb);
		/* Wait for abort to complete */
		if (dpt_poll(sc, ccb))
			dpt_timeout(ccb);
	}

	dpt_done_ccb(sc, ccb);
	return (COMPLETE);
}

/*
 * Specified CCB has timed out, abort it.
 */
void
dpt_timeout(arg)
	void *arg;
{
	struct scsipi_link *sc_link;
	struct scsipi_xfer *xs;
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	int s;

	ccb = arg;
	xs = ccb->ccb_xs;
	sc_link = xs->sc_link;
	sc = sc_link->adapter_softc;

	scsi_print_addr(sc_link);
	printf("timed out (status:%02x aux status:%02x)",
	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));

	s = splbio();

	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
		/* Abort timed out, reset the HBA */
		printf(" AGAIN, resetting HBA\n");
		dpt_outb(sc, HA_COMMAND, CP_RESET);
		DELAY(750000);
	} else {
		/* Abort the operation that has timed out */
		printf("\n");
		ccb->ccb_xs->error = XS_TIMEOUT;
		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
		ccb->ccb_flg |= CCB_ABORT;
		/* Start the abort */
		if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa,
		    CP_IMMEDIATE, CPI_SPEC_ABORT))
			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
	}

	splx(s);
}

#ifdef DEBUG
/*
 * Dump the contents of an EATA status packet.
 */
void
dpt_dump_sp(sp)
	struct eata_sp *sp;
{
	int i;

	printf("\thba_status\t%02x\n", sp->sp_hba_status);
	printf("\tscsi_status\t%02x\n", sp->sp_scsi_status);
	printf("\tinv_residue\t%d\n", sp->sp_inv_residue);
	printf("\tccbid\t\t%d\n", sp->sp_ccbid);
	printf("\tid_message\t%d\n", sp->sp_id_message);
	printf("\tque_message\t%d\n", sp->sp_que_message);
	printf("\ttag_message\t%d\n", sp->sp_tag_message);
	printf("\tmessages\t");

	for (i = 0; i < 9; i++)
		printf("%d ", sp->sp_messages[i]);

	printf("\n");
}
#endif /* DEBUG */

/*
 * Get inquiry data from the adapter.
 */
void
dpt_hba_inquire(sc, ei)
	struct dpt_softc *sc;
	struct eata_inquiry_data **ei;
{
	struct dpt_ccb *ccb;
	struct eata_cp *cp;
	bus_dma_tag_t dmat;

	*ei = (struct eata_inquiry_data *)sc->sc_scr;
	dmat = sc->sc_dmat;

	/* Get a CCB and mark as private */
	if ((ccb = dpt_alloc_ccb(sc, 0)) == NULL)
		panic("%s: no CCB for inquiry", sc->sc_dv.dv_xname);

	ccb->ccb_flg |= CCB_PRIVATE;
	ccb->ccb_timeout = 200;
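	/* ccb_timeout is in milliseconds; see dpt_poll() */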

	/* Put all the arguments into the CCB */
	cp = &ccb->ccb_eata_cp;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_id = sc->sc_hbaid[0];
	cp->cp_lun = 0;
	cp->cp_channel = 0;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_stataddr = htobe32(sc->sc_sppa);
	cp->cp_dispri = 1;
	cp->cp_identify = 1;
	cp->cp_autosense = 0;
	cp->cp_interpret = 1;
	cp->cp_nocache = 0;
	cp->cp_datain = 1;
	cp->cp_dataout = 0;
	cp->cp_senseaddr = 0;
	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
	cp->cp_scatter = 0;

	/* Put together the SCSI inquiry command */
	memset(&cp->cp_scsi_cmd, 0, 12);	/* XXX */
	cp->cp_scsi_cmd = INQUIRY;
	cp->cp_len = sizeof(struct eata_inquiry_data);

	/* Sync up CCB, status packet and scratch area */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_spoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);

	/* Start the command and poll on completion */
	if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);

	if (dpt_poll(sc, ccb))
		panic("%s: inquiry timed out", sc->sc_dv.dv_xname);

	if (ccb->ccb_hba_status != HA_NO_ERROR ||
	    ccb->ccb_scsi_status != SCSI_OK)
		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
		    sc->sc_dv.dv_xname, ccb->ccb_hba_status,
		    ccb->ccb_scsi_status);

	/* Sync up the DMA map and free CCB, returning */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
	dpt_free_ccb(sc, ccb);
}