/*	$NetBSD: dpt.c,v 1.5 1999/09/30 17:15:54 ad Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andy Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
 * Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Portions of this code fall under the following copyright:
 *
 * Originally written by Julian Elischer (julian@tfs.com)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 */

/*
 * Driver for DPT EATA SCSI adapters.
 *
 * TODO:
 *
 * o Need a front-end for (newer) ISA boards.
 * o Handle older firmware better.
 * o Find a bunch of different firmware EEPROMs and try them out.
 * o Test with a bunch of different boards.
 * o dpt_readcfg() should not be using CP_PIO_GETCFG.
 * o An interface to userland applications.
 * o A port of DPT Storage Manager included in the base system would be nice.
 * o Some sysctls or a utility (eg dptctl(8)) to control parameters.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.5 1999/09/30 17:15:54 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>

#include <machine/endian.h>
#include <machine/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/dptreg.h>
#include <dev/ic/dptvar.h>

/* A default for our link struct */
static struct scsipi_device dpt_dev = {
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};

static char *dpt_cname[] = {
#ifdef notdef
	"PM3755", "SmartRAID V",
	"PM3754", "SmartRAID V",
	"PM2654", "SmartRAID V",
	"PM2554", "SmartRAID V",
	"PM1554", "SmartRAID V",
#endif
	"PM3334", "SmartRAID IV",
	"PM3332", "SmartRAID IV",
	"PM2144", "SmartCache IV",
	"PM2044", "SmartCache IV",
	"PM2142", "SmartCache IV",
	"PM2042", "SmartCache IV",
	"PM2041", "SmartCache IV",
	"PM3224", "SmartRAID III",
	"PM3222", "SmartRAID III",
	"PM3021", "SmartRAID III",
	"PM2124", "SmartCache III",
	"PM2024", "SmartCache III",
	"PM2122", "SmartCache III",
	"PM2022", "SmartCache III",
	"PM2021", "SmartCache III",
	"SK2012", "SmartCache Plus",
	"SK2011", "SmartCache Plus",
	NULL, "unknown adapter, please report using send-pr(1)",
};

void	dpt_shutdown __P((void *));
void	dpt_timeout __P((void *));
void	dpt_minphys __P((struct buf *));
int	dpt_scsi_cmd __P((struct scsipi_xfer *));
int	dpt_wait __P((struct dpt_softc *, u_int8_t, u_int8_t, int));
int	dpt_poll __P((struct dpt_softc *, struct dpt_ccb *));
int	dpt_cmd __P((struct dpt_softc *, struct eata_cp *, u_int32_t, int, int));
void	dpt_hba_inquire __P((struct dpt_softc *, struct eata_inquiry_data **));

void	dpt_reset_ccb __P((struct dpt_softc *, struct dpt_ccb *));
void	dpt_free_ccb __P((struct dpt_softc *, struct dpt_ccb *));
void	dpt_done_ccb __P((struct dpt_softc *, struct dpt_ccb *));
int	dpt_init_ccb __P((struct dpt_softc *, struct dpt_ccb *));
int	dpt_create_ccbs __P((struct dpt_softc *, struct dpt_ccb *, int));

struct dpt_ccb	*dpt_alloc_ccb __P((struct dpt_softc *, int));

#if 0 && defined(DEBUG)
static void	dpt_dump_sp __P((struct eata_sp *));
#endif

/*
 * Handle an interrupt from the HBA.
 */
int
dpt_intr(xxx_sc)
	void *xxx_sc;
{
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sp *sp;
	volatile int junk;

	sc = xxx_sc;
	sp = sc->sc_sp;

#ifdef DEBUG
	if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
		printf("%s: spurious intr\n", sc->sc_dv.dv_xname);
#endif

	/*
	 * HBA might have interrupted while we were dealing with the last
	 * completed command, since we ACK before we deal; keep polling.
	 */
	while ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_spoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);

		/* Might have looped before the HBA could clear HA_AUX_INTR */
		if (sp->sp_ccbid == -1) {
			DELAY(50);
#ifdef DIAGNOSTIC
			printf("%s: slow reset of HA_AUX_STATUS?",
			    sc->sc_dv.dv_xname);
#endif
			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
				return (0);
#ifdef DIAGNOSTIC
			printf("%s: was a slow reset of HA_AUX_STATUS",
			    sc->sc_dv.dv_xname);
#endif
			/* Re-sync DMA map */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb,
			    sc->sc_spoff, sizeof(struct eata_sp),
			    BUS_DMASYNC_POSTREAD);
		}

		/* Make sure CCB ID from status packet is realistic */
		if (sp->sp_ccbid >= 0 && sp->sp_ccbid < sc->sc_nccbs) {
			/* Sync up DMA map and cache cmd status */
			ccb = sc->sc_ccbs + sp->sp_ccbid;

			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb,
			    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
			    BUS_DMASYNC_POSTWRITE);

			ccb->ccb_hba_status = sp->sp_hba_status;
			ccb->ccb_scsi_status = sp->sp_scsi_status;

			/*
			 * Ack the interrupt and process the CCB.  If this
			 * is a private CCB it's up to dpt_poll() to notice.
			 */
			sp->sp_ccbid = -1;
			ccb->ccb_flg |= CCB_INTR;
			junk = dpt_inb(sc, HA_STATUS);
			if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
				dpt_done_ccb(sc, ccb);
		} else {
			printf("%s: bogus status (returned CCB id %d)\n",
			    sc->sc_dv.dv_xname, sp->sp_ccbid);

			/* Ack the interrupt */
			sp->sp_ccbid = -1;
			junk = dpt_inb(sc, HA_STATUS);
		}
	}

	return (0);
}

/*
 * Initialize and attach the HBA.  This is the entry point from bus
 * specific probe-and-attach code.
 */
void
dpt_init(sc, intrstr)
	struct dpt_softc *sc;
	const char *intrstr;
{
	struct eata_inquiry_data *ei;
	int i, j, error, rseg, mapsize;
	bus_dma_segment_t seg;
	struct eata_cfg *ec;
	char model[16];

	ec = &sc->sc_ec;

	/* Allocate the CCB/status packet/scratch DMA map and load */
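	/*
	 * Everything shares one DMA-safe area: the CCB array comes first,
	 * followed by the EATA status packet and then the scratch buffer.
	 * sc_spoff and sc_scroff are the byte offsets of the latter two.
	 */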
	sc->sc_nccbs = min(SWAP16(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
	sc->sc_spoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
	sc->sc_scroff = sc->sc_spoff + sizeof(struct eata_sp);
	sc->sc_scrlen = 256;	/* XXX */
	mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) + sc->sc_scrlen +
	    sizeof(struct eata_sp);

	if ((error = bus_dmamem_alloc(sc->sc_dmat, mapsize, NBPG, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate CCBs, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
	    (caddr_t *)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map CCBs, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, mapsize, mapsize, 1, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap_ccb)) != 0) {
		printf("%s: unable to create CCB DMA map, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_ccb,
	    sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load CCB DMA map, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	sc->sc_sp = (struct eata_sp *)((caddr_t)sc->sc_ccbs + sc->sc_spoff);
	sc->sc_sppa = sc->sc_dmamap_ccb->dm_segs[0].ds_addr + sc->sc_spoff;
	sc->sc_scr = (caddr_t)sc->sc_ccbs + sc->sc_scroff;
	sc->sc_scrpa = sc->sc_dmamap_ccb->dm_segs[0].ds_addr + sc->sc_scroff;
	sc->sc_sp->sp_ccbid = -1;

	/* Initialize the CCBs */
	TAILQ_INIT(&sc->sc_free_ccb);
	i = dpt_create_ccbs(sc, sc->sc_ccbs, sc->sc_nccbs);

	if (i == 0) {
		printf("%s: unable to create CCBs\n", sc->sc_dv.dv_xname);
		return;
	} else if (i != sc->sc_nccbs) {
		printf("%s: %d/%d CCBs created!\n", sc->sc_dv.dv_xname, i,
		    sc->sc_nccbs);
		sc->sc_nccbs = i;
	}

	/* Set shutdownhook before we start any device activity */
	sc->sc_sdh = shutdownhook_establish(dpt_shutdown, sc);

	/* Get the page 0 inquiry data from the HBA */
	dpt_hba_inquire(sc, &ei);

	/*
	 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
	 * dpt0: interrupting at irq 10
	 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
	 */
	for (i = 0; i < 8 && ei->ei_vendor[i] != ' '; i++)
		;
	ei->ei_vendor[i] = '\0';

	for (i = 0; i < 7 && ei->ei_model[i] != ' '; i++)
		model[i] = ei->ei_model[i];
	for (j = 0; j < 7 && ei->ei_suffix[j] != ' '; j++)
		model[i++] = ei->ei_suffix[j];
	model[i] = '\0';

	/* Find the canonical name for the board */
	for (i = 0; dpt_cname[i]; i += 2)
		if (memcmp(ei->ei_model, dpt_cname[i], 6) == 0)
			break;

	printf("%s %s (%s)\n", ei->ei_vendor, dpt_cname[i + 1], model);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr);

	printf("%s: %d queued commands, %d channel(s), adapter on ID(s)",
	    sc->sc_dv.dv_xname, sc->sc_nccbs, ec->ec_maxchannel + 1);

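	/* ec_hba[] stores the adapter IDs in reverse channel order */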
	for (i = 0; i <= ec->ec_maxchannel; i++)
		printf(" %d", ec->ec_hba[3 - i]);
	printf("\n");

	/* Reset the SCSI bus */
	if (dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_BUS_RESET))
		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
	DELAY(20000);

	/* Fill in the adapter, each link and attach in turn */
	sc->sc_adapter.scsipi_cmd = dpt_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = dpt_minphys;

	for (i = 0; i <= ec->ec_maxchannel; i++) {
		struct scsipi_link *link;

		sc->sc_hbaid[i] = ec->ec_hba[3 - i];
		link = &sc->sc_link[i];
		link->scsipi_scsi.channel = i;
		link->scsipi_scsi.adapter_target = sc->sc_hbaid[i];
		link->scsipi_scsi.max_lun = ec->ec_maxlun;
		link->scsipi_scsi.max_target = ec->ec_maxtarget;
		link->type = BUS_SCSI;
		link->device = &dpt_dev;
		link->adapter = &sc->sc_adapter;
		link->adapter_softc = sc;
		link->openings = sc->sc_nccbs;
		config_found(&sc->sc_dv, link, scsiprint);
	}
}

/*
 * Our 'shutdownhook' to cleanly shut down the HBA.  The HBA must flush
 * all data from its cache and mark array groups as clean.
 */
void
dpt_shutdown(xxx_sc)
	void *xxx_sc;
{
	struct dpt_softc *sc;

	sc = xxx_sc;
	printf("shutting down %s...", sc->sc_dv.dv_xname);
	dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_POWEROFF_WARN);
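	/* Allow the HBA around five seconds to flush its cache */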
	DELAY(5000*1000);
	printf(" done\n");
}

/*
 * Send an EATA command to the HBA.
 */
int
dpt_cmd(sc, cp, addr, eatacmd, icmd)
	struct dpt_softc *sc;
	struct eata_cp *cp;
	u_int32_t addr;
	int eatacmd, icmd;
{
	int i;

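	/* Wait up to about one second (20000 polls of 50us) for the HBA
	 * to go non-busy */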
	for (i = 20000; i; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
			break;
		DELAY(50);
	}

	/* Not the most graceful way to handle this */
	if (i == 0) {
		printf("%s: HBA timeout on EATA command issue; aborting\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	if (cp == NULL)
		addr = 0;

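	/* Hand the HBA the physical address of the command packet, one
	 * byte at a time, least significant byte first */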
	dpt_outb(sc, HA_DMA_BASE + 0, (u_int32_t)addr);
	dpt_outb(sc, HA_DMA_BASE + 1, (u_int32_t)addr >> 8);
	dpt_outb(sc, HA_DMA_BASE + 2, (u_int32_t)addr >> 16);
	dpt_outb(sc, HA_DMA_BASE + 3, (u_int32_t)addr >> 24);

	if (eatacmd == CP_IMMEDIATE) {
		if (cp == NULL) {
			/* XXX should really pass meaningful values */
			dpt_outb(sc, HA_ICMD_CODE2, 0);
			dpt_outb(sc, HA_ICMD_CODE1, 0);
		}
		dpt_outb(sc, HA_ICMD, icmd);
	}

	dpt_outb(sc, HA_COMMAND, eatacmd);
	return (0);
}

/*
 * Wait for the HBA to reach an arbitrary state.
 */
int
dpt_wait(sc, mask, state, ms)
	struct dpt_softc *sc;
	u_int8_t mask, state;
	int ms;
{

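	/* "ms" is in milliseconds: poll the status register every 100us */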
	for (ms *= 10; ms; ms--) {
		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
			return (0);
		DELAY(100);
	}
	return (-1);
}

/*
 * Wait for the specified CCB to finish.  This is used when we may not be
 * able to sleep and/or interrupts are disabled (eg autoconfiguration).
 * The timeout value from the CCB is used.  This should only be used for
 * CCB_PRIVATE requests; otherwise the CCB will get recycled before we get
 * a look at it.
 */
int
dpt_poll(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int i;

#ifdef DEBUG
	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
		panic("dpt_poll: called for non-CCB_PRIVATE request\n");
#endif

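	/* ccb_timeout is in milliseconds: poll 20 times per millisecond */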
	for (i = ccb->ccb_timeout * 20; i; i--) {
		if ((ccb->ccb_flg & CCB_INTR) != 0)
			return (0);
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
			dpt_intr(sc);
		if ((ccb->ccb_flg & CCB_INTR) != 0)
			return (0);
		DELAY(50);
	}
	return (-1);
}

/*
 * Read the EATA configuration from the HBA and perform some sanity checks.
 */
int
dpt_readcfg(sc)
	struct dpt_softc *sc;
{
	struct eata_cfg *ec;
	int i, j, stat;
	u_int16_t *p;

	ec = &sc->sc_ec;

	/* Older firmware may puke if we talk to it too soon after reset */
	dpt_outb(sc, HA_COMMAND, CP_RESET);
	DELAY(750000);

	for (i = 1000; i; i--) {
		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}

	if (i == 0) {
		printf("%s: HBA not ready after reset: %02x\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	while ((((stat = dpt_inb(sc, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
		/* RAID drives still spinning up? */
		if ((dpt_inb(sc, HA_ERROR) != 'D')
		    || (dpt_inb(sc, HA_ERROR + 1) != 'P')
		    || (dpt_inb(sc, HA_ERROR + 2) != 'T')) {
			printf("%s: HBA not ready\n", sc->sc_dv.dv_xname);
			return (-1);
		}
	}

	/*
	 * Issue the read-config command and wait for the data to appear.
	 * XXX we shouldn't be doing this with PIO, but it makes it a lot
	 * easier as no DMA setup is required.
	 */
	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
	memset(ec, 0, sizeof(*ec));
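	/*
	 * Read up to and including ec_cfglen first, so that we know how
	 * much more configuration data follows.
	 */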
	i = (offsetof(struct eata_cfg, ec_cfglen) +
	    sizeof(ec->ec_cfglen)) >> 1;
	p = (u_int16_t *)ec;

	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
		printf("%s: cfg data didn't appear\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Begin reading */
	while (i--)
		*p++ = dpt_inw(sc, HA_DATA);

	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
	    - offsetof(struct eata_cfg, ec_cfglen)
	    - sizeof(ec->ec_cfglen)))
		i = sizeof(struct eata_cfg)
		    - offsetof(struct eata_cfg, ec_cfglen)
		    - sizeof(ec->ec_cfglen);

	j = i + offsetof(struct eata_cfg, ec_cfglen) +
	    sizeof(ec->ec_cfglen);
	i >>= 1;

	while (i--)
		*p++ = dpt_inw(sc, HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		dpt_inw(sc, HA_DATA);

	/* Defaults for older firmware */
	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;

	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
		printf("%s: HBA error\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (!ec->ec_hbavalid) {
		printf("%s: ec_hba field invalid\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
		printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (!ec->ec_dmasupported) {
		printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	return (0);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
void
dpt_minphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > DPT_MAX_XFER)
		bp->b_bcount = DPT_MAX_XFER;
	minphys(bp);
}

/*
 * Put a CCB onto the freelist.
 */
void
dpt_free_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int s;

	s = splbio();
	ccb->ccb_flg = 0;
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, ccb_chain);

	/* Wake anybody waiting for a free ccb */
	if (ccb->ccb_chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);
	splx(s);
}

/*
 * Initialize the specified CCB.
 */
int
dpt_init_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int error;

	/* Create the DMA map for this CCB's data */
	error = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER, DPT_SG_SIZE,
	    DPT_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &ccb->ccb_dmamap_xfer);

	if (error) {
		printf("%s: can't create ccb dmamap (%d)\n",
		    sc->sc_dv.dv_xname, error);
		return (error);
	}

	ccb->ccb_flg = 0;
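	/* Physical address of this CCB within the shared DMA area */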
	ccb->ccb_ccbpa = sc->sc_dmamap_ccb->dm_segs[0].ds_addr +
	    CCB_OFF(sc, ccb);
	return (0);
}

/*
 * Create a set of CCBs and add them to the free list.
 */
int
dpt_create_ccbs(sc, ccbstore, count)
	struct dpt_softc *sc;
	struct dpt_ccb *ccbstore;
	int count;
{
	struct dpt_ccb *ccb;
	int i, error;

	memset(ccbstore, 0, sizeof(struct dpt_ccb) * count);

	for (i = 0, ccb = ccbstore; i < count; i++, ccb++) {
		if ((error = dpt_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to init ccb, error = %d\n",
			    sc->sc_dv.dv_xname, error);
			break;
		}
		ccb->ccb_id = i;
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_chain);
	}

	return (i);
}

/*
 * Get a free ccb.  If there are none, see if we can allocate a new one.
 * Otherwise either return an error or if we are permitted to, sleep until
 * one becomes free.
 */
struct dpt_ccb *
dpt_alloc_ccb(sc, flg)
	struct dpt_softc *sc;
	int flg;
{
	struct dpt_ccb *ccb;
	int s;

	s = splbio();

	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_chain);
			break;
		}
		if ((flg & SCSI_NOSLEEP) != 0) {
			splx(s);
			return (NULL);
		}
		tsleep(&sc->sc_free_ccb, PRIBIO, "dptccb", 0);
	}

	ccb->ccb_flg |= CCB_ALLOC;
	splx(s);
	return (ccb);
}

/*
 * We have a CCB which has been processed by the HBA, now we look to see how
 * the operation went.  CCBs marked with CCB_PRIVATE are not automatically
 * passed here by dpt_intr().
 */
void
dpt_done_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	struct scsipi_sense_data *s1, *s2;
	struct scsipi_xfer *xs;
	bus_dma_tag_t dmat;

	dmat = sc->sc_dmat;
	xs = ccb->ccb_xs;

	SC_DEBUG(xs->sc_link, SDEV_DB2, ("dpt_done_ccb\n"));

	/*
	 * If we were a data transfer, unload the map that described the
	 * data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->ccb_dmamap_xfer, 0,
		    ccb->ccb_dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->ccb_dmamap_xfer);
	}

	/*
	 * Otherwise, put the results of the operation into the xfer and
	 * call whoever started it.
	 */
	if ((ccb->ccb_flg & CCB_ALLOC) == 0) {
		panic("%s: done ccb not allocated!\n", sc->sc_dv.dv_xname);
		return;
	}

	if (xs->error == XS_NOERROR) {
		if (ccb->ccb_hba_status != HA_NO_ERROR) {
			switch (ccb->ccb_hba_status) {
			case HA_ERROR_SEL_TO:
				xs->error = XS_SELTIMEOUT;
				break;
			case HA_ERROR_RESET:
				xs->error = XS_RESET;
				break;
			default:	/* Other scsi protocol messes */
				printf("%s: HBA status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_hba_status);
				xs->error = XS_DRIVER_STUFFUP;
			}
		} else if (ccb->ccb_scsi_status != SCSI_OK) {
			switch (ccb->ccb_scsi_status) {
			case SCSI_CHECK:
				s1 = &ccb->ccb_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				printf("%s: SCSI status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_scsi_status);
				xs->error = XS_DRIVER_STUFFUP;
			}
		} else
			xs->resid = 0;
	}

	/* Free up the CCB and mark the command as done */
	dpt_free_ccb(sc, ccb);
	xs->flags |= ITSDONE;
	scsipi_done(xs);

	/*
	 * If there are entries in the software queue, try to run the
	 * first one.  We should be more or less guaranteed to succeed,
	 * since we just freed a CCB.  NOTE: dpt_scsi_cmd() relies on our
	 * calling it with the first entry in the queue.
	 */
	if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
		dpt_scsi_cmd(xs);
}

/*
 * Start a SCSI command.
 */
int
dpt_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	int error, seg, flags, s, fromqueue, dontqueue;
	struct scsipi_link *sc_link;
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sg *sg;
	struct eata_cp *cp;
	bus_dma_tag_t dmat;

	sc_link = xs->sc_link;
	flags = xs->flags;
	sc = sc_link->adapter_softc;
	dmat = sc->sc_dmat;
	fromqueue = 0;
	dontqueue = 0;

	SC_DEBUG(sc_link, SDEV_DB2, ("dpt_scsi_cmd\n"));

	/* Protect the queue */
	s = splbio();

	/*
	 * If we're running the queue from dpt_done_ccb(), we've been called
	 * with the first queue entry as our argument.
	 */
	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
		fromqueue = 1;
	} else {
		/* Cmds must be no more than 12 bytes for us */
		if (xs->cmdlen > 12) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (COMPLETE);
		}

		/* XXX we can't reset devices just yet */
		if ((flags & SCSI_RESET) != 0) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (COMPLETE);
		}

		/* Polled requests can't be queued for later */
		dontqueue = flags & SCSI_POLL;

		/* If there are jobs in the queue, run them first */
		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
			/*
			 * If we can't queue, we have to abort, since we have
			 * to preserve the queue order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}

			/* Swap with the first queue entry. */
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
			xs = TAILQ_FIRST(&sc->sc_queue);
			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
			fromqueue = 1;
		}
	}

	/*
	 * Get a CCB.  If the transfer is from a buf (possibly from interrupt
	 * time) then we can't allow it to sleep.
	 */
	if ((ccb = dpt_alloc_ccb(sc, flags)) == NULL) {
		/* If we can't queue, we lose */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}

		/*
		 * Stuff the request into the queue, in front if we came
		 * off it in the first place.
		 */
		if (fromqueue)
			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
		else
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}

	splx(s);

	/* Synchronous xfers mustn't write back through the cache */
	if (xs->bp != NULL && (xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
		ccb->ccb_flg |= CCB_SYNC;

	ccb->ccb_xs = xs;
	ccb->ccb_timeout = xs->timeout;

	cp = &ccb->ccb_eata_cp;
	memcpy(&cp->cp_scsi_cmd, xs->cmd, xs->cmdlen);
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_id = sc_link->scsipi_scsi.target;
	cp->cp_lun = sc_link->scsipi_scsi.lun;
	cp->cp_channel = sc_link->scsipi_scsi.channel;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_stataddr = SWAP32(sc->sc_sppa);
	cp->cp_dispri = 1;
	cp->cp_identify = 1;
	cp->cp_autosense = 1;
	cp->cp_nocache = ((ccb->ccb_flg & CCB_SYNC) != 0);
	cp->cp_datain = ((flags & SCSI_DATA_IN) != 0);
	cp->cp_dataout = ((flags & SCSI_DATA_OUT) != 0);
	cp->cp_interpret = (sc->sc_hbaid[sc_link->scsipi_scsi.channel] ==
	    sc_link->scsipi_scsi.target);

	cp->cp_senseaddr = SWAP32(sc->sc_dmamap_ccb->dm_segs[0].ds_addr +
	    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));

	if (xs->datalen) {
		sg = ccb->ccb_sg;
		seg = 0;
#ifdef TFS
		if (flags & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->ccb_dmamap_xfer, (struct uio *)xs->data,
			    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK);
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->ccb_dmamap_xfer,
			    xs->data, xs->datalen, NULL,
			    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK);
		}

		if (error) {
			printf("%s: dpt_scsi_cmd: ", sc->sc_dv.dv_xname);
			if (error == EFBIG)
				printf("more than %d dma segs\n", DPT_SG_SIZE);
			else
				printf("error %d loading dma map\n", error);

			xs->error = XS_DRIVER_STUFFUP;
			dpt_free_ccb(sc, ccb);
			return (COMPLETE);
		}

		bus_dmamap_sync(dmat, ccb->ccb_dmamap_xfer, 0,
		    ccb->ccb_dmamap_xfer->dm_mapsize,
		    (flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Load the hardware scatter/gather map with the
		 * contents of the DMA map.
		 */
		for (seg = 0; seg < ccb->ccb_dmamap_xfer->dm_nsegs; seg++) {
			ccb->ccb_sg[seg].sg_addr =
			    SWAP32(ccb->ccb_dmamap_xfer->dm_segs[seg].ds_addr);
			ccb->ccb_sg[seg].sg_len =
			    SWAP32(ccb->ccb_dmamap_xfer->dm_segs[seg].ds_len);
		}

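		/*
		 * Point the CP at the SG list; with cp_scatter set, the
		 * data address/length describe the list, not the buffer.
		 */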
		cp->cp_dataaddr = SWAP32(sc->sc_dmamap_ccb->dm_segs[0].ds_addr
		    + CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = SWAP32(seg * sizeof(struct eata_sg));
		cp->cp_scatter = 1;
	} else {
		cp->cp_dataaddr = 0;
		cp->cp_datalen = 0;
		cp->cp_scatter = 0;
	}

	/* Sync up CCB and status packet */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_spoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);

	/*
	 * Start the command.  If we are polling on completion, mark it
	 * private so that dpt_intr/dpt_done_ccb don't recycle the CCB
	 * without us noticing.
	 */
	if (dontqueue != 0)
		ccb->ccb_flg |= CCB_PRIVATE;

	if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0)) {
		printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
		xs->error = XS_DRIVER_STUFFUP;
		dpt_done_ccb(sc, ccb);
		return (COMPLETE);
	}

	if (dontqueue == 0)
		return (SUCCESSFULLY_QUEUED);

	/* Don't wait longer than this single command wants to wait */
	if (dpt_poll(sc, ccb)) {
		dpt_timeout(ccb);
		/* Wait for abort to complete */
		if (dpt_poll(sc, ccb))
			dpt_timeout(ccb);
	}

	dpt_done_ccb(sc, ccb);
	return (COMPLETE);
}

/*
 * Specified CCB has timed out, abort it.
 */
void
dpt_timeout(arg)
	void *arg;
{
	struct scsipi_link *sc_link;
	struct scsipi_xfer *xs;
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	int s;

	ccb = arg;
	xs = ccb->ccb_xs;
	sc_link = xs->sc_link;
	sc = sc_link->adapter_softc;

	scsi_print_addr(sc_link);
	printf("timed out (status:%02x aux status:%02x)",
	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));

	s = splbio();

	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
		/* Abort timed out, reset the HBA */
		printf(" AGAIN, resetting HBA\n");
		dpt_outb(sc, HA_COMMAND, CP_RESET);
		DELAY(750000);
	} else {
		/* Abort the operation that has timed out */
		printf("\n");
		ccb->ccb_xs->error = XS_TIMEOUT;
		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
		ccb->ccb_flg |= CCB_ABORT;
		/* Start the abort */
		if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa,
		    CP_IMMEDIATE, CPI_SPEC_ABORT))
			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
	}

	splx(s);
}

#if 0 && defined(DEBUG)
/*
 * Dump the contents of an EATA status packet.
 */
static void
dpt_dump_sp(sp)
	struct eata_sp *sp;
{
	int i;

	printf("\thba_status\t%02x\n", sp->sp_hba_status);
	printf("\teoc\t\t%d\n", sp->sp_eoc);
	printf("\tscsi_status\t%02x\n", sp->sp_scsi_status);
	printf("\tinv_residue\t%d\n", sp->sp_inv_residue);
	printf("\tccbid\t\t%d\n", sp->sp_ccbid);
	printf("\tid_message\t%d\n", sp->sp_id_message);
	printf("\tque_message\t%d\n", sp->sp_que_message);
	printf("\ttag_message\t%d\n", sp->sp_tag_message);
	printf("\tmessages\t");

	for (i = 0; i < 9; i++)
		printf("%d ", sp->sp_messages[i]);

	printf("\n");
}
#endif	/* DEBUG */

/*
 * Get inquiry data from the adapter.
 */
void
dpt_hba_inquire(sc, ei)
	struct dpt_softc *sc;
	struct eata_inquiry_data **ei;
{
	struct dpt_ccb *ccb;
	struct eata_cp *cp;
	bus_dma_tag_t dmat;

	*ei = (struct eata_inquiry_data *)sc->sc_scr;
	dmat = sc->sc_dmat;

	/* Get a CCB and mark as private */
	if ((ccb = dpt_alloc_ccb(sc, 0)) == NULL)
		panic("%s: no CCB for inquiry", sc->sc_dv.dv_xname);

	ccb->ccb_flg |= CCB_PRIVATE;
	ccb->ccb_timeout = 200;

	/* Put all the arguments into the CCB */
	cp = &ccb->ccb_eata_cp;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_id = sc->sc_hbaid[0];
	cp->cp_lun = 0;
	cp->cp_channel = 0;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_stataddr = SWAP32(sc->sc_sppa);
	cp->cp_dispri = 1;
	cp->cp_identify = 1;
	cp->cp_autosense = 0;
	cp->cp_interpret = 1;
	cp->cp_nocache = 0;
	cp->cp_datain = 1;
	cp->cp_dataout = 0;
	cp->cp_senseaddr = 0;
	cp->cp_dataaddr = SWAP32(sc->sc_scrpa);
	cp->cp_datalen = SWAP32(sizeof(struct eata_inquiry_data));
	cp->cp_scatter = 0;

	/* Put together the SCSI inquiry command */
	memset(&cp->cp_scsi_cmd, 0, 12);	/* XXX */
	cp->cp_scsi_cmd = INQUIRY;
	cp->cp_len = sizeof(struct eata_inquiry_data);

	/* Sync up CCB, status packet and scratch area */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_spoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);

	/* Start the command and poll on completion */
	if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);

	if (dpt_poll(sc, ccb))
		panic("%s: inquiry timed out", sc->sc_dv.dv_xname);

	if (ccb->ccb_hba_status != HA_NO_ERROR ||
	    ccb->ccb_scsi_status != SCSI_OK)
		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
		    sc->sc_dv.dv_xname, ccb->ccb_hba_status,
		    ccb->ccb_scsi_status);

	/* Sync up the DMA map and free CCB, returning */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
	dpt_free_ccb(sc, ccb);
}