/* $NetBSD: dpt.c,v 1.7 1999/10/01 12:20:12 ad Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andy Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
9 * Aerospace Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Portions of this code fall under the following copyright:
42 *
43 * Originally written by Julian Elischer (julian (at) tfs.com)
44 * for TRW Financial Systems for use under the MACH(2.5) operating system.
45 *
46 * TRW Financial Systems, in accordance with their agreement with Carnegie
47 * Mellon University, makes this software available to CMU to distribute
48 * or use in any manner that they see fit as long as this message is kept with
49 * the software. For this reason TFS also grants any other persons or
50 * organisations permission to use or modify this software.
51 *
52 * TFS supplies this software to be publicly redistributed
53 * on the understanding that TFS is not responsible for the correct
54 * functioning of this software in any circumstances.
55 */
56
57 /*
58 * Driver for DPT EATA SCSI adapters.
59 *
60 * TODO:
61 *
62 * o Need a front-end for (newer) ISA boards.
63 * o Handle older firmware better.
64 * o Find a bunch of different firmware EEPROMs and try them out.
65 * o Test with a bunch of different boards.
66 * o dpt_readcfg() should not be using CP_PIO_GETCFG.
67 * o An interface to userland applications.
68 * o A port of DPT Storage Manager included in the base system would be nice.
69 * o Some sysctls or a utility (eg dptctl(8)) to control parameters.
70 */
71
72 #include <sys/cdefs.h>
73 __KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.7 1999/10/01 12:20:12 ad Exp $");
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/kernel.h>
78 #include <sys/device.h>
79 #include <sys/queue.h>
80 #include <sys/proc.h>
81 #include <sys/buf.h>
82
83 #include <machine/endian.h>
84 #include <machine/bus.h>
85
86 #include <dev/scsipi/scsi_all.h>
87 #include <dev/scsipi/scsipi_all.h>
88 #include <dev/scsipi/scsiconf.h>
89
90 #include <dev/ic/dptreg.h>
91 #include <dev/ic/dptvar.h>
92
/*
 * Default scsipi_device for our links: all four hooks are NULL, so the
 * scsipi midlayer's standard handlers are used for everything.
 */
static struct scsipi_device dpt_dev = {
	NULL,		/* Use default error handler */
	NULL,		/* have a queue, served by this */
	NULL,		/* have no async handler */
	NULL,		/* Use default 'done' routine */
};
100
/*
 * Table of canonical board names: consecutive pairs of
 * (model-number prefix, marketing name).  dpt_init() scans the even
 * entries with memcmp(); the NULL entry terminates the search and its
 * partner string is the fallback description for unknown boards.
 */
static char *dpt_cname[] = {
#ifdef notdef
	"PM3755", "SmartRAID V",
	"PM3754", "SmartRAID V",
	"PM2654", "SmartRAID V",
	"PM2554", "SmartRAID V",
	"PM1554", "SmartRAID V",
#endif
	"PM3334", "SmartRAID IV",
	"PM3332", "SmartRAID IV",
	"PM2144", "SmartCache IV",
	"PM2044", "SmartCache IV",
	"PM2142", "SmartCache IV",
	"PM2042", "SmartCache IV",
	"PM2041", "SmartCache IV",
	"PM3224", "SmartRAID III",
	"PM3222", "SmartRAID III",
	"PM3021", "SmartRAID III",
	"PM2124", "SmartCache III",
	"PM2024", "SmartCache III",
	"PM2122", "SmartCache III",
	"PM2022", "SmartCache III",
	"PM2021", "SmartCache III",
	"SK2012", "SmartCache Plus",
	"SK2011", "SmartCache Plus",
	NULL,	  "unknown adapter, please report using send-pr(1)",
};
128
129 void dpt_shutdown __P((void *));
130 void dpt_timeout __P((void *));
131 void dpt_minphys __P((struct buf *));
132 int dpt_scsi_cmd __P((struct scsipi_xfer *));
133 int dpt_wait __P((struct dpt_softc *, u_int8_t, u_int8_t, int));
134 int dpt_poll __P((struct dpt_softc *, struct dpt_ccb *));
135 int dpt_cmd __P((struct dpt_softc *, struct eata_cp *, u_int32_t, int, int));
136 void dpt_hba_inquire __P((struct dpt_softc *, struct eata_inquiry_data **));
137
138 void dpt_reset_ccb __P((struct dpt_softc *, struct dpt_ccb *));
139 void dpt_free_ccb __P((struct dpt_softc *, struct dpt_ccb *));
140 void dpt_done_ccb __P((struct dpt_softc *, struct dpt_ccb *));
141 int dpt_init_ccb __P((struct dpt_softc *, struct dpt_ccb *));
142 int dpt_create_ccbs __P((struct dpt_softc *, struct dpt_ccb *, int));
143
144 struct dpt_ccb *dpt_alloc_ccb __P((struct dpt_softc *, int));
145
146 #if 0 && defined(DEBUG)
147 static void dpt_dump_sp __P((struct eata_sp *));
148 #endif
149
/*
 * Handle an interrupt from the HBA.  While the adapter asserts its
 * interrupt condition (or promises more status soon via HA_ST_MORE),
 * copy completion status from the shared status packet into the owning
 * CCB, ACK the interrupt, and hand non-private CCBs to dpt_done_ccb().
 */
int
dpt_intr(xxx_sc)
	void *xxx_sc;
{
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sp *sp;
	int more;

	sc = xxx_sc;
	sp = sc->sc_sp;		/* status packet, written by the HBA via DMA */
	more = 0;

#ifdef DEBUG
	if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
		printf("%s: spurious intr\n", sc->sc_dv.dv_xname);
#endif

	for (;;) {
		/*
		 * HBA might have interrupted while we were dealing with the
		 * last completed command, since we ACK before we deal; keep
		 * polling.  If no interrupt is signalled, but the HBA has
		 * indicated that more data will be available soon, hang
		 * around.
		 */
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0) {
			if (more != 0) {
				DELAY(10);
				continue;
			}
			break;
		}

		/* Pull the freshly DMA'd status packet into view */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_spoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);

		/* Might have looped before HBA can reset HBA_AUX_INTR */
		if (sp->sp_ccbid == -1) {
			DELAY(50);
#ifdef DIAGNOSTIC
			printf("%s: slow reset of HA_AUX_STATUS?",
			    sc->sc_dv.dv_xname);
#endif
			/* Still no interrupt pending: nothing to do */
			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
				return (0);
#ifdef DIAGNOSTIC
			printf("%s: was a slow reset of HA_AUX_STATUS",
			    sc->sc_dv.dv_xname);
#endif
			/* Re-sync DMA map */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb,
			    sc->sc_spoff, sizeof(struct eata_sp),
			    BUS_DMASYNC_POSTREAD);
		}

		/* Make sure CCB ID from status packet is realistic */
		if (sp->sp_ccbid >= 0 && sp->sp_ccbid < sc->sc_nccbs) {
			/* Sync up DMA map and cache cmd status */
			ccb = sc->sc_ccbs + sp->sp_ccbid;

			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb,
			    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
			    BUS_DMASYNC_POSTWRITE);

			ccb->ccb_hba_status = sp->sp_hba_status;
			ccb->ccb_scsi_status = sp->sp_scsi_status;

			/*
			 * Ack the interrupt and process the CCB.  If this
			 * is a private CCB it's up to dpt_poll() to notice.
			 */
			sp->sp_ccbid = -1;
			ccb->ccb_flg |= CCB_INTR;
			more = dpt_inb(sc, HA_STATUS) & HA_ST_MORE;
			if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
				dpt_done_ccb(sc, ccb);
		} else {
			printf("%s: bogus status (returned CCB id %d)\n",
			    sc->sc_dv.dv_xname, sp->sp_ccbid);

			/* Ack the interrupt */
			sp->sp_ccbid = -1;
			more = dpt_inb(sc, HA_STATUS) & HA_ST_MORE;
		}
	}

	return (0);
}
242
243 /*
244 * Initialize and attach the HBA. This is the entry point from bus
245 * specific probe-and-attach code.
246 */
247 void
248 dpt_init(sc, intrstr)
249 struct dpt_softc *sc;
250 const char *intrstr;
251 {
252 struct eata_inquiry_data *ei;
253 int i, j, error, rseg, mapsize;
254 bus_dma_segment_t seg;
255 struct eata_cfg *ec;
256 char model[16];
257
258 ec = &sc->sc_ec;
259
260 /* Allocate the CCB/status packet/scratch DMA map and load */
261 sc->sc_nccbs = min(SWAP16(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
262 sc->sc_spoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
263 sc->sc_scroff = sc->sc_spoff + sizeof(struct eata_sp);
264 sc->sc_scrlen = 256; /* XXX */
265 mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) + sc->sc_scrlen +
266 sizeof(struct eata_sp);
267
268 if ((error = bus_dmamem_alloc(sc->sc_dmat, mapsize, NBPG, 0,
269 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
270 printf("%s: unable to allocate CCBs, error = %d\n",
271 sc->sc_dv.dv_xname, error);
272 return;
273 }
274
275 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
276 (caddr_t *)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
277 printf("%s: unable to map CCBs, error = %d\n",
278 sc->sc_dv.dv_xname, error);
279 return;
280 }
281
282 if ((error = bus_dmamap_create(sc->sc_dmat, mapsize, mapsize, 1, 0,
283 BUS_DMA_NOWAIT, &sc->sc_dmamap_ccb)) != 0) {
284 printf("%s: unable to create CCB DMA map, error = %d\n",
285 sc->sc_dv.dv_xname, error);
286 return;
287 }
288
289 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_ccb,
290 sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
291 printf("%s: unable to load CCB DMA map, error = %d\n",
292 sc->sc_dv.dv_xname, error);
293 return;
294 }
295
296 sc->sc_sp = (struct eata_sp *)((caddr_t)sc->sc_ccbs + sc->sc_spoff);
297 sc->sc_sppa = sc->sc_dmamap_ccb->dm_segs[0].ds_addr + sc->sc_spoff;
298 sc->sc_scr = (caddr_t)sc->sc_ccbs + sc->sc_scroff;
299 sc->sc_scrpa = sc->sc_dmamap_ccb->dm_segs[0].ds_addr + sc->sc_scroff;
300 sc->sc_sp->sp_ccbid = -1;
301
302 /* Initialize the CCBs */
303 TAILQ_INIT(&sc->sc_free_ccb);
304 i = dpt_create_ccbs(sc, sc->sc_ccbs, sc->sc_nccbs);
305
306 if (i == 0) {
307 printf("%s: unable to create CCBs\n", sc->sc_dv.dv_xname);
308 return;
309 } else if (i != sc->sc_nccbs) {
310 printf("%s: %d/%d CCBs created!\n", sc->sc_dv.dv_xname, i,
311 sc->sc_nccbs);
312 sc->sc_nccbs = i;
313 }
314
315 /* Set shutdownhook before we start any device activity */
316 sc->sc_sdh = shutdownhook_establish(dpt_shutdown, sc);
317
318 /* Get the page 0 inquiry data from the HBA */
319 dpt_hba_inquire(sc, &ei);
320
321 /*
322 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
323 * dpt0: interrupting at irq 10
324 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
325 */
326 for (i = 0; ei->ei_vendor[i] != ' ' && i < 8; i++)
327 ;
328 ei->ei_vendor[i] = '\0';
329
330 for (i = 0; ei->ei_model[i] != ' ' && i < 7; i++)
331 model[i] = ei->ei_model[i];
332 for (j = 0; ei->ei_suffix[j] != ' ' && j < 7; j++)
333 model[i++] = ei->ei_model[i];
334 model[i] = '\0';
335
336 /* Find the cannonical name for the board */
337 for (i = 0; dpt_cname[i]; i += 2)
338 if (memcmp(ei->ei_model, dpt_cname[i], 6) == 0)
339 break;
340
341 printf("%s %s (%s)\n", ei->ei_vendor, dpt_cname[i + 1], model);
342
343 if (intrstr != NULL)
344 printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr);
345
346 printf("%s: %d queued commands, %d channel(s), adapter on ID(s)",
347 sc->sc_dv.dv_xname, sc->sc_nccbs, ec->ec_maxchannel + 1);
348
349 for (i = 0; i <= ec->ec_maxchannel; i++)
350 printf(" %d", ec->ec_hba[3 - i]);
351 printf("\n");
352
353 /* Reset the SCSI bus */
354 if (dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_BUS_RESET))
355 panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
356 DELAY(20000);
357
358 /* Fill in the adapter, each link and attach in turn */
359 sc->sc_adapter.scsipi_cmd = dpt_scsi_cmd;
360 sc->sc_adapter.scsipi_minphys = dpt_minphys;
361
362 for (i = 0; i <= ec->ec_maxchannel; i++) {
363 struct scsipi_link *link;
364
365 sc->sc_hbaid[i] = ec->ec_hba[3 - i];
366 link = &sc->sc_link[i];
367 link->scsipi_scsi.channel = i;
368 link->scsipi_scsi.adapter_target = sc->sc_hbaid[i];
369 link->scsipi_scsi.max_lun = ec->ec_maxlun;
370 link->scsipi_scsi.max_target = ec->ec_maxtarget;
371 link->type = BUS_SCSI;
372 link->device = &dpt_dev;
373 link->adapter = &sc->sc_adapter;
374 link->adapter_softc = sc;
375 link->openings = sc->sc_nccbs;
376 config_found(&sc->sc_dv, link, scsiprint);
377 }
378 }
379
380 /*
381 * Our 'shutdownhook' to cleanly shut down the HBA. The HBA must flush
382 * all data from it's cache and mark array groups as clean.
383 */
384 void
385 dpt_shutdown(xxx_sc)
386 void *xxx_sc;
387 {
388 struct dpt_softc *sc;
389
390 sc = xxx_sc;
391 printf("shutting down %s...", sc->sc_dv.dv_xname);
392 dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_POWEROFF_WARN);
393 DELAY(5000*1000);
394 printf(" done\n");
395 }
396
/*
 * Send an EATA command to the HBA.  Waits for the adapter to come
 * un-busy, loads the physical address of the command packet (if any)
 * into the four DMA base registers a byte at a time, then writes the
 * EATA command code.  For CP_IMMEDIATE, the immediate sub-code is
 * written first.  Returns 0 on success, -1 if the HBA stayed busy.
 */
int
dpt_cmd(sc, cp, addr, eatacmd, icmd)
	struct dpt_softc *sc;
	struct eata_cp *cp;
	u_int32_t addr;
	int eatacmd, icmd;
{
	int i;

	/* Spin for up to 20000 * 50us = 1s waiting for un-busy */
	for (i = 20000; i; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
			break;
		DELAY(50);
	}

	/* Not the most graceful way to handle this */
	if (i == 0) {
		printf("%s: HBA timeout on EATA command issue; aborting\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	/* No command packet means no packet address */
	if (cp == NULL)
		addr = 0;

	/* Load the packet's physical address, LSB first */
	dpt_outb(sc, HA_DMA_BASE + 0, (u_int32_t)addr);
	dpt_outb(sc, HA_DMA_BASE + 1, (u_int32_t)addr >> 8);
	dpt_outb(sc, HA_DMA_BASE + 2, (u_int32_t)addr >> 16);
	dpt_outb(sc, HA_DMA_BASE + 3, (u_int32_t)addr >> 24);

	if (eatacmd == CP_IMMEDIATE) {
		if (cp == NULL) {
			/* XXX should really pass meaningful values */
			dpt_outb(sc, HA_ICMD_CODE2, 0);
			dpt_outb(sc, HA_ICMD_CODE1, 0);
		}
		dpt_outb(sc, HA_ICMD, icmd);
	}

	/* Writing the command register starts execution */
	dpt_outb(sc, HA_COMMAND, eatacmd);
	return (0);
}
442
443 /*
444 * Wait for the HBA to reach an arbitrary state.
445 */
446 int
447 dpt_wait(sc, mask, state, ms)
448 struct dpt_softc *sc;
449 u_int8_t mask, state;
450 int ms;
451 {
452
453 for (ms *= 10; ms; ms--) {
454 if ((dpt_inb(sc, HA_STATUS) & mask) == state)
455 return (0);
456 DELAY(100);
457 }
458 return (-1);
459 }
460
/*
 * Wait for the specified CCB to finish.  This is used when we may not be
 * able to sleep and/or interrupts are disabled (eg autoconfiguration).
 * The timeout value from the CCB is used.  This should only be used for
 * CCB_PRIVATE requests; otherwise the CCB will get recycled before we get
 * a look at it.  Returns 0 on completion, -1 on timeout.
 */
int
dpt_poll(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int i;

#ifdef DEBUG
	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
		panic("dpt_poll: called for non-CCB_PRIVATE request\n");
#endif

	/* ccb_timeout is in ms; each iteration is a 50us delay */
	for (i = ccb->ccb_timeout * 20; i; i--) {
		if ((ccb->ccb_flg & CCB_INTR) != 0)
			return (0);
		/* Run the interrupt handler by hand if the HBA asserts */
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
			dpt_intr(sc);
		if ((ccb->ccb_flg & CCB_INTR) != 0)
			return (0);
		DELAY(50);
	}
	return (-1);
}
491
/*
 * Read the EATA configuration from the HBA and perform some sanity checks.
 * Resets the HBA, waits for it to become ready, then reads the config
 * structure out with PIO.  Returns 0 on success, -1 on failure.
 */
int
dpt_readcfg(sc)
	struct dpt_softc *sc;
{
	struct eata_cfg *ec;
	int i, j, stat;
	u_int16_t *p;

	ec = &sc->sc_ec;

	/* Older firmware may puke if we talk to it too soon after reset */
	dpt_outb(sc, HA_COMMAND, CP_RESET);
	DELAY(750000);

	/* Wait up to 1000 * 2ms = 2s for HA_ST_READY after the reset */
	for (i = 1000; i; i--) {
		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}

	if (i == 0) {
		printf("%s: HBA not ready after reset: %02x\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/*
	 * Keep waiting while the HBA is not in one of the acceptable
	 * ready states.  If the error register doesn't spell out "DPT"
	 * the board really isn't coming up, so give up.
	 */
	while((((stat = dpt_inb(sc, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
		/* RAID drives still spinning up? */
		if((dpt_inb(sc, HA_ERROR) != 'D')
		    || (dpt_inb(sc, HA_ERROR + 1) != 'P')
		    || (dpt_inb(sc, HA_ERROR + 2) != 'T')) {
			printf("%s: HBA not ready\n", sc->sc_dv.dv_xname);
			return (-1);
		}
	}

	/*
	 * Issue the read-config command and wait for the data to appear.
	 * XXX we shouldn't be doing this with PIO, but it makes it a lot
	 * easier as no DMA setup is required.
	 */
	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
	memset(ec, 0, sizeof(*ec));
	/* i = number of 16-bit words up to and including ec_cfglen */
	i = ((int)&((struct eata_cfg *)0)->ec_cfglen +
	    sizeof(ec->ec_cfglen)) >> 1;
	p = (u_int16_t *)ec;

	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
		printf("%s: cfg data didn't appear\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Begin reading */
	while (i--)
		*p++ = dpt_inw(sc, HA_DATA);

	/* Clamp the advertised length to what struct eata_cfg can hold */
	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
	    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
	    - sizeof(ec->ec_cfglen)))
		i = sizeof(struct eata_cfg)
		    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
		    - sizeof(ec->ec_cfglen);

	/* j = total number of config bytes consumed once we're done */
	j = i + (int)(&(((struct eata_cfg *)0L)->ec_cfglen)) +
	    sizeof(ec->ec_cfglen);
	i >>= 1;

	/* Read the remainder of the config structure */
	while (i--)
		*p++ = dpt_inw(sc, HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		dpt_inw(sc, HA_DATA);

	/* Defaults for older Firmware */
	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;

	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
		printf("%s: HBA error\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (!ec->ec_hbavalid) {
		printf("%s: ec_hba field invalid\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
		printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (!ec->ec_dmasupported) {
		printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	return (0);
}
600
/*
 * Adjust the size of each I/O before it passes to the SCSI layer:
 * clamp the transfer count to the adapter's maximum, then apply the
 * machine-dependent minphys() limit as well.
 */
void
dpt_minphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > DPT_MAX_XFER)
		bp->b_bcount = DPT_MAX_XFER;
	minphys(bp);
}
613
/*
 * Put a CCB onto the freelist and wake any sleeper in dpt_alloc_ccb().
 */
void
dpt_free_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int s;

	s = splbio();
	ccb->ccb_flg = 0;
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, ccb_chain);

	/*
	 * Wake anybody waiting for a free ccb.  After an insert at the
	 * head, a NULL tqe_next means this CCB is the only entry -- i.e.
	 * the list was empty, which is the only case where a waiter can
	 * be asleep on it.
	 */
	if (ccb->ccb_chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);
	splx(s);
}
633
/*
 * Initialize the specified CCB: create its data-transfer DMA map and
 * record the physical address of the CCB itself (used when handing the
 * CCB to the HBA).  Returns 0 on success or a bus_dma error code.
 */
int
dpt_init_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int error;

	/* Create the DMA map for this CCB's data */
	error = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER, DPT_SG_SIZE,
	    DPT_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &ccb->ccb_dmamap_xfer);

	if (error) {
		printf("%s: can't create ccb dmamap (%d)\n",
		    sc->sc_dv.dv_xname, error);
		return (error);
	}

	ccb->ccb_flg = 0;
	/* Physical address of this CCB within the shared DMA area */
	ccb->ccb_ccbpa = sc->sc_dmamap_ccb->dm_segs[0].ds_addr +
	    CCB_OFF(sc, ccb);
	return (0);
}
660
661 /*
662 * Create a set of CCBs and add them to the free list.
663 */
664 int
665 dpt_create_ccbs(sc, ccbstore, count)
666 struct dpt_softc *sc;
667 struct dpt_ccb *ccbstore;
668 int count;
669 {
670 struct dpt_ccb *ccb;
671 int i, error;
672
673 memset(ccbstore, 0, sizeof(struct dpt_ccb) * count);
674
675 for (i = 0, ccb = ccbstore; i < count; i++, ccb++) {
676 if ((error = dpt_init_ccb(sc, ccb)) != 0) {
677 printf("%s: unable to init ccb, error = %d\n",
678 sc->sc_dv.dv_xname, error);
679 break;
680 }
681 ccb->ccb_id = i;
682 TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_chain);
683 }
684
685 return (i);
686 }
687
/*
 * Get a free ccb.  If there are none, see if we can allocate a new one.
 * Otherwise either return an error or if we are permitted to, sleep until
 * one becomes free (dpt_free_ccb() does the wakeup).
 */
struct dpt_ccb *
dpt_alloc_ccb(sc, flg)
	struct dpt_softc *sc;
	int flg;
{
	struct dpt_ccb *ccb;
	int s;

	s = splbio();

	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_chain);
			break;
		}
		/* Caller can't tolerate sleeping (e.g. interrupt time) */
		if ((flg & XS_CTL_NOSLEEP) != 0) {
			splx(s);
			return (NULL);
		}
		tsleep(&sc->sc_free_ccb, PRIBIO, "dptccb", 0);
	}

	ccb->ccb_flg |= CCB_ALLOC;
	splx(s);
	return (ccb);
}
720
/*
 * We have a CCB which has been processed by the HBA, now we look to see how
 * the operation went.  CCBs marked with CCB_PRIVATE are not automatically
 * passed here by dpt_intr().  Translates HBA/SCSI status into xs->error,
 * completes the transfer, and kicks the software queue.
 */
void
dpt_done_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	struct scsipi_sense_data *s1, *s2;
	struct scsipi_xfer *xs;
	bus_dma_tag_t dmat;

	dmat = sc->sc_dmat;
	xs = ccb->ccb_xs;

	SC_DEBUG(xs->sc_link, SDEV_DB2, ("dpt_done_ccb\n"));

	/*
	 * If we were a data transfer, unload the map that described the
	 * data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->ccb_dmamap_xfer, 0,
		    ccb->ccb_dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->ccb_dmamap_xfer);
	}

	/*
	 * Otherwise, put the results of the operation into the xfer and
	 * call whoever started it.
	 */
#ifdef DIAGNOSTIC
	if ((ccb->ccb_flg & CCB_ALLOC) == 0) {
		panic("%s: done ccb not allocated!\n", sc->sc_dv.dv_xname);
		return;
	}
#endif

	if (xs->error == XS_NOERROR) {
		/* HBA-level errors take precedence over SCSI status */
		if (ccb->ccb_hba_status != HA_NO_ERROR) {
			switch (ccb->ccb_hba_status) {
			case HA_ERROR_SEL_TO:
				xs->error = XS_SELTIMEOUT;
				break;
			case HA_ERROR_RESET:
				xs->error = XS_RESET;
				break;
			default:	/* Other scsi protocol messes */
				printf("%s: HBA status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_hba_status);
				xs->error = XS_DRIVER_STUFFUP;
			}
		} else if (ccb->ccb_scsi_status != SCSI_OK) {
			switch (ccb->ccb_scsi_status) {
			case SCSI_CHECK:
				/* Copy the autosense data into the xfer */
				s1 = &ccb->ccb_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				printf("%s: SCSI status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_scsi_status);
				xs->error = XS_DRIVER_STUFFUP;
			}
		} else
			xs->resid = 0;

		xs->status = ccb->ccb_scsi_status;
	}

	/* Free up the CCB and mark the command as done */
	dpt_free_ccb(sc, ccb);
	xs->xs_status |= XS_STS_DONE;
	scsipi_done(xs);

	/*
	 * If there are queue entries in the software queue, try to run the
	 * first one.  We should be more or less guaranteed to succeed, since
	 * we just freed an CCB.  NOTE: dpt_scsi_cmd() relies on our calling
	 * it with the first entry in the queue.
	 */
	if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
		dpt_scsi_cmd(xs);
}
813
814 /*
815 * Start a SCSI command.
816 */
817 int
818 dpt_scsi_cmd(xs)
819 struct scsipi_xfer *xs;
820 {
821 int error, seg, flags, s, fromqueue, dontqueue;
822 struct scsipi_link *sc_link;
823 struct dpt_softc *sc;
824 struct dpt_ccb *ccb;
825 struct eata_sg *sg;
826 struct eata_cp *cp;
827 bus_dma_tag_t dmat;
828
829 sc_link = xs->sc_link;
830 flags = xs->xs_control;
831 sc = sc_link->adapter_softc;
832 dmat = sc->sc_dmat;
833 fromqueue = 0;
834 dontqueue = 0;
835
836 SC_DEBUG(sc_link, SDEV_DB2, ("dpt_scsi_cmd\n"));
837
838 /* Protect the queue */
839 s = splbio();
840
841 /*
842 * If we're running the queue from dpt_done_ccb(), we've been called
843 * with the first queue entry as our argument.
844 */
845 if (xs == TAILQ_FIRST(&sc->sc_queue)) {
846 TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
847 fromqueue = 1;
848 } else {
849 /* Cmds must be no more than 12 bytes for us */
850 if (xs->cmdlen > 12) {
851 splx(s);
852 xs->error = XS_DRIVER_STUFFUP;
853 return (COMPLETE);
854 }
855
856 /* XXX we can't reset devices just yet */
857 if ((flags & XS_CTL_RESET) != 0) {
858 xs->error = XS_DRIVER_STUFFUP;
859 return (COMPLETE);
860 }
861
862 /* Polled requests can't be queued for later */
863 dontqueue = flags & XS_CTL_POLL;
864
865 /* If there are jobs in the queue, run them first */
866 if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
867 /*
868 * If we can't queue, we have to abort, since we have
869 * to preserve the queue order.
870 */
871 if (dontqueue) {
872 splx(s);
873 xs->error = XS_DRIVER_STUFFUP;
874 return (TRY_AGAIN_LATER);
875 }
876
877 /* Swap with the first queue entry. */
878 TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
879 xs = TAILQ_FIRST(&sc->sc_queue);
880 TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
881 fromqueue = 1;
882 }
883 }
884
885 /*
886 * Get a CCB. If the transfer is from a buf (possibly from interrupt
887 * time) then we can't allow it to sleep.
888 */
889 if ((ccb = dpt_alloc_ccb(sc, flags)) == NULL) {
890 /* If we can't queue, we lose */
891 if (dontqueue) {
892 splx(s);
893 xs->error = XS_DRIVER_STUFFUP;
894 return (TRY_AGAIN_LATER);
895 }
896
897 /*
898 * Stuff request into the queue, in front if we came off
899 * in the first place.
900 */
901 if (fromqueue)
902 TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
903 else
904 TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
905 splx(s);
906 return (SUCCESSFULLY_QUEUED);
907 }
908
909 splx(s);
910
911 /* Synchronous xfers musn't write-back through the cache */
912 if (xs->bp != NULL && (xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
913 ccb->ccb_flg |= CCB_SYNC;
914
915 ccb->ccb_xs = xs;
916 ccb->ccb_timeout = xs->timeout;
917
918 cp = &ccb->ccb_eata_cp;
919 memcpy(&cp->cp_scsi_cmd, xs->cmd, xs->cmdlen);
920 cp->cp_ccbid = ccb->ccb_id;
921 cp->cp_id = sc_link->scsipi_scsi.target;
922 cp->cp_lun = sc_link->scsipi_scsi.lun;
923 cp->cp_channel = sc_link->scsipi_scsi.channel;
924 cp->cp_senselen = sizeof(ccb->ccb_sense);
925 cp->cp_stataddr = SWAP32(sc->sc_sppa);
926 cp->cp_dispri = 1;
927 cp->cp_identify = 1;
928 cp->cp_autosense = 1;
929 cp->cp_nocache = ((ccb->ccb_flg & CCB_SYNC) != 0);
930 cp->cp_datain = ((flags & XS_CTL_DATA_IN) != 0);
931 cp->cp_dataout = ((flags & XS_CTL_DATA_OUT) != 0);
932 cp->cp_interpret = (sc->sc_hbaid[sc_link->scsipi_scsi.channel] ==
933 sc_link->scsipi_scsi.target);
934
935 cp->cp_senseaddr = SWAP32(sc->sc_dmamap_ccb->dm_segs[0].ds_addr +
936 CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
937
938 if (xs->datalen) {
939 sg = ccb->ccb_sg;
940 seg = 0;
941 #ifdef TFS
942 if (flags & XS_CTL_DATA_UIO) {
943 error = bus_dmamap_load_uio(dmat,
944 ccb->ccb_dmamap_xfer, (struct uio *)xs->data,
945 (flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
946 BUS_DMA_WAITOK);
947 } else
948 #endif /*TFS */
949 {
950 error = bus_dmamap_load(dmat,
951 ccb->ccb_dmamap_xfer,
952 xs->data, xs->datalen, NULL,
953 (flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
954 BUS_DMA_WAITOK);
955 }
956
957 if (error) {
958 printf("%s: dpt_scsi_cmd: ", sc->sc_dv.dv_xname);
959 if (error == EFBIG)
960 printf("more than %d dma segs\n", DPT_SG_SIZE);
961 else
962 printf("error %d loading dma map\n", error);
963
964 xs->error = XS_DRIVER_STUFFUP;
965 dpt_free_ccb(sc, ccb);
966 return (COMPLETE);
967 }
968
969 bus_dmamap_sync(dmat, ccb->ccb_dmamap_xfer, 0,
970 ccb->ccb_dmamap_xfer->dm_mapsize,
971 (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
972 BUS_DMASYNC_PREWRITE);
973
974 /*
975 * Load the hardware scatter/gather map with the
976 * contents of the DMA map.
977 */
978 for (seg = 0; seg < ccb->ccb_dmamap_xfer->dm_nsegs; seg++) {
979 ccb->ccb_sg[seg].sg_addr =
980 SWAP32(ccb->ccb_dmamap_xfer->dm_segs[seg].ds_addr);
981 ccb->ccb_sg[seg].sg_len =
982 SWAP32(ccb->ccb_dmamap_xfer->dm_segs[seg].ds_len);
983 }
984
985 cp->cp_dataaddr = SWAP32(sc->sc_dmamap_ccb->dm_segs[0].ds_addr
986 + CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sg));
987 cp->cp_datalen = SWAP32(seg * sizeof(struct eata_sg));
988 cp->cp_scatter = 1;
989 } else {
990 cp->cp_dataaddr = 0;
991 cp->cp_datalen = 0;
992 cp->cp_scatter = 0;
993 }
994
995 /* Sync up CCB and status packet */
996 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, CCB_OFF(sc, ccb),
997 sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
998 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_spoff,
999 sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
1000
1001 /*
1002 * Start the command. If we are polling on completion, mark it
1003 * private so that dpt_intr/dpt_done_ccb don't recycle the CCB
1004 * without us noticing.
1005 */
1006 if (dontqueue != 0)
1007 ccb->ccb_flg |= CCB_PRIVATE;
1008
1009 if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0)) {
1010 printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
1011 xs->error = XS_DRIVER_STUFFUP;
1012 dpt_done_ccb(sc, ccb);
1013 return (COMPLETE);
1014 }
1015
1016 if (dontqueue == 0)
1017 return (SUCCESSFULLY_QUEUED);
1018
1019 /* Don't wait longer than this single command wants to wait */
1020 if (dpt_poll(sc, ccb)) {
1021 dpt_timeout(ccb);
1022 /* Wait for abort to complete */
1023 if (dpt_poll(sc, ccb))
1024 dpt_timeout(ccb);
1025 }
1026
1027 dpt_done_ccb(sc, ccb);
1028 return (COMPLETE);
1029 }
1030
/*
 * Specified CCB has timed out, abort it.  On the first timeout an
 * abort is issued for the command; if the abort itself times out
 * (CCB_ABORT already set) the whole HBA is reset.
 */
void
dpt_timeout(arg)
	void *arg;
{
	struct scsipi_link *sc_link;
	struct scsipi_xfer *xs;
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	int s;

	ccb = arg;
	xs = ccb->ccb_xs;
	sc_link = xs->sc_link;
	sc = sc_link->adapter_softc;

	scsi_print_addr(sc_link);
	printf("timed out (status:%02x aux status:%02x)",
	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));

	s = splbio();

	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
		/* Abort timed out, reset the HBA */
		printf(" AGAIN, resetting HBA\n");
		dpt_outb(sc, HA_COMMAND, CP_RESET);
		DELAY(750000);
	} else {
		/* Abort the operation that has timed out */
		printf("\n");
		ccb->ccb_xs->error = XS_TIMEOUT;
		/* Shorter deadline for the abort than the original cmd */
		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
		ccb->ccb_flg |= CCB_ABORT;
		/* Start the abort */
		if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa,
		    CP_IMMEDIATE, CPI_SPEC_ABORT))
			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
	}

	splx(s);
}
1074
1075 #if 0 && defined(DEBUG)
/*
 * Dump the contents of an EATA status packet (debugging aid; this whole
 * function is compiled out by the enclosing #if 0).
 */
static void
dpt_dump_sp(sp)
	struct eata_sp *sp;
{
	int i;

	printf("\thba_status\t%02x\n", sp->sp_hba_status);
	printf("\teoc\t\t%d\n", sp->sp_eoc);
	printf("\tscsi_status\t%02x\n", sp->sp_scsi_status);
	printf("\tinv_residue\t%d\n", sp->sp_inv_residue);
	printf("\tccbid\t\t%d\n", sp->sp_ccbid);
	printf("\tid_message\t%d\n", sp->sp_id_message);
	printf("\tque_message\t%d\n", sp->sp_que_message);
	printf("\ttag_message\t%d\n", sp->sp_tag_message);
	printf("\tmessages\t");

	for (i = 0; i < 9; i++)
		printf("%d ", sp->sp_messages[i]);

	printf("\n");
}
1100 #endif /* DEBUG */
1101
1102 /*
1103 * Get inquiry data from the adapter.
1104 */
1105 void
1106 dpt_hba_inquire(sc, ei)
1107 struct dpt_softc *sc;
1108 struct eata_inquiry_data **ei;
1109 {
1110 struct dpt_ccb *ccb;
1111 struct eata_cp *cp;
1112 bus_dma_tag_t dmat;
1113
1114 *ei = (struct eata_inquiry_data *)sc->sc_scr;
1115 dmat = sc->sc_dmat;
1116
1117 /* Get a CCB and mark as private */
1118 if ((ccb = dpt_alloc_ccb(sc, 0)) == NULL)
1119 panic("%s: no CCB for inquiry", sc->sc_dv.dv_xname);
1120
1121 ccb->ccb_flg |= CCB_PRIVATE;
1122 ccb->ccb_timeout = 200;
1123
1124 /* Put all the arguments into the CCB */
1125 cp = &ccb->ccb_eata_cp;
1126 cp->cp_ccbid = ccb->ccb_id;
1127 cp->cp_id = sc->sc_hbaid[0];
1128 cp->cp_lun = 0;
1129 cp->cp_channel = 0;
1130 cp->cp_senselen = sizeof(ccb->ccb_sense);
1131 cp->cp_stataddr = SWAP32(sc->sc_sppa);
1132 cp->cp_dispri = 1;
1133 cp->cp_identify = 1;
1134 cp->cp_autosense = 0;
1135 cp->cp_interpret = 1;
1136 cp->cp_nocache = 0;
1137 cp->cp_datain = 1;
1138 cp->cp_dataout = 0;
1139 cp->cp_senseaddr = 0;
1140 cp->cp_dataaddr = SWAP32(sc->sc_scrpa);
1141 cp->cp_datalen = SWAP32(sizeof(struct eata_inquiry_data));
1142 cp->cp_scatter = 0;
1143
1144 /* Put together the SCSI inquiry command */
1145 memset(&cp->cp_scsi_cmd, 0, 12); /* XXX */
1146 cp->cp_scsi_cmd = INQUIRY;
1147 cp->cp_len = sizeof(struct eata_inquiry_data);
1148
1149 /* Sync up CCB, status packet and scratch area */
1150 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, CCB_OFF(sc, ccb),
1151 sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
1152 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_spoff,
1153 sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
1154 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_scroff,
1155 sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);
1156
1157 /* Start the command and poll on completion */
1158 if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0))
1159 panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
1160
1161 if (dpt_poll(sc, ccb))
1162 panic("%s: inquiry timed out", sc->sc_dv.dv_xname);
1163
1164 if (ccb->ccb_hba_status != HA_NO_ERROR ||
1165 ccb->ccb_scsi_status != SCSI_OK)
1166 panic("%s: inquiry failed (hba:%02x scsi:%02x",
1167 sc->sc_dv.dv_xname, ccb->ccb_hba_status,
1168 ccb->ccb_scsi_status);
1169
1170 /* Sync up the DMA map and free CCB, returning */
1171 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_scroff,
1172 sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
1173 dpt_free_ccb(sc, ccb);
1174 }
1175