/*	$NetBSD: dpt.c,v 1.1 1999/09/27 23:41:47 ad Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andy Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
 * Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Portions of this code fall under the following copyright:
 *
 * Originally written by Julian Elischer (julian@tfs.com)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 *
 * commenced: Sun Sep 27 18:14:01 PDT 1992
 * slight mod to make work with 34F as well: Wed Jun 2 18:05:48 WST 1993
 */

/*
 * Driver for DPT EATA SCSI adapters.
 *
 * TODO:
 *
 * o Occasionally, dpt_readcfg() will fail while waiting for the HBA - fix.
 * o Need a front-end for EISA boards.
 * o Need a front-end for (newer) ISA boards.
 * o Handle older firmware better.
 * o Find a bunch of different firmware EEPROMs and try them out.
 * o Test with a bunch of different boards.
 * o dpt_readcfg() should not be using CP_PIO_GETCFG.
 * o An interface to userland applications.
 * o A port of DPT Storage Manager included in the base system would be nice.
 * o Some sysctls or a utility (eg dptctl(8)) to control parameters.
 * o Commit the manpage.
 * o Code needs KNF in places and sanity in others.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.1 1999/09/27 23:41:47 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>

#include <machine/endian.h>
#include <machine/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/dptreg.h>
#include <dev/ic/dptvar.h>

/* A default for our link struct */
static struct scsipi_device dpt_dev = {
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};

static char *dpt_cname[] = {
#ifdef notdef
	"PM3755", "SmartRAID V",
	"PM3754", "SmartRAID V",
	"PM2654", "SmartRAID V",
	"PM2554", "SmartRAID V",
	"PM1554", "SmartRAID V",
#endif
	"PM3334", "SmartRAID IV",
	"PM3332", "SmartRAID IV",
	"PM2144", "SmartCache IV",
	"PM2044", "SmartCache IV",
	"PM2142", "SmartCache IV",
	"PM2042", "SmartCache IV",
	"PM2041", "SmartCache IV",
	"PM3224", "SmartRAID III",
	"PM3222", "SmartRAID III",
	"PM3021", "SmartRAID III",
	"PM2124", "SmartCache III",
	"PM2024", "SmartCache III",
	"PM2122", "SmartCache III",
	"PM2022", "SmartCache III",
	"PM2021", "SmartCache III",
	"SK2012", "SmartCache Plus",
	"SK2011", "SmartCache Plus",
	NULL,	  "unknown adapter, please report using send-pr(1)",
};

void	dpt_shutdown __P((void *));
void	dpt_timeout __P((void *));
void	dpt_minphys __P((struct buf *));
int	dpt_readcfg __P((struct dpt_softc *, struct eata_cfg *));
int	dpt_scsi_cmd __P((struct scsipi_xfer *));
int	dpt_wait __P((struct dpt_softc *, u_int8_t, u_int8_t, int));
int	dpt_poll __P((struct dpt_softc *, struct dpt_ccb *));
void	dpt_cmd __P((struct dpt_softc *, struct eata_cp *, u_int32_t, int, int));
void	dpt_hba_inquire __P((struct dpt_softc *, struct eata_inquiry_data **));

void	dpt_reset_ccb __P((struct dpt_softc *, struct dpt_ccb *));
void	dpt_free_ccb __P((struct dpt_softc *, struct dpt_ccb *));
void	dpt_done_ccb __P((struct dpt_softc *, struct dpt_ccb *));
int	dpt_init_ccb __P((struct dpt_softc *, struct dpt_ccb *));
int	dpt_create_ccbs __P((struct dpt_softc *, struct dpt_ccb *, int));

struct dpt_ccb	*dpt_alloc_ccb __P((struct dpt_softc *, int));

#if 0 && defined(DEBUG)
static void	dpt_dump_sp __P((struct eata_sp *));
#endif

/*
 * Handle an interrupt from the HBA.
 */
int
dpt_intr(xxx_sc)
	void *xxx_sc;
{
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sp *sp;
	volatile int junk;

	sc = xxx_sc;
	sp = sc->sc_sp;

#ifdef DEBUG
	if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
		printf("%s: spurious intr\n", sc->sc_dv.dv_xname);
#endif

	/*
	 * HBA might have interrupted while we were dealing with the last
	 * completed command, since we ACK before we deal; keep polling.
	 */
	while ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_spoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);

		/* Might have looped before the HBA could reset HA_AUX_INTR */
		if (sp->sp_ccbid == -1) {
			DELAY(50);
#ifdef DIAGNOSTIC
			printf("%s: slow reset of HA_AUX_STATUS?",
			    sc->sc_dv.dv_xname);
#endif
			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
				return (0);
#ifdef DIAGNOSTIC
			printf("%s: was a slow reset of HA_AUX_STATUS",
			    sc->sc_dv.dv_xname);
#endif
			/* Re-sync DMA map */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb,
			    sc->sc_spoff, sizeof(struct eata_sp),
			    BUS_DMASYNC_POSTREAD);
		}

		/* Make sure CCB ID from status packet is realistic */
		if (sp->sp_ccbid >= 0 && sp->sp_ccbid < sc->sc_nccbs) {
			/* Sync up DMA map and cache cmd status */
			ccb = sc->sc_ccbs + sp->sp_ccbid;

			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb,
			    DPT_CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
			    BUS_DMASYNC_POSTWRITE);

			ccb->ccb_hba_status = sp->sp_hba_status;
			ccb->ccb_scsi_status = sp->sp_scsi_status;

			/*
			 * Ack the interrupt and process the CCB.  If this
			 * is a private CCB it's up to dpt_poll() to notice.
			 */
			sp->sp_ccbid = -1;
			ccb->ccb_flg |= CCB_INTR;
			junk = dpt_inb(sc, HA_STATUS);
			if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
				dpt_done_ccb(sc, ccb);
		} else {
			printf("%s: bogus status (returned CCB id %d)\n",
			    sc->sc_dv.dv_xname, sp->sp_ccbid);

			/* Ack the interrupt */
			sp->sp_ccbid = -1;
			junk = dpt_inb(sc, HA_STATUS);
		}
	}

	return (0);
}

/*
 * Initialize and attach the HBA.  This is the entry point from bus
 * specific probe-and-attach code.
 */
void
dpt_init(sc, intrstr)
	struct dpt_softc *sc;
	const char *intrstr;
{
	struct eata_inquiry_data *ei;
	int i, j, error, rseg, mapsize;
	bus_dma_segment_t seg;
	struct eata_cfg dc;
	char model[16];

	/* Older firmware may puke if we talk to it too soon after reset */
	dpt_outb(sc, HA_COMMAND, CP_RESET);
	DELAY(750000);

	for (i = 1000; i; i--) {
		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}

	if (i == 0) {
		printf("%s: HBA not ready after reset: %02x\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return;
	}

	if (dpt_readcfg(sc, &dc)) {
		printf("%s: readcfg failed - see dpt(4)\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/* Allocate the CCB/status packet/scratch DMA map and load */
	sc->sc_nccbs = min(SWAP16(*(int16_t *)dc.dc_queuedepth), DPT_MAX_CCBS);
	sc->sc_spoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
	sc->sc_scroff = sc->sc_spoff + sizeof(struct eata_sp);
	sc->sc_scrlen = 256; /* XXX */
	mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) + sc->sc_scrlen +
	    sizeof(struct eata_sp);
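	/*
	 * The single DMA area holds the CCB array, followed by the EATA
	 * status packet, followed by the scratch buffer.
	 */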

	if ((error = bus_dmamem_alloc(sc->sc_dmat, mapsize, NBPG, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate CCBs, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
	    (caddr_t *)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map CCBs, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, mapsize, mapsize, 1, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap_ccb)) != 0) {
		printf("%s: unable to create CCB DMA map, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_ccb,
	    sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load CCB DMA map, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	sc->sc_sp = (struct eata_sp *)((caddr_t)sc->sc_ccbs + sc->sc_spoff);
	sc->sc_sppa = sc->sc_dmamap_ccb->dm_segs[0].ds_addr + sc->sc_spoff;
	sc->sc_scr = (caddr_t)sc->sc_ccbs + sc->sc_scroff;
	sc->sc_scrpa = sc->sc_dmamap_ccb->dm_segs[0].ds_addr + sc->sc_scroff;
	sc->sc_sp->sp_ccbid = -1;
#ifdef notdef
	sc->sc_pending = 0;
#endif
	/* Initialize the CCBs */
	TAILQ_INIT(&sc->sc_free_ccb);
	i = dpt_create_ccbs(sc, sc->sc_ccbs, sc->sc_nccbs);

	if (i == 0) {
		printf("%s: unable to create CCBs\n", sc->sc_dv.dv_xname);
		return;
	} else if (i != sc->sc_nccbs) {
		printf("%s: %d/%d CCBs created!\n", sc->sc_dv.dv_xname, i,
		    sc->sc_nccbs);
		sc->sc_nccbs = i;
	}

	/* Set shutdownhook before we start any device activity */
	sc->sc_sdh = shutdownhook_establish(dpt_shutdown, sc);

	/* Get the page 0 inquiry data from the HBA */
	dpt_hba_inquire(sc, &ei);

	/*
	 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
	 * dpt0: interrupting at irq 10
	 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
	 */
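	/* The vendor, model and suffix strings are space padded */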
	for (i = 0; i < 8 && ei->ei_vendor[i] != ' '; i++)
		;
	ei->ei_vendor[i] = '\0';

	for (i = 0; i < 7 && ei->ei_model[i] != ' '; i++)
		model[i] = ei->ei_model[i];
	for (j = 0; j < 7 && ei->ei_suffix[j] != ' '; j++)
		model[i++] = ei->ei_suffix[j];
	model[i] = '\0';

	/* Find the canonical name for the board */
	for (i = 0; dpt_cname[i]; i += 2)
		if (memcmp(ei->ei_model, dpt_cname[i], 6) == 0)
			break;

	printf("%s %s (%s)\n", ei->ei_vendor, dpt_cname[i + 1], model);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr);

	printf("%s: %d queued commands, %d channel(s), adapter on ID(s)",
	    sc->sc_dv.dv_xname, sc->sc_nccbs, dc.dc_maxchannel + 1);

	for (i = 0; i <= dc.dc_maxchannel; i++)
		printf(" %d", dc.dc_hba[3 - i]);
	printf("\n");

	/* Reset the SCSI bus */
	dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_BUS_RESET);
	DELAY(20000);

	/* Fill in the adapter, each link and attach in turn */
	sc->sc_adapter.scsipi_cmd = dpt_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = dpt_minphys;

	for (i = 0; i <= dc.dc_maxchannel; i++) {
		struct scsipi_link *link;

		sc->sc_hbaid[i] = dc.dc_hba[3 - i];
		link = &sc->sc_link[i];
		memset(link, 0, sizeof(*link));
		link->scsipi_scsi.channel = i;
		link->scsipi_scsi.adapter_target = sc->sc_hbaid[i];
		link->scsipi_scsi.max_lun = dc.dc_maxlun;
		link->scsipi_scsi.max_target = dc.dc_maxtarget;
		link->type = BUS_SCSI;
		link->device = &dpt_dev;
		link->adapter = &sc->sc_adapter;
		link->adapter_softc = sc;
		link->openings = sc->sc_nccbs;
		config_found(&sc->sc_dv, link, scsiprint);
	}
}

/*
 * Our 'shutdownhook' to cleanly shut down the HBA.  The HBA must flush
 * all data from its cache and mark array groups as clean.
 */
void
dpt_shutdown(xxx_sc)
	void *xxx_sc;
{
	struct dpt_softc *sc;

	sc = xxx_sc;
	printf("shutting down %s...", sc->sc_dv.dv_xname);
	dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_POWEROFF_WARN);
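	/* Give the HBA time to flush its cache before power goes away */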
	DELAY(5000*1000);
	printf(" done\n");
}

/*
 * Send an EATA command to the HBA.
 */
void
dpt_cmd(sc, cp, addr, eatacmd, icmd)
	struct dpt_softc *sc;
	struct eata_cp *cp;
	u_int32_t addr;
	int eatacmd, icmd;
{
	int i;

	for (i = 20000; i; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
			break;
		DELAY(50);
	}

	if (i == 0) {
		/* XXX what to do here? */
		printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
	}

	if (cp == NULL)
		addr = 0;

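	/* Write the physical address of the CP, least significant byte first */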
	dpt_outb(sc, HA_DMA_BASE + 0, (u_int32_t)addr);
	dpt_outb(sc, HA_DMA_BASE + 1, (u_int32_t)addr >> 8);
	dpt_outb(sc, HA_DMA_BASE + 2, (u_int32_t)addr >> 16);
	dpt_outb(sc, HA_DMA_BASE + 3, (u_int32_t)addr >> 24);

	if (eatacmd == CP_IMMEDIATE) {
		if (cp == NULL) {
			/* XXX should really pass meaningful values */
			dpt_outb(sc, HA_ICMD_CODE2, 0);
			dpt_outb(sc, HA_ICMD_CODE1, 0);
		}
		dpt_outb(sc, HA_ICMD, icmd);
	}

	dpt_outb(sc, HA_COMMAND, eatacmd);
}

/*
 * Wait for the HBA to reach an arbitrary state.
 */
int
dpt_wait(sc, mask, state, ms)
	struct dpt_softc *sc;
	u_int8_t mask, state;
	int ms;
{

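	/* `ms' is in milliseconds; poll the status register in 100us steps */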
	for (ms *= 10; ms; ms--) {
		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
			return (0);
		DELAY(100);
	}
	return (-1);
}

/*
 * Wait for the specified CCB to finish.  This is used when we may not be
 * able to sleep and/or interrupts are disabled (eg autoconfiguration).
 * The timeout value from the CCB is used.  This should only be used for
 * CCB_PRIVATE requests; otherwise the CCB will get recycled before we get
 * a look at it.
 */
int
dpt_poll(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int i;

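	/* ccb_timeout is in milliseconds; poll in 50us steps */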
	for (i = ccb->ccb_timeout * 20; i; i--) {
		if ((ccb->ccb_flg & CCB_INTR) != 0)
			return (0);
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
			dpt_intr(sc);
		if ((ccb->ccb_flg & CCB_INTR) != 0)
			return (0);
		DELAY(50);
	}
	return (-1);
}

/*
 * Read the EATA configuration from the HBA and perform some sanity checks.
 */
int
dpt_readcfg(sc, dc)
	struct dpt_softc *sc;
	struct eata_cfg *dc;
{
	int i, j, stat;
	u_int16_t *p;

	while((((stat = dpt_inb(sc, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    || (dpt_wait(sc, HA_ST_BUSY, 0, 200))) {
		/* RAID drives still spinning up? */
		if((dpt_inb(sc, HA_ERROR) != 'D')
		    || (dpt_inb(sc, HA_ERROR + 1) != 'P')
		    || (dpt_inb(sc, HA_ERROR + 2) != 'T'))
			return (-1);
	}

	/*
	 * Issue the read-config command and wait for the data to appear.
	 * XXX we shouldn't be doing this with PIO, but it makes it a lot
	 * easier as no DMA setup is required.
	 */
	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
	memset(dc, 0, sizeof(*dc));
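	/*
	 * First read 16-bit words up to and including the dc_cfglen field;
	 * dc_cfglen then tells us how much more configuration data follows.
	 */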
	i = ((int)&((struct eata_cfg *)0)->dc_cfglen +
	    sizeof(dc->dc_cfglen)) >> 1;
	p = (u_int16_t *)dc;

	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000))
		return (-1);

	/* Begin reading */
	while (i--)
		*p++ = dpt_inw(sc, HA_DATA);

	if ((i = dc->dc_cfglen) > (sizeof(struct eata_cfg)
	    - (int)(&(((struct eata_cfg *)0L)->dc_cfglen))
	    - sizeof(dc->dc_cfglen)))
		i = sizeof(struct eata_cfg)
		    - (int)(&(((struct eata_cfg *)0L)->dc_cfglen))
		    - sizeof(dc->dc_cfglen);

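	/*
	 * j is the total number of bytes that will have been read once the
	 * remaining payload is in; used below to flush the rest of the
	 * 512-byte configuration block.
	 */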
	j = i + (int)(&(((struct eata_cfg *)0L)->dc_cfglen)) +
	    sizeof(dc->dc_cfglen);
	i >>= 1;

	while (i--)
		*p++ = dpt_inw(sc, HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		dpt_inw(sc, HA_DATA);

	/* Defaults for older firmware */
	if (p <= (u_short *)&dc->dc_hba[DPT_MAX_CHANNELS - 1])
		dc->dc_hba[DPT_MAX_CHANNELS - 1] = 7;

	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
		printf("%s: HBA error\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (!dc->dc_hbavalid) {
		printf("%s: dc.dc_hba field invalid\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (memcmp(dc->dc_eatasig, "EATA", 4) != 0) {
		printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (!dc->dc_dmasupported) {
		printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	return (0);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
void
dpt_minphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > DPT_MAX_XFER)
		bp->b_bcount = DPT_MAX_XFER;
	minphys(bp);
}

/*
 * Put a CCB onto the freelist.
 */
void
dpt_free_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int s;

	s = splbio();
	ccb->ccb_flg = 0;
#ifdef notdef
	sc->sc_pending--;
#endif
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, ccb_chain);

	/* Wake anybody waiting for a free ccb */
	if (ccb->ccb_chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}

/*
 * Initialize the specified CCB.
 */
int
dpt_init_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int error;

	/* Create the DMA map for this CCB's data */
	error = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER, DPT_SG_SIZE,
	    DPT_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &ccb->ccb_dmamap_xfer);

	if (error) {
		printf("%s: can't create ccb dmamap (%d)\n",
		    sc->sc_dv.dv_xname, error);
		return (error);
	}

	ccb->ccb_flg = 0;
	ccb->ccb_ccbpa = sc->sc_dmamap_ccb->dm_segs[0].ds_addr +
	    DPT_CCB_OFF(sc, ccb);
	return (0);
}

/*
 * Create a set of CCBs and add them to the free list.
 */
int
dpt_create_ccbs(sc, ccbstore, count)
	struct dpt_softc *sc;
	struct dpt_ccb *ccbstore;
	int count;
{
	struct dpt_ccb *ccb;
	int i, error;

	memset(ccbstore, 0, sizeof(struct dpt_ccb) * count);

	for (i = 0, ccb = ccbstore; i < count; i++, ccb++) {
		if ((error = dpt_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to init ccb, error = %d\n",
			    sc->sc_dv.dv_xname, error);
			break;
		}
		ccb->ccb_id = i;
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_chain);
	}

	return (i);
}

/*
 * Get a free ccb.  If there are none, see if we can allocate a new one.
 * Otherwise either return an error or, if we are permitted to, sleep until
 * one becomes free.
 */
struct dpt_ccb *
dpt_alloc_ccb(sc, flg)
	struct dpt_softc *sc;
	int flg;
{
	struct dpt_ccb *ccb;
	int s;

	s = splbio();

	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_chain);
			break;
		}
		if ((flg & SCSI_NOSLEEP) != 0) {
			splx(s);
			return (NULL);
		}
		tsleep(&sc->sc_free_ccb, PRIBIO, "dptccb", 0);
	}

	ccb->ccb_flg |= CCB_ALLOC;
#ifdef notdef
	sc->sc_pending++;
#endif
	splx(s);
	return (ccb);
}

/*
 * We have a CCB which has been processed by the HBA, now we look to see how
 * the operation went.  CCBs marked with CCB_PRIVATE never get to here.
 */
void
dpt_done_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	struct scsipi_sense_data *s1, *s2;
	struct scsipi_xfer *xs;
	bus_dma_tag_t dmat;

	dmat = sc->sc_dmat;
	xs = ccb->ccb_xs;

	SC_DEBUG(xs->sc_link, SDEV_DB2, ("dpt_done_ccb\n"));

	/*
	 * If we were a data transfer, unload the map that described the
	 * data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->ccb_dmamap_xfer, 0,
		    ccb->ccb_dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->ccb_dmamap_xfer);
	}

	/*
	 * Otherwise, put the results of the operation into the xfer and
	 * call whoever started it.
	 */
	if ((ccb->ccb_flg & CCB_ALLOC) == 0) {
		panic("%s: done ccb not allocated!\n", sc->sc_dv.dv_xname);
		return;
	}

	if (xs->error == XS_NOERROR) {
		if (ccb->ccb_hba_status != HA_NO_ERROR) {
			switch (ccb->ccb_hba_status) {
			case HA_ERROR_SEL_TO:
				xs->error = XS_SELTIMEOUT;
				break;
			case HA_ERROR_RESET:
				xs->error = XS_RESET;
				break;
			default:	/* Other scsi protocol messes */
				printf("%s: HBA status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_hba_status);
				xs->error = XS_DRIVER_STUFFUP;
			}
		} else if (ccb->ccb_scsi_status != SCSI_OK) {
			switch (ccb->ccb_scsi_status) {
			case SCSI_CHECK:
				s1 = &ccb->ccb_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				printf("%s: SCSI status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_scsi_status);
				xs->error = XS_DRIVER_STUFFUP;
			}
		} else
			xs->resid = 0;
	}

	/* Free up the CCB and mark the command as done */
	dpt_free_ccb(sc, ccb);
	xs->flags |= ITSDONE;
	scsipi_done(xs);

	/*
	 * If there are queue entries in the software queue, try to run the
	 * first one.  We should be more or less guaranteed to succeed, since
	 * we just freed a CCB.  NOTE: dpt_scsi_cmd() relies on our calling
	 * it with the first entry in the queue.
	 */
	if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
		dpt_scsi_cmd(xs);
}

/*
 * Start a SCSI command.
 */
int
dpt_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	int error, seg, flags, s, fromqueue, dontqueue;
	struct scsipi_link *sc_link;
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sg *sg;
	struct eata_cp *cp;
	bus_dma_tag_t dmat;

	sc_link = xs->sc_link;
	flags = xs->flags;
	sc = sc_link->adapter_softc;
	dmat = sc->sc_dmat;
	fromqueue = 0;
	dontqueue = 0;

	SC_DEBUG(sc_link, SDEV_DB2, ("dpt_scsi_cmd\n"));

	/* Protect the queue */
	s = splbio();

	/*
	 * If we're running the queue from dpt_done_ccb(), we've been called
	 * with the first queue entry as our argument.
	 */
	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
		fromqueue = 1;
	} else {
		/* Cmds must be no more than 12 bytes for us */
		if (xs->cmdlen > 12) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (COMPLETE);
		}

		/* XXX we can't reset devices just yet */
		if ((flags & SCSI_RESET) != 0) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (COMPLETE);
		}

		/* Polled requests can't be queued for later */
		dontqueue = flags & SCSI_POLL;

		/* If there are jobs in the queue, run them first */
		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
			/*
			 * If we can't queue, we have to abort, since we have
			 * to preserve the queue order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}

			/* Swap with the first queue entry. */
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
			xs = TAILQ_FIRST(&sc->sc_queue);
			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
			fromqueue = 1;
		}
	}

	/*
	 * Get a CCB.  If the transfer is from a buf (possibly from interrupt
	 * time) then we can't allow it to sleep.
	 */
	if ((ccb = dpt_alloc_ccb(sc, flags)) == NULL) {
		/* If we can't queue, we lose */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}

		/*
		 * Stuff request into the queue, in front if we came off
		 * it in the first place.
		 */
		if (fromqueue)
			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
		else
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
#ifdef notdef
		sc->sc_pending++;
#endif
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}

#ifdef notdef
	/*
	 * The request has been shifted from the pending command queue to
	 * an executing CCB, so decrement sc_pending to compensate.
	 */
	sc->sc_pending--;
#endif
	splx(s);

	ccb->ccb_xs = xs;
	ccb->ccb_timeout = xs->timeout;

	cp = &ccb->ccb_eata_cp;
	memcpy(&cp->cp_scsi_cmd, xs->cmd, xs->cmdlen);
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_id = sc_link->scsipi_scsi.target;
	cp->cp_lun = sc_link->scsipi_scsi.lun;
	cp->cp_channel = sc_link->scsipi_scsi.channel;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_stataddr = SWAP32(sc->sc_sppa);
	cp->cp_dispri = 1;
	cp->cp_luntar = 0;
	cp->cp_identify = 1;
	cp->cp_autosense = 1;
	cp->cp_quick = 0;
	cp->cp_datain = ((flags & SCSI_DATA_IN) != 0);
	cp->cp_dataout = ((flags & SCSI_DATA_OUT) != 0);
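	/*
	 * If the command is addressed to the HBA itself, ask the firmware
	 * to interpret it.
	 */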
	cp->cp_interpret = (sc->sc_hbaid[sc_link->scsipi_scsi.channel] ==
	    sc_link->scsipi_scsi.target);

	cp->cp_senseaddr = SWAP32(sc->sc_dmamap_ccb->dm_segs[0].ds_addr +
	    DPT_CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));

	if (xs->datalen) {
		sg = ccb->ccb_sg;
		seg = 0;
#ifdef TFS
		if (flags & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->ccb_dmamap_xfer, (struct uio *)xs->data,
			    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK);
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->ccb_dmamap_xfer,
			    xs->data, xs->datalen, NULL,
			    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK);
		}

		if (error) {
			printf("%s: dpt_scsi_cmd: ", sc->sc_dv.dv_xname);
			if (error == EFBIG)
				printf("more than %d dma segs\n", DPT_SG_SIZE);
			else
				printf("error %d loading dma map\n", error);

			xs->error = XS_DRIVER_STUFFUP;
			dpt_free_ccb(sc, ccb);
			return (COMPLETE);
		}

		bus_dmamap_sync(dmat, ccb->ccb_dmamap_xfer, 0,
		    ccb->ccb_dmamap_xfer->dm_mapsize,
		    (flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Load the hardware scatter/gather map with the
		 * contents of the DMA map.
		 */
		for (seg = 0; seg < ccb->ccb_dmamap_xfer->dm_nsegs; seg++) {
			ccb->ccb_sg[seg].sg_addr =
			    SWAP32(ccb->ccb_dmamap_xfer->dm_segs[seg].ds_addr);
			ccb->ccb_sg[seg].sg_len =
			    SWAP32(ccb->ccb_dmamap_xfer->dm_segs[seg].ds_len);
		}

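		/*
		 * With cp_scatter set, cp_dataaddr points at the SG list
		 * inside the CCB and cp_datalen gives the size of the list.
		 */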
		cp->cp_dataaddr = SWAP32(sc->sc_dmamap_ccb->dm_segs[0].ds_addr
		    + DPT_CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = SWAP32(seg * sizeof(struct eata_sg));
		cp->cp_scatter = 1;
	} else {
		cp->cp_dataaddr = 0;
		cp->cp_datalen = 0;
		cp->cp_scatter = 0;
	}

	/* Sync up CCB and status packet */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, DPT_CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_spoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);

	/*
	 * Start the command.  If we are polling on completion, mark it
	 * private so that dpt_intr/dpt_done_ccb don't recycle the CCB
	 * without us noticing.
	 */
	if (dontqueue != 0)
		ccb->ccb_flg |= CCB_PRIVATE;

	dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0);

	if (dontqueue == 0)
		return (SUCCESSFULLY_QUEUED);

	/* Don't wait longer than this single command wants to wait */
	if (dpt_poll(sc, ccb)) {
		dpt_timeout(ccb);
		/* Wait for abort to complete */
		if (dpt_poll(sc, ccb))
			dpt_timeout(ccb);
	}

	dpt_done_ccb(sc, ccb);
	return (COMPLETE);
}

/*
 * Specified CCB has timed out, abort it.
 */
void
dpt_timeout(arg)
	void *arg;
{
	struct scsipi_link *sc_link;
	struct scsipi_xfer *xs;
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	int s;

	ccb = arg;
	xs = ccb->ccb_xs;
	sc_link = xs->sc_link;
	sc = sc_link->adapter_softc;

	scsi_print_addr(sc_link);
	printf("timed out (status:%02x aux status:%02x)",
	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));

	s = splbio();

	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
		/* Abort timed out, reset the HBA */
		printf(" AGAIN, resetting HBA\n");
		dpt_outb(sc, HA_COMMAND, CP_RESET);
		DELAY(750000);
	} else {
		/* Abort the operation that has timed out */
		printf("\n");
		ccb->ccb_xs->error = XS_TIMEOUT;
		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
		ccb->ccb_flg |= CCB_ABORT;
		/* Start the abort */
		dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_IMMEDIATE,
		    CPI_SPEC_ABORT);
	}

	splx(s);
}

#if 0 && defined(DEBUG)
/*
 * Dump the contents of an EATA status packet.
 */
static void
dpt_dump_sp(sp)
	struct eata_sp *sp;
{
	int i;

	printf("\thba_status\t%02x\n", sp->sp_hba_status);
	printf("\teoc\t\t%d\n", sp->sp_eoc);
	printf("\tscsi_status\t%02x\n", sp->sp_scsi_status);
	printf("\tinv_residue\t%d\n", sp->sp_inv_residue);
	printf("\tccbid\t\t%d\n", sp->sp_ccbid);
	printf("\tid_message\t%d\n", sp->sp_id_message);
	printf("\tque_message\t%d\n", sp->sp_que_message);
	printf("\ttag_message\t%d\n", sp->sp_tag_message);
	printf("\tmessages\t");

	for (i = 0; i < 9; i++)
		printf("%d ", sp->sp_messages[i]);

	printf("\n");
}
#endif	/* DEBUG */

/*
 * Get inquiry data from the adapter.
 */
void
dpt_hba_inquire(sc, ei)
	struct dpt_softc *sc;
	struct eata_inquiry_data **ei;
{
	struct dpt_ccb *ccb;
	struct eata_cp *cp;
	bus_dma_tag_t dmat;

	*ei = (struct eata_inquiry_data *)sc->sc_scr;
	dmat = sc->sc_dmat;

	/* Get a CCB and mark as private */
	if ((ccb = dpt_alloc_ccb(sc, 0)) == NULL)
		panic("%s: no CCB for inquiry", sc->sc_dv.dv_xname);

	ccb->ccb_flg |= CCB_PRIVATE;
	ccb->ccb_timeout = 200;

	/* Put all the arguments into the CCB */
	cp = &ccb->ccb_eata_cp;
	memset(cp, 0, sizeof(*cp));

	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_id = sc->sc_hbaid[0];
	cp->cp_lun = 0;
	cp->cp_channel = 0;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_stataddr = SWAP32(sc->sc_sppa);
	cp->cp_dispri = 1;
	cp->cp_luntar = 0;
	cp->cp_identify = 1;
	cp->cp_autosense = 0;
	cp->cp_quick = 0;
	cp->cp_interpret = 1;
	cp->cp_datain = 1;
	cp->cp_dataout = 0;
	cp->cp_senseaddr = 0;
	cp->cp_dataaddr = SWAP32(sc->sc_scrpa);
	cp->cp_datalen = SWAP32(sizeof(struct eata_inquiry_data));
	cp->cp_scatter = 0;

	/* Put together the SCSI inquiry command */
	memset(&cp->cp_scsi_cmd, 0, 12);	/* XXX */
	cp->cp_scsi_cmd = INQUIRY;
	cp->cp_len = sizeof(struct eata_inquiry_data);

	/* Sync up CCB, status packet and scratch area */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, DPT_CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_spoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);

	/* Start the command and poll on completion */
	dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0);

	if (dpt_poll(sc, ccb))
		panic("%s: inquiry timed out", sc->sc_dv.dv_xname);

	if (ccb->ccb_hba_status != HA_NO_ERROR ||
	    ccb->ccb_scsi_status != SCSI_OK)
		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
		    sc->sc_dv.dv_xname, ccb->ccb_hba_status,
		    ccb->ccb_scsi_status);

	/* Sync up the DMA map and free CCB, returning */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap_ccb, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
	dpt_free_ccb(sc, ccb);
}