/*	$NetBSD: dpt.c,v 1.19 2000/02/24 18:47:55 ad Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andy Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
 * Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Portions of this code fall under the following copyright:
 *
 * Originally written by Julian Elischer (julian@tfs.com)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.19 2000/02/24 18:47:55 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>

#include <machine/bswap.h>
#include <machine/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/dptreg.h>
#include <dev/ic/dptvar.h>

/* A default for our link struct */
static struct scsipi_device dpt_dev = {
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};

static char *dpt_cname[] = {
	"PM3334", "SmartRAID IV",
	"PM3332", "SmartRAID IV",
	"PM2144", "SmartCache IV",
	"PM2044", "SmartCache IV",
	"PM2142", "SmartCache IV",
	"PM2042", "SmartCache IV",
	"PM2041", "SmartCache IV",
	"PM3224", "SmartRAID III",
	"PM3222", "SmartRAID III",
	"PM3021", "SmartRAID III",
	"PM2124", "SmartCache III",
	"PM2024", "SmartCache III",
	"PM2122", "SmartCache III",
	"PM2022", "SmartCache III",
	"PM2021", "SmartCache III",
	"SK2012", "SmartCache Plus",
	"SK2011", "SmartCache Plus",
	NULL,     "unknown adapter, please report using send-pr(1)",
};

TAILQ_HEAD(, dpt_softc) dpt_hba;	/* tailq of HBA softc's */
void	*dpt_sdh;			/* shutdown hook */
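
/*
 * Note on the I/O model: commands are described by CCBs that live in a
 * single DMA-able region shared with the HBA.  A command is started by
 * handing the HBA the physical address of the CCB's EATA command packet;
 * on completion the HBA DMAs an EATA status packet back to us and raises
 * HA_AUX_INTR, which dpt_intr() services.
 */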

/*
 * Handle an interrupt from the HBA.
 */
int
dpt_intr(xxx_sc)
	void *xxx_sc;
{
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sp *sp;
	volatile int junk;

	sc = xxx_sc;
	sp = sc->sc_stp;

#ifdef DEBUG
	if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0) {
		printf("%s: spurious intr\n", sc->sc_dv.dv_xname);
		return (1);
	}
#endif

	for (;;) {
		/*
		 * HBA might have interrupted while we were dealing with the
		 * last completed command, since we ACK before we deal; keep
		 * polling.
		 */
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
			break;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);

		/* Might have looped before HBA can reset HBA_AUX_INTR */
		if (sp->sp_ccbid == -1) {
			DELAY(50);

			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
				return (0);

			printf("%s: no status\n", sc->sc_dv.dv_xname);

			/* Re-sync DMA map */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    sc->sc_stpoff, sizeof(struct eata_sp),
			    BUS_DMASYNC_POSTREAD);
		}

		/* Make sure CCB ID from status packet is realistic */
		if (sp->sp_ccbid >= 0 && sp->sp_ccbid < sc->sc_nccbs) {
			/* Sync up DMA map and cache cmd status */
			ccb = sc->sc_ccbs + sp->sp_ccbid;

			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
			    BUS_DMASYNC_POSTWRITE);

			ccb->ccb_hba_status = sp->sp_hba_status & 0x7F;
			ccb->ccb_scsi_status = sp->sp_scsi_status;

			/*
			 * Ack the interrupt and process the CCB.  If this
			 * is a private CCB it's up to dpt_poll() to notice.
			 */
			junk = dpt_inb(sc, HA_STATUS);
			sp->sp_ccbid = -1;
			ccb->ccb_flg |= CCB_INTR;
			if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
				dpt_done_ccb(sc, ccb);
		} else {
			printf("%s: bogus status (returned CCB id %d)\n",
			    sc->sc_dv.dv_xname, sp->sp_ccbid);

			/* Ack the interrupt */
			junk = dpt_inb(sc, HA_STATUS);
			sp->sp_ccbid = -1;
		}
	}

	return (1);
}

/*
 * Initialize and attach the HBA.  This is the entry point from bus
 * specific probe-and-attach code.
 */
void
dpt_init(sc, intrstr)
	struct dpt_softc *sc;
	const char *intrstr;
{
	struct eata_inquiry_data *ei;
	int i, j, error, rseg, maxchannel, maxtarget;
	bus_dma_segment_t seg;
	struct eata_cfg *ec;
	struct scsipi_link *link;
	char model[16];

	ec = &sc->sc_ec;

	/* Allocate the CCB/status packet/scratch DMA map and load */
	sc->sc_nccbs =
	    min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
	sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
	sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
	sc->sc_scrlen = DPT_SCRATCH_SIZE;
	sc->sc_dmamapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
	    sc->sc_scrlen + sizeof(struct eata_sp);

	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dmamapsize, NBPG, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate CCBs, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_dmamapsize,
	    (caddr_t *)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map CCBs, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_dmamapsize,
	    sc->sc_dmamapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("%s: unable to create CCB DMA map, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
	    sc->sc_ccbs, sc->sc_dmamapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load CCB DMA map, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return;
	}

	sc->sc_stp = (struct eata_sp *)((caddr_t)sc->sc_ccbs + sc->sc_stpoff);
	sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
	sc->sc_scr = (caddr_t)sc->sc_ccbs + sc->sc_scroff;
	sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
	sc->sc_stp->sp_ccbid = -1;
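
	/*
	 * The single DMA allocation above is laid out as sc_nccbs CCBs,
	 * followed by the EATA status packet, followed by the scratch area
	 * used for HBA inquiry data; sc_stpoff and sc_scroff are the byte
	 * offsets of the latter two within the map.
	 */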

	/* Initialize the CCBs */
	TAILQ_INIT(&sc->sc_free_ccb);
	i = dpt_create_ccbs(sc, sc->sc_ccbs, sc->sc_nccbs);

	if (i == 0) {
		printf("%s: unable to create CCBs\n", sc->sc_dv.dv_xname);
		return;
	} else if (i != sc->sc_nccbs) {
		printf("%s: %d/%d CCBs created!\n", sc->sc_dv.dv_xname, i,
		    sc->sc_nccbs);
		sc->sc_nccbs = i;
	}

	/* Set shutdownhook before we start any device activity */
	if (dpt_sdh == NULL) {
		TAILQ_INIT(&dpt_hba);
		dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);
	}

	/* Get the page 0 inquiry data from the HBA */
	dpt_hba_inquire(sc, &ei);

	/*
	 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
	 * dpt0: interrupting at irq 10
	 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
	 */
	/* NUL-terminate the vendor field at the first space */
	for (i = 0; i < 8 && ei->ei_vendor[i] != ' '; i++)
		;
	ei->ei_vendor[i] = '\0';

	/* Build the model name from the model and suffix fields */
	for (i = 0; i < 7 && ei->ei_model[i] != ' '; i++)
		model[i] = ei->ei_model[i];
	for (j = 0; j < 7 && ei->ei_suffix[j] != ' '; j++)
		model[i++] = ei->ei_suffix[j];
	model[i] = '\0';

	/* Find the canonical name for the board */
	for (i = 0; dpt_cname[i] != NULL; i += 2)
		if (memcmp(ei->ei_model, dpt_cname[i], 6) == 0)
			break;

	printf("%s %s (%s)\n", ei->ei_vendor, dpt_cname[i + 1], model);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr);

	maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
	    EC_F3_MAX_CHANNEL_SHIFT;
	maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
	    EC_F3_MAX_TARGET_SHIFT;

	printf("%s: %d queued commands, %d channel(s), adapter on ID(s)",
	    sc->sc_dv.dv_xname, sc->sc_nccbs, maxchannel + 1);

	/* ec_hba[] holds the adapter IDs in reverse channel order */
	for (i = 0; i <= maxchannel; i++) {
		sc->sc_hbaid[i] = ec->ec_hba[3 - i];
		printf(" %d", sc->sc_hbaid[i]);
	}
	printf("\n");

	/* Reset the SCSI bus */
	if (dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_BUS_RESET))
		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);

	/* Fill in the adapter, each link and attach in turn */
	sc->sc_adapter.scsipi_cmd = dpt_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = dpt_minphys;

	for (i = 0; i <= maxchannel; i++) {
		link = &sc->sc_link[i];
		link->scsipi_scsi.channel = i;
		link->scsipi_scsi.adapter_target = sc->sc_hbaid[i];
		link->scsipi_scsi.max_lun = ec->ec_maxlun;
		link->scsipi_scsi.max_target = maxtarget;
		link->type = BUS_SCSI;
		link->device = &dpt_dev;
		link->adapter = &sc->sc_adapter;
		link->adapter_softc = sc;
		link->openings = sc->sc_nccbs;		/* XXX */
		config_found(&sc->sc_dv, link, scsiprint);
	}

	TAILQ_INSERT_TAIL(&dpt_hba, sc, sc_chain);
}

/*
 * Our 'shutdownhook' to cleanly shut down the HBA.  The HBA must flush
 * all data from its cache and mark array groups as clean.
 */
void
dpt_shutdown(xxx_sc)
	void *xxx_sc;
{
	struct dpt_softc *sc;

	printf("shutting down DPT HBAs...");

	for (sc = TAILQ_FIRST(&dpt_hba); sc != NULL;
	    sc = TAILQ_NEXT(sc, sc_chain))
		dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_POWEROFF_WARN);

	/* Give the HBAs five seconds to finish flushing */
	DELAY(5000 * 1000);
	printf(" done\n");
}

/*
 * Send an EATA command to the HBA.
 */
int
dpt_cmd(sc, cp, addr, eatacmd, icmd)
	struct dpt_softc *sc;
	struct eata_cp *cp;
	u_int32_t addr;
	int eatacmd, icmd;
{
	int i;

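	/* Wait up to one second (20000 * 50us) for the HBA to go idle */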
	for (i = 20000; i; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
			break;
		DELAY(50);
	}

	/* Not the most graceful way to handle this */
	if (i == 0) {
		printf("%s: HBA timeout on EATA command issue; aborting\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	if (cp == NULL)
		addr = 0;

	dpt_outl(sc, HA_DMA_BASE, (u_int32_t)addr);

	if (eatacmd == CP_IMMEDIATE) {
		if (cp == NULL) {
			/* XXX should really pass meaningful values */
			dpt_outb(sc, HA_ICMD_CODE2, 0);
			dpt_outb(sc, HA_ICMD_CODE1, 0);
		}
		dpt_outb(sc, HA_ICMD, icmd);
	}

	dpt_outb(sc, HA_COMMAND, eatacmd);
	return (0);
}

/*
 * Wait for the HBA status register to reach a specific state.
 */
int
dpt_wait(sc, mask, state, ms)
	struct dpt_softc *sc;
	u_int8_t mask, state;
	int ms;
{

	for (ms *= 10; ms; ms--) {
		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
			return (0);
		DELAY(100);
	}
	return (-1);
}

/*
 * Wait for the specified CCB to finish.  This is used when we may not be
 * able to sleep and/or interrupts are disabled (e.g. during
 * autoconfiguration).  The timeout value from the CCB is used.  This
 * should only be used for CCB_PRIVATE requests; otherwise the CCB will
 * get recycled before we get a look at it.
 */
int
dpt_poll(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int i;

#ifdef DEBUG
	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
		panic("dpt_poll: called for non-CCB_PRIVATE request");
#endif

	if ((ccb->ccb_flg & CCB_INTR) != 0)
		return (0);

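	/*
	 * ccb_timeout is in milliseconds; at 50us per iteration,
	 * timeout * 20 spins gives roughly that long before we give up.
	 */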
	for (i = ccb->ccb_timeout * 20; i; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0) {
			dpt_intr(sc);
			if ((ccb->ccb_flg & CCB_INTR) != 0)
				return (0);
		}
		DELAY(50);
	}

	return (-1);
}

/*
 * Read the EATA configuration from the HBA and perform some sanity checks.
 */
int
dpt_readcfg(sc)
	struct dpt_softc *sc;
{
	struct eata_cfg *ec;
	int i, j, stat;
	u_int16_t *p;

	ec = &sc->sc_ec;

	/* Older firmware may puke if we talk to it too soon after reset */
	dpt_outb(sc, HA_COMMAND, CP_RESET);
	DELAY(750000);

	for (i = 1000; i; i--) {
		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}

	if (i == 0) {
		printf("%s: HBA not ready after reset (hba status:%02x)\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	while ((((stat = dpt_inb(sc, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    && dpt_wait(sc, HA_ST_BUSY, 0, 2000)) {
		/* RAID drives still spinning up? */
		if ((dpt_inb(sc, HA_ERROR) != 'D')
		    || (dpt_inb(sc, HA_ERROR + 1) != 'P')
		    || (dpt_inb(sc, HA_ERROR + 2) != 'T')) {
			printf("%s: HBA not ready\n", sc->sc_dv.dv_xname);
			return (-1);
		}
	}

	/*
	 * Issue the read-config command and wait for the data to appear.
	 * XXX we shouldn't be doing this with PIO, but it makes it a lot
	 * easier as no DMA setup is required.
	 */
	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
	memset(ec, 0, sizeof(*ec));
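
	/*
	 * First read the fixed portion of the configuration, up to and
	 * including ec_cfglen: the pointer arithmetic below gives that
	 * size in bytes, and the shift converts it to 16-bit words.  The
	 * remainder is read once ec_cfglen is known.
	 */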
	i = ((int)&((struct eata_cfg *)0)->ec_cfglen +
	    sizeof(ec->ec_cfglen)) >> 1;
	p = (u_int16_t *)ec;

	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
		printf("%s: cfg data didn't appear (hba status:%02x)\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/* Begin reading */
	while (i--)
		*p++ = dpt_inw(sc, HA_DATA);

	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
	    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
	    - sizeof(ec->ec_cfglen)))
		i = sizeof(struct eata_cfg)
		    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
		    - sizeof(ec->ec_cfglen);

	j = i + (int)(&(((struct eata_cfg *)0L)->ec_cfglen)) +
	    sizeof(ec->ec_cfglen);
	i >>= 1;

	while (i--)
		*p++ = dpt_inw(sc, HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		dpt_inw(sc, HA_DATA);

	/* Defaults for older Firmware */
	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;

	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
		printf("%s: HBA error\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
		printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
		printf("%s: ec_hba field invalid\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
		printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	return (0);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
void
dpt_minphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > DPT_MAX_XFER)
		bp->b_bcount = DPT_MAX_XFER;
	minphys(bp);
}

/*
 * Put a CCB onto the freelist.
 */
void
dpt_free_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int s;

	s = splbio();
	ccb->ccb_flg = 0;
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, ccb_chain);

	/* Wake anybody waiting for a free ccb */
	if (ccb->ccb_chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);
	splx(s);
}

/*
 * Initialize the specified CCB.
 */
int
dpt_init_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int error;

	/* Create the DMA map for this CCB's data */
	error = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER, DPT_SG_SIZE,
	    DPT_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &ccb->ccb_dmamap_xfer);

	if (error) {
		printf("%s: can't create ccb dmamap (%d)\n",
		    sc->sc_dv.dv_xname, error);
		return (error);
	}

	ccb->ccb_flg = 0;
	ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr + CCB_OFF(sc, ccb);
	return (0);
}

/*
 * Create a set of CCBs and add them to the free list.
 */
int
dpt_create_ccbs(sc, ccbstore, count)
	struct dpt_softc *sc;
	struct dpt_ccb *ccbstore;
	int count;
{
	struct dpt_ccb *ccb;
	int i, error;

	memset(ccbstore, 0, sizeof(struct dpt_ccb) * count);

	for (i = 0, ccb = ccbstore; i < count; i++, ccb++) {
		if ((error = dpt_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to init ccb, error = %d\n",
			    sc->sc_dv.dv_xname, error);
			break;
		}
		ccb->ccb_id = i;
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_chain);
	}

	return (i);
}

/*
 * Get a free CCB.  If none are available and we are permitted to sleep,
 * wait until one is returned to the freelist; otherwise return NULL.
 */
struct dpt_ccb *
dpt_alloc_ccb(sc, flg)
	struct dpt_softc *sc;
	int flg;
{
	struct dpt_ccb *ccb;
	int s;

	s = splbio();

	for (;;) {
		if ((ccb = TAILQ_FIRST(&sc->sc_free_ccb)) != NULL) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_chain);
			break;
		}
		if ((flg & XS_CTL_NOSLEEP) != 0) {
			ccb = NULL;
			break;
		}
		tsleep(&sc->sc_free_ccb, PRIBIO, "dptccb", 0);
	}

	splx(s);
	return (ccb);
}

/*
 * We have a CCB which has been processed by the HBA, now we look to see how
 * the operation went.  CCBs marked with CCB_PRIVATE are not automatically
 * passed here by dpt_intr().
 */
void
dpt_done_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	struct scsipi_sense_data *s1, *s2;
	struct scsipi_xfer *xs;
	bus_dma_tag_t dmat;

	dmat = sc->sc_dmat;
	xs = ccb->ccb_xs;

	SC_DEBUG(xs->sc_link, SDEV_DB2, ("dpt_done_ccb\n"));

	/*
	 * If we were a data transfer, unload the map that described the
	 * data buffer.
	 */
	if (xs->datalen != 0) {
		bus_dmamap_sync(dmat, ccb->ccb_dmamap_xfer, 0,
		    ccb->ccb_dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->ccb_dmamap_xfer);
	}

	if (xs->error == XS_NOERROR) {
		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
			switch (ccb->ccb_hba_status) {
			case SP_HBA_ERROR_SEL_TO:
				xs->error = XS_SELTIMEOUT;
				break;
			case SP_HBA_ERROR_RESET:
				xs->error = XS_RESET;
				break;
			default:	/* Other SCSI protocol messes */
				printf("%s: HBA status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_hba_status);
				xs->error = XS_DRIVER_STUFFUP;
			}
		} else if (ccb->ccb_scsi_status != SCSI_OK) {
			switch (ccb->ccb_scsi_status) {
			case SCSI_CHECK:
				s1 = &ccb->ccb_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				printf("%s: SCSI status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_scsi_status);
				xs->error = XS_DRIVER_STUFFUP;
			}
		} else
			xs->resid = 0;

		xs->status = ccb->ccb_scsi_status;
	}

	/* Free up the CCB and mark the command as done */
	dpt_free_ccb(sc, ccb);
	xs->xs_status |= XS_STS_DONE;
	scsipi_done(xs);

	/*
	 * If there are entries in the software queue, try to run the first
	 * one.  We should be more or less guaranteed to succeed, since we
	 * just freed a CCB.  NOTE: dpt_scsi_cmd() relies on our calling it
	 * with the first entry in the queue.
	 */
	if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
		dpt_scsi_cmd(xs);
}

/*
 * Start a SCSI command.
 */
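/*
 * Returns SUCCESSFULLY_QUEUED if the request has been accepted and will
 * complete later via dpt_done_ccb(), COMPLETE if it has been dealt with
 * immediately (possibly with xs->error set), or TRY_AGAIN_LATER if the
 * request could not be started now and may be retried.
 */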
int
dpt_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	int error, i, flags, s, fromqueue, dontqueue, nowait;
	struct scsipi_link *sc_link;
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sg *sg;
	struct eata_cp *cp;
	bus_dma_tag_t dmat;
	bus_dmamap_t xfer;

	sc_link = xs->sc_link;
	flags = xs->xs_control;
	sc = sc_link->adapter_softc;
	dmat = sc->sc_dmat;
	fromqueue = 0;
	dontqueue = 0;
	nowait = 0;

	SC_DEBUG(sc_link, SDEV_DB2, ("dpt_scsi_cmd\n"));

	/* Protect the queue */
	s = splbio();

	/*
	 * If we're running the queue from dpt_done_ccb(), we've been called
	 * with the first queue entry as our argument.
	 */
	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
		fromqueue = 1;
		nowait = 1;
	} else {
		/* Commands must be no more than 12 bytes for us */
		if (xs->cmdlen > 12) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (COMPLETE);
		}

		/*
		 * XXX we can't reset devices just yet.  Apparently some
		 * older firmware revisions don't even support it.
		 */
		if ((flags & XS_CTL_RESET) != 0) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (COMPLETE);
		}

		/* Polled requests can't be queued for later */
		dontqueue = flags & XS_CTL_POLL;

		/* If there are jobs in the queue, run them first */
		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
			/*
			 * If we can't queue we abort, since we must
			 * preserve the queue order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}

			/* Swap with the first queue entry. */
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
			xs = TAILQ_FIRST(&sc->sc_queue);
			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
			fromqueue = 1;
		}
	}

	/* Get a CCB */
	if (nowait)
		flags |= XS_CTL_NOSLEEP;
	if ((ccb = dpt_alloc_ccb(sc, flags)) == NULL) {
		/* If we can't queue, we lose */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}

		/*
		 * Stuff the request into the queue, in front if we came
		 * off it in the first place.
		 */
		if (fromqueue)
			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
		else
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}

	splx(s);

	ccb->ccb_xs = xs;
	ccb->ccb_timeout = xs->timeout;

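	/*
	 * Fill in the EATA command packet.  Note that multi-byte fields in
	 * the CP and the scatter/gather list are big-endian, hence the
	 * htobe32() conversions here and below.
	 */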
	cp = &ccb->ccb_eata_cp;
	memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_stataddr = htobe32(sc->sc_stppa);
	cp->cp_ctl0 = CP_C0_AUTO_SENSE;
	cp->cp_ctl1 = 0;
	cp->cp_ctl2 = 0;
	cp->cp_ctl3 = sc_link->scsipi_scsi.target << CP_C3_ID_SHIFT;
	cp->cp_ctl3 |= sc_link->scsipi_scsi.channel << CP_C3_CHANNEL_SHIFT;
	cp->cp_ctl4 = sc_link->scsipi_scsi.lun << CP_C4_LUN_SHIFT;
	cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;

	if ((flags & XS_CTL_DATA_IN) != 0)
		cp->cp_ctl0 |= CP_C0_DATA_IN;
	if ((flags & XS_CTL_DATA_OUT) != 0)
		cp->cp_ctl0 |= CP_C0_DATA_OUT;
	if (sc->sc_hbaid[sc_link->scsipi_scsi.channel] ==
	    sc_link->scsipi_scsi.target)
		cp->cp_ctl0 |= CP_C0_INTERPRET;

	/* Synchronous xfers mustn't write-back through the cache */
	if (xs->bp != NULL && (xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
		cp->cp_ctl2 |= CP_C2_NO_CACHE;

	cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));

	if (xs->datalen != 0) {
		xfer = ccb->ccb_dmamap_xfer;
#ifdef TFS
		if ((flags & XS_CTL_DATA_UIO) != 0) {
			error = bus_dmamap_load_uio(dmat, xfer,
			    (struct uio *)xs->data, (flags & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat, xfer, xs->data,
			    xs->datalen, NULL, (flags & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		}

		if (error) {
			printf("%s: dpt_scsi_cmd: ", sc->sc_dv.dv_xname);
			if (error == EFBIG)
				printf("more than %d dma segs\n", DPT_SG_SIZE);
			else
				printf("error %d loading dma map\n", error);

			xs->error = XS_DRIVER_STUFFUP;
			dpt_free_ccb(sc, ccb);
			return (COMPLETE);
		}

		bus_dmamap_sync(dmat, xfer, 0, xfer->dm_mapsize,
		    (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);

		/* Don't bother using scatter/gather for just 1 segment */
		if (xfer->dm_nsegs == 1) {
			cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
			cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
		} else {
			/*
			 * Load the hardware scatter/gather map with the
			 * contents of the DMA map.
			 */
			sg = ccb->ccb_sg;
			for (i = 0; i < xfer->dm_nsegs; i++, sg++) {
				sg->sg_addr = htobe32(xfer->dm_segs[i].ds_addr);
				sg->sg_len = htobe32(xfer->dm_segs[i].ds_len);
			}
			cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
			    sc->sc_dmamap->dm_segs[0].ds_addr +
			    offsetof(struct dpt_ccb, ccb_sg));
			cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
			cp->cp_ctl0 |= CP_C0_SCATTER;
		}
	} else {
		cp->cp_dataaddr = 0;
		cp->cp_datalen = 0;
	}

	/* Sync up CCB and status packet */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);

	/*
	 * Start the command.  If we are polling on completion, mark it
	 * private so that dpt_intr/dpt_done_ccb don't recycle the CCB
	 * without us noticing.
	 */
	if (dontqueue != 0)
		ccb->ccb_flg |= CCB_PRIVATE;

	if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0)) {
		printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
		xs->error = XS_DRIVER_STUFFUP;
		dpt_free_ccb(sc, ccb);
		return (TRY_AGAIN_LATER);
	}

	if (dontqueue == 0)
		return (SUCCESSFULLY_QUEUED);

	/* Don't wait longer than this single command wants to wait */
	if (dpt_poll(sc, ccb)) {
		dpt_timeout(ccb);
		/* Wait for abort to complete */
		if (dpt_poll(sc, ccb))
			dpt_timeout(ccb);
	}

	dpt_done_ccb(sc, ccb);
	return (COMPLETE);
}

/*
 * The specified CCB has timed out; abort it.
 */
void
dpt_timeout(arg)
	void *arg;
{
	struct scsipi_link *sc_link;
	struct scsipi_xfer *xs;
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	int s;

	ccb = arg;
	xs = ccb->ccb_xs;
	sc_link = xs->sc_link;
	sc = sc_link->adapter_softc;

	scsi_print_addr(sc_link);
	printf("timed out (status:%02x aux status:%02x)",
	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));

	s = splbio();

	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
		/* Abort timed out, reset the HBA */
		printf(" AGAIN, resetting HBA\n");
		dpt_outb(sc, HA_COMMAND, CP_RESET);
		DELAY(750000);
	} else {
		/* Abort the operation that has timed out */
		printf("\n");
		ccb->ccb_xs->error = XS_TIMEOUT;
		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
		ccb->ccb_flg |= CCB_ABORT;
		/* Start the abort */
		if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa,
		    CP_IMMEDIATE, CPI_SPEC_ABORT))
			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
	}

	splx(s);
}

/*
 * Get inquiry data from the adapter.
 */
void
dpt_hba_inquire(sc, ei)
	struct dpt_softc *sc;
	struct eata_inquiry_data **ei;
{
	struct dpt_ccb *ccb;
	struct eata_cp *cp;
	bus_dma_tag_t dmat;

	*ei = (struct eata_inquiry_data *)sc->sc_scr;
	dmat = sc->sc_dmat;

	/* Get a CCB and mark it as private */
	if ((ccb = dpt_alloc_ccb(sc, 0)) == NULL)
		panic("%s: no CCB for inquiry", sc->sc_dv.dv_xname);

	ccb->ccb_flg |= CCB_PRIVATE;
	ccb->ccb_timeout = 200;

	/* Put all the arguments into the CCB */
	cp = &ccb->ccb_eata_cp;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = 0;
	cp->cp_stataddr = htobe32(sc->sc_stppa);
	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
	cp->cp_ctl1 = 0;
	cp->cp_ctl2 = 0;
	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;

	/* Put together the SCSI inquiry command */
	memset(&cp->cp_cdb_cmd, 0, 12);		/* XXX */
	cp->cp_cdb_cmd = INQUIRY;
	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);

	/* Sync up CCB, status packet and scratch area */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);

	/* Start the command and poll on completion */
	if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);

	if (dpt_poll(sc, ccb))
		panic("%s: inquiry timed out", sc->sc_dv.dv_xname);

	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
	    ccb->ccb_scsi_status != SCSI_OK)
		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
		    sc->sc_dv.dv_xname, ccb->ccb_hba_status,
		    ccb->ccb_scsi_status);

	/* Sync up the DMA map and free the CCB before returning */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
	dpt_free_ccb(sc, ccb);
}