/*	$NetBSD: dpt.c,v 1.23 2000/07/18 15:27:44 ad Exp $	*/
2
3 /*-
4 * Copyright (c) 1997, 1998, 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
9 * Aerospace Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Portions of this code fall under the following copyright:
42 *
43 * Originally written by Julian Elischer (julian (at) tfs.com)
44 * for TRW Financial Systems for use under the MACH(2.5) operating system.
45 *
46 * TRW Financial Systems, in accordance with their agreement with Carnegie
47 * Mellon University, makes this software available to CMU to distribute
48 * or use in any manner that they see fit as long as this message is kept with
49 * the software. For this reason TFS also grants any other persons or
50 * organisations permission to use or modify this software.
51 *
52 * TFS supplies this software to be publicly redistributed
53 * on the understanding that TFS is not responsible for the correct
54 * functioning of this software in any circumstances.
55 */
56
57 #include <sys/cdefs.h>
58 __KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.23 2000/07/18 15:27:44 ad Exp $");
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/device.h>
64 #include <sys/queue.h>
65 #include <sys/proc.h>
66 #include <sys/buf.h>
67 #include <sys/endian.h>
68
69 #include <machine/bswap.h>
70 #include <machine/bus.h>
71
72 #include <dev/scsipi/scsi_all.h>
73 #include <dev/scsipi/scsipi_all.h>
74 #include <dev/scsipi/scsiconf.h>
75
76 #include <dev/ic/dptreg.h>
77 #include <dev/ic/dptvar.h>
78
/*
 * A default for our link struct: all callbacks NULL, so the scsipi
 * midlayer falls back to its generic handlers for every operation.
 */
static struct scsipi_device dpt_dev = {
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};
86
/*
 * Table mapping EATA model-number prefixes to canonical board names.
 * Entries come in pairs: { model prefix, human-readable name }.  The
 * table is scanned two entries at a time; the NULL prefix terminates
 * the scan and its partner string is the catch-all name.
 */
static char *dpt_cname[] = {
	"PM3334", "SmartRAID IV",
	"PM3332", "SmartRAID IV",
	"PM2144", "SmartCache IV",
	"PM2044", "SmartCache IV",
	"PM2142", "SmartCache IV",
	"PM2042", "SmartCache IV",
	"PM2041", "SmartCache IV",
	"PM3224", "SmartRAID III",
	"PM3222", "SmartRAID III",
	"PM3021", "SmartRAID III",
	"PM2124", "SmartCache III",
	"PM2024", "SmartCache III",
	"PM2122", "SmartCache III",
	"PM2022", "SmartCache III",
	"PM2021", "SmartCache III",
	"SK2012", "SmartCache Plus",
	"SK2011", "SmartCache Plus",
	NULL,	  "unknown adapter, please report using send-pr(1)",
};
107
108 void *dpt_sdh; /* shutdown hook */
109
/*
 * Handle an interrupt from the HBA.
 *
 * Drains every pending status packet.  We ACK the interrupt (by reading
 * HA_STATUS) before processing the completed command, so the HBA may post
 * another completion while we work - hence the outer loop.  Returns
 * non-zero when the interrupt was (or may have been) ours.
 */
int
dpt_intr(xxx_sc)
	void *xxx_sc;
{
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sp *sp;
	volatile int junk;	/* sink for the HA_STATUS read that ACKs the intr */

	sc = xxx_sc;
	sp = sc->sc_stp;	/* shared status packet, written by the HBA */

#ifdef DEBUG
	if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0) {
		printf("%s: spurious intr\n", sc->sc_dv.dv_xname);
		return (1);
	}
#endif

	for (;;) {
		/*
		 * HBA might have interrupted while we were dealing with the
		 * last completed command, since we ACK before we deal; keep
		 * polling.
		 */
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
			break;

		/* Pull the DMA-written status packet into view */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);

		/* Might have looped before HBA can reset HBA_AUX_INTR */
		if (sp->sp_ccbid == -1) {
			/* -1 is our "consumed" marker; give the HBA a moment */
			DELAY(50);

			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
				return (0);

			printf("%s: no status\n", sc->sc_dv.dv_xname);

			/* Re-sync DMA map */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    sc->sc_stpoff, sizeof(struct eata_sp),
			    BUS_DMASYNC_POSTREAD);
		}

		/* Make sure CCB ID from status packet is realistic */
		if (sp->sp_ccbid >= 0 && sp->sp_ccbid < sc->sc_nccbs) {
			/* Sync up DMA map and cache cmd status */
			ccb = sc->sc_ccbs + sp->sp_ccbid;

			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
			    BUS_DMASYNC_POSTWRITE);

			/* Top bit of the HBA status byte is masked off */
			ccb->ccb_hba_status = sp->sp_hba_status & 0x7F;
			ccb->ccb_scsi_status = sp->sp_scsi_status;

			/*
			 * Ack the interrupt and process the CCB.  If this
			 * is a private CCB it's up to dpt_poll() to notice.
			 */
			sp->sp_ccbid = -1;	/* mark packet consumed */
			ccb->ccb_flg |= CCB_INTR;
			junk = dpt_inb(sc, HA_STATUS);
			if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
				dpt_done_ccb(sc, ccb);
		} else {
			printf("%s: bogus status (returned CCB id %d)\n",
			    sc->sc_dv.dv_xname, sp->sp_ccbid);

			/* Ack the interrupt */
			sp->sp_ccbid = -1;
			junk = dpt_inb(sc, HA_STATUS);
		}
	}

	return (1);
}
192
193 /*
194 * Initialize and attach the HBA. This is the entry point from bus
195 * specific probe-and-attach code.
196 */
197 void
198 dpt_init(sc, intrstr)
199 struct dpt_softc *sc;
200 const char *intrstr;
201 {
202 struct eata_inquiry_data *ei;
203 int i, j, error, rseg, maxchannel, maxtarget;
204 bus_dma_segment_t seg;
205 struct eata_cfg *ec;
206 struct scsipi_link *link;
207 char model[16];
208
209 ec = &sc->sc_ec;
210
211 /* Allocate the CCB/status packet/scratch DMA map and load */
212 sc->sc_nccbs =
213 min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
214 sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
215 sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
216 sc->sc_scrlen = DPT_SCRATCH_SIZE;
217 sc->sc_dmamapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
218 sc->sc_scrlen + sizeof(struct eata_sp);
219
220 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dmamapsize, NBPG, 0,
221 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
222 printf("%s: unable to allocate CCBs, error = %d\n",
223 sc->sc_dv.dv_xname, error);
224 return;
225 }
226
227 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_dmamapsize,
228 (caddr_t *)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
229 printf("%s: unable to map CCBs, error = %d\n",
230 sc->sc_dv.dv_xname, error);
231 return;
232 }
233
234 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_dmamapsize,
235 sc->sc_dmamapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
236 printf("%s: unable to create CCB DMA map, error = %d\n",
237 sc->sc_dv.dv_xname, error);
238 return;
239 }
240
241 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
242 sc->sc_ccbs, sc->sc_dmamapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
243 printf("%s: unable to load CCB DMA map, error = %d\n",
244 sc->sc_dv.dv_xname, error);
245 return;
246 }
247
248 sc->sc_stp = (struct eata_sp *)((caddr_t)sc->sc_ccbs + sc->sc_stpoff);
249 sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
250 sc->sc_scr = (caddr_t)sc->sc_ccbs + sc->sc_scroff;
251 sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
252 sc->sc_stp->sp_ccbid = -1;
253
254 /* Initialize the CCBs */
255 TAILQ_INIT(&sc->sc_free_ccb);
256 i = dpt_create_ccbs(sc, sc->sc_ccbs, sc->sc_nccbs);
257
258 if (i == 0) {
259 printf("%s: unable to create CCBs\n", sc->sc_dv.dv_xname);
260 return;
261 } else if (i != sc->sc_nccbs) {
262 printf("%s: %d/%d CCBs created!\n", sc->sc_dv.dv_xname, i,
263 sc->sc_nccbs);
264 sc->sc_nccbs = i;
265 }
266
267 /* Set shutdownhook before we start any device activity */
268 if (dpt_sdh == NULL)
269 dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);
270
271 /* Get the page 0 inquiry data from the HBA */
272 dpt_hba_inquire(sc, &ei);
273
274 /*
275 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
276 * dpt0: interrupting at irq 10
277 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
278 */
279 for (i = 0; ei->ei_vendor[i] != ' ' && i < 8; i++)
280 ;
281 ei->ei_vendor[i] = '\0';
282
283 for (i = 0; ei->ei_model[i] != ' ' && i < 7; i++)
284 model[i] = ei->ei_model[i];
285 for (j = 0; ei->ei_suffix[j] != ' ' && j < 7; j++)
286 model[i++] = ei->ei_model[i];
287 model[i] = '\0';
288
289 /* Find the cannonical name for the board */
290 for (i = 0; dpt_cname[i] != NULL; i += 2)
291 if (memcmp(ei->ei_model, dpt_cname[i], 6) == 0)
292 break;
293
294 printf("%s %s (%s)\n", ei->ei_vendor, dpt_cname[i + 1], model);
295
296 if (intrstr != NULL)
297 printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr);
298
299 maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
300 EC_F3_MAX_CHANNEL_SHIFT;
301 maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
302 EC_F3_MAX_TARGET_SHIFT;
303
304 printf("%s: %d queued commands, %d channel(s), adapter on ID(s)",
305 sc->sc_dv.dv_xname, sc->sc_nccbs, maxchannel + 1);
306
307 for (i = 0; i <= maxchannel; i++) {
308 sc->sc_hbaid[i] = ec->ec_hba[3 - i];
309 printf(" %d", sc->sc_hbaid[i]);
310 }
311 printf("\n");
312
313 /* Reset the SCSI bus */
314 if (dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_BUS_RESET))
315 panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
316
317 /* Fill in the adapter, each link and attach in turn */
318 sc->sc_adapter.scsipi_cmd = dpt_scsi_cmd;
319 sc->sc_adapter.scsipi_minphys = dpt_minphys;
320
321 for (i = 0; i <= maxchannel; i++) {
322 link = &sc->sc_link[i];
323 link->scsipi_scsi.channel = i;
324 link->scsipi_scsi.adapter_target = sc->sc_hbaid[i];
325 link->scsipi_scsi.max_lun = ec->ec_maxlun;
326 link->scsipi_scsi.max_target = maxtarget;
327 link->type = BUS_SCSI;
328 link->device = &dpt_dev;
329 link->adapter = &sc->sc_adapter;
330 link->adapter_softc = sc;
331 link->openings = sc->sc_nccbs; /* XXX */
332 config_found(&sc->sc_dv, link, scsiprint);
333 }
334 }
335
336 /*
337 * Our 'shutdownhook' to cleanly shut down the HBA. The HBA must flush
338 * all data from it's cache and mark array groups as clean.
339 */
340 void
341 dpt_shutdown(xxx_sc)
342 void *xxx_sc;
343 {
344 extern struct cfdriver dpt_cd;
345 struct dpt_softc *sc;
346 int i;
347
348 printf("shutting down dpt devices...");
349
350 for (i = 0; i < dpt_cd.cd_ndevs; i++) {
351 if ((sc = device_lookup(&dpt_cd, i)) == NULL)
352 continue;
353 dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_POWEROFF_WARN);
354 }
355
356 DELAY(5000*1000);
357 printf(" done\n");
358 }
359
/*
 * Send an EATA command to the HBA.
 *
 * `cp'/`addr' give the physical address of the command packet (NULL/0
 * when an immediate command carries no packet).  `eatacmd' is the EATA
 * command code; `icmd' is the immediate sub-code, used only when
 * eatacmd == CP_IMMEDIATE.  Returns 0 on success, -1 if the HBA stayed
 * busy for the full polling period (20000 * 50us = ~1s).
 */
int
dpt_cmd(sc, cp, addr, eatacmd, icmd)
	struct dpt_softc *sc;
	struct eata_cp *cp;
	u_int32_t addr;
	int eatacmd, icmd;
{
	int i;

	/* Busy-wait for the HBA to go idle before issuing anything */
	for (i = 20000; i; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
			break;
		DELAY(50);
	}

	/* Not the most graceful way to handle this */
	if (i == 0) {
		printf("%s: HBA timeout on EATA command issue; aborting\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	/* No packet means no DMA address to hand over */
	if (cp == NULL)
		addr = 0;

	dpt_outl(sc, HA_DMA_BASE, (u_int32_t)addr);

	if (eatacmd == CP_IMMEDIATE) {
		if (cp == NULL) {
			/* XXX should really pass meaningful values */
			dpt_outb(sc, HA_ICMD_CODE2, 0);
			dpt_outb(sc, HA_ICMD_CODE1, 0);
		}
		dpt_outb(sc, HA_ICMD, icmd);
	}

	/* Writing the command register fires the command; order matters */
	dpt_outb(sc, HA_COMMAND, eatacmd);
	return (0);
}
402
403 /*
404 * Wait for the HBA status register to reach a specific state.
405 */
406 int
407 dpt_wait(sc, mask, state, ms)
408 struct dpt_softc *sc;
409 u_int8_t mask, state;
410 int ms;
411 {
412
413 for (ms *= 10; ms; ms--) {
414 if ((dpt_inb(sc, HA_STATUS) & mask) == state)
415 return (0);
416 DELAY(100);
417 }
418 return (-1);
419 }
420
421 /*
422 * Wait for the specified CCB to finish. This is used when we may not be
423 * able to sleep and/or interrupts are disabled (eg autoconfiguration).
424 * The timeout value from the CCB is used. This should only be used for
425 * CCB_PRIVATE requests; otherwise the CCB will get recycled before we get
426 * a look at it.
427 */
428 int
429 dpt_poll(sc, ccb)
430 struct dpt_softc *sc;
431 struct dpt_ccb *ccb;
432 {
433 int i;
434
435 #ifdef DEBUG
436 if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
437 panic("dpt_poll: called for non-CCB_PRIVATE request\n");
438 #endif
439
440 if ((ccb->ccb_flg & CCB_INTR) != 0)
441 return (0);
442
443 for (i = ccb->ccb_timeout * 20; i; i--) {
444 if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0) {
445 dpt_intr(sc);
446 if ((ccb->ccb_flg & CCB_INTR) != 0)
447 return (0);
448 }
449 DELAY(50);
450 }
451
452 return (-1);
453 }
454
/*
 * Read the EATA configuration from the HBA and perform some sanity checks.
 *
 * Resets the controller, waits for it to become ready, then reads the
 * configuration block into sc->sc_ec with PIO.  Returns 0 on success,
 * -1 on any failure (already logged).
 */
int
dpt_readcfg(sc)
	struct dpt_softc *sc;
{
	struct eata_cfg *ec;
	int i, j, stat;
	u_int16_t *p;

	ec = &sc->sc_ec;

	/* Older firmware may puke if we talk to it too soon after reset */
	dpt_outb(sc, HA_COMMAND, CP_RESET);
	DELAY(750000);

	/* Up to 1000 * 2ms = 2s for the ready bit */
	for (i = 1000; i; i--) {
		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}

	if (i == 0) {
		printf("%s: HBA not ready after reset (hba status:%02x)\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/*
	 * Accept ready+seek-complete, optionally with the error and DRQ
	 * bits also set; anything else, keep waiting while the busy bit
	 * clears within 2s.
	 */
	while((((stat = dpt_inb(sc, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    && dpt_wait(sc, HA_ST_BUSY, 0, 2000)) {
		/* RAID drives still spinning up? */
		/* The error registers spell "DPT" while that is the case */
		if((dpt_inb(sc, HA_ERROR) != 'D')
		    || (dpt_inb(sc, HA_ERROR + 1) != 'P')
		    || (dpt_inb(sc, HA_ERROR + 2) != 'T')) {
			printf("%s: HBA not ready\n", sc->sc_dv.dv_xname);
			return (-1);
		}
	}

	/*
	 * Issue the read-config command and wait for the data to appear.
	 * XXX we shouldn't be doing this with PIO, but it makes it a lot
	 * easier as no DMA setup is required.
	 */
	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
	memset(ec, 0, sizeof(*ec));
	/*
	 * Number of 16-bit words up to and including ec_cfglen; the cast
	 * of a null pointer dereference is a hand-rolled offsetof().
	 */
	i = ((int)&((struct eata_cfg *)0)->ec_cfglen +
	    sizeof(ec->ec_cfglen)) >> 1;
	p = (u_int16_t *)ec;

	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
		printf("%s: cfg data didn't appear (hba status:%02x)\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/* Begin reading */
	while (i--)
		*p++ = dpt_inw(sc, HA_DATA);

	/* Clamp the advertised length to what our structure can hold */
	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
	    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
	    - sizeof(ec->ec_cfglen)))
		i = sizeof(struct eata_cfg)
		    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
		    - sizeof(ec->ec_cfglen);

	/* j = total bytes consumed from the HBA once the body is read */
	j = i + (int)(&(((struct eata_cfg *)0L)->ec_cfglen)) +
	    sizeof(ec->ec_cfglen);
	i >>= 1;	/* bytes -> 16-bit words */

	while (i--)
		*p++ = dpt_inw(sc, HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		dpt_inw(sc, HA_DATA);

	/* Defaults for older Firmware */
	/* If the config was too short to cover ec_hba[], assume SCSI ID 7 */
	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;

	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
		printf("%s: HBA error\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
		printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
		printf("%s: ec_hba field invalid\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
		printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	return (0);
}
564
565 /*
566 * Adjust the size of each I/O before it passes to the SCSI layer.
567 */
568 void
569 dpt_minphys(bp)
570 struct buf *bp;
571 {
572
573 if (bp->b_bcount > DPT_MAX_XFER)
574 bp->b_bcount = DPT_MAX_XFER;
575 minphys(bp);
576 }
577
/*
 * Put a CCB onto the freelist.
 */
void
dpt_free_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int s;

	s = splbio();
	ccb->ccb_flg = 0;
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, ccb_chain);

	/* Wake anybody waiting for a free ccb */
	/*
	 * After an INSERT_HEAD, tqe_next == 0 means this entry is also the
	 * tail, i.e. the list was empty before - only then can someone be
	 * asleep in dpt_alloc_ccb() waiting for a CCB.
	 */
	if (ccb->ccb_chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);
	splx(s);
}
597
/*
 * Initialize the specified CCB.
 *
 * Creates the per-CCB data-transfer DMA map and records the CCB's own
 * physical address.  Returns 0 on success or the bus_dma error code.
 */
int
dpt_init_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int error;

	/* Create the DMA map for this CCB's data */
	error = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER, DPT_SG_SIZE,
	    DPT_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &ccb->ccb_dmamap_xfer);

	if (error) {
		printf("%s: can't create ccb dmamap (%d)\n",
		    sc->sc_dv.dv_xname, error);
		return (error);
	}

	ccb->ccb_flg = 0;
	/* Physical address of this CCB, handed to the HBA with commands */
	ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
	    CCB_OFF(sc, ccb);
	return (0);
}
624
625 /*
626 * Create a set of CCBs and add them to the free list.
627 */
628 int
629 dpt_create_ccbs(sc, ccbstore, count)
630 struct dpt_softc *sc;
631 struct dpt_ccb *ccbstore;
632 int count;
633 {
634 struct dpt_ccb *ccb;
635 int i, error;
636
637 memset(ccbstore, 0, sizeof(struct dpt_ccb) * count);
638
639 for (i = 0, ccb = ccbstore; i < count; i++, ccb++) {
640 if ((error = dpt_init_ccb(sc, ccb)) != 0) {
641 printf("%s: unable to init ccb, error = %d\n",
642 sc->sc_dv.dv_xname, error);
643 break;
644 }
645 ccb->ccb_id = i;
646 TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_chain);
647 }
648
649 return (i);
650 }
651
652 /*
653 * Get a free ccb. If there are none, see if we can allocate a new one. If
654 * none are available right now and we are permitted to sleep, then wait
655 * until one becomes free, otherwise return an error.
656 */
657 struct dpt_ccb *
658 dpt_alloc_ccb(sc, flg)
659 struct dpt_softc *sc;
660 int flg;
661 {
662 struct dpt_ccb *ccb;
663 int s;
664
665 s = splbio();
666
667 for (;;) {
668 if ((ccb = TAILQ_FIRST(&sc->sc_free_ccb)) != NULL) {
669 TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_chain);
670 break;
671 }
672 if ((flg & XS_CTL_NOSLEEP) != 0) {
673 ccb = NULL;
674 break;
675 }
676 tsleep(&sc->sc_free_ccb, PRIBIO, "dptccb", 0);
677 }
678
679 splx(s);
680 return (ccb);
681 }
682
/*
 * We have a CCB which has been processed by the HBA, now we look to see how
 * the operation went.  CCBs marked with CCB_PRIVATE are not automatically
 * passed here by dpt_intr().
 *
 * Translates HBA/SCSI status into xs->error, frees the CCB, completes the
 * transfer, and kicks the head of the software queue if anything is
 * waiting for a CCB.
 */
void
dpt_done_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	struct scsipi_sense_data *s1, *s2;
	struct scsipi_xfer *xs;
	bus_dma_tag_t dmat;

	dmat = sc->sc_dmat;
	xs = ccb->ccb_xs;

	SC_DEBUG(xs->sc_link, SDEV_DB2, ("dpt_done_ccb\n"));

	/*
	 * If we were a data transfer, unload the map that described the
	 * data buffer.
	 */
	if (xs->datalen != 0) {
		bus_dmamap_sync(dmat, ccb->ccb_dmamap_xfer, 0,
		    ccb->ccb_dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->ccb_dmamap_xfer);
	}

	if (xs->error == XS_NOERROR) {
		/* Check HBA-level status first, then SCSI status */
		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
			switch (ccb->ccb_hba_status) {
			case SP_HBA_ERROR_SEL_TO:
				xs->error = XS_SELTIMEOUT;
				break;
			case SP_HBA_ERROR_RESET:
				xs->error = XS_RESET;
				break;
			default:	/* Other scsi protocol messes */
				printf("%s: HBA status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_hba_status);
				xs->error = XS_DRIVER_STUFFUP;
			}
		} else if (ccb->ccb_scsi_status != SCSI_OK) {
			switch (ccb->ccb_scsi_status) {
			case SCSI_CHECK:
				/* Copy auto-sense data gathered by the HBA */
				s1 = &ccb->ccb_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				printf("%s: SCSI status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_scsi_status);
				xs->error = XS_DRIVER_STUFFUP;
			}
		} else
			xs->resid = 0;

		xs->status = ccb->ccb_scsi_status;
	}

	/* Free up the CCB and mark the command as done */
	dpt_free_ccb(sc, ccb);
	xs->xs_status |= XS_STS_DONE;
	scsipi_done(xs);

	/*
	 * If there are entries in the software queue, try to run the first
	 * one.  We should be more or less guaranteed to succeed, since we
	 * just freed an CCB.  NOTE: dpt_scsi_cmd() relies on our calling it
	 * with the first entry in the queue.
	 */
	if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
		dpt_scsi_cmd(xs);
}
764
/*
 * Start a SCSI command.
 *
 * Entry point from the scsipi midlayer (and, re-entrantly, from
 * dpt_done_ccb() with the head of the software queue).  Returns
 * SUCCESSFULLY_QUEUED, COMPLETE, or TRY_AGAIN_LATER per the scsipi
 * adapter contract.  Queue ordering is preserved: a new request never
 * overtakes ones already waiting in sc_queue.
 */
int
dpt_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	int error, i, flags, s, fromqueue, dontqueue, nowait;
	struct scsipi_link *sc_link;
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sg *sg;
	struct eata_cp *cp;
	bus_dma_tag_t dmat;
	bus_dmamap_t xfer;

	sc_link = xs->sc_link;
	flags = xs->xs_control;
	sc = sc_link->adapter_softc;
	dmat = sc->sc_dmat;
	fromqueue = 0;		/* request came off sc_queue */
	dontqueue = 0;		/* polled request; must not be deferred */
	nowait = 0;		/* must not sleep for a CCB */

	SC_DEBUG(sc_link, SDEV_DB2, ("dpt_scsi_cmd\n"));

	/* Protect the queue */
	s = splbio();

	/*
	 * If we're running the queue from dpt_done_ccb(), we've been called
	 * with the first queue entry as our argument.
	 */
	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
		fromqueue = 1;
		nowait = 1;
	} else {
		/* Cmds must be no more than 12 bytes for us */
		if (xs->cmdlen > 12) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (COMPLETE);
		}

		/*
		 * XXX we can't reset devices just yet.  Apparently some
		 * older firmware revisions don't even support it.
		 */
		if ((flags & XS_CTL_RESET) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			return (COMPLETE);
		}

		/* Polled requests can't be queued for later */
		dontqueue = flags & XS_CTL_POLL;

		/* If there are jobs in the queue, run them first */
		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
			/*
			 * If we can't queue we abort, since we must
			 * preserve the queue order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}

			/* Swap with the first queue entry. */
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
			xs = TAILQ_FIRST(&sc->sc_queue);
			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
			fromqueue = 1;
		}
	}

	/* Get a CCB */
	if (nowait)
		flags |= XS_CTL_NOSLEEP;
	if ((ccb = dpt_alloc_ccb(sc, flags)) == NULL) {
		/* If we can't queue, we lose */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}

		/*
		 * Stuff request into the queue, in front if we came off
		 * it in the first place.
		 */
		if (fromqueue)
			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
		else
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}

	splx(s);

	ccb->ccb_xs = xs;
	ccb->ccb_timeout = xs->timeout;

	/* Build the EATA command packet inside the CCB */
	cp = &ccb->ccb_eata_cp;
	memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_stataddr = htobe32(sc->sc_stppa);
	cp->cp_ctl0 = CP_C0_AUTO_SENSE;
	cp->cp_ctl1 = 0;
	cp->cp_ctl2 = 0;
	cp->cp_ctl3 = sc_link->scsipi_scsi.target << CP_C3_ID_SHIFT;
	cp->cp_ctl3 |= sc_link->scsipi_scsi.channel << CP_C3_CHANNEL_SHIFT;
	cp->cp_ctl4 = sc_link->scsipi_scsi.lun << CP_C4_LUN_SHIFT;
	cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;

	if ((flags & XS_CTL_DATA_IN) != 0)
		cp->cp_ctl0 |= CP_C0_DATA_IN;
	if ((flags & XS_CTL_DATA_OUT) != 0)
		cp->cp_ctl0 |= CP_C0_DATA_OUT;
	/* Commands addressed to the HBA itself are interpreted, not sent */
	if (sc->sc_hbaid[sc_link->scsipi_scsi.channel] ==
	    sc_link->scsipi_scsi.target)
		cp->cp_ctl0 |= CP_C0_INTERPRET;

	/* Synchronous xfers musn't write-back through the cache */
	if (xs->bp != NULL && (xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
		cp->cp_ctl2 |= CP_C2_NO_CACHE;

	/* Physical address of the sense buffer embedded in this CCB */
	cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));

	if (xs->datalen != 0) {
		xfer = ccb->ccb_dmamap_xfer;
#ifdef TFS
		if ((flags & XS_CTL_DATA_UIO) != 0) {
			error = bus_dmamap_load_uio(dmat, xfer,
			    (struct uio *)xs->data, (flags & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat, xfer, xs->data,
			    xs->datalen, NULL, (flags & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		}

		if (error) {
			printf("%s: dpt_scsi_cmd: ", sc->sc_dv.dv_xname);
			if (error == EFBIG)
				printf("more than %d dma segs\n", DPT_SG_SIZE);
			else
				printf("error %d loading dma map\n", error);

			xs->error = XS_DRIVER_STUFFUP;
			dpt_free_ccb(sc, ccb);
			return (COMPLETE);
		}

		bus_dmamap_sync(dmat, xfer, 0, xfer->dm_mapsize,
		    (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);

		/* Don't bother using scatter/gather for just 1 segment */
		if (xfer->dm_nsegs == 1) {
			cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
			cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
		} else {
			/*
			 * Load the hardware scatter/gather map with the
			 * contents of the DMA map.
			 */
			sg = ccb->ccb_sg;
			for (i = 0; i < xfer->dm_nsegs; i++, sg++) {
				sg->sg_addr = htobe32(xfer->dm_segs[i].ds_addr);
				sg->sg_len = htobe32(xfer->dm_segs[i].ds_len);
			}
			cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
			    sc->sc_dmamap->dm_segs[0].ds_addr +
			    offsetof(struct dpt_ccb, ccb_sg));
			cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
			cp->cp_ctl0 |= CP_C0_SCATTER;
		}
	} else {
		cp->cp_dataaddr = 0;
		cp->cp_datalen = 0;
	}

	/* Sync up CCB and status packet */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);

	/*
	 * Start the command.  If we are polling on completion, mark it
	 * private so that dpt_intr/dpt_done_ccb don't recycle the CCB
	 * without us noticing.
	 */
	if (dontqueue != 0)
		ccb->ccb_flg |= CCB_PRIVATE;

	if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0)) {
		printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
		xs->error = XS_DRIVER_STUFFUP;
		dpt_free_ccb(sc, ccb);
		return (TRY_AGAIN_LATER);
	}

	if (dontqueue == 0)
		return (SUCCESSFULLY_QUEUED);

	/* Don't wait longer than this single command wants to wait */
	if (dpt_poll(sc, ccb)) {
		dpt_timeout(ccb);
		/* Wait for abort to complete */
		if (dpt_poll(sc, ccb))
			dpt_timeout(ccb);
	}

	dpt_done_ccb(sc, ccb);
	return (COMPLETE);
}
989
/*
 * Specified CCB has timed out, abort it.
 *
 * First invocation sends a CPI_SPEC_ABORT for the CCB; if that abort
 * itself times out (CCB_ABORT already set), the whole HBA is reset.
 */
void
dpt_timeout(arg)
	void *arg;
{
	struct scsipi_link *sc_link;
	struct scsipi_xfer *xs;
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	int s;

	ccb = arg;
	xs = ccb->ccb_xs;
	sc_link = xs->sc_link;
	sc = sc_link->adapter_softc;

	scsi_print_addr(sc_link);
	printf("timed out (status:%02x aux status:%02x)",
	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));

	s = splbio();

	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
		/* Abort timed out, reset the HBA */
		printf(" AGAIN, resetting HBA\n");
		dpt_outb(sc, HA_COMMAND, CP_RESET);
		DELAY(750000);
	} else {
		/* Abort the operation that has timed out */
		printf("\n");
		ccb->ccb_xs->error = XS_TIMEOUT;
		/* Shorter timeout for the abort itself */
		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
		ccb->ccb_flg |= CCB_ABORT;
		/* Start the abort */
		if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa,
		    CP_IMMEDIATE, CPI_SPEC_ABORT))
			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
	}

	splx(s);
}
1033
/*
 * Get inquiry data from the adapter.
 *
 * Issues a polled, interpreted INQUIRY to the HBA itself using a private
 * CCB; the result lands in the driver's DMA scratch area and *ei is
 * pointed at it.  Panics on any failure - this only runs during attach.
 */
void
dpt_hba_inquire(sc, ei)
	struct dpt_softc *sc;
	struct eata_inquiry_data **ei;
{
	struct dpt_ccb *ccb;
	struct eata_cp *cp;
	bus_dma_tag_t dmat;

	/* Result is returned in the scratch area */
	*ei = (struct eata_inquiry_data *)sc->sc_scr;
	dmat = sc->sc_dmat;

	/* Get a CCB and mark as private */
	if ((ccb = dpt_alloc_ccb(sc, 0)) == NULL)
		panic("%s: no CCB for inquiry", sc->sc_dv.dv_xname);

	ccb->ccb_flg |= CCB_PRIVATE;	/* we poll; dpt_intr won't complete it */
	ccb->ccb_timeout = 200;

	/* Put all the arguments into the CCB */
	cp = &ccb->ccb_eata_cp;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = 0;
	cp->cp_stataddr = htobe32(sc->sc_stppa);
	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
	/* CP_C0_INTERPRET: the HBA answers this itself */
	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
	cp->cp_ctl1 = 0;
	cp->cp_ctl2 = 0;
	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;

	/* Put together the SCSI inquiry command */
	memset(&cp->cp_cdb_cmd, 0, 12);		/* XXX */
	cp->cp_cdb_cmd = INQUIRY;
	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);

	/* Sync up CCB, status packet and scratch area */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);

	/* Start the command and poll on completion */
	if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);

	if (dpt_poll(sc, ccb))
		panic("%s: inquiry timed out", sc->sc_dv.dv_xname);

	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
	    ccb->ccb_scsi_status != SCSI_OK)
		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
		    sc->sc_dv.dv_xname, ccb->ccb_hba_status,
		    ccb->ccb_scsi_status);

	/* Sync up the DMA map and free CCB, returning */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
	dpt_free_ccb(sc, ccb);
}
1101