/* $NetBSD: dpt.c,v 1.27 2001/03/10 13:30:55 briggs Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998, 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
9 * Aerospace Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Portions of this code fall under the following copyright:
42 *
43 * Originally written by Julian Elischer (julian (at) tfs.com)
44 * for TRW Financial Systems for use under the MACH(2.5) operating system.
45 *
46 * TRW Financial Systems, in accordance with their agreement with Carnegie
47 * Mellon University, makes this software available to CMU to distribute
48 * or use in any manner that they see fit as long as this message is kept with
49 * the software. For this reason TFS also grants any other persons or
50 * organisations permission to use or modify this software.
51 *
52 * TFS supplies this software to be publicly redistributed
53 * on the understanding that TFS is not responsible for the correct
54 * functioning of this software in any circumstances.
55 */
56
57 #include <sys/cdefs.h>
58 __KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.27 2001/03/10 13:30:55 briggs Exp $");
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/device.h>
64 #include <sys/queue.h>
65 #include <sys/proc.h>
66 #include <sys/buf.h>
67 #include <sys/endian.h>
68
69 #include <uvm/uvm_extern.h>
70
71 #include <machine/bswap.h>
72 #include <machine/bus.h>
73
74 #include <dev/scsipi/scsi_all.h>
75 #include <dev/scsipi/scsipi_all.h>
76 #include <dev/scsipi/scsiconf.h>
77
78 #include <dev/ic/dptreg.h>
79 #include <dev/ic/dptvar.h>
80
/*
 * A default for our link struct.  Every hook is NULL, so the scsipi
 * layer falls back on its generic handlers for all devices attached
 * through this adapter (see dpt_init(), which points each link here).
 */
static struct scsipi_device dpt_dev = {
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};
88
/*
 * Table of (model-number prefix, canonical product name) pairs, stored
 * as consecutive array entries.  dpt_init() scans even indices for a
 * prefix match against the EATA inquiry model field; the odd partner is
 * the human-readable name.  The list is terminated by a NULL prefix
 * whose partner is the catch-all name printed for unknown boards.
 */
static char *dpt_cname[] = {
	"PM3334", "SmartRAID IV",
	"PM3332", "SmartRAID IV",
	"PM2144", "SmartCache IV",
	"PM2044", "SmartCache IV",
	"PM2142", "SmartCache IV",
	"PM2042", "SmartCache IV",
	"PM2041", "SmartCache IV",
	"PM3224", "SmartRAID III",
	"PM3222", "SmartRAID III",
	"PM3021", "SmartRAID III",
	"PM2124", "SmartCache III",
	"PM2024", "SmartCache III",
	"PM2122", "SmartCache III",
	"PM2022", "SmartCache III",
	"PM2021", "SmartCache III",
	"SK2012", "SmartCache Plus",
	"SK2011", "SmartCache Plus",
	NULL,     "unknown adapter, please report using send-pr(1)",
};
109
void	*dpt_sdh;		/* shutdown hook handle; shared by all dpt units */
111
/*
 * Handle an interrupt from the HBA.
 *
 * Completion status is posted by the HBA into the single shared EATA
 * status packet (sc->sc_stp); the CCB id within it identifies the
 * finished command.  Reading HA_STATUS acknowledges the interrupt.
 * Returns 1 when handled; 0 only if a pending-looking interrupt
 * vanished while we waited for status to appear.
 */
int
dpt_intr(xxx_sc)
	void *xxx_sc;
{
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sp *sp;
	volatile int junk;	/* sink for the ack read; volatile so it isn't elided */

	sc = xxx_sc;
	sp = sc->sc_stp;

#ifdef DEBUG
	if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0) {
		printf("%s: spurious intr\n", sc->sc_dv.dv_xname);
		return (1);
	}
#endif

	for (;;) {
		/*
		 * HBA might have interrupted while we were dealing with the
		 * last completed command, since we ACK before we deal; keep
		 * polling.
		 */
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
			break;

		/* Pull the freshly DMA'd status packet into view. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);

		/* Might have looped before HBA can reset HBA_AUX_INTR */
		if (sp->sp_ccbid == -1) {
			DELAY(50);

			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
				return (0);

			printf("%s: no status\n", sc->sc_dv.dv_xname);

			/* Re-sync DMA map */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    sc->sc_stpoff, sizeof(struct eata_sp),
			    BUS_DMASYNC_POSTREAD);
		}

		/* Make sure CCB ID from status packet is realistic */
		if (sp->sp_ccbid >= 0 && sp->sp_ccbid < sc->sc_nccbs) {
			/* Sync up DMA map and cache cmd status */
			ccb = sc->sc_ccbs + sp->sp_ccbid;

			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
			    BUS_DMASYNC_POSTWRITE);

			/* Top bit of the HBA status is masked off here. */
			ccb->ccb_hba_status = sp->sp_hba_status & 0x7F;
			ccb->ccb_scsi_status = sp->sp_scsi_status;

			/*
			 * Ack the interrupt and process the CCB.  If this
			 * is a private CCB it's up to dpt_poll() to notice.
			 */
			sp->sp_ccbid = -1;
			ccb->ccb_flg |= CCB_INTR;
			junk = dpt_inb(sc, HA_STATUS);
			if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
				dpt_done_ccb(sc, ccb);
		} else {
			printf("%s: bogus status (returned CCB id %d)\n",
			    sc->sc_dv.dv_xname, sp->sp_ccbid);

			/* Ack the interrupt */
			sp->sp_ccbid = -1;
			junk = dpt_inb(sc, HA_STATUS);
		}
	}

	return (1);
}
194
195 /*
196 * Initialize and attach the HBA. This is the entry point from bus
197 * specific probe-and-attach code.
198 */
199 void
200 dpt_init(sc, intrstr)
201 struct dpt_softc *sc;
202 const char *intrstr;
203 {
204 struct eata_inquiry_data *ei;
205 int i, j, error, rseg, maxchannel, maxtarget;
206 bus_dma_segment_t seg;
207 struct eata_cfg *ec;
208 struct scsipi_link *link;
209 char model[16];
210
211 ec = &sc->sc_ec;
212
213 /* Allocate the CCB/status packet/scratch DMA map and load */
214 sc->sc_nccbs =
215 min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
216 sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
217 sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
218 sc->sc_scrlen = DPT_SCRATCH_SIZE;
219 sc->sc_dmamapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
220 sc->sc_scrlen + sizeof(struct eata_sp);
221
222 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dmamapsize,
223 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
224 printf("%s: unable to allocate CCBs, error = %d\n",
225 sc->sc_dv.dv_xname, error);
226 return;
227 }
228
229 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_dmamapsize,
230 (caddr_t *)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
231 printf("%s: unable to map CCBs, error = %d\n",
232 sc->sc_dv.dv_xname, error);
233 return;
234 }
235
236 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_dmamapsize,
237 sc->sc_dmamapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
238 printf("%s: unable to create CCB DMA map, error = %d\n",
239 sc->sc_dv.dv_xname, error);
240 return;
241 }
242
243 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
244 sc->sc_ccbs, sc->sc_dmamapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
245 printf("%s: unable to load CCB DMA map, error = %d\n",
246 sc->sc_dv.dv_xname, error);
247 return;
248 }
249
250 sc->sc_stp = (struct eata_sp *)((caddr_t)sc->sc_ccbs + sc->sc_stpoff);
251 sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
252 sc->sc_scr = (caddr_t)sc->sc_ccbs + sc->sc_scroff;
253 sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
254 sc->sc_stp->sp_ccbid = -1;
255
256 /* Initialize the CCBs */
257 TAILQ_INIT(&sc->sc_free_ccb);
258 i = dpt_create_ccbs(sc, sc->sc_ccbs, sc->sc_nccbs);
259
260 if (i == 0) {
261 printf("%s: unable to create CCBs\n", sc->sc_dv.dv_xname);
262 return;
263 } else if (i != sc->sc_nccbs) {
264 printf("%s: %d/%d CCBs created!\n", sc->sc_dv.dv_xname, i,
265 sc->sc_nccbs);
266 sc->sc_nccbs = i;
267 }
268
269 /* Set shutdownhook before we start any device activity */
270 if (dpt_sdh == NULL)
271 dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);
272
273 /* Get the page 0 inquiry data from the HBA */
274 dpt_hba_inquire(sc, &ei);
275
276 /*
277 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
278 * dpt0: interrupting at irq 10
279 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
280 */
281 for (i = 0; ei->ei_vendor[i] != ' ' && i < 8; i++)
282 ;
283 ei->ei_vendor[i] = '\0';
284
285 for (i = 0; ei->ei_model[i] != ' ' && i < 7; i++)
286 model[i] = ei->ei_model[i];
287 for (j = 0; ei->ei_suffix[j] != ' ' && j < 7; i++, j++)
288 model[i] = ei->ei_model[i];
289 model[i] = '\0';
290
291 /* Find the cannonical name for the board */
292 for (i = 0; dpt_cname[i] != NULL; i += 2)
293 if (memcmp(ei->ei_model, dpt_cname[i], 6) == 0)
294 break;
295
296 printf("%s %s (%s)\n", ei->ei_vendor, dpt_cname[i + 1], model);
297
298 if (intrstr != NULL)
299 printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr);
300
301 maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
302 EC_F3_MAX_CHANNEL_SHIFT;
303 maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
304 EC_F3_MAX_TARGET_SHIFT;
305
306 printf("%s: %d queued commands, %d channel(s), adapter on ID(s)",
307 sc->sc_dv.dv_xname, sc->sc_nccbs, maxchannel + 1);
308
309 for (i = 0; i <= maxchannel; i++) {
310 sc->sc_hbaid[i] = ec->ec_hba[3 - i];
311 printf(" %d", sc->sc_hbaid[i]);
312 }
313 printf("\n");
314
315 /* Reset the SCSI bus */
316 if (dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_BUS_RESET))
317 panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
318
319 /* Fill in the adapter, each link and attach in turn */
320 sc->sc_adapter.scsipi_cmd = dpt_scsi_cmd;
321 sc->sc_adapter.scsipi_minphys = dpt_minphys;
322
323 for (i = 0; i <= maxchannel; i++) {
324 link = &sc->sc_link[i];
325 link->scsipi_scsi.channel = i;
326 link->scsipi_scsi.adapter_target = sc->sc_hbaid[i];
327 link->scsipi_scsi.max_lun = ec->ec_maxlun;
328 link->scsipi_scsi.max_target = maxtarget;
329 link->type = BUS_SCSI;
330 link->device = &dpt_dev;
331 link->adapter = &sc->sc_adapter;
332 link->adapter_softc = sc;
333 link->openings = sc->sc_nccbs; /* XXX */
334 config_found(&sc->sc_dv, link, scsiprint);
335 }
336 }
337
/*
 * Our 'shutdownhook' to cleanly shut down the HBA.  The HBA must flush
 * all data from its cache and mark array groups as clean.
 *
 * A single hook is registered for all units (see dpt_init()), so the
 * argument is unused and we walk the whole cfdriver instead.
 */
void
dpt_shutdown(xxx_sc)
	void *xxx_sc;
{
	extern struct cfdriver dpt_cd;
	struct dpt_softc *sc;
	int i;

	printf("shutting down dpt devices...");

	/* Warn every attached unit that power is about to go away. */
	for (i = 0; i < dpt_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&dpt_cd, i)) == NULL)
			continue;
		dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_POWEROFF_WARN);
	}

	/* Give the controllers 5 seconds to flush their caches. */
	DELAY(5000*1000);
	printf(" done\n");
}
361
/*
 * Send an EATA command to the HBA.
 *
 * sc:      adapter softc
 * cp:      command packet, or NULL for packet-less (immediate) commands
 * addr:    bus address of the command packet (forced to 0 if cp is NULL)
 * eatacmd: EATA command code written to HA_COMMAND
 * icmd:    immediate sub-command code (used when eatacmd == CP_IMMEDIATE)
 *
 * Returns 0 on success, or -1 if the HBA stayed busy for the whole
 * 20000 * 50us (~1 second) polling window.
 */
int
dpt_cmd(sc, cp, addr, eatacmd, icmd)
	struct dpt_softc *sc;
	struct eata_cp *cp;
	u_int32_t addr;
	int eatacmd, icmd;
{
	int i;

	/* Wait for the HBA to go non-busy before touching registers. */
	for (i = 20000; i; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
			break;
		DELAY(50);
	}

	/* Not the most graceful way to handle this */
	if (i == 0) {
		printf("%s: HBA timeout on EATA command issue; aborting\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	if (cp == NULL)
		addr = 0;

	/* Load the packet's bus address, least significant byte first. */
	dpt_outb(sc, HA_DMA_BASE+0, (addr    ) & 0xff);
	dpt_outb(sc, HA_DMA_BASE+1, (addr>>8 ) & 0xff);
	dpt_outb(sc, HA_DMA_BASE+2, (addr>>16) & 0xff);
	dpt_outb(sc, HA_DMA_BASE+3, (addr>>24) & 0xff);

	if (eatacmd == CP_IMMEDIATE) {
		if (cp == NULL) {
			/* XXX should really pass meaningful values */
			dpt_outb(sc, HA_ICMD_CODE2, 0);
			dpt_outb(sc, HA_ICMD_CODE1, 0);
		}
		dpt_outb(sc, HA_ICMD, icmd);
	}

	/* Writing HA_COMMAND starts execution; do it last. */
	dpt_outb(sc, HA_COMMAND, eatacmd);
	return (0);
}
407
408 /*
409 * Wait for the HBA status register to reach a specific state.
410 */
411 int
412 dpt_wait(sc, mask, state, ms)
413 struct dpt_softc *sc;
414 u_int8_t mask, state;
415 int ms;
416 {
417
418 for (ms *= 10; ms; ms--) {
419 if ((dpt_inb(sc, HA_STATUS) & mask) == state)
420 return (0);
421 DELAY(100);
422 }
423 return (-1);
424 }
425
/*
 * Wait for the specified CCB to finish.  This is used when we may not be
 * able to sleep and/or interrupts are disabled (eg autoconfiguration).
 * The timeout value from the CCB is used.  This should only be used for
 * CCB_PRIVATE requests; otherwise the CCB will get recycled before we get
 * a look at it.
 *
 * Returns 0 once dpt_intr() has marked the CCB with CCB_INTR, or -1 on
 * timeout.  ccb_timeout is in milliseconds; one poll per 50us gives the
 * factor of 20.
 */
int
dpt_poll(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	int i;

#ifdef DEBUG
	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
		panic("dpt_poll: called for non-CCB_PRIVATE request\n");
#endif

	/* The interrupt handler may have beaten us to it. */
	if ((ccb->ccb_flg & CCB_INTR) != 0)
		return (0);

	for (i = ccb->ccb_timeout * 20; i; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0) {
			/* Run the interrupt handler by hand. */
			dpt_intr(sc);
			if ((ccb->ccb_flg & CCB_INTR) != 0)
				return (0);
		}
		DELAY(50);
	}

	return (-1);
}
459
/*
 * Read the EATA configuration from the HBA and perform some sanity checks.
 *
 * Resets the board, waits for it to come ready, then pulls the EATA
 * configuration block in with 16-bit PIO reads and validates the
 * signature and feature bits.  Returns 0 on success, -1 on failure.
 */
int
dpt_readcfg(sc)
	struct dpt_softc *sc;
{
	struct eata_cfg *ec;
	int i, j, stat;
	u_int16_t *p;

	ec = &sc->sc_ec;

	/* Older firmware may puke if we talk to it too soon after reset */
	dpt_outb(sc, HA_COMMAND, CP_RESET);
	DELAY(750000);

	/* Wait up to 1000 * 2ms = 2s for HA_ST_READY. */
	for (i = 1000; i; i--) {
		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}

	if (i == 0) {
		printf("%s: HBA not ready after reset (hba status:%02x)\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/*
	 * Wait for one of the acceptable post-reset status patterns.
	 * While waiting, the error registers should spell "DPT"; if they
	 * don't, the board is not going to come ready and we bail.
	 */
	while((((stat = dpt_inb(sc, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    && dpt_wait(sc, HA_ST_BUSY, 0, 2000)) {
		/* RAID drives still spinning up? */
		if((dpt_inb(sc, HA_ERROR) != 'D')
		    || (dpt_inb(sc, HA_ERROR + 1) != 'P')
		    || (dpt_inb(sc, HA_ERROR + 2) != 'T')) {
			printf("%s: HBA not ready\n", sc->sc_dv.dv_xname);
			return (-1);
		}
	}

	/*
	 * Issue the read-config command and wait for the data to appear.
	 * XXX we shouldn't be doing this with PIO, but it makes it a lot
	 * easier as no DMA setup is required.
	 */
	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
	memset(ec, 0, sizeof(*ec));
	/*
	 * i = number of 16-bit words up to and including ec_cfglen.
	 * (The null-pointer arithmetic is the pre-offsetof() idiom.)
	 */
	i = ((int)&((struct eata_cfg *)0)->ec_cfglen +
	    sizeof(ec->ec_cfglen)) >> 1;
	p = (u_int16_t *)ec;

	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
		printf("%s: cfg data didn't appear (hba status:%02x)\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/* Begin reading */
	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Clip the advertised length to what struct eata_cfg can hold. */
	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
	    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
	    - sizeof(ec->ec_cfglen)))
		i = sizeof(struct eata_cfg)
		    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
		    - sizeof(ec->ec_cfglen);

	/* j = total bytes we will have read once the copy loop is done. */
	j = i + (int)(&(((struct eata_cfg *)0L)->ec_cfglen)) +
	    sizeof(ec->ec_cfglen);
	i >>= 1;	/* bytes -> 16-bit words */

	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		dpt_inw(sc, HA_DATA);

	/* Defaults for older Firmware */
	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;

	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
		printf("%s: HBA error\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
		printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
		printf("%s: ec_hba field invalid\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
		printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	return (0);
}
569
570 /*
571 * Adjust the size of each I/O before it passes to the SCSI layer.
572 */
573 void
574 dpt_minphys(bp)
575 struct buf *bp;
576 {
577
578 if (bp->b_bcount > DPT_MAX_XFER)
579 bp->b_bcount = DPT_MAX_XFER;
580 minphys(bp);
581 }
582
583 /*
584 * Put a CCB onto the freelist.
585 */
586 void
587 dpt_free_ccb(sc, ccb)
588 struct dpt_softc *sc;
589 struct dpt_ccb *ccb;
590 {
591 int s;
592
593 s = splbio();
594 ccb->ccb_flg = 0;
595 TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, ccb_chain);
596
597 /* Wake anybody waiting for a free ccb */
598 if (ccb->ccb_chain.tqe_next == 0)
599 wakeup(&sc->sc_free_ccb);
600 splx(s);
601 }
602
603 /*
604 * Initialize the specified CCB.
605 */
606 int
607 dpt_init_ccb(sc, ccb)
608 struct dpt_softc *sc;
609 struct dpt_ccb *ccb;
610 {
611 int error;
612
613 /* Create the DMA map for this CCB's data */
614 error = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER, DPT_SG_SIZE,
615 DPT_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
616 &ccb->ccb_dmamap_xfer);
617
618 if (error) {
619 printf("%s: can't create ccb dmamap (%d)\n",
620 sc->sc_dv.dv_xname, error);
621 return (error);
622 }
623
624 ccb->ccb_flg = 0;
625 ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
626 CCB_OFF(sc, ccb);
627 return (0);
628 }
629
630 /*
631 * Create a set of CCBs and add them to the free list.
632 */
633 int
634 dpt_create_ccbs(sc, ccbstore, count)
635 struct dpt_softc *sc;
636 struct dpt_ccb *ccbstore;
637 int count;
638 {
639 struct dpt_ccb *ccb;
640 int i, error;
641
642 memset(ccbstore, 0, sizeof(struct dpt_ccb) * count);
643
644 for (i = 0, ccb = ccbstore; i < count; i++, ccb++) {
645 if ((error = dpt_init_ccb(sc, ccb)) != 0) {
646 printf("%s: unable to init ccb, error = %d\n",
647 sc->sc_dv.dv_xname, error);
648 break;
649 }
650 ccb->ccb_id = i;
651 TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_chain);
652 }
653
654 return (i);
655 }
656
657 /*
658 * Get a free ccb. If there are none, see if we can allocate a new one. If
659 * none are available right now and we are permitted to sleep, then wait
660 * until one becomes free, otherwise return an error.
661 */
662 struct dpt_ccb *
663 dpt_alloc_ccb(sc, flg)
664 struct dpt_softc *sc;
665 int flg;
666 {
667 struct dpt_ccb *ccb;
668 int s;
669
670 s = splbio();
671
672 for (;;) {
673 if ((ccb = TAILQ_FIRST(&sc->sc_free_ccb)) != NULL) {
674 TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_chain);
675 break;
676 }
677 if ((flg & XS_CTL_NOSLEEP) != 0) {
678 ccb = NULL;
679 break;
680 }
681 tsleep(&sc->sc_free_ccb, PRIBIO, "dptccb", 0);
682 }
683
684 splx(s);
685 return (ccb);
686 }
687
/*
 * We have a CCB which has been processed by the HBA, now we look to see how
 * the operation went.  CCBs marked with CCB_PRIVATE are not automatically
 * passed here by dpt_intr().
 *
 * Translates the cached HBA/SCSI status into a scsipi error code,
 * releases the CCB, completes the transfer, and kicks the software
 * queue if anything is waiting.
 */
void
dpt_done_ccb(sc, ccb)
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
{
	struct scsipi_sense_data *s1, *s2;
	struct scsipi_xfer *xs;
	bus_dma_tag_t dmat;

	dmat = sc->sc_dmat;
	xs = ccb->ccb_xs;

	SC_DEBUG(xs->sc_link, SDEV_DB2, ("dpt_done_ccb\n"));

	/*
	 * If we were a data transfer, unload the map that described the
	 * data buffer.
	 */
	if (xs->datalen != 0) {
		bus_dmamap_sync(dmat, ccb->ccb_dmamap_xfer, 0,
		    ccb->ccb_dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->ccb_dmamap_xfer);
	}

	if (xs->error == XS_NOERROR) {
		/* HBA-level errors take precedence over SCSI status. */
		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
			switch (ccb->ccb_hba_status) {
			case SP_HBA_ERROR_SEL_TO:
				xs->error = XS_SELTIMEOUT;
				break;
			case SP_HBA_ERROR_RESET:
				xs->error = XS_RESET;
				break;
			default:	/* Other scsi protocol messes */
				printf("%s: HBA status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_hba_status);
				xs->error = XS_DRIVER_STUFFUP;
			}
		} else if (ccb->ccb_scsi_status != SCSI_OK) {
			switch (ccb->ccb_scsi_status) {
			case SCSI_CHECK:
				/* Copy the auto-sense data for the caller. */
				s1 = &ccb->ccb_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				printf("%s: SCSI status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_scsi_status);
				xs->error = XS_DRIVER_STUFFUP;
			}
		} else
			xs->resid = 0;

		xs->status = ccb->ccb_scsi_status;
	}

	/* Free up the CCB and mark the command as done */
	dpt_free_ccb(sc, ccb);
	xs->xs_status |= XS_STS_DONE;
	scsipi_done(xs);

	/*
	 * If there are entries in the software queue, try to run the first
	 * one.  We should be more or less guaranteed to succeed, since we
	 * just freed an CCB.  NOTE: dpt_scsi_cmd() relies on our calling it
	 * with the first entry in the queue.
	 */
	if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
		dpt_scsi_cmd(xs);
}
769
770 /*
771 * Start a SCSI command.
772 */
773 int
774 dpt_scsi_cmd(xs)
775 struct scsipi_xfer *xs;
776 {
777 int error, i, flags, s, fromqueue, dontqueue, nowait;
778 struct scsipi_link *sc_link;
779 struct dpt_softc *sc;
780 struct dpt_ccb *ccb;
781 struct eata_sg *sg;
782 struct eata_cp *cp;
783 bus_dma_tag_t dmat;
784 bus_dmamap_t xfer;
785
786 sc_link = xs->sc_link;
787 flags = xs->xs_control;
788 sc = sc_link->adapter_softc;
789 dmat = sc->sc_dmat;
790 fromqueue = 0;
791 dontqueue = 0;
792 nowait = 0;
793
794 SC_DEBUG(sc_link, SDEV_DB2, ("dpt_scsi_cmd\n"));
795
796 /* Protect the queue */
797 s = splbio();
798
799 /*
800 * If we're running the queue from dpt_done_ccb(), we've been called
801 * with the first queue entry as our argument.
802 */
803 if (xs == TAILQ_FIRST(&sc->sc_queue)) {
804 TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
805 fromqueue = 1;
806 nowait = 1;
807 } else {
808 /* Cmds must be no more than 12 bytes for us */
809 if (xs->cmdlen > 12) {
810 splx(s);
811 xs->error = XS_DRIVER_STUFFUP;
812 return (COMPLETE);
813 }
814
815 /*
816 * XXX we can't reset devices just yet. Apparently some
817 * older firmware revisions don't even support it.
818 */
819 if ((flags & XS_CTL_RESET) != 0) {
820 xs->error = XS_DRIVER_STUFFUP;
821 return (COMPLETE);
822 }
823
824 /* Polled requests can't be queued for later */
825 dontqueue = flags & XS_CTL_POLL;
826
827 /* If there are jobs in the queue, run them first */
828 if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
829 /*
830 * If we can't queue we abort, since we must
831 * preserve the queue order.
832 */
833 if (dontqueue) {
834 splx(s);
835 xs->error = XS_DRIVER_STUFFUP;
836 return (TRY_AGAIN_LATER);
837 }
838
839 /* Swap with the first queue entry. */
840 TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
841 xs = TAILQ_FIRST(&sc->sc_queue);
842 TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
843 fromqueue = 1;
844 }
845 }
846
847 /* Get a CCB */
848 if (nowait)
849 flags |= XS_CTL_NOSLEEP;
850 if ((ccb = dpt_alloc_ccb(sc, flags)) == NULL) {
851 /* If we can't queue, we lose */
852 if (dontqueue) {
853 splx(s);
854 xs->error = XS_DRIVER_STUFFUP;
855 return (TRY_AGAIN_LATER);
856 }
857
858 /*
859 * Stuff request into the queue, in front if we came off
860 * it in the first place.
861 */
862 if (fromqueue)
863 TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
864 else
865 TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
866 splx(s);
867 return (SUCCESSFULLY_QUEUED);
868 }
869
870 splx(s);
871
872 ccb->ccb_xs = xs;
873 ccb->ccb_timeout = xs->timeout;
874
875 cp = &ccb->ccb_eata_cp;
876 memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
877 cp->cp_ccbid = ccb->ccb_id;
878 cp->cp_senselen = sizeof(ccb->ccb_sense);
879 cp->cp_stataddr = htobe32(sc->sc_stppa);
880 cp->cp_ctl0 = CP_C0_AUTO_SENSE;
881 cp->cp_ctl1 = 0;
882 cp->cp_ctl2 = 0;
883 cp->cp_ctl3 = sc_link->scsipi_scsi.target << CP_C3_ID_SHIFT;
884 cp->cp_ctl3 |= sc_link->scsipi_scsi.channel << CP_C3_CHANNEL_SHIFT;
885 cp->cp_ctl4 = sc_link->scsipi_scsi.lun << CP_C4_LUN_SHIFT;
886 cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;
887
888 if ((flags & XS_CTL_DATA_IN) != 0)
889 cp->cp_ctl0 |= CP_C0_DATA_IN;
890 if ((flags & XS_CTL_DATA_OUT) != 0)
891 cp->cp_ctl0 |= CP_C0_DATA_OUT;
892 if (sc->sc_hbaid[sc_link->scsipi_scsi.channel] ==
893 sc_link->scsipi_scsi.target)
894 cp->cp_ctl0 |= CP_C0_INTERPRET;
895
896 /* Synchronous xfers musn't write-back through the cache */
897 if (xs->bp != NULL && (xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
898 cp->cp_ctl2 |= CP_C2_NO_CACHE;
899
900 cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
901 CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
902
903 if (xs->datalen != 0) {
904 xfer = ccb->ccb_dmamap_xfer;
905 #ifdef TFS
906 if ((flags & XS_CTL_DATA_UIO) != 0) {
907 error = bus_dmamap_load_uio(dmat, xfer,
908 (struct uio *)xs->data, ((flags & XS_CTL_NOSLEEP) ?
909 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
910 BUS_DMA_STREAMING);
911 } else
912 #endif /* TFS */
913 {
914 error = bus_dmamap_load(dmat, xfer, xs->data,
915 xs->datalen, NULL, ((flags & XS_CTL_NOSLEEP) ?
916 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
917 BUS_DMA_STREAMING);
918 }
919
920 if (error) {
921 printf("%s: dpt_scsi_cmd: ", sc->sc_dv.dv_xname);
922 if (error == EFBIG)
923 printf("more than %d dma segs\n", DPT_SG_SIZE);
924 else
925 printf("error %d loading dma map\n", error);
926
927 xs->error = XS_DRIVER_STUFFUP;
928 dpt_free_ccb(sc, ccb);
929 return (COMPLETE);
930 }
931
932 bus_dmamap_sync(dmat, xfer, 0, xfer->dm_mapsize,
933 (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
934 BUS_DMASYNC_PREWRITE);
935
936 /* Don't bother using scatter/gather for just 1 segment */
937 if (xfer->dm_nsegs == 1) {
938 cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
939 cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
940 } else {
941 /*
942 * Load the hardware scatter/gather map with the
943 * contents of the DMA map.
944 */
945 sg = ccb->ccb_sg;
946 for (i = 0; i < xfer->dm_nsegs; i++, sg++) {
947 sg->sg_addr = htobe32(xfer->dm_segs[i].ds_addr);
948 sg->sg_len = htobe32(xfer->dm_segs[i].ds_len);
949 }
950 cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
951 sc->sc_dmamap->dm_segs[0].ds_addr +
952 offsetof(struct dpt_ccb, ccb_sg));
953 cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
954 cp->cp_ctl0 |= CP_C0_SCATTER;
955 }
956 } else {
957 cp->cp_dataaddr = 0;
958 cp->cp_datalen = 0;
959 }
960
961 /* Sync up CCB and status packet */
962 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
963 sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
964 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
965 sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
966
967 /*
968 * Start the command. If we are polling on completion, mark it
969 * private so that dpt_intr/dpt_done_ccb don't recycle the CCB
970 * without us noticing.
971 */
972 if (dontqueue != 0)
973 ccb->ccb_flg |= CCB_PRIVATE;
974
975 if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0)) {
976 printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
977 xs->error = XS_DRIVER_STUFFUP;
978 dpt_free_ccb(sc, ccb);
979 return (TRY_AGAIN_LATER);
980 }
981
982 if (dontqueue == 0)
983 return (SUCCESSFULLY_QUEUED);
984
985 /* Don't wait longer than this single command wants to wait */
986 if (dpt_poll(sc, ccb)) {
987 dpt_timeout(ccb);
988 /* Wait for abort to complete */
989 if (dpt_poll(sc, ccb))
990 dpt_timeout(ccb);
991 }
992
993 dpt_done_ccb(sc, ccb);
994 return (COMPLETE);
995 }
996
/*
 * Specified CCB has timed out, abort it.
 *
 * Called with the timed-out CCB as the argument.  On the first timeout
 * a CPI_SPEC_ABORT immediate command is issued for the CCB; if that
 * abort itself times out (CCB_ABORT already set), the whole HBA is
 * reset instead.
 */
void
dpt_timeout(arg)
	void *arg;
{
	struct scsipi_link *sc_link;
	struct scsipi_xfer *xs;
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	int s;

	ccb = arg;
	xs = ccb->ccb_xs;
	sc_link = xs->sc_link;
	sc = sc_link->adapter_softc;

	scsi_print_addr(sc_link);
	printf("timed out (status:%02x aux status:%02x)",
	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));

	s = splbio();

	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
		/* Abort timed out, reset the HBA */
		printf(" AGAIN, resetting HBA\n");
		dpt_outb(sc, HA_COMMAND, CP_RESET);
		DELAY(750000);
	} else {
		/* Abort the operation that has timed out */
		printf("\n");
		ccb->ccb_xs->error = XS_TIMEOUT;
		/* Give the abort its own, shorter, timeout. */
		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
		ccb->ccb_flg |= CCB_ABORT;
		/* Start the abort */
		if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa,
		    CP_IMMEDIATE, CPI_SPEC_ABORT))
			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
	}

	splx(s);
}
1040
/*
 * Get inquiry data from the adapter.
 *
 * Issues a SCSI INQUIRY that the HBA interprets itself (CP_C0_INTERPRET)
 * and leaves the result in the shared scratch area; *ei is pointed at
 * that area.  Runs synchronously via dpt_poll() with a private CCB and
 * panics on any failure, so it must only be used during attach.
 */
void
dpt_hba_inquire(sc, ei)
	struct dpt_softc *sc;
	struct eata_inquiry_data **ei;
{
	struct dpt_ccb *ccb;
	struct eata_cp *cp;
	bus_dma_tag_t dmat;

	/* The result lands in the scratch area of the shared DMA map. */
	*ei = (struct eata_inquiry_data *)sc->sc_scr;
	dmat = sc->sc_dmat;

	/* Get a CCB and mark as private */
	if ((ccb = dpt_alloc_ccb(sc, 0)) == NULL)
		panic("%s: no CCB for inquiry", sc->sc_dv.dv_xname);

	ccb->ccb_flg |= CCB_PRIVATE;
	ccb->ccb_timeout = 200;		/* milliseconds (see dpt_poll()) */

	/* Put all the arguments into the CCB */
	cp = &ccb->ccb_eata_cp;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = 0;
	cp->cp_stataddr = htobe32(sc->sc_stppa);
	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
	cp->cp_ctl1 = 0;
	cp->cp_ctl2 = 0;
	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;

	/* Put together the SCSI inquiry command */
	memset(&cp->cp_cdb_cmd, 0, 12);	/* XXX */
	cp->cp_cdb_cmd = INQUIRY;
	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);

	/* Sync up CCB, status packet and scratch area */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);

	/* Start the command and poll on completion */
	if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);

	if (dpt_poll(sc, ccb))
		panic("%s: inquiry timed out", sc->sc_dv.dv_xname);

	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
	    ccb->ccb_scsi_status != SCSI_OK)
		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
		    sc->sc_dv.dv_xname, ccb->ccb_hba_status,
		    ccb->ccb_scsi_status);

	/* Sync up the DMA map and free CCB, returning */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
	dpt_free_ccb(sc, ccb);
}
1108