dpt.c revision 1.33 1 /* $NetBSD: dpt.c,v 1.33 2002/12/07 19:48:32 ad Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
9 * Aerospace Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
42 * Copyright (c) 2000 Adaptec Corporation
43 * All rights reserved.
44 *
45 * TERMS AND CONDITIONS OF USE
46 *
47 * Redistribution and use in source form, with or without modification, are
48 * permitted provided that redistributions of source code must retain the
49 * above copyright notice, this list of conditions and the following disclaimer.
50 *
51 * This software is provided `as is' by Adaptec and any express or implied
52 * warranties, including, but not limited to, the implied warranties of
53 * merchantability and fitness for a particular purpose, are disclaimed. In no
54 * event shall Adaptec be liable for any direct, indirect, incidental, special,
55 * exemplary or consequential damages (including, but not limited to,
56 * procurement of substitute goods or services; loss of use, data, or profits;
57 * or business interruptions) however caused and on any theory of liability,
58 * whether in contract, strict liability, or tort (including negligence or
59 * otherwise) arising in any way out of the use of this driver software, even
60 * if advised of the possibility of such damage.
61 */
62
63 /*
64 * Portions of this code fall under the following copyright:
65 *
66 * Originally written by Julian Elischer (julian (at) tfs.com)
67 * for TRW Financial Systems for use under the MACH(2.5) operating system.
68 *
69 * TRW Financial Systems, in accordance with their agreement with Carnegie
70 * Mellon University, makes this software available to CMU to distribute
71 * or use in any manner that they see fit as long as this message is kept with
72 * the software. For this reason TFS also grants any other persons or
73 * organisations permission to use or modify this software.
74 *
75 * TFS supplies this software to be publicly redistributed
76 * on the understanding that TFS is not responsible for the correct
77 * functioning of this software in any circumstances.
78 */
79
80 #include <sys/cdefs.h>
81 __KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.33 2002/12/07 19:48:32 ad Exp $");
82
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/device.h>
86 #include <sys/queue.h>
87 #include <sys/buf.h>
88 #include <sys/endian.h>
89 #include <sys/conf.h>
90
91 #include <uvm/uvm_extern.h>
92
93 #include <machine/bus.h>
94 #ifdef i386
95 #include <machine/pio.h>
96 #endif
97
98 #include <dev/scsipi/scsi_all.h>
99 #include <dev/scsipi/scsipi_all.h>
100 #include <dev/scsipi/scsiconf.h>
101
102 #include <dev/ic/dptreg.h>
103 #include <dev/ic/dptvar.h>
104
105 #include <dev/i2o/dptivar.h>
106
107 #ifdef DEBUG
108 #define DPRINTF(x) printf x
109 #else
110 #define DPRINTF(x)
111 #endif
112
113 #define dpt_inb(x, o) \
114 bus_space_read_1((x)->sc_iot, (x)->sc_ioh, (o))
115 #define dpt_outb(x, o, d) \
116 bus_space_write_1((x)->sc_iot, (x)->sc_ioh, (o), (d))
117
/*
 * Table mapping the 4-character EATA model-number prefix (matched against
 * ei_model + 2 in dpt_init()) to the board's marketing name.  Entries are
 * scanned in pairs; the trailing NULL entry is the catch-all whose partner
 * string is printed when no prefix matches.
 */
static const char * const dpt_cname[] = {
	"3334", "SmartRAID IV",
	"3332", "SmartRAID IV",
	"2144", "SmartCache IV",
	"2044", "SmartCache IV",
	"2142", "SmartCache IV",
	"2042", "SmartCache IV",
	"2041", "SmartCache IV",
	"3224", "SmartRAID III",
	"3222", "SmartRAID III",
	"3021", "SmartRAID III",
	"2124", "SmartCache III",
	"2024", "SmartCache III",
	"2122", "SmartCache III",
	"2022", "SmartCache III",
	"2021", "SmartCache III",
	"2012", "SmartCache Plus",
	"2011", "SmartCache Plus",
	NULL, "<unknown>",
};
138
/* Shutdown hook handle; established once, shared by all dpt instances. */
static void *dpt_sdh;

dev_type_open(dptopen);
dev_type_ioctl(dptioctl);

/* Character device switch for the DPT management/control device. */
const struct cdevsw dpt_cdevsw = {
	dptopen, nullclose, noread, nowrite, dptioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};
148
149 extern struct cfdriver dpt_cd;
150
/*
 * Driver signature returned to DPT management tools by the DPT_SIGNATURE
 * ioctl.  The description string is filled in at attach time by dpt_init().
 */
static struct dpt_sig dpt_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'},	/* signature marker */
	SIG_VERSION,
	/* Processor family, chosen per architecture at compile time. */
#if defined(i386)
	PROC_INTEL,
#elif defined(powerpc)
	PROC_POWERPC,
#elif defined(alpha)
	PROC_ALPHA,
#elif defined(__mips__)
	PROC_MIPS,
#elif defined(sparc64)
	PROC_ULTRASPARC,
#endif
#if defined(i386)
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#else
	0,
#endif
	FT_HBADRVR,		/* file type: HBA driver */
	0,
	OEM_DPT,
	OS_FREE_BSD,	/* XXX no OS_NET_BSD code exists in the signature spec */
	CAP_ABOVE16MB,
	DEV_ALL,
	ADF_ALL_EATA,
	0,
	0,
	DPT_VERSION,
	DPT_REVISION,
	DPT_SUBREVISION,
	DPT_MONTH,
	DPT_DAY,
	DPT_YEAR,
	"" /* Will be filled later */
};
187
188 static void dpt_ccb_abort(struct dpt_softc *, struct dpt_ccb *);
189 static void dpt_ccb_done(struct dpt_softc *, struct dpt_ccb *);
190 static int dpt_ccb_map(struct dpt_softc *, struct dpt_ccb *);
191 static int dpt_ccb_poll(struct dpt_softc *, struct dpt_ccb *);
192 static void dpt_ccb_unmap(struct dpt_softc *, struct dpt_ccb *);
193 static int dpt_cmd(struct dpt_softc *, struct dpt_ccb *, int, int);
194 static void dpt_ctlrinfo(struct dpt_softc *, struct dpt_eata_ctlrinfo *);
195 static void dpt_hba_inquire(struct dpt_softc *, struct eata_inquiry_data **);
196 static void dpt_minphys(struct buf *);
197 static int dpt_passthrough(struct dpt_softc *, struct eata_cp *,
198 struct proc *);
199 static void dpt_scsipi_request(struct scsipi_channel *,
200 scsipi_adapter_req_t, void *);
201 static void dpt_shutdown(void *);
202 static void dpt_sysinfo(struct dpt_softc *, struct dpt_sysinfo *);
203 static int dpt_wait(struct dpt_softc *, u_int8_t, u_int8_t, int);
204
205 static __inline__ struct dpt_ccb *dpt_ccb_alloc(struct dpt_softc *);
206 static __inline__ void dpt_ccb_free(struct dpt_softc *, struct dpt_ccb *);
207
208 static __inline__ struct dpt_ccb *
209 dpt_ccb_alloc(struct dpt_softc *sc)
210 {
211 struct dpt_ccb *ccb;
212 int s;
213
214 s = splbio();
215 ccb = SLIST_FIRST(&sc->sc_ccb_free);
216 SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
217 splx(s);
218
219 return (ccb);
220 }
221
222 static __inline__ void
223 dpt_ccb_free(struct dpt_softc *sc, struct dpt_ccb *ccb)
224 {
225 int s;
226
227 ccb->ccb_flg = 0;
228 ccb->ccb_savesp = NULL;
229 s = splbio();
230 SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
231 splx(s);
232 }
233
234 /*
235 * Handle an interrupt from the HBA.
236 */
int
dpt_intr(void *cookie)
{
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sp *sp;
	volatile int junk;	/* volatile: force the HA_STATUS read (ACK) */
	int forus;		/* non-zero once we see an interrupt of ours */

	sc = cookie;
	sp = sc->sc_stp;
	forus = 0;

	for (;;) {
		/*
		 * HBA might have interrupted while we were dealing with the
		 * last completed command, since we ACK before we deal; keep
		 * polling.
		 */
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
			break;
		forus = 1;

		/* Pull the latest status packet out of DMA memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);

		/* Might have looped before HBA can reset HBA_AUX_INTR. */
		if (sp->sp_ccbid == -1) {
			DELAY(50);

			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
				return (0);

			printf("%s: no status\n", sc->sc_dv.dv_xname);

			/* Re-sync DMA map */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    sc->sc_stpoff, sizeof(struct eata_sp),
			    BUS_DMASYNC_POSTREAD);
		}

		/* Make sure CCB ID from status packet is realistic. */
		if ((u_int)sp->sp_ccbid >= sc->sc_nccbs) {
			printf("%s: bogus status (returned CCB id %d)\n",
			    sc->sc_dv.dv_xname, sp->sp_ccbid);

			/* Ack the interrupt */
			sp->sp_ccbid = -1;
			junk = dpt_inb(sc, HA_STATUS);
			continue;
		}

		/* Sync up DMA map and cache cmd status. */
		ccb = sc->sc_ccbs + sp->sp_ccbid;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
		    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);

		/* High bit of the HBA status is masked off here. */
		ccb->ccb_hba_status = sp->sp_hba_status & 0x7f;
		ccb->ccb_scsi_status = sp->sp_scsi_status;
		/* Preserve the raw status packet if a caller asked for it. */
		if (ccb->ccb_savesp != NULL)
			memcpy(ccb->ccb_savesp, sp, sizeof(*sp));

		/*
		 * Ack the interrupt and process the CCB.  If this
		 * is a private CCB it's up to dpt_ccb_poll() to
		 * notice.
		 */
		sp->sp_ccbid = -1;
		ccb->ccb_flg |= CCB_INTR;
		junk = dpt_inb(sc, HA_STATUS);
		if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
			dpt_ccb_done(sc, ccb);
		else if ((ccb->ccb_flg & CCB_WAIT) != 0)
			wakeup(ccb);
	}

	return (forus);
}
316
317 /*
318 * Initialize and attach the HBA. This is the entry point from bus
319 * specific probe-and-attach code.
320 */
321 void
322 dpt_init(struct dpt_softc *sc, const char *intrstr)
323 {
324 struct scsipi_adapter *adapt;
325 struct scsipi_channel *chan;
326 struct eata_inquiry_data *ei;
327 int i, j, rv, rseg, maxchannel, maxtarget, mapsize;
328 bus_dma_segment_t seg;
329 struct eata_cfg *ec;
330 struct dpt_ccb *ccb;
331 char model[16];
332
333 ec = &sc->sc_ec;
334 sprintf(dpt_sig.dsDescription, "NetBSD %s DPT driver", osrelease);
335
336 /*
337 * Allocate the CCB/status packet/scratch DMA map and load.
338 */
339 sc->sc_nccbs =
340 min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
341 sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
342 sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
343 mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
344 DPT_SCRATCH_SIZE + sizeof(struct eata_sp);
345
346 if ((rv = bus_dmamem_alloc(sc->sc_dmat, mapsize,
347 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
348 printf("%s: unable to allocate CCBs, rv = %d\n",
349 sc->sc_dv.dv_xname, rv);
350 return;
351 }
352
353 if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
354 (caddr_t *)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
355 printf("%s: unable to map CCBs, rv = %d\n",
356 sc->sc_dv.dv_xname, rv);
357 return;
358 }
359
360 if ((rv = bus_dmamap_create(sc->sc_dmat, mapsize,
361 mapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
362 printf("%s: unable to create CCB DMA map, rv = %d\n",
363 sc->sc_dv.dv_xname, rv);
364 return;
365 }
366
367 if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
368 sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
369 printf("%s: unable to load CCB DMA map, rv = %d\n",
370 sc->sc_dv.dv_xname, rv);
371 return;
372 }
373
374 sc->sc_stp = (struct eata_sp *)((caddr_t)sc->sc_ccbs + sc->sc_stpoff);
375 sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
376 sc->sc_scr = (caddr_t)sc->sc_ccbs + sc->sc_scroff;
377 sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
378 sc->sc_stp->sp_ccbid = -1;
379
380 /*
381 * Create the CCBs.
382 */
383 SLIST_INIT(&sc->sc_ccb_free);
384 memset(sc->sc_ccbs, 0, sizeof(struct dpt_ccb) * sc->sc_nccbs);
385
386 for (i = 0, ccb = sc->sc_ccbs; i < sc->sc_nccbs; i++, ccb++) {
387 rv = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER,
388 DPT_SG_SIZE, DPT_MAX_XFER, 0,
389 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
390 &ccb->ccb_dmamap_xfer);
391 if (rv) {
392 printf("%s: can't create ccb dmamap (%d)\n",
393 sc->sc_dv.dv_xname, rv);
394 break;
395 }
396
397 ccb->ccb_id = i;
398 ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
399 CCB_OFF(sc, ccb);
400 SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
401 }
402
403 if (i == 0) {
404 printf("%s: unable to create CCBs\n", sc->sc_dv.dv_xname);
405 return;
406 } else if (i != sc->sc_nccbs) {
407 printf("%s: %d/%d CCBs created!\n", sc->sc_dv.dv_xname, i,
408 sc->sc_nccbs);
409 sc->sc_nccbs = i;
410 }
411
412 /* Set shutdownhook before we start any device activity. */
413 if (dpt_sdh == NULL)
414 dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);
415
416 /* Get the inquiry data from the HBA. */
417 dpt_hba_inquire(sc, &ei);
418
419 /*
420 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
421 * dpt0: interrupting at irq 10
422 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
423 */
424 for (i = 0; ei->ei_vendor[i] != ' ' && i < 8; i++)
425 ;
426 ei->ei_vendor[i] = '\0';
427
428 for (i = 0; ei->ei_model[i] != ' ' && i < 7; i++)
429 model[i] = ei->ei_model[i];
430 for (j = 0; ei->ei_suffix[j] != ' ' && j < 7; i++, j++)
431 model[i] = ei->ei_model[i];
432 model[i] = '\0';
433
434 /* Find the marketing name for the board. */
435 for (i = 0; dpt_cname[i] != NULL; i += 2)
436 if (memcmp(ei->ei_model + 2, dpt_cname[i], 4) == 0)
437 break;
438
439 printf("%s %s (%s)\n", ei->ei_vendor, dpt_cname[i + 1], model);
440
441 if (intrstr != NULL)
442 printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
443 intrstr);
444
445 maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
446 EC_F3_MAX_CHANNEL_SHIFT;
447 maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
448 EC_F3_MAX_TARGET_SHIFT;
449
450 printf("%s: %d queued commands, %d channel(s), adapter on ID(s)",
451 sc->sc_dv.dv_xname, sc->sc_nccbs, maxchannel + 1);
452
453 for (i = 0; i <= maxchannel; i++) {
454 sc->sc_hbaid[i] = ec->ec_hba[3 - i];
455 printf(" %d", sc->sc_hbaid[i]);
456 }
457 printf("\n");
458
459 /*
460 * Reset the SCSI controller chip(s) and bus. XXX Do we need to do
461 * this for each bus?
462 */
463 if (dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_BUS_RESET))
464 panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
465
466 /* Fill in the scsipi_adapter. */
467 adapt = &sc->sc_adapt;
468 memset(adapt, 0, sizeof(*adapt));
469 adapt->adapt_dev = &sc->sc_dv;
470 adapt->adapt_nchannels = maxchannel + 1;
471 adapt->adapt_openings = sc->sc_nccbs - 1;
472 adapt->adapt_max_periph = sc->sc_nccbs - 1;
473 adapt->adapt_request = dpt_scsipi_request;
474 adapt->adapt_minphys = dpt_minphys;
475
476 for (i = 0; i <= maxchannel; i++) {
477 /* Fill in the scsipi_channel. */
478 chan = &sc->sc_chans[i];
479 memset(chan, 0, sizeof(*chan));
480 chan->chan_adapter = adapt;
481 chan->chan_bustype = &scsi_bustype;
482 chan->chan_channel = i;
483 chan->chan_ntargets = maxtarget + 1;
484 chan->chan_nluns = ec->ec_maxlun + 1;
485 chan->chan_id = sc->sc_hbaid[i];
486 config_found(&sc->sc_dv, chan, scsiprint);
487 }
488 }
489
490 /*
491 * Read the EATA configuration from the HBA and perform some sanity checks.
492 */
int
dpt_readcfg(struct dpt_softc *sc)
{
	struct eata_cfg *ec;
	int i, j, stat;
	u_int16_t *p;

	ec = &sc->sc_ec;

	/* Older firmware may puke if we talk to it too soon after reset. */
	dpt_outb(sc, HA_COMMAND, CP_RESET);
	DELAY(750000);

	/* Wait up to ~2 seconds for the HBA to come ready. */
	for (i = 1000; i; i--) {
		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}

	if (i == 0) {
		printf("%s: HBA not ready after reset (hba status:%02x)\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/*
	 * Wait for the status register to settle into one of the accepted
	 * ready patterns and for BUSY to clear.
	 */
	while((((stat = dpt_inb(sc, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
		/* RAID drives still spinning up? */
		if(dpt_inb(sc, HA_ERROR) != 'D' ||
		    dpt_inb(sc, HA_ERROR + 1) != 'P' ||
		    dpt_inb(sc, HA_ERROR + 2) != 'T') {
			printf("%s: HBA not ready\n", sc->sc_dv.dv_xname);
			return (-1);
		}
	}

	/*
	 * Issue the read-config command and wait for the data to appear.
	 *
	 * Apparently certian firmware revisions won't DMA later on if we
	 * request the config data using PIO, but it makes it a lot easier
	 * as no DMA setup is required.
	 */
	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
	memset(ec, 0, sizeof(*ec));
	/* Number of 16-bit words up to and including the ec_cfglen field. */
	i = ((int)&((struct eata_cfg *)0)->ec_cfglen +
	    sizeof(ec->ec_cfglen)) >> 1;
	p = (u_int16_t *)ec;

	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
		printf("%s: cfg data didn't appear (hba status:%02x)\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/* Begin reading. */
	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Clamp the remaining length to what struct eata_cfg can hold. */
	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
	    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
	    - sizeof(ec->ec_cfglen)))
		i = sizeof(struct eata_cfg)
		    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
		    - sizeof(ec->ec_cfglen);

	/* j = total config bytes that will have been consumed. */
	j = i + (int)(&(((struct eata_cfg *)0L)->ec_cfglen)) +
	    sizeof(ec->ec_cfglen);
	i >>= 1;

	/* Read the rest of the config data. */
	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Defaults for older firmware... */
	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;

	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
		printf("%s: HBA error\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
		printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
		printf("%s: ec_hba field invalid\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
		printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	return (0);
}
600
601 /*
602 * Our `shutdownhook' to cleanly shut down the HBA. The HBA must flush all
603 * data from it's cache and mark array groups as clean.
604 *
605 * XXX This doesn't always work (i.e., the HBA may still be flushing after
606 * we tell root that it's safe to power off).
607 */
608 static void
609 dpt_shutdown(void *cookie)
610 {
611 extern struct cfdriver dpt_cd;
612 struct dpt_softc *sc;
613 int i;
614
615 printf("shutting down dpt devices...");
616
617 for (i = 0; i < dpt_cd.cd_ndevs; i++) {
618 if ((sc = device_lookup(&dpt_cd, i)) == NULL)
619 continue;
620 dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_POWEROFF_WARN);
621 }
622
623 delay(10000*1000);
624 printf(" done\n");
625 }
626
627 /*
628 * Send an EATA command to the HBA.
629 */
/*
 * `ccb' may be NULL for immediate commands that carry no CCB; `icmd' is
 * only meaningful when eatacmd == CP_IMMEDIATE.  Returns 0 on success or
 * -1 if the HBA never became non-busy.
 */
static int
dpt_cmd(struct dpt_softc *sc, struct dpt_ccb *ccb, int eatacmd, int icmd)
{
	u_int32_t pa;
	int i, s;

	s = splbio();

	/* Wait up to ~1s (20000 * 50us) for the HBA to go non-busy. */
	for (i = 20000; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == 0) {
		splx(s);
		return (-1);
	}

	/* Load the CCB's physical address, LSB first. */
	pa = (ccb != NULL ? ccb->ccb_ccbpa : 0);
	dpt_outb(sc, HA_DMA_BASE + 0, (pa ) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 1, (pa >> 8) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 2, (pa >> 16) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 3, (pa >> 24) & 0xff);

	/* The immediate sub-code must be set before the command itself. */
	if (eatacmd == CP_IMMEDIATE)
		dpt_outb(sc, HA_ICMD, icmd);

	dpt_outb(sc, HA_COMMAND, eatacmd);

	splx(s);
	return (0);
}
662
663 /*
664 * Wait for the HBA status register to reach a specific state.
665 */
666 static int
667 dpt_wait(struct dpt_softc *sc, u_int8_t mask, u_int8_t state, int ms)
668 {
669
670 for (ms *= 10; ms != 0; ms--) {
671 if ((dpt_inb(sc, HA_STATUS) & mask) == state)
672 return (0);
673 DELAY(100);
674 }
675
676 return (-1);
677 }
678
679 /*
680 * Spin waiting for a command to finish. The timeout value from the CCB is
681 * used. The CCB must be marked with CCB_PRIVATE, otherwise it'll will get
682 * recycled before we get a look at it.
683 */
static int
dpt_ccb_poll(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	int i, s;

#ifdef DEBUG
	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
		panic("dpt_ccb_poll: called for non-CCB_PRIVATE request");
#endif

	s = splbio();

	/* Command may already have completed via a real interrupt. */
	if ((ccb->ccb_flg & CCB_INTR) != 0) {
		splx(s);
		return (0);
	}

	/* Timeout is in ms; we poll every 50us, hence the * 20. */
	for (i = ccb->ccb_timeout * 20; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
			dpt_intr(sc);
		/* dpt_intr() sets CCB_INTR when our command completes. */
		if ((ccb->ccb_flg & CCB_INTR) != 0)
			break;
		DELAY(50);
	}

	splx(s);
	/* Non-zero return means the command timed out. */
	return (i == 0);
}
712
713 /*
714 * We have a command which has been processed by the HBA, so now we look to
715 * see how the operation went. CCBs marked CCB_PRIVATE are not passed here
716 * by dpt_intr().
717 */
static void
dpt_ccb_done(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;

	xs = ccb->ccb_xs;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("dpt_ccb_done\n"));

	/*
	 * If we were a data transfer, unload the map that described the
	 * data buffer.
	 */
	if (xs->datalen != 0)
		dpt_ccb_unmap(sc, ccb);

	if (xs->error == XS_NOERROR) {
		/* HBA-level errors take precedence over SCSI status. */
		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
			switch (ccb->ccb_hba_status) {
			case SP_HBA_ERROR_SEL_TO:
				xs->error = XS_SELTIMEOUT;
				break;
			case SP_HBA_ERROR_RESET:
				xs->error = XS_RESET;
				break;
			default:
				printf("%s: HBA status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_hba_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else if (ccb->ccb_scsi_status != SCSI_OK) {
			switch (ccb->ccb_scsi_status) {
			case SCSI_CHECK:
				/* Auto-sense data was stashed in the CCB. */
				memcpy(&xs->sense.scsi_sense, &ccb->ccb_sense,
				    sizeof(xs->sense.scsi_sense));
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
			case SCSI_QUEUE_FULL:
				xs->error = XS_BUSY;
				break;
			default:
				scsipi_printaddr(xs->xs_periph);
				printf("SCSI status %x\n",
				    ccb->ccb_scsi_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			/* Complete success: everything transferred. */
			xs->resid = 0;

		xs->status = ccb->ccb_scsi_status;
	}

	/* Free up the CCB and mark the command as done. */
	dpt_ccb_free(sc, ccb);
	scsipi_done(xs);
}
777
778 /*
779 * Specified CCB has timed out, abort it.
780 */
static void
dpt_ccb_abort(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	int s;

	xs = ccb->ccb_xs;
	periph = xs->xs_periph;

	scsipi_printaddr(periph);
	printf("timed out (status:%02x aux status:%02x)",
	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));

	s = splbio();

	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
		/* Abort timed out, reset the HBA */
		printf(" AGAIN, resetting HBA\n");
		dpt_outb(sc, HA_COMMAND, CP_RESET);
		/* Give the firmware time to come back after the reset. */
		DELAY(750000);
	} else {
		/* Abort the operation that has timed out */
		printf("\n");
		xs->error = XS_TIMEOUT;
		/* Shorter timeout for the abort itself. */
		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
		ccb->ccb_flg |= CCB_ABORT;
		/* Start the abort */
		if (dpt_cmd(sc, ccb, CP_IMMEDIATE, CPI_SPEC_ABORT))
			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
	}

	splx(s);
}
815
816 /*
817 * Map a data transfer.
818 */
/*
 * Returns 0 on success.  On failure the xfer has already been completed
 * (scsipi_done) and the CCB freed, and -1 is returned.
 */
static int
dpt_ccb_map(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;
	bus_dmamap_t xfer;
	bus_dma_segment_t *ds;
	struct eata_sg *sg;
	struct eata_cp *cp;
	int rv, i;

	xs = ccb->ccb_xs;
	xfer = ccb->ccb_dmamap_xfer;
	cp = &ccb->ccb_eata_cp;

	rv = bus_dmamap_load(sc->sc_dmat, xfer, xs->data, xs->datalen, NULL,
	    ((xs->xs_control & XS_CTL_NOSLEEP) != 0 ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
	    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));

	switch (rv) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		/* Transient shortage; the mid-layer may retry. */
		xs->error = XS_RESOURCE_SHORTAGE;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
		printf("%s: error %d loading map\n", sc->sc_dv.dv_xname, rv);
		break;
	}

	if (xs->error != XS_NOERROR) {
		dpt_ccb_free(sc, ccb);
		scsipi_done(xs);
		return (-1);
	}

	bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	/* Don't bother using scatter/gather for just 1 seg */
	if (xfer->dm_nsegs == 1) {
		cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
		cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
	} else {
		/*
		 * Load the hardware scatter/gather map with
		 * the contents of the DMA map.
		 */
		sg = ccb->ccb_sg;
		ds = xfer->dm_segs;
		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
			/* EATA SG entries are big-endian. */
			sg->sg_addr = htobe32(ds->ds_addr);
			sg->sg_len = htobe32(ds->ds_len);
		}
		/* Point the CP at the SG list embedded in the CCB itself. */
		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
		    sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
		cp->cp_ctl0 |= CP_C0_SCATTER;
	}

	return (0);
}
885
886 /*
887 * Unmap a transfer.
888 */
889 static void
890 dpt_ccb_unmap(struct dpt_softc *sc, struct dpt_ccb *ccb)
891 {
892
893 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
894 ccb->ccb_dmamap_xfer->dm_mapsize,
895 (ccb->ccb_eata_cp.cp_ctl0 & CP_C0_DATA_IN) != 0 ?
896 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
897 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
898 }
899
900 /*
901 * Adjust the size of each I/O before it passes to the SCSI layer.
902 */
static void
dpt_minphys(struct buf *bp)
{

	/* Clamp to the HBA's limit, then apply the machine-dependent one. */
	if (bp->b_bcount > DPT_MAX_XFER)
		bp->b_bcount = DPT_MAX_XFER;
	minphys(bp);
}
911
912 /*
913 * Start a SCSI command.
914 */
static void
dpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		   void *arg)
{
	struct dpt_softc *sc;
	struct scsipi_xfer *xs;
	int flags;
	struct scsipi_periph *periph;
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	sc = (struct dpt_softc *)chan->chan_adapter->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

#ifdef DIAGNOSTIC
		/* Cmds must be no more than 12 bytes for us. */
		if (xs->cmdlen > 12) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif
		/*
		 * XXX We can't reset devices just yet.  Apparently some
		 * older firmware revisions don't even support it.
		 */
		if ((flags & XS_CTL_RESET) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}

		/*
		 * Get a CCB and fill it.
		 */
		ccb = dpt_ccb_alloc(sc);
		ccb->ccb_xs = xs;
		ccb->ccb_timeout = xs->timeout;

		cp = &ccb->ccb_eata_cp;
		memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
		cp->cp_ccbid = ccb->ccb_id;
		cp->cp_senselen = sizeof(ccb->ccb_sense);
		/* Completion status is DMAed to the shared status packet. */
		cp->cp_stataddr = htobe32(sc->sc_stppa);
		cp->cp_ctl0 = CP_C0_AUTO_SENSE;
		cp->cp_ctl1 = 0;
		cp->cp_ctl2 = 0;
		cp->cp_ctl3 = periph->periph_target << CP_C3_ID_SHIFT;
		cp->cp_ctl3 |= chan->chan_channel << CP_C3_CHANNEL_SHIFT;
		cp->cp_ctl4 = periph->periph_lun << CP_C4_LUN_SHIFT;
		cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;

		if ((flags & XS_CTL_DATA_IN) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_IN;
		if ((flags & XS_CTL_DATA_OUT) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_OUT;
		/* Commands addressed to the HBA itself are interpreted. */
		if (sc->sc_hbaid[chan->chan_channel] == periph->periph_target)
			cp->cp_ctl0 |= CP_C0_INTERPRET;

		/* Synchronous xfers musn't write-back through the cache. */
		if (xs->bp != NULL)
			if ((xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
				cp->cp_ctl2 |= CP_C2_NO_CACHE;

		/* Auto-sense data lands in the CCB's own sense buffer. */
		cp->cp_senseaddr =
		    htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));

		if (xs->datalen != 0) {
			/* On failure, dpt_ccb_map() completed the xfer. */
			if (dpt_ccb_map(sc, ccb))
				break;
		} else {
			cp->cp_dataaddr = 0;
			cp->cp_datalen = 0;
		}

		/* Sync up CCB and status packet. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);

		/*
		 * Start the command.
		 */
		if ((xs->xs_control & XS_CTL_POLL) != 0)
			ccb->ccb_flg |= CCB_PRIVATE;

		if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0)) {
			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			if (xs->datalen != 0)
				dpt_ccb_unmap(sc, ccb);
			dpt_ccb_free(sc, ccb);
			break;
		}

		/* Interrupt-driven completion: dpt_intr() takes it. */
		if ((xs->xs_control & XS_CTL_POLL) == 0)
			break;

		if (dpt_ccb_poll(sc, ccb)) {
			dpt_ccb_abort(sc, ccb);
			/* Wait for abort to complete... */
			if (dpt_ccb_poll(sc, ccb))
				dpt_ccb_abort(sc, ccb);
		}

		dpt_ccb_done(sc, ccb);
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported, since we allocate the maximum number of
		 * CCBs up front.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * This will be handled by the HBA itself, and we can't
		 * modify that (ditto for tagged queueing).
		 */
		break;
	}
}
1046
1047 /*
1048 * Get inquiry data from the adapter.
1049 */
static void
dpt_hba_inquire(struct dpt_softc *sc, struct eata_inquiry_data **ei)
{
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	/* The inquiry data is DMAed into the shared scratch area. */
	*ei = (struct eata_inquiry_data *)sc->sc_scr;

	/* Get a CCB and mark as private */
	ccb = dpt_ccb_alloc(sc);
	ccb->ccb_flg |= CCB_PRIVATE;
	ccb->ccb_timeout = 200;

	/* Put all the arguments into the CCB. */
	cp = &ccb->ccb_eata_cp;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = 0;
	cp->cp_stataddr = htobe32(sc->sc_stppa);
	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
	/* CP_C0_INTERPRET: the HBA answers this inquiry itself. */
	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
	cp->cp_ctl1 = 0;
	cp->cp_ctl2 = 0;
	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;

	/* Put together the SCSI inquiry command. */
	memset(&cp->cp_cdb_cmd, 0, 12);
	cp->cp_cdb_cmd = INQUIRY;
	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);

	/* Sync up CCB, status packet and scratch area. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);

	/* Start the command and poll on completion. */
	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);

	if (dpt_ccb_poll(sc, ccb))
		panic("%s: inquiry timed out", sc->sc_dv.dv_xname);

	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
	    ccb->ccb_scsi_status != SCSI_OK)
		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
		    sc->sc_dv.dv_xname, ccb->ccb_hba_status,
		    ccb->ccb_scsi_status);

	/* Sync up the DMA map and free CCB, returning. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
	dpt_ccb_free(sc, ccb);
}
1108
1109 int
1110 dptopen(dev_t dev, int flag, int mode, struct proc *p)
1111 {
1112
1113 if (securelevel > 1)
1114 return (EPERM);
1115 if (device_lookup(&dpt_cd, minor(dev)) == NULL)
1116 return (ENXIO);
1117
1118 return (0);
1119 }
1120
1121 int
1122 dptioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
1123 {
1124 struct dpt_softc *sc;
1125 int rv;
1126
1127 sc = device_lookup(&dpt_cd, minor(dev));
1128
1129 switch (cmd & 0xffff) {
1130 case DPT_SIGNATURE:
1131 memcpy(data, &dpt_sig, min(IOCPARM_LEN(cmd), sizeof(dpt_sig)));
1132 break;
1133
1134 case DPT_CTRLINFO:
1135 dpt_ctlrinfo(sc, (struct dpt_eata_ctlrinfo *)data);
1136 break;
1137
1138 case DPT_SYSINFO:
1139 dpt_sysinfo(sc, (struct dpt_sysinfo *)data);
1140 break;
1141
1142 case DPT_BLINKLED:
1143 /*
1144 * XXX Don't know how to get this from EATA boards. I think
1145 * it involves waiting for a "DPT" sequence from HA_ERROR
1146 * and then reading one of the HA_ICMD registers.
1147 */
1148 *(int *)data = 0;
1149 break;
1150
1151 case DPT_EATAUSRCMD:
1152 if (sc->sc_uactive++)
1153 tsleep(&sc->sc_uactive, PRIBIO, "dptslp", 0);
1154
1155 rv = dpt_passthrough(sc, (struct eata_cp *)data, p);
1156
1157 sc->sc_uactive--;
1158 wakeup_one(&sc->sc_uactive);
1159 return (rv);
1160
1161 default:
1162 DPRINTF(("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd));
1163 return (ENOTTY);
1164 }
1165
1166 return (0);
1167 }
1168
1169 void
1170 dpt_ctlrinfo(struct dpt_softc *sc, struct dpt_eata_ctlrinfo *info)
1171 {
1172
1173 memset(info, 0, sizeof(*info));
1174 info->id = sc->sc_hbaid[0];
1175 info->vect = sc->sc_isairq;
1176 info->base = sc->sc_isaport;
1177 info->qdepth = sc->sc_nccbs;
1178 info->sgsize = DPT_SG_SIZE * sizeof(struct eata_sg);
1179 info->heads = 16;
1180 info->sectors = 63;
1181 info->do_drive32 = 1;
1182 info->primary = 1;
1183 info->cpLength = sizeof(struct eata_cp);
1184 info->spLength = sizeof(struct eata_sp);
1185 info->drqNum = sc->sc_isadrq;
1186 }
1187
1188 void
1189 dpt_sysinfo(struct dpt_softc *sc, struct dpt_sysinfo *info)
1190 {
1191 #ifdef i386
1192 int i, j;
1193 #endif
1194
1195 memset(info, 0, sizeof(*info));
1196
1197 #ifdef i386
1198 outb (0x70, 0x12);
1199 i = inb(0x71);
1200 j = i >> 4;
1201 if (i == 0x0f) {
1202 outb (0x70, 0x19);
1203 j = inb (0x71);
1204 }
1205 info->drive0CMOS = j;
1206
1207 j = i & 0x0f;
1208 if (i == 0x0f) {
1209 outb (0x70, 0x1a);
1210 j = inb (0x71);
1211 }
1212 info->drive1CMOS = j;
1213 info->processorFamily = dpt_sig.dsProcessorFamily;
1214
1215 /*
1216 * Get the conventional memory size from CMOS.
1217 */
1218 outb(0x70, 0x16);
1219 j = inb(0x71);
1220 j <<= 8;
1221 outb(0x70, 0x15);
1222 j |= inb(0x71);
1223 info->conventionalMemSize = j;
1224
1225 /*
1226 * Get the extended memory size from CMOS.
1227 */
1228 outb(0x70, 0x31);
1229 j = inb(0x71);
1230 j <<= 8;
1231 outb(0x70, 0x30);
1232 j |= inb(0x71);
1233 info->extendedMemSize = j;
1234
1235 switch (cpu_class) {
1236 case CPUCLASS_386:
1237 info->processorType = PROC_386;
1238 break;
1239 case CPUCLASS_486:
1240 info->processorType = PROC_486;
1241 break;
1242 case CPUCLASS_586:
1243 info->processorType = PROC_PENTIUM;
1244 break;
1245 case CPUCLASS_686:
1246 default:
1247 info->processorType = PROC_SEXIUM;
1248 break;
1249 }
1250
1251 info->flags = SI_CMOS_Valid | SI_BusTypeValid |
1252 SI_MemorySizeValid | SI_NO_SmartROM;
1253 #else
1254 info->flags = SI_BusTypeValid | SI_NO_SmartROM;
1255 #endif
1256
1257 info->busType = sc->sc_bustype;
1258 }
1259
1260 int
1261 dpt_passthrough(struct dpt_softc *sc, struct eata_cp *ucp, struct proc *proc)
1262 {
1263 struct dpt_ccb *ccb;
1264 struct eata_sp sp;
1265 struct eata_cp *cp;
1266 struct eata_sg *sg;
1267 bus_dmamap_t xfer;
1268 bus_dma_segment_t *ds;
1269 int datain, s, rv, i;
1270
1271 /*
1272 * Get a CCB and fill.
1273 */
1274 ccb = dpt_ccb_alloc(sc);
1275 ccb->ccb_flg |= CCB_PRIVATE | CCB_WAIT;
1276 ccb->ccb_timeout = 0;
1277 ccb->ccb_savesp = &sp;
1278
1279 cp = &ccb->ccb_eata_cp;
1280 memcpy(cp, ucp, sizeof(*cp));
1281 cp->cp_ccbid = ccb->ccb_id;
1282 cp->cp_senselen = sizeof(ccb->ccb_sense);
1283 cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
1284 CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
1285 cp->cp_stataddr = htobe32(sc->sc_stppa);
1286
1287 /*
1288 * Map data transfers.
1289 */
1290 if (ucp->cp_dataaddr && ucp->cp_datalen) {
1291 xfer = ccb->ccb_dmamap_xfer;
1292 datain = ((ucp->cp_ctl0 & CP_C0_DATA_IN) != 0);
1293
1294 if (ucp->cp_datalen > DPT_MAX_XFER) {
1295 DPRINTF(("%s: xfer too big\n", sc->sc_dv.dv_xname));
1296 dpt_ccb_free(sc, ccb);
1297 return (EFBIG);
1298 }
1299 rv = bus_dmamap_load(sc->sc_dmat, xfer,
1300 (caddr_t)ucp->cp_dataaddr, ucp->cp_datalen, proc,
1301 BUS_DMA_WAITOK | BUS_DMA_STREAMING |
1302 (datain ? BUS_DMA_READ : BUS_DMA_WRITE));
1303 if (rv != 0) {
1304 DPRINTF(("%s: map failed; %d\n", sc->sc_dv.dv_xname,
1305 rv));
1306 dpt_ccb_free(sc, ccb);
1307 return (rv);
1308 }
1309
1310 bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
1311 (datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
1312
1313 sg = ccb->ccb_sg;
1314 ds = xfer->dm_segs;
1315 for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
1316 sg->sg_addr = htobe32(ds->ds_addr);
1317 sg->sg_len = htobe32(ds->ds_len);
1318 }
1319 cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
1320 sc->sc_dmamap->dm_segs[0].ds_addr +
1321 offsetof(struct dpt_ccb, ccb_sg));
1322 cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
1323 cp->cp_ctl0 |= CP_C0_SCATTER;
1324 } else {
1325 cp->cp_dataaddr = 0;
1326 cp->cp_datalen = 0;
1327 }
1328
1329 /*
1330 * Start the command and sleep on completion.
1331 */
1332 PHOLD(proc);
1333 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
1334 sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
1335 s = splbio();
1336 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
1337 sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
1338 if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
1339 panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
1340 tsleep(ccb, PWAIT, "dptucmd", 0);
1341 splx(s);
1342 PRELE(proc);
1343
1344 /*
1345 * Sync up the DMA map and copy out results.
1346 */
1347 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
1348 sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);
1349
1350 if (cp->cp_datalen != 0) {
1351 bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
1352 (datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
1353 bus_dmamap_unload(sc->sc_dmat, xfer);
1354 }
1355
1356 if (ucp->cp_stataddr != NULL) {
1357 rv = copyout(&sp, (caddr_t)ucp->cp_stataddr, sizeof(sp));
1358 if (rv != 0)
1359 DPRINTF(("%s: sp copyout() failed\n",
1360 sc->sc_dv.dv_xname));
1361 }
1362 if (rv == 0 && ucp->cp_senseaddr != NULL) {
1363 i = min(ucp->cp_senselen, sizeof(ccb->ccb_sense));
1364 rv = copyout(&ccb->ccb_sense, (caddr_t)ucp->cp_senseaddr, i);
1365 if (rv != 0)
1366 DPRINTF(("%s: sense copyout() failed\n",
1367 sc->sc_dv.dv_xname));
1368 }
1369
1370 dpt_ccb_free(sc, ccb);
1371 return (rv);
1372 }
1373