/*	$NetBSD: dpt.c,v 1.69 2013/09/12 19:49:08 martin Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
 * Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
 * Copyright (c) 2000 Adaptec Corporation
 * All rights reserved.
 *
 * TERMS AND CONDITIONS OF USE
 *
 * Redistribution and use in source form, with or without modification, are
 * permitted provided that redistributions of source code must retain the
 * above copyright notice, this list of conditions and the following disclaimer.
 *
 * This software is provided `as is' by Adaptec and any express or implied
 * warranties, including, but not limited to, the implied warranties of
 * merchantability and fitness for a particular purpose, are disclaimed. In no
 * event shall Adaptec be liable for any direct, indirect, incidental, special,
 * exemplary or consequential damages (including, but not limited to,
 * procurement of substitute goods or services; loss of use, data, or profits;
 * or business interruptions) however caused and on any theory of liability,
 * whether in contract, strict liability, or tort (including negligence or
 * otherwise) arising in any way out of the use of this driver software, even
 * if advised of the possibility of such damage.
 */

/*
 * Portions of this code fall under the following copyright:
 *
 * Originally written by Julian Elischer (julian@tfs.com)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.69 2013/09/12 19:49:08 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/proc.h>
#include <sys/mutex.h>

#include <sys/bus.h>
#ifdef i386
#include <machine/pio.h>
#include <machine/cputypes.h>
#endif

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/dptreg.h>
#include <dev/ic/dptvar.h>

#include <dev/i2o/dptivar.h>

#ifdef DEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

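/*
 * Shorthand for byte-wide reads and writes of the HBA's EATA registers.
 */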
#define	dpt_inb(x, o)						\
	bus_space_read_1((x)->sc_iot, (x)->sc_ioh, (o))
#define	dpt_outb(x, o, d)					\
	bus_space_write_1((x)->sc_iot, (x)->sc_ioh, (o), (d))

static const char * const dpt_cname[] = {
	"3334", "SmartRAID IV",
	"3332", "SmartRAID IV",
	"2144", "SmartCache IV",
	"2044", "SmartCache IV",
	"2142", "SmartCache IV",
	"2042", "SmartCache IV",
	"2041", "SmartCache IV",
	"3224", "SmartRAID III",
	"3222", "SmartRAID III",
	"3021", "SmartRAID III",
	"2124", "SmartCache III",
	"2024", "SmartCache III",
	"2122", "SmartCache III",
	"2022", "SmartCache III",
	"2021", "SmartCache III",
	"2012", "SmartCache Plus",
	"2011", "SmartCache Plus",
	NULL,	"<unknown>",
};

static void	*dpt_sdh;

dev_type_open(dptopen);
dev_type_ioctl(dptioctl);

const struct cdevsw dpt_cdevsw = {
	dptopen, nullclose, noread, nowrite, dptioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

extern struct cfdriver dpt_cd;

static struct dpt_sig dpt_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G' },
	SIG_VERSION,
#if defined(i386)
	PROC_INTEL,
#elif defined(powerpc)
	PROC_POWERPC,
#elif defined(alpha)
	PROC_ALPHA,
#elif defined(__mips__)
	PROC_MIPS,
#elif defined(sparc64)
	PROC_ULTRASPARC,
#else
	0xff,
#endif
#if defined(i386)
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#else
	0,
#endif
	FT_HBADRVR,
	0,
	OEM_DPT,
	OS_FREE_BSD,	/* XXX */
	CAP_ABOVE16MB,
	DEV_ALL,
	ADF_ALL_EATA,
	0,
	0,
	DPT_VERSION,
	DPT_REVISION,
	DPT_SUBREVISION,
	DPT_MONTH,
	DPT_DAY,
	DPT_YEAR,
	""		/* Will be filled later */
};

static void	dpt_ccb_abort(struct dpt_softc *, struct dpt_ccb *);
static void	dpt_ccb_done(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_ccb_map(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_ccb_poll(struct dpt_softc *, struct dpt_ccb *);
static void	dpt_ccb_unmap(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_cmd(struct dpt_softc *, struct dpt_ccb *, int, int);
static void	dpt_ctlrinfo(struct dpt_softc *, struct dpt_eata_ctlrinfo *);
static void	dpt_hba_inquire(struct dpt_softc *, struct eata_inquiry_data **);
static void	dpt_minphys(struct buf *);
static int	dpt_passthrough(struct dpt_softc *, struct eata_ucp *,
				struct lwp *);
static void	dpt_scsipi_request(struct scsipi_channel *,
				   scsipi_adapter_req_t, void *);
static void	dpt_shutdown(void *);
static void	dpt_sysinfo(struct dpt_softc *, struct dpt_sysinfo *);
static int	dpt_wait(struct dpt_softc *, u_int8_t, u_int8_t, int);

static inline struct dpt_ccb	*dpt_ccb_alloc(struct dpt_softc *);
static inline void	dpt_ccb_free(struct dpt_softc *, struct dpt_ccb *);

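/*
 * Pull a CCB off the free list. Raised to splbio(), since the free list
 * is also manipulated from interrupt context.
 */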
static inline struct dpt_ccb *
dpt_ccb_alloc(struct dpt_softc *sc)
{
	struct dpt_ccb *ccb;
	int s;

	s = splbio();
	ccb = SLIST_FIRST(&sc->sc_ccb_free);
	SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
	splx(s);

	return (ccb);
}

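/*
 * Clear per-command state and return a CCB to the free list.
 */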
static inline void
dpt_ccb_free(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	int s;

	ccb->ccb_flg = 0;
	ccb->ccb_savesp = NULL;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
	splx(s);
}

/*
 * Handle an interrupt from the HBA.
 */
int
dpt_intr(void *cookie)
{
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sp *sp;
	int forus;

	sc = cookie;
	sp = sc->sc_stp;
	forus = 0;

	for (;;) {
		/*
		 * HBA might have interrupted while we were dealing with the
		 * last completed command, since we ACK before we deal; keep
		 * polling.
		 */
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
			break;
		forus = 1;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);

		/* Might have looped before the HBA could clear HA_AUX_INTR. */
		if (sp->sp_ccbid == -1) {
			DELAY(50);

			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
				return (0);

			printf("%s: no status\n", device_xname(sc->sc_dev));

			/* Re-sync DMA map */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    sc->sc_stpoff, sizeof(struct eata_sp),
			    BUS_DMASYNC_POSTREAD);
		}

		/* Make sure CCB ID from status packet is realistic. */
		if ((u_int)sp->sp_ccbid >= sc->sc_nccbs) {
			printf("%s: bogus status (returned CCB id %d)\n",
			    device_xname(sc->sc_dev), sp->sp_ccbid);

			/* Ack the interrupt */
			sp->sp_ccbid = -1;
			(void)dpt_inb(sc, HA_STATUS);
			continue;
		}

		/* Sync up DMA map and cache cmd status. */
		ccb = sc->sc_ccbs + sp->sp_ccbid;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
		    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);

		ccb->ccb_hba_status = sp->sp_hba_status & 0x7f;
		ccb->ccb_scsi_status = sp->sp_scsi_status;
		if (ccb->ccb_savesp != NULL)
			memcpy(ccb->ccb_savesp, sp, sizeof(*sp));

		/*
		 * Ack the interrupt and process the CCB. If this
		 * is a private CCB it's up to dpt_ccb_poll() to
		 * notice.
		 */
		sp->sp_ccbid = -1;
		ccb->ccb_flg |= CCB_INTR;
		(void)dpt_inb(sc, HA_STATUS);
		if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
			dpt_ccb_done(sc, ccb);
		else if ((ccb->ccb_flg & CCB_WAIT) != 0)
			wakeup(ccb);
	}

	return (forus);
}

/*
 * Initialize and attach the HBA. This is the entry point from bus-specific
 * probe-and-attach code.
 */
void
dpt_init(struct dpt_softc *sc, const char *intrstr)
{
	struct scsipi_adapter *adapt;
	struct scsipi_channel *chan;
	struct eata_inquiry_data *ei;
	int i, j, rv, rseg, maxchannel, maxtarget, mapsize;
	bus_dma_segment_t seg;
	struct eata_cfg *ec;
	struct dpt_ccb *ccb;
	char model[__arraycount(ei->ei_model) + __arraycount(ei->ei_suffix) + 1];
	char vendor[__arraycount(ei->ei_vendor) + 1];

	ec = &sc->sc_ec;
	snprintf(dpt_sig.dsDescription, sizeof(dpt_sig.dsDescription),
	    "NetBSD %s DPT driver", osrelease);
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/*
	 * Allocate the CCB/status packet/scratch DMA map and load.
	 */
	sc->sc_nccbs =
	    min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
	sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
	sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
	mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
	    DPT_SCRATCH_SIZE + sizeof(struct eata_sp);

	if ((rv = bus_dmamem_alloc(sc->sc_dmat, mapsize,
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate CCBs, rv = %d\n", rv);
		return;
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
	    (void **)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to map CCBs, rv = %d\n",
		    rv);
		return;
	}

	if ((rv = bus_dmamap_create(sc->sc_dmat, mapsize,
	    mapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create CCB DMA map, rv = %d\n", rv);
		return;
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
	    sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load CCB DMA map, rv = %d\n", rv);
		return;
	}

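	/* Carve the status packet and scratch area out of the CCB allocation. */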
	sc->sc_stp = (struct eata_sp *)((char *)sc->sc_ccbs + sc->sc_stpoff);
	sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
	sc->sc_scr = (char *)sc->sc_ccbs + sc->sc_scroff;
	sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
	sc->sc_stp->sp_ccbid = -1;

	/*
	 * Create the CCBs.
	 */
	SLIST_INIT(&sc->sc_ccb_free);
	memset(sc->sc_ccbs, 0, sizeof(struct dpt_ccb) * sc->sc_nccbs);

	for (i = 0, ccb = sc->sc_ccbs; i < sc->sc_nccbs; i++, ccb++) {
		rv = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER,
		    DPT_SG_SIZE, DPT_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "can't create ccb dmamap (%d)\n", rv);
			break;
		}

		ccb->ccb_id = i;
		ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
		    CCB_OFF(sc, ccb);
		SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
	}

	if (i == 0) {
		aprint_error_dev(sc->sc_dev, "unable to create CCBs\n");
		return;
	} else if (i != sc->sc_nccbs) {
		aprint_error_dev(sc->sc_dev, "%d/%d CCBs created!\n",
		    i, sc->sc_nccbs);
		sc->sc_nccbs = i;
	}

	/* Set shutdownhook before we start any device activity. */
	if (dpt_sdh == NULL)
		dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);

	/* Get the inquiry data from the HBA. */
	dpt_hba_inquire(sc, &ei);

	/*
	 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
	 * dpt0: interrupting at irq 10
	 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
	 */
	for (i = 0; i < __arraycount(ei->ei_vendor) && ei->ei_vendor[i] != ' ';
	    i++)
		vendor[i] = ei->ei_vendor[i];
	vendor[i] = '\0';

	for (i = 0; i < __arraycount(ei->ei_model) && ei->ei_model[i] != ' ';
	    i++)
		model[i] = ei->ei_model[i];
	for (j = 0; j < __arraycount(ei->ei_suffix) && ei->ei_suffix[j] != ' ';
	    i++, j++)
		model[i] = ei->ei_suffix[j];
	model[i] = '\0';

	/* Find the marketing name for the board. */
	for (i = 0; dpt_cname[i] != NULL; i += 2)
		if (memcmp(ei->ei_model + 2, dpt_cname[i], 4) == 0)
			break;

	aprint_normal("%s %s (%s)\n", vendor, dpt_cname[i + 1], model);

	if (intrstr != NULL)
		aprint_normal_dev(sc->sc_dev, "interrupting at %s\n",
		    intrstr);

	maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
	    EC_F3_MAX_CHANNEL_SHIFT;
	maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
	    EC_F3_MAX_TARGET_SHIFT;

	aprint_normal_dev(sc->sc_dev,
	    "%d queued commands, %d channel(s), adapter on ID(s)",
	    sc->sc_nccbs, maxchannel + 1);

	for (i = 0; i <= maxchannel; i++) {
		sc->sc_hbaid[i] = ec->ec_hba[3 - i];
		aprint_normal(" %d", sc->sc_hbaid[i]);
	}
	aprint_normal("\n");

	/*
	 * Reset the SCSI controller chip(s) and bus. XXX Do we need to do
	 * this for each bus?
	 */
	if (dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_BUS_RESET))
		panic("%s: dpt_cmd failed", device_xname(sc->sc_dev));

	/* Fill in the scsipi_adapter. */
	adapt = &sc->sc_adapt;
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = maxchannel + 1;
	adapt->adapt_openings = sc->sc_nccbs - 1;
	adapt->adapt_max_periph = sc->sc_nccbs - 1;
	adapt->adapt_request = dpt_scsipi_request;
	adapt->adapt_minphys = dpt_minphys;

	for (i = 0; i <= maxchannel; i++) {
		/* Fill in the scsipi_channel. */
		chan = &sc->sc_chans[i];
		memset(chan, 0, sizeof(*chan));
		chan->chan_adapter = adapt;
		chan->chan_bustype = &scsi_bustype;
		chan->chan_channel = i;
		chan->chan_ntargets = maxtarget + 1;
		chan->chan_nluns = ec->ec_maxlun + 1;
		chan->chan_id = sc->sc_hbaid[i];
		config_found(sc->sc_dev, chan, scsiprint);
	}
}

/*
 * Read the EATA configuration from the HBA and perform some sanity checks.
 */
int
dpt_readcfg(struct dpt_softc *sc)
{
	struct eata_cfg *ec;
	int i, j, stat;
	u_int16_t *p;

	ec = &sc->sc_ec;

	/* Older firmware may puke if we talk to it too soon after reset. */
	dpt_outb(sc, HA_COMMAND, CP_RESET);
	DELAY(750000);

	for (i = 1000; i; i--) {
		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}

	if (i == 0) {
		printf("%s: HBA not ready after reset (hba status:%02x)\n",
		    device_xname(sc->sc_dev), dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	while ((((stat = dpt_inb(sc, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
		/* RAID drives still spinning up? */
		if (dpt_inb(sc, HA_ERROR) != 'D' ||
		    dpt_inb(sc, HA_ERROR + 1) != 'P' ||
		    dpt_inb(sc, HA_ERROR + 2) != 'T') {
			printf("%s: HBA not ready\n", device_xname(sc->sc_dev));
			return (-1);
		}
	}

	/*
	 * Issue the read-config command and wait for the data to appear.
	 *
	 * Apparently certain firmware revisions won't DMA later on if we
	 * request the config data using PIO, but it makes it a lot easier
	 * as no DMA setup is required.
	 */
	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
	memset(ec, 0, sizeof(*ec));
	i = (offsetof(struct eata_cfg, ec_cfglen) +
	    sizeof(ec->ec_cfglen)) >> 1;
	p = (u_int16_t *)ec;

	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
		printf("%s: cfg data didn't appear (hba status:%02x)\n",
		    device_xname(sc->sc_dev), dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/* Begin reading. */
	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg) -
	    offsetof(struct eata_cfg, ec_cfglen) - sizeof(ec->ec_cfglen)))
		i = sizeof(struct eata_cfg) -
		    offsetof(struct eata_cfg, ec_cfglen) -
		    sizeof(ec->ec_cfglen);

	j = i + offsetof(struct eata_cfg, ec_cfglen) +
	    sizeof(ec->ec_cfglen);
	i >>= 1;

	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		(void)bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Defaults for older firmware... */
	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;

	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
		aprint_error_dev(sc->sc_dev, "HBA error\n");
		return (-1);
	}

	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
		aprint_error_dev(sc->sc_dev, "EATA signature mismatch\n");
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
		aprint_error_dev(sc->sc_dev, "ec_hba field invalid\n");
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
		aprint_error_dev(sc->sc_dev, "DMA not supported\n");
		return (-1);
	}

	return (0);
}

/*
 * Our `shutdownhook' to cleanly shut down the HBA. The HBA must flush all
 * data from its cache and mark array groups as clean.
 *
 * XXX This doesn't always work (i.e., the HBA may still be flushing after
 * we tell root that it's safe to power off).
 */
static void
dpt_shutdown(void *cookie)
{
	extern struct cfdriver dpt_cd;
	struct dpt_softc *sc;
	int i;

	printf("shutting down dpt devices...");

	for (i = 0; i < dpt_cd.cd_ndevs; i++) {
		if ((sc = device_lookup_private(&dpt_cd, i)) == NULL)
			continue;
		dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_POWEROFF_WARN);
	}

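	/* Allow ten seconds for the controllers to flush their caches. */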
	delay(10000*1000);
	printf(" done\n");
}

/*
 * Send an EATA command to the HBA.
 */
static int
dpt_cmd(struct dpt_softc *sc, struct dpt_ccb *ccb, int eatacmd, int icmd)
{
	u_int32_t pa;
	int i, s;

	s = splbio();

	for (i = 20000; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == 0) {
		splx(s);
		return (-1);
	}

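	/*
	 * Hand the HBA the physical address of the CCB (0 for an immediate
	 * command) via the four DMA base address registers.
	 */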
	pa = (ccb != NULL ? ccb->ccb_ccbpa : 0);
	dpt_outb(sc, HA_DMA_BASE + 0, (pa      ) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 1, (pa >>  8) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 2, (pa >> 16) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 3, (pa >> 24) & 0xff);

	if (eatacmd == CP_IMMEDIATE)
		dpt_outb(sc, HA_ICMD, icmd);

	dpt_outb(sc, HA_COMMAND, eatacmd);

	splx(s);
	return (0);
}

/*
 * Wait for the HBA status register to reach a specific state.
 */
static int
dpt_wait(struct dpt_softc *sc, u_int8_t mask, u_int8_t state, int ms)
{

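	/* "ms" is the timeout in milliseconds; poll in 100us steps. */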
	for (ms *= 10; ms != 0; ms--) {
		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
			return (0);
		DELAY(100);
	}

	return (-1);
}

/*
 * Spin waiting for a command to finish. The timeout value from the CCB is
 * used. The CCB must be marked with CCB_PRIVATE, otherwise it will get
 * recycled before we get a look at it.
 */
static int
dpt_ccb_poll(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	int i, s;

#ifdef DEBUG
	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
		panic("dpt_ccb_poll: called for non-CCB_PRIVATE request");
#endif

	s = splbio();

	if ((ccb->ccb_flg & CCB_INTR) != 0) {
		splx(s);
		return (0);
	}

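	/* ccb_timeout is in milliseconds; poll every 50us. */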
	for (i = ccb->ccb_timeout * 20; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
			dpt_intr(sc);
		if ((ccb->ccb_flg & CCB_INTR) != 0)
			break;
		DELAY(50);
	}

	splx(s);
	return (i == 0);
}

/*
 * We have a command which has been processed by the HBA, so now we look to
 * see how the operation went. CCBs marked CCB_PRIVATE are not passed here
 * by dpt_intr().
 */
static void
dpt_ccb_done(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;

	xs = ccb->ccb_xs;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("dpt_ccb_done\n"));

	/*
	 * If we were a data transfer, unload the map that described the
	 * data buffer.
	 */
	if (xs->datalen != 0)
		dpt_ccb_unmap(sc, ccb);

	if (xs->error == XS_NOERROR) {
		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
			switch (ccb->ccb_hba_status) {
			case SP_HBA_ERROR_SEL_TO:
				xs->error = XS_SELTIMEOUT;
				break;
			case SP_HBA_ERROR_RESET:
				xs->error = XS_RESET;
				break;
			default:
				printf("%s: HBA status %x\n",
				    device_xname(sc->sc_dev),
				    ccb->ccb_hba_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else if (ccb->ccb_scsi_status != SCSI_OK) {
			switch (ccb->ccb_scsi_status) {
			case SCSI_CHECK:
				memcpy(&xs->sense.scsi_sense, &ccb->ccb_sense,
				    sizeof(xs->sense.scsi_sense));
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
			case SCSI_QUEUE_FULL:
				xs->error = XS_BUSY;
				break;
			default:
				scsipi_printaddr(xs->xs_periph);
				printf("SCSI status %x\n",
				    ccb->ccb_scsi_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			xs->resid = 0;

		xs->status = ccb->ccb_scsi_status;
	}

	/* Free up the CCB and mark the command as done. */
	dpt_ccb_free(sc, ccb);
	scsipi_done(xs);
}

/*
 * Specified CCB has timed out, abort it.
 */
static void
dpt_ccb_abort(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	int s;

	xs = ccb->ccb_xs;
	periph = xs->xs_periph;

	scsipi_printaddr(periph);
	printf("timed out (status:%02x aux status:%02x)",
	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));

	s = splbio();

	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
		/* Abort timed out, reset the HBA */
		printf(" AGAIN, resetting HBA\n");
		dpt_outb(sc, HA_COMMAND, CP_RESET);
		DELAY(750000);
	} else {
		/* Abort the operation that has timed out */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
		ccb->ccb_flg |= CCB_ABORT;
		/* Start the abort */
		if (dpt_cmd(sc, ccb, CP_IMMEDIATE, CPI_SPEC_ABORT))
			aprint_error_dev(sc->sc_dev, "dpt_cmd failed\n");
	}

	splx(s);
}

/*
 * Map a data transfer.
 */
static int
dpt_ccb_map(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;
	bus_dmamap_t xfer;
	bus_dma_segment_t *ds;
	struct eata_sg *sg;
	struct eata_cp *cp;
	int rv, i;

	xs = ccb->ccb_xs;
	xfer = ccb->ccb_dmamap_xfer;
	cp = &ccb->ccb_eata_cp;

	rv = bus_dmamap_load(sc->sc_dmat, xfer, xs->data, xs->datalen, NULL,
	    ((xs->xs_control & XS_CTL_NOSLEEP) != 0 ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
	    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));

	switch (rv) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
		printf("%s: error %d loading map\n",
		    device_xname(sc->sc_dev), rv);
		break;
	}

	if (xs->error != XS_NOERROR) {
		dpt_ccb_free(sc, ccb);
		scsipi_done(xs);
		return (-1);
	}

	bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	/* Don't bother using scatter/gather for just 1 seg */
	if (xfer->dm_nsegs == 1) {
		cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
		cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
	} else {
		/*
		 * Load the hardware scatter/gather map with
		 * the contents of the DMA map.
		 */
		sg = ccb->ccb_sg;
		ds = xfer->dm_segs;
		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
			sg->sg_addr = htobe32(ds->ds_addr);
			sg->sg_len = htobe32(ds->ds_len);
		}
		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
		    sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
		cp->cp_ctl0 |= CP_C0_SCATTER;
	}

	return (0);
}

/*
 * Unmap a transfer.
 */
static void
dpt_ccb_unmap(struct dpt_softc *sc, struct dpt_ccb *ccb)
{

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_dmamap_xfer->dm_mapsize,
	    (ccb->ccb_eata_cp.cp_ctl0 & CP_C0_DATA_IN) != 0 ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
static void
dpt_minphys(struct buf *bp)
{

	if (bp->b_bcount > DPT_MAX_XFER)
		bp->b_bcount = DPT_MAX_XFER;
	minphys(bp);
}

/*
 * Start a SCSI command.
 */
static void
dpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		   void *arg)
{
	struct dpt_softc *sc;
	struct scsipi_xfer *xs;
	int flags;
	struct scsipi_periph *periph;
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	sc = device_private(chan->chan_adapter->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

#ifdef DIAGNOSTIC
		/* Cmds must be no more than 12 bytes for us. */
		if (xs->cmdlen > 12) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif
		/*
		 * XXX We can't reset devices just yet. Apparently some
		 * older firmware revisions don't even support it.
		 */
		if ((flags & XS_CTL_RESET) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}

		/*
		 * Get a CCB and fill it.
		 */
		ccb = dpt_ccb_alloc(sc);
		ccb->ccb_xs = xs;
		ccb->ccb_timeout = xs->timeout;

		cp = &ccb->ccb_eata_cp;
		memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
		cp->cp_ccbid = ccb->ccb_id;
		cp->cp_senselen = sizeof(ccb->ccb_sense);
		cp->cp_stataddr = htobe32(sc->sc_stppa);
		cp->cp_ctl0 = CP_C0_AUTO_SENSE;
		cp->cp_ctl1 = 0;
		cp->cp_ctl2 = 0;
		cp->cp_ctl3 = periph->periph_target << CP_C3_ID_SHIFT;
		cp->cp_ctl3 |= chan->chan_channel << CP_C3_CHANNEL_SHIFT;
		cp->cp_ctl4 = periph->periph_lun << CP_C4_LUN_SHIFT;
		cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;

		if ((flags & XS_CTL_DATA_IN) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_IN;
		if ((flags & XS_CTL_DATA_OUT) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_OUT;
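
		/*
		 * Commands addressed to the HBA itself are interpreted by
		 * the firmware rather than sent out on the SCSI bus.
		 */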
		if (sc->sc_hbaid[chan->chan_channel] == periph->periph_target)
			cp->cp_ctl0 |= CP_C0_INTERPRET;

		/* Synchronous xfers mustn't write-back through the cache. */
		if (xs->bp != NULL)
			if ((xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
				cp->cp_ctl2 |= CP_C2_NO_CACHE;

		cp->cp_senseaddr =
		    htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));

		if (xs->datalen != 0) {
			if (dpt_ccb_map(sc, ccb))
				break;
		} else {
			cp->cp_dataaddr = 0;
			cp->cp_datalen = 0;
		}

		/* Sync up CCB and status packet. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);

		/*
		 * Start the command.
		 */
		if ((xs->xs_control & XS_CTL_POLL) != 0)
			ccb->ccb_flg |= CCB_PRIVATE;

		if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0)) {
			aprint_error_dev(sc->sc_dev, "dpt_cmd failed\n");
			xs->error = XS_DRIVER_STUFFUP;
			if (xs->datalen != 0)
				dpt_ccb_unmap(sc, ccb);
			dpt_ccb_free(sc, ccb);
			break;
		}

		if ((xs->xs_control & XS_CTL_POLL) == 0)
			break;

		if (dpt_ccb_poll(sc, ccb)) {
			dpt_ccb_abort(sc, ccb);
			/* Wait for abort to complete... */
			if (dpt_ccb_poll(sc, ccb))
				dpt_ccb_abort(sc, ccb);
		}

		dpt_ccb_done(sc, ccb);
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported, since we allocate the maximum number of
		 * CCBs up front.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * This will be handled by the HBA itself, and we can't
		 * modify that (ditto for tagged queueing).
		 */
		break;
	}
}

/*
 * Get inquiry data from the adapter.
 */
static void
dpt_hba_inquire(struct dpt_softc *sc, struct eata_inquiry_data **ei)
{
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	*ei = (struct eata_inquiry_data *)sc->sc_scr;

	/* Get a CCB and mark as private */
	ccb = dpt_ccb_alloc(sc);
	ccb->ccb_flg |= CCB_PRIVATE;
	ccb->ccb_timeout = 200;

	/* Put all the arguments into the CCB. */
	cp = &ccb->ccb_eata_cp;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = 0;
	cp->cp_stataddr = htobe32(sc->sc_stppa);
	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
	cp->cp_ctl1 = 0;
	cp->cp_ctl2 = 0;
	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;

	/* Put together the SCSI inquiry command. */
	memset(&cp->cp_cdb_cmd, 0, 12);
	cp->cp_cdb_cmd = INQUIRY;
	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);

	/* Sync up CCB, status packet and scratch area. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);

	/* Start the command and poll on completion. */
	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", device_xname(sc->sc_dev));

	if (dpt_ccb_poll(sc, ccb))
		panic("%s: inquiry timed out", device_xname(sc->sc_dev));

	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
	    ccb->ccb_scsi_status != SCSI_OK)
		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
		    device_xname(sc->sc_dev), ccb->ccb_hba_status,
		    ccb->ccb_scsi_status);

	/* Sync up the DMA map and free CCB, returning. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
	dpt_ccb_free(sc, ccb);
}

int
dptopen(dev_t dev, int flag, int mode, struct lwp *l)
{

	if (device_lookup(&dpt_cd, minor(dev)) == NULL)
		return (ENXIO);

	return (0);
}

int
dptioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct dpt_softc *sc;
	int rv;

	sc = device_lookup_private(&dpt_cd, minor(dev));

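	/*
	 * Match on the ioctl group and command number only; the DPT
	 * management tools encode varying transfer lengths in the size
	 * bits, which IOCPARM_LEN() extracts where it matters.
	 */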
	switch (cmd & 0xffff) {
	case DPT_SIGNATURE:
		memcpy(data, &dpt_sig, min(IOCPARM_LEN(cmd), sizeof(dpt_sig)));
		break;

	case DPT_CTRLINFO:
		dpt_ctlrinfo(sc, (struct dpt_eata_ctlrinfo *)data);
		break;

	case DPT_SYSINFO:
		dpt_sysinfo(sc, (struct dpt_sysinfo *)data);
		break;

	case DPT_BLINKLED:
		/*
		 * XXX Don't know how to get this from EATA boards. I think
		 * it involves waiting for a "DPT" sequence from HA_ERROR
		 * and then reading one of the HA_ICMD registers.
		 */
		*(int *)data = 0;
		break;

	case DPT_EATAUSRCMD:
		rv = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (rv)
			return (rv);

		if (IOCPARM_LEN(cmd) < sizeof(struct eata_ucp)) {
			DPRINTF(("%s: ucp %lu vs %lu bytes\n",
			    device_xname(sc->sc_dev), IOCPARM_LEN(cmd),
			    (unsigned long int)sizeof(struct eata_ucp)));
			return (EINVAL);
		}

		mutex_enter(&sc->sc_lock);
		rv = dpt_passthrough(sc, (struct eata_ucp *)data, l);
		mutex_exit(&sc->sc_lock);

		return (rv);

	default:
		DPRINTF(("%s: unknown ioctl %lx\n",
		    device_xname(sc->sc_dev), cmd));
		return (ENOTTY);
	}

	return (0);
}

void
dpt_ctlrinfo(struct dpt_softc *sc, struct dpt_eata_ctlrinfo *info)
{

	memset(info, 0, sizeof(*info));
	info->id = sc->sc_hbaid[0];
	info->vect = sc->sc_isairq;
	info->base = sc->sc_isaport;
	info->qdepth = sc->sc_nccbs;
	info->sgsize = DPT_SG_SIZE * sizeof(struct eata_sg);
	info->heads = 16;
	info->sectors = 63;
	info->do_drive32 = 1;
	info->primary = 1;
	info->cpLength = sizeof(struct eata_cp);
	info->spLength = sizeof(struct eata_sp);
	info->drqNum = sc->sc_isadrq;
}

void
dpt_sysinfo(struct dpt_softc *sc, struct dpt_sysinfo *info)
{
#ifdef i386
	int i, j;
#endif

	memset(info, 0, sizeof(*info));

#ifdef i386
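	/*
	 * Fetch the drive type nibbles from RTC CMOS RAM (index port 0x70,
	 * data port 0x71). Register 0x12 holds both hard disk types; a
	 * nibble of 0xf redirects to the extended type registers 0x19/0x1a.
	 */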
	outb(0x70, 0x12);
	i = inb(0x71);
	j = i >> 4;
	if (j == 0x0f) {
		outb(0x70, 0x19);
		j = inb(0x71);
	}
	info->drive0CMOS = j;

	j = i & 0x0f;
	if (j == 0x0f) {
		outb(0x70, 0x1a);
		j = inb(0x71);
	}
	info->drive1CMOS = j;
	info->processorFamily = dpt_sig.dsProcessorFamily;

	/*
	 * Get the conventional memory size from CMOS.
	 */
	outb(0x70, 0x16);
	j = inb(0x71);
	j <<= 8;
	outb(0x70, 0x15);
	j |= inb(0x71);
	info->conventionalMemSize = j;

	/*
	 * Get the extended memory size from CMOS.
	 */
	outb(0x70, 0x31);
	j = inb(0x71);
	j <<= 8;
	outb(0x70, 0x30);
	j |= inb(0x71);
	info->extendedMemSize = j;

	switch (cpu_class) {
	case CPUCLASS_386:
		info->processorType = PROC_386;
		break;
	case CPUCLASS_486:
		info->processorType = PROC_486;
		break;
	case CPUCLASS_586:
		info->processorType = PROC_PENTIUM;
		break;
	case CPUCLASS_686:
	default:
		info->processorType = PROC_SEXIUM;
		break;
	}

	info->flags = SI_CMOS_Valid | SI_BusTypeValid |
	    SI_MemorySizeValid | SI_NO_SmartROM;
#else
	info->flags = SI_BusTypeValid | SI_NO_SmartROM;
#endif

	info->busType = sc->sc_bustype;
}

int
dpt_passthrough(struct dpt_softc *sc, struct eata_ucp *ucp, struct lwp *l)
{
	struct dpt_ccb *ccb;
	struct eata_sp sp;
	struct eata_cp *cp;
	struct eata_sg *sg;
	bus_dmamap_t xfer = 0;		/* XXX: gcc */
	bus_dma_segment_t *ds;
	int datain = 0, s, rv = 0, i, uslen;	/* XXX: gcc */

	/*
	 * Get a CCB and fill.
	 */
	ccb = dpt_ccb_alloc(sc);
	ccb->ccb_flg |= CCB_PRIVATE | CCB_WAIT;
	ccb->ccb_timeout = 0;
	ccb->ccb_savesp = &sp;

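	/*
	 * Copy in the user's command packet and remember how much sense
	 * data the caller asked for before substituting our own sense
	 * buffer and status packet addresses.
	 */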
	cp = &ccb->ccb_eata_cp;
	memcpy(cp, ucp->ucp_cp, sizeof(ucp->ucp_cp));
	uslen = cp->cp_senselen;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
	cp->cp_stataddr = htobe32(sc->sc_stppa);

	/*
	 * Map data transfers.
	 */
	if (ucp->ucp_dataaddr && ucp->ucp_datalen) {
		xfer = ccb->ccb_dmamap_xfer;
		datain = ((cp->cp_ctl0 & CP_C0_DATA_IN) != 0);

		if (ucp->ucp_datalen > DPT_MAX_XFER) {
			DPRINTF(("%s: xfer too big\n",
			    device_xname(sc->sc_dev)));
			dpt_ccb_free(sc, ccb);
			return (EFBIG);
		}
		rv = bus_dmamap_load(sc->sc_dmat, xfer,
		    ucp->ucp_dataaddr, ucp->ucp_datalen, l->l_proc,
		    BUS_DMA_WAITOK | BUS_DMA_STREAMING |
		    (datain ? BUS_DMA_READ : BUS_DMA_WRITE));
		if (rv != 0) {
			DPRINTF(("%s: map failed; %d\n",
			    device_xname(sc->sc_dev), rv));
			dpt_ccb_free(sc, ccb);
			return (rv);
		}

		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
		    (datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));

		sg = ccb->ccb_sg;
		ds = xfer->dm_segs;
		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
			sg->sg_addr = htobe32(ds->ds_addr);
			sg->sg_len = htobe32(ds->ds_len);
		}
		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
		    sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
		cp->cp_ctl0 |= CP_C0_SCATTER;
	} else {
		cp->cp_dataaddr = 0;
		cp->cp_datalen = 0;
	}

	/*
	 * Start the command and sleep on completion.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	s = splbio();
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", device_xname(sc->sc_dev));
	tsleep(ccb, PWAIT, "dptucmd", 0);
	splx(s);

	/*
	 * Sync up the DMA map and copy out results.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);

	if (cp->cp_datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
		    (datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(sc->sc_dmat, xfer);
	}

	if (ucp->ucp_stataddr != NULL) {
		rv = copyout(&sp, ucp->ucp_stataddr, sizeof(sp));
		if (rv != 0) {
			DPRINTF(("%s: sp copyout() failed\n",
			    device_xname(sc->sc_dev)));
		}
	}
	if (rv == 0 && ucp->ucp_senseaddr != NULL) {
		i = min(uslen, sizeof(ccb->ccb_sense));
		rv = copyout(&ccb->ccb_sense, ucp->ucp_senseaddr, i);
		if (rv != 0) {
			DPRINTF(("%s: sense copyout() failed\n",
			    device_xname(sc->sc_dev)));
		}
	}

	ucp->ucp_hstatus = (u_int8_t)ccb->ccb_hba_status;
	ucp->ucp_tstatus = (u_int8_t)ccb->ccb_scsi_status;
	dpt_ccb_free(sc, ccb);
	return (rv);
}