/*	$NetBSD: dpt.c,v 1.59 2008/04/08 12:07:26 cegger Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
 * Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
 * Copyright (c) 2000 Adaptec Corporation
 * All rights reserved.
 *
 * TERMS AND CONDITIONS OF USE
 *
 * Redistribution and use in source form, with or without modification, are
 * permitted provided that redistributions of source code must retain the
 * above copyright notice, this list of conditions and the following disclaimer.
 *
 * This software is provided `as is' by Adaptec and any express or implied
 * warranties, including, but not limited to, the implied warranties of
 * merchantability and fitness for a particular purpose, are disclaimed. In no
 * event shall Adaptec be liable for any direct, indirect, incidental, special,
 * exemplary or consequential damages (including, but not limited to,
 * procurement of substitute goods or services; loss of use, data, or profits;
 * or business interruptions) however caused and on any theory of liability,
 * whether in contract, strict liability, or tort (including negligence or
 * otherwise) arising in any way out of the use of this driver software, even
 * if advised of the possibility of such damage.
 */

/*
 * Portions of this code fall under the following copyright:
 *
 * Originally written by Julian Elischer (julian@tfs.com)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software.  For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.59 2008/04/08 12:07:26 cegger Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <sys/bus.h>
#ifdef i386
#include <machine/pio.h>
#endif

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/dptreg.h>
#include <dev/ic/dptvar.h>

#include <dev/i2o/dptivar.h>

#ifdef DEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define	dpt_inb(x, o)		\
	bus_space_read_1((x)->sc_iot, (x)->sc_ioh, (o))
#define	dpt_outb(x, o, d)	\
	bus_space_write_1((x)->sc_iot, (x)->sc_ioh, (o), (d))

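/*
 * Map EATA inquiry model-number prefixes to DPT marketing names.  Entries
 * are {prefix, name} pairs; dpt_init() matches the four characters at
 * offset 2 of the inquiry model field, and the NULL entry terminates the
 * list.
 */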
static const char * const dpt_cname[] = {
	"3334", "SmartRAID IV",
	"3332", "SmartRAID IV",
	"2144", "SmartCache IV",
	"2044", "SmartCache IV",
	"2142", "SmartCache IV",
	"2042", "SmartCache IV",
	"2041", "SmartCache IV",
	"3224", "SmartRAID III",
	"3222", "SmartRAID III",
	"3021", "SmartRAID III",
	"2124", "SmartCache III",
	"2024", "SmartCache III",
	"2122", "SmartCache III",
	"2022", "SmartCache III",
	"2021", "SmartCache III",
	"2012", "SmartCache Plus",
	"2011", "SmartCache Plus",
	NULL,   "<unknown>",
};

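/* Shutdown hook handle; established once and shared by all dpt instances. */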
static void	*dpt_sdh;

dev_type_open(dptopen);
dev_type_ioctl(dptioctl);

const struct cdevsw dpt_cdevsw = {
	dptopen, nullclose, noread, nowrite, dptioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

extern struct cfdriver dpt_cd;

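/*
 * Driver signature block, returned to DPT management tools by the
 * DPT_SIGNATURE ioctl.
 */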
static struct dpt_sig dpt_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'},
	SIG_VERSION,
#if defined(i386)
	PROC_INTEL,
#elif defined(powerpc)
	PROC_POWERPC,
#elif defined(alpha)
	PROC_ALPHA,
#elif defined(__mips__)
	PROC_MIPS,
#elif defined(sparc64)
	PROC_ULTRASPARC,
#else
	0xff,
#endif
#if defined(i386)
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#else
	0,
#endif
	FT_HBADRVR,
	0,
	OEM_DPT,
	OS_FREE_BSD,	/* XXX */
	CAP_ABOVE16MB,
	DEV_ALL,
	ADF_ALL_EATA,
	0,
	0,
	DPT_VERSION,
	DPT_REVISION,
	DPT_SUBREVISION,
	DPT_MONTH,
	DPT_DAY,
	DPT_YEAR,
	""		/* Will be filled later */
};

static void	dpt_ccb_abort(struct dpt_softc *, struct dpt_ccb *);
static void	dpt_ccb_done(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_ccb_map(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_ccb_poll(struct dpt_softc *, struct dpt_ccb *);
static void	dpt_ccb_unmap(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_cmd(struct dpt_softc *, struct dpt_ccb *, int, int);
static void	dpt_ctlrinfo(struct dpt_softc *, struct dpt_eata_ctlrinfo *);
static void	dpt_hba_inquire(struct dpt_softc *, struct eata_inquiry_data **);
static void	dpt_minphys(struct buf *);
static int	dpt_passthrough(struct dpt_softc *, struct eata_ucp *,
				struct lwp *);
static void	dpt_scsipi_request(struct scsipi_channel *,
				   scsipi_adapter_req_t, void *);
static void	dpt_shutdown(void *);
static void	dpt_sysinfo(struct dpt_softc *, struct dpt_sysinfo *);
static int	dpt_wait(struct dpt_softc *, u_int8_t, u_int8_t, int);

static inline struct dpt_ccb	*dpt_ccb_alloc(struct dpt_softc *);
static inline void	dpt_ccb_free(struct dpt_softc *, struct dpt_ccb *);

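/*
 * Allocate a CCB from the free list.  Since we advertise one opening fewer
 * than we have CCBs, the list is expected to be non-empty here; splbio()
 * protects it against dpt_intr().
 */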
static inline struct dpt_ccb *
dpt_ccb_alloc(struct dpt_softc *sc)
{
	struct dpt_ccb *ccb;
	int s;

	s = splbio();
	ccb = SLIST_FIRST(&sc->sc_ccb_free);
	SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
	splx(s);

	return (ccb);
}

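/* Return a CCB to the free list, clearing its per-command state. */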
static inline void
dpt_ccb_free(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	int s;

	ccb->ccb_flg = 0;
	ccb->ccb_savesp = NULL;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
	splx(s);
}

/*
 * Handle an interrupt from the HBA.
 */
int
dpt_intr(void *cookie)
{
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sp *sp;
	volatile int junk;
	int forus;

	sc = cookie;
	sp = sc->sc_stp;
	forus = 0;

	for (;;) {
		/*
		 * HBA might have interrupted while we were dealing with the
		 * last completed command, since we ACK before we deal; keep
		 * polling.
		 */
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
			break;
		forus = 1;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);

		/* Might have looped before the HBA could clear HA_AUX_INTR. */
		if (sp->sp_ccbid == -1) {
			DELAY(50);

			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
				return (0);

			printf("%s: no status\n", device_xname(&sc->sc_dv));

			/* Re-sync DMA map */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    sc->sc_stpoff, sizeof(struct eata_sp),
			    BUS_DMASYNC_POSTREAD);
		}

		/* Make sure CCB ID from status packet is realistic. */
		if ((u_int)sp->sp_ccbid >= sc->sc_nccbs) {
			printf("%s: bogus status (returned CCB id %d)\n",
			    device_xname(&sc->sc_dv), sp->sp_ccbid);

			/* Ack the interrupt */
			sp->sp_ccbid = -1;
			junk = dpt_inb(sc, HA_STATUS);
			continue;
		}

		/* Sync up DMA map and cache cmd status. */
		ccb = sc->sc_ccbs + sp->sp_ccbid;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
		    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);

		ccb->ccb_hba_status = sp->sp_hba_status & 0x7f;
		ccb->ccb_scsi_status = sp->sp_scsi_status;
		if (ccb->ccb_savesp != NULL)
			memcpy(ccb->ccb_savesp, sp, sizeof(*sp));

		/*
		 * Ack the interrupt and process the CCB.  If this
		 * is a private CCB it's up to dpt_ccb_poll() to
		 * notice.
		 */
		sp->sp_ccbid = -1;
		ccb->ccb_flg |= CCB_INTR;
		junk = dpt_inb(sc, HA_STATUS);
		if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
			dpt_ccb_done(sc, ccb);
		else if ((ccb->ccb_flg & CCB_WAIT) != 0)
			wakeup(ccb);
	}

	return (forus);
}

/*
 * Initialize and attach the HBA.  This is the entry point from bus
 * specific probe-and-attach code.
 */
void
dpt_init(struct dpt_softc *sc, const char *intrstr)
{
	struct scsipi_adapter *adapt;
	struct scsipi_channel *chan;
	struct eata_inquiry_data *ei;
	int i, j, rv, rseg, maxchannel, maxtarget, mapsize;
	bus_dma_segment_t seg;
	struct eata_cfg *ec;
	struct dpt_ccb *ccb;
	char model[__arraycount(ei->ei_model) + __arraycount(ei->ei_suffix) + 1];
	char vendor[__arraycount(ei->ei_vendor) + 1];

	ec = &sc->sc_ec;
	snprintf(dpt_sig.dsDescription, sizeof(dpt_sig.dsDescription),
	    "NetBSD %s DPT driver", osrelease);

	/*
	 * Allocate the CCB/status packet/scratch DMA map and load.
	 */
	sc->sc_nccbs =
	    min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
	sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
	sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
	mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
	    DPT_SCRATCH_SIZE + sizeof(struct eata_sp);

	if ((rv = bus_dmamem_alloc(sc->sc_dmat, mapsize,
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dv,
		    "unable to allocate CCBs, rv = %d\n", rv);
		return;
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
	    (void **)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to map CCBs, rv = %d\n",
		    rv);
		return;
	}

	if ((rv = bus_dmamap_create(sc->sc_dmat, mapsize,
	    mapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(&sc->sc_dv,
		    "unable to create CCB DMA map, rv = %d\n", rv);
		return;
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
	    sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dv,
		    "unable to load CCB DMA map, rv = %d\n", rv);
		return;
	}

	sc->sc_stp = (struct eata_sp *)((char *)sc->sc_ccbs + sc->sc_stpoff);
	sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
	sc->sc_scr = (char *)sc->sc_ccbs + sc->sc_scroff;
	sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
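	/* A CCB id of -1 marks the status packet as empty/consumed. */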
	sc->sc_stp->sp_ccbid = -1;

	/*
	 * Create the CCBs.
	 */
	SLIST_INIT(&sc->sc_ccb_free);
	memset(sc->sc_ccbs, 0, sizeof(struct dpt_ccb) * sc->sc_nccbs);

	for (i = 0, ccb = sc->sc_ccbs; i < sc->sc_nccbs; i++, ccb++) {
		rv = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER,
		    DPT_SG_SIZE, DPT_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (rv) {
			aprint_error_dev(&sc->sc_dv,
			    "can't create ccb dmamap (%d)\n", rv);
			break;
		}

		ccb->ccb_id = i;
		ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
		    CCB_OFF(sc, ccb);
		SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
	}

	if (i == 0) {
		aprint_error_dev(&sc->sc_dv, "unable to create CCBs\n");
		return;
	} else if (i != sc->sc_nccbs) {
		aprint_error_dev(&sc->sc_dv, "%d/%d CCBs created!\n",
		    i, sc->sc_nccbs);
		sc->sc_nccbs = i;
	}

	/* Set shutdownhook before we start any device activity. */
	if (dpt_sdh == NULL)
		dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);

	/* Get the inquiry data from the HBA. */
	dpt_hba_inquire(sc, &ei);

	/*
	 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
	 * dpt0: interrupting at irq 10
	 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
	 */
	for (i = 0; i < __arraycount(ei->ei_vendor) && ei->ei_vendor[i] != ' ';
	    i++)
		vendor[i] = ei->ei_vendor[i];
	vendor[i] = '\0';

	for (i = 0; i < __arraycount(ei->ei_model) && ei->ei_model[i] != ' ';
	    i++)
		model[i] = ei->ei_model[i];
	for (j = 0; j < __arraycount(ei->ei_suffix) && ei->ei_suffix[j] != ' ';
	    i++, j++)
		model[i] = ei->ei_suffix[j];
	model[i] = '\0';

	/* Find the marketing name for the board. */
	for (i = 0; dpt_cname[i] != NULL; i += 2)
		if (memcmp(ei->ei_model + 2, dpt_cname[i], 4) == 0)
			break;

	aprint_normal("%s %s (%s)\n", vendor, dpt_cname[i + 1], model);

	if (intrstr != NULL)
		aprint_normal_dev(&sc->sc_dv, "interrupting at %s\n",
		    intrstr);

	maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
	    EC_F3_MAX_CHANNEL_SHIFT;
	maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
	    EC_F3_MAX_TARGET_SHIFT;

	aprint_normal_dev(&sc->sc_dv,
	    "%d queued commands, %d channel(s), adapter on ID(s)",
	    sc->sc_nccbs, maxchannel + 1);

	for (i = 0; i <= maxchannel; i++) {
		sc->sc_hbaid[i] = ec->ec_hba[3 - i];
		aprint_normal(" %d", sc->sc_hbaid[i]);
	}
	aprint_normal("\n");

	/*
	 * Reset the SCSI controller chip(s) and bus.  XXX Do we need to do
	 * this for each bus?
	 */
	if (dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_BUS_RESET))
		panic("%s: dpt_cmd failed", device_xname(&sc->sc_dv));

	/* Fill in the scsipi_adapter. */
	adapt = &sc->sc_adapt;
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dv;
	adapt->adapt_nchannels = maxchannel + 1;
	adapt->adapt_openings = sc->sc_nccbs - 1;
	adapt->adapt_max_periph = sc->sc_nccbs - 1;
	adapt->adapt_request = dpt_scsipi_request;
	adapt->adapt_minphys = dpt_minphys;

	for (i = 0; i <= maxchannel; i++) {
		/* Fill in the scsipi_channel. */
		chan = &sc->sc_chans[i];
		memset(chan, 0, sizeof(*chan));
		chan->chan_adapter = adapt;
		chan->chan_bustype = &scsi_bustype;
		chan->chan_channel = i;
		chan->chan_ntargets = maxtarget + 1;
		chan->chan_nluns = ec->ec_maxlun + 1;
		chan->chan_id = sc->sc_hbaid[i];
		config_found(&sc->sc_dv, chan, scsiprint);
	}
}

/*
 * Read the EATA configuration from the HBA and perform some sanity checks.
 */
int
dpt_readcfg(struct dpt_softc *sc)
{
	struct eata_cfg *ec;
	int i, j, stat;
	u_int16_t *p;

	ec = &sc->sc_ec;

	/* Older firmware may puke if we talk to it too soon after reset. */
	dpt_outb(sc, HA_COMMAND, CP_RESET);
	DELAY(750000);

	for (i = 1000; i; i--) {
		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}

	if (i == 0) {
		printf("%s: HBA not ready after reset (hba status:%02x)\n",
		    device_xname(&sc->sc_dv), dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	while ((((stat = dpt_inb(sc, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
		/* RAID drives still spinning up? */
		if (dpt_inb(sc, HA_ERROR) != 'D' ||
		    dpt_inb(sc, HA_ERROR + 1) != 'P' ||
		    dpt_inb(sc, HA_ERROR + 2) != 'T') {
			printf("%s: HBA not ready\n", device_xname(&sc->sc_dv));
			return (-1);
		}
	}

	/*
	 * Issue the read-config command and wait for the data to appear.
	 *
	 * Apparently certain firmware revisions won't DMA later on if we
	 * request the config data using PIO, but it makes things a lot
	 * easier, as no DMA setup is required.
	 */
	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
	memset(ec, 0, sizeof(*ec));
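	/* Number of 16-bit words up to and including ec_cfglen. */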
	i = (offsetof(struct eata_cfg, ec_cfglen) +
	    sizeof(ec->ec_cfglen)) >> 1;
	p = (u_int16_t *)ec;

	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
		printf("%s: cfg data didn't appear (hba status:%02x)\n",
		    device_xname(&sc->sc_dv), dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/* Begin reading. */
	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
	    - offsetof(struct eata_cfg, ec_cfglen)
	    - sizeof(ec->ec_cfglen)))
		i = sizeof(struct eata_cfg)
		    - offsetof(struct eata_cfg, ec_cfglen)
		    - sizeof(ec->ec_cfglen);

	j = i + offsetof(struct eata_cfg, ec_cfglen) +
	    sizeof(ec->ec_cfglen);
	i >>= 1;

	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		(void)bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Defaults for older firmware... */
	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;

	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
		aprint_error_dev(&sc->sc_dv, "HBA error\n");
		return (-1);
	}

	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
		aprint_error_dev(&sc->sc_dv, "EATA signature mismatch\n");
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
		aprint_error_dev(&sc->sc_dv, "ec_hba field invalid\n");
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
		aprint_error_dev(&sc->sc_dv, "DMA not supported\n");
		return (-1);
	}

	return (0);
}

/*
 * Our `shutdownhook' to cleanly shut down the HBA.  The HBA must flush all
 * data from its cache and mark array groups as clean.
 *
 * XXX This doesn't always work (i.e., the HBA may still be flushing after
 * we tell root that it's safe to power off).
 */
static void
dpt_shutdown(void *cookie)
{
	extern struct cfdriver dpt_cd;
	struct dpt_softc *sc;
	int i;

	printf("shutting down dpt devices...");

	for (i = 0; i < dpt_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&dpt_cd, i)) == NULL)
			continue;
		dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_POWEROFF_WARN);
	}

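	/* Give the HBAs ten seconds (in microseconds) to finish flushing. */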
	delay(10000*1000);
	printf(" done\n");
}

/*
 * Send an EATA command to the HBA.
 */
static int
dpt_cmd(struct dpt_softc *sc, struct dpt_ccb *ccb, int eatacmd, int icmd)
{
	u_int32_t pa;
	int i, s;

	s = splbio();

	for (i = 20000; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == 0) {
		splx(s);
		return (-1);
	}

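	/*
	 * Write the physical CCB address (or 0 for an immediate command)
	 * into the mailbox registers, least significant byte first.
	 */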
	pa = (ccb != NULL ? ccb->ccb_ccbpa : 0);
	dpt_outb(sc, HA_DMA_BASE + 0, (pa      ) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 1, (pa >>  8) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 2, (pa >> 16) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 3, (pa >> 24) & 0xff);

	if (eatacmd == CP_IMMEDIATE)
		dpt_outb(sc, HA_ICMD, icmd);

	dpt_outb(sc, HA_COMMAND, eatacmd);

	splx(s);
	return (0);
}

/*
 * Wait for the HBA status register to reach a specific state.
 */
static int
dpt_wait(struct dpt_softc *sc, u_int8_t mask, u_int8_t state, int ms)
{

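	/* Poll in 100us steps for at most `ms' milliseconds. */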
	for (ms *= 10; ms != 0; ms--) {
		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
			return (0);
		DELAY(100);
	}

	return (-1);
}

/*
 * Spin waiting for a command to finish.  The timeout value from the CCB is
 * used.  The CCB must be marked with CCB_PRIVATE, otherwise it will get
 * recycled before we get a look at it.
 */
static int
dpt_ccb_poll(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	int i, s;

#ifdef DEBUG
	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
		panic("dpt_ccb_poll: called for non-CCB_PRIVATE request");
#endif

	s = splbio();

	if ((ccb->ccb_flg & CCB_INTR) != 0) {
		splx(s);
		return (0);
	}

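	/* ccb_timeout is in milliseconds; poll in 50us steps. */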
	for (i = ccb->ccb_timeout * 20; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
			dpt_intr(sc);
		if ((ccb->ccb_flg & CCB_INTR) != 0)
			break;
		DELAY(50);
	}

	splx(s);
	return (i == 0);
}

/*
 * We have a command which has been processed by the HBA, so now we look to
 * see how the operation went.  CCBs marked CCB_PRIVATE are not passed here
 * by dpt_intr().
 */
static void
dpt_ccb_done(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;

	xs = ccb->ccb_xs;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("dpt_ccb_done\n"));

	/*
	 * If we were a data transfer, unload the map that described the
	 * data buffer.
	 */
	if (xs->datalen != 0)
		dpt_ccb_unmap(sc, ccb);

	if (xs->error == XS_NOERROR) {
		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
			switch (ccb->ccb_hba_status) {
			case SP_HBA_ERROR_SEL_TO:
				xs->error = XS_SELTIMEOUT;
				break;
			case SP_HBA_ERROR_RESET:
				xs->error = XS_RESET;
				break;
			default:
				printf("%s: HBA status %x\n",
				    device_xname(&sc->sc_dv),
				    ccb->ccb_hba_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else if (ccb->ccb_scsi_status != SCSI_OK) {
			switch (ccb->ccb_scsi_status) {
			case SCSI_CHECK:
				memcpy(&xs->sense.scsi_sense, &ccb->ccb_sense,
				    sizeof(xs->sense.scsi_sense));
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
			case SCSI_QUEUE_FULL:
				xs->error = XS_BUSY;
				break;
			default:
				scsipi_printaddr(xs->xs_periph);
				printf("SCSI status %x\n",
				    ccb->ccb_scsi_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			xs->resid = 0;

		xs->status = ccb->ccb_scsi_status;
	}

	/* Free up the CCB and mark the command as done. */
	dpt_ccb_free(sc, ccb);
	scsipi_done(xs);
}

/*
 * Specified CCB has timed out, abort it.
 */
static void
dpt_ccb_abort(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	int s;

	xs = ccb->ccb_xs;
	periph = xs->xs_periph;

	scsipi_printaddr(periph);
	printf("timed out (status:%02x aux status:%02x)",
	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));

	s = splbio();

	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
		/* Abort timed out, reset the HBA */
		printf(" AGAIN, resetting HBA\n");
		dpt_outb(sc, HA_COMMAND, CP_RESET);
		DELAY(750000);
	} else {
		/* Abort the operation that has timed out */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
		ccb->ccb_flg |= CCB_ABORT;
		/* Start the abort */
		if (dpt_cmd(sc, ccb, CP_IMMEDIATE, CPI_SPEC_ABORT))
			aprint_error_dev(&sc->sc_dv, "dpt_cmd failed\n");
	}

	splx(s);
}

/*
 * Map a data transfer.
 */
static int
dpt_ccb_map(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;
	bus_dmamap_t xfer;
	bus_dma_segment_t *ds;
	struct eata_sg *sg;
	struct eata_cp *cp;
	int rv, i;

	xs = ccb->ccb_xs;
	xfer = ccb->ccb_dmamap_xfer;
	cp = &ccb->ccb_eata_cp;

	rv = bus_dmamap_load(sc->sc_dmat, xfer, xs->data, xs->datalen, NULL,
	    ((xs->xs_control & XS_CTL_NOSLEEP) != 0 ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
	    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));

	switch (rv) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
		printf("%s: error %d loading map\n",
		    device_xname(&sc->sc_dv), rv);
		break;
	}

	if (xs->error != XS_NOERROR) {
		dpt_ccb_free(sc, ccb);
		scsipi_done(xs);
		return (-1);
	}

	bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	/* Don't bother using scatter/gather for just 1 seg */
	if (xfer->dm_nsegs == 1) {
		cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
		cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
	} else {
		/*
		 * Load the hardware scatter/gather map with
		 * the contents of the DMA map.
		 */
		sg = ccb->ccb_sg;
		ds = xfer->dm_segs;
		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
			sg->sg_addr = htobe32(ds->ds_addr);
			sg->sg_len = htobe32(ds->ds_len);
		}
		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
		    sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
		cp->cp_ctl0 |= CP_C0_SCATTER;
	}

	return (0);
}

/*
 * Unmap a transfer.
 */
static void
dpt_ccb_unmap(struct dpt_softc *sc, struct dpt_ccb *ccb)
{

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_dmamap_xfer->dm_mapsize,
	    (ccb->ccb_eata_cp.cp_ctl0 & CP_C0_DATA_IN) != 0 ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
static void
dpt_minphys(struct buf *bp)
{

	if (bp->b_bcount > DPT_MAX_XFER)
		bp->b_bcount = DPT_MAX_XFER;
	minphys(bp);
}

/*
 * Start a SCSI command.
 */
static void
dpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct dpt_softc *sc;
	struct scsipi_xfer *xs;
	int flags;
	struct scsipi_periph *periph;
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	sc = (struct dpt_softc *)chan->chan_adapter->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

#ifdef DIAGNOSTIC
		/* Cmds must be no more than 12 bytes for us. */
		if (xs->cmdlen > 12) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif
		/*
		 * XXX We can't reset devices just yet.  Apparently some
		 * older firmware revisions don't even support it.
		 */
		if ((flags & XS_CTL_RESET) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}

		/*
		 * Get a CCB and fill it.
		 */
		ccb = dpt_ccb_alloc(sc);
		ccb->ccb_xs = xs;
		ccb->ccb_timeout = xs->timeout;

		cp = &ccb->ccb_eata_cp;
		memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
		cp->cp_ccbid = ccb->ccb_id;
		cp->cp_senselen = sizeof(ccb->ccb_sense);
		cp->cp_stataddr = htobe32(sc->sc_stppa);
		cp->cp_ctl0 = CP_C0_AUTO_SENSE;
		cp->cp_ctl1 = 0;
		cp->cp_ctl2 = 0;
		cp->cp_ctl3 = periph->periph_target << CP_C3_ID_SHIFT;
		cp->cp_ctl3 |= chan->chan_channel << CP_C3_CHANNEL_SHIFT;
		cp->cp_ctl4 = periph->periph_lun << CP_C4_LUN_SHIFT;
		cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;

		if ((flags & XS_CTL_DATA_IN) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_IN;
		if ((flags & XS_CTL_DATA_OUT) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_OUT;
		if (sc->sc_hbaid[chan->chan_channel] == periph->periph_target)
			cp->cp_ctl0 |= CP_C0_INTERPRET;

		/* Synchronous xfers mustn't write-back through the cache. */
		if (xs->bp != NULL)
			if ((xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
				cp->cp_ctl2 |= CP_C2_NO_CACHE;

		cp->cp_senseaddr =
		    htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));

		if (xs->datalen != 0) {
			if (dpt_ccb_map(sc, ccb))
				break;
		} else {
			cp->cp_dataaddr = 0;
			cp->cp_datalen = 0;
		}

		/* Sync up CCB and status packet. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);

		/*
		 * Start the command.
		 */
		if ((xs->xs_control & XS_CTL_POLL) != 0)
			ccb->ccb_flg |= CCB_PRIVATE;

		if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0)) {
			aprint_error_dev(&sc->sc_dv, "dpt_cmd failed\n");
			xs->error = XS_DRIVER_STUFFUP;
			if (xs->datalen != 0)
				dpt_ccb_unmap(sc, ccb);
			dpt_ccb_free(sc, ccb);
			break;
		}

		if ((xs->xs_control & XS_CTL_POLL) == 0)
			break;

		if (dpt_ccb_poll(sc, ccb)) {
			dpt_ccb_abort(sc, ccb);
			/* Wait for abort to complete... */
			if (dpt_ccb_poll(sc, ccb))
				dpt_ccb_abort(sc, ccb);
		}

		dpt_ccb_done(sc, ccb);
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported, since we allocate the maximum number of
		 * CCBs up front.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * This will be handled by the HBA itself, and we can't
		 * modify that (ditto for tagged queueing).
		 */
		break;
	}
}

/*
 * Get inquiry data from the adapter.
 */
static void
dpt_hba_inquire(struct dpt_softc *sc, struct eata_inquiry_data **ei)
{
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	*ei = (struct eata_inquiry_data *)sc->sc_scr;

	/* Get a CCB and mark as private */
	ccb = dpt_ccb_alloc(sc);
	ccb->ccb_flg |= CCB_PRIVATE;
	ccb->ccb_timeout = 200;

	/* Put all the arguments into the CCB. */
	cp = &ccb->ccb_eata_cp;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = 0;
	cp->cp_stataddr = htobe32(sc->sc_stppa);
	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
	cp->cp_ctl1 = 0;
	cp->cp_ctl2 = 0;
	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;

	/* Put together the SCSI inquiry command. */
	memset(&cp->cp_cdb_cmd, 0, 12);
	cp->cp_cdb_cmd = INQUIRY;
	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);

	/* Sync up CCB, status packet and scratch area. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);

	/* Start the command and poll on completion. */
	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", device_xname(&sc->sc_dv));

	if (dpt_ccb_poll(sc, ccb))
		panic("%s: inquiry timed out", device_xname(&sc->sc_dv));

	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
	    ccb->ccb_scsi_status != SCSI_OK)
		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
		    device_xname(&sc->sc_dv), ccb->ccb_hba_status,
		    ccb->ccb_scsi_status);

	/* Sync up the DMA map and free CCB, returning. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
	dpt_ccb_free(sc, ccb);
}

int
dptopen(dev_t dev, int flag, int mode, struct lwp *l)
{

	if (device_lookup(&dpt_cd, minor(dev)) == NULL)
		return (ENXIO);

	return (0);
}

int
dptioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct dpt_softc *sc;
	int rv;

	sc = device_lookup(&dpt_cd, minor(dev));

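	/*
	 * Mask off the IOC direction and size bits and match only the
	 * ioctl group and command number.
	 */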
	switch (cmd & 0xffff) {
	case DPT_SIGNATURE:
		memcpy(data, &dpt_sig, min(IOCPARM_LEN(cmd), sizeof(dpt_sig)));
		break;

	case DPT_CTRLINFO:
		dpt_ctlrinfo(sc, (struct dpt_eata_ctlrinfo *)data);
		break;

	case DPT_SYSINFO:
		dpt_sysinfo(sc, (struct dpt_sysinfo *)data);
		break;

	case DPT_BLINKLED:
		/*
		 * XXX Don't know how to get this from EATA boards.  I think
		 * it involves waiting for a "DPT" sequence from HA_ERROR
		 * and then reading one of the HA_ICMD registers.
		 */
		*(int *)data = 0;
		break;

	case DPT_EATAUSRCMD:
		rv = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (rv)
			return (rv);

		if (IOCPARM_LEN(cmd) < sizeof(struct eata_ucp)) {
			DPRINTF(("%s: ucp %lu vs %lu bytes\n",
			    device_xname(&sc->sc_dv), IOCPARM_LEN(cmd),
			    (unsigned long int)sizeof(struct eata_ucp)));
			return (EINVAL);
		}

		if (sc->sc_uactive++)
			tsleep(&sc->sc_uactive, PRIBIO, "dptslp", 0);

		rv = dpt_passthrough(sc, (struct eata_ucp *)data, l);

		sc->sc_uactive--;
		wakeup_one(&sc->sc_uactive);
		return (rv);

	default:
		DPRINTF(("%s: unknown ioctl %lx\n",
		    device_xname(&sc->sc_dv), cmd));
		return (ENOTTY);
	}

	return (0);
}

void
dpt_ctlrinfo(struct dpt_softc *sc, struct dpt_eata_ctlrinfo *info)
{

	memset(info, 0, sizeof(*info));
	info->id = sc->sc_hbaid[0];
	info->vect = sc->sc_isairq;
	info->base = sc->sc_isaport;
	info->qdepth = sc->sc_nccbs;
	info->sgsize = DPT_SG_SIZE * sizeof(struct eata_sg);
	info->heads = 16;
	info->sectors = 63;
	info->do_drive32 = 1;
	info->primary = 1;
	info->cpLength = sizeof(struct eata_cp);
	info->spLength = sizeof(struct eata_sp);
	info->drqNum = sc->sc_isadrq;
}

void
dpt_sysinfo(struct dpt_softc *sc, struct dpt_sysinfo *info)
{
#ifdef i386
	int i, j;
#endif

	memset(info, 0, sizeof(*info));

#ifdef i386
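	/*
	 * Read the drive types from CMOS register 0x12 (standard AT CMOS
	 * layout: drive 0 in the high nibble, drive 1 in the low nibble).
	 * A nibble of 0xf means the type lives in the extended registers
	 * 0x19/0x1a.
	 */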
	outb(0x70, 0x12);
	i = inb(0x71);
	j = i >> 4;
	if (j == 0x0f) {
		outb(0x70, 0x19);
		j = inb(0x71);
	}
	info->drive0CMOS = j;

	j = i & 0x0f;
	if (j == 0x0f) {
		outb(0x70, 0x1a);
		j = inb(0x71);
	}
	info->drive1CMOS = j;
	info->processorFamily = dpt_sig.dsProcessorFamily;

	/*
	 * Get the conventional memory size from CMOS.
	 */
	outb(0x70, 0x16);
	j = inb(0x71);
	j <<= 8;
	outb(0x70, 0x15);
	j |= inb(0x71);
	info->conventionalMemSize = j;

	/*
	 * Get the extended memory size from CMOS.
	 */
	outb(0x70, 0x31);
	j = inb(0x71);
	j <<= 8;
	outb(0x70, 0x30);
	j |= inb(0x71);
	info->extendedMemSize = j;

	switch (cpu_class) {
	case CPUCLASS_386:
		info->processorType = PROC_386;
		break;
	case CPUCLASS_486:
		info->processorType = PROC_486;
		break;
	case CPUCLASS_586:
		info->processorType = PROC_PENTIUM;
		break;
	case CPUCLASS_686:
	default:
		info->processorType = PROC_SEXIUM;
		break;
	}

	info->flags = SI_CMOS_Valid | SI_BusTypeValid |
	    SI_MemorySizeValid | SI_NO_SmartROM;
#else
	info->flags = SI_BusTypeValid | SI_NO_SmartROM;
#endif

	info->busType = sc->sc_bustype;
}

int
dpt_passthrough(struct dpt_softc *sc, struct eata_ucp *ucp, struct lwp *l)
{
	struct dpt_ccb *ccb;
	struct eata_sp sp;
	struct eata_cp *cp;
	struct eata_sg *sg;
	bus_dmamap_t xfer = 0;		/* XXX: gcc */
	bus_dma_segment_t *ds;
	int datain = 0, s, rv = 0, i, uslen;	/* XXX: gcc */

	/*
	 * Get a CCB and fill.
	 */
	ccb = dpt_ccb_alloc(sc);
	ccb->ccb_flg |= CCB_PRIVATE | CCB_WAIT;
	ccb->ccb_timeout = 0;
	ccb->ccb_savesp = &sp;

	cp = &ccb->ccb_eata_cp;
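	/*
	 * Copy in the user's command packet, substituting our own CCB id,
	 * sense buffer and status packet address.  Remember the user's
	 * sense length for the copyout at the end.
	 */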
	memcpy(cp, ucp->ucp_cp, sizeof(ucp->ucp_cp));
	uslen = cp->cp_senselen;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
	cp->cp_stataddr = htobe32(sc->sc_stppa);

	/*
	 * Map data transfers.
	 */
	if (ucp->ucp_dataaddr && ucp->ucp_datalen) {
		xfer = ccb->ccb_dmamap_xfer;
		datain = ((cp->cp_ctl0 & CP_C0_DATA_IN) != 0);

		if (ucp->ucp_datalen > DPT_MAX_XFER) {
			DPRINTF(("%s: xfer too big\n",
			    device_xname(&sc->sc_dv)));
			dpt_ccb_free(sc, ccb);
			return (EFBIG);
		}
		rv = bus_dmamap_load(sc->sc_dmat, xfer,
		    ucp->ucp_dataaddr, ucp->ucp_datalen, l->l_proc,
		    BUS_DMA_WAITOK | BUS_DMA_STREAMING |
		    (datain ? BUS_DMA_READ : BUS_DMA_WRITE));
		if (rv != 0) {
			DPRINTF(("%s: map failed; %d\n",
			    device_xname(&sc->sc_dv), rv));
			dpt_ccb_free(sc, ccb);
			return (rv);
		}

		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
		    (datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));

		sg = ccb->ccb_sg;
		ds = xfer->dm_segs;
		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
			sg->sg_addr = htobe32(ds->ds_addr);
			sg->sg_len = htobe32(ds->ds_len);
		}
		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
		    sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
		cp->cp_ctl0 |= CP_C0_SCATTER;
	} else {
		cp->cp_dataaddr = 0;
		cp->cp_datalen = 0;
	}

	/*
	 * Start the command and sleep on completion.
	 */
	uvm_lwp_hold(curlwp);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	s = splbio();
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", device_xname(&sc->sc_dv));
	tsleep(ccb, PWAIT, "dptucmd", 0);
	splx(s);
	uvm_lwp_rele(curlwp);

	/*
	 * Sync up the DMA map and copy out results.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);

	if (cp->cp_datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
		    (datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(sc->sc_dmat, xfer);
	}

	if (ucp->ucp_stataddr != NULL) {
		rv = copyout(&sp, ucp->ucp_stataddr, sizeof(sp));
		if (rv != 0) {
			DPRINTF(("%s: sp copyout() failed\n",
			    device_xname(&sc->sc_dv)));
		}
	}
	if (rv == 0 && ucp->ucp_senseaddr != NULL) {
		i = min(uslen, sizeof(ccb->ccb_sense));
		rv = copyout(&ccb->ccb_sense, ucp->ucp_senseaddr, i);
		if (rv != 0) {
			DPRINTF(("%s: sense copyout() failed\n",
			    device_xname(&sc->sc_dv)));
		}
	}

	ucp->ucp_hstatus = (u_int8_t)ccb->ccb_hba_status;
	ucp->ucp_tstatus = (u_int8_t)ccb->ccb_scsi_status;
	dpt_ccb_free(sc, ccb);
	return (rv);
}