/*	$NetBSD: dpt.c,v 1.52 2006/10/12 01:31:00 christos Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
 * Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
 * Copyright (c) 2000 Adaptec Corporation
 * All rights reserved.
 *
 * TERMS AND CONDITIONS OF USE
 *
 * Redistribution and use in source form, with or without modification, are
 * permitted provided that redistributions of source code must retain the
 * above copyright notice, this list of conditions and the following disclaimer.
 *
 * This software is provided `as is' by Adaptec and any express or implied
 * warranties, including, but not limited to, the implied warranties of
 * merchantability and fitness for a particular purpose, are disclaimed. In no
 * event shall Adaptec be liable for any direct, indirect, incidental, special,
 * exemplary or consequential damages (including, but not limited to,
 * procurement of substitute goods or services; loss of use, data, or profits;
 * or business interruptions) however caused and on any theory of liability,
 * whether in contract, strict liability, or tort (including negligence or
 * otherwise) arising in any way out of the use of this driver software, even
 * if advised of the possibility of such damage.
 */

/*
 * Portions of this code fall under the following copyright:
 *
 * Originally written by Julian Elischer (julian@tfs.com)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.52 2006/10/12 01:31:00 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/conf.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#ifdef i386
#include <machine/pio.h>
#endif

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/dptreg.h>
#include <dev/ic/dptvar.h>

#include <dev/i2o/dptivar.h>

#ifdef DEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define	dpt_inb(x, o)		\
    bus_space_read_1((x)->sc_iot, (x)->sc_ioh, (o))
#define	dpt_outb(x, o, d)	\
    bus_space_write_1((x)->sc_iot, (x)->sc_ioh, (o), (d))

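/*
 * Model-number prefix to marketing-name pairs, terminated by a NULL
 * entry that supplies the "<unknown>" fallback.  The prefix is matched
 * against bytes 2-5 of the model field returned by the EATA inquiry
 * (see dpt_init()).
 */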
static const char * const dpt_cname[] = {
	"3334", "SmartRAID IV",
	"3332", "SmartRAID IV",
	"2144", "SmartCache IV",
	"2044", "SmartCache IV",
	"2142", "SmartCache IV",
	"2042", "SmartCache IV",
	"2041", "SmartCache IV",
	"3224", "SmartRAID III",
	"3222", "SmartRAID III",
	"3021", "SmartRAID III",
	"2124", "SmartCache III",
	"2024", "SmartCache III",
	"2122", "SmartCache III",
	"2022", "SmartCache III",
	"2021", "SmartCache III",
	"2012", "SmartCache Plus",
	"2011", "SmartCache Plus",
	NULL,   "<unknown>",
};

static void	*dpt_sdh;

dev_type_open(dptopen);
dev_type_ioctl(dptioctl);

const struct cdevsw dpt_cdevsw = {
	dptopen, nullclose, noread, nowrite, dptioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

extern struct cfdriver dpt_cd;

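/*
 * Driver signature, handed to userland management tools via the
 * DPT_SIGNATURE ioctl.  The description string is filled in with the
 * OS release at attach time (see dpt_init()).
 */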
static struct dpt_sig dpt_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'},
	SIG_VERSION,
#if defined(i386)
	PROC_INTEL,
#elif defined(powerpc)
	PROC_POWERPC,
#elif defined(alpha)
	PROC_ALPHA,
#elif defined(__mips__)
	PROC_MIPS,
#elif defined(sparc64)
	PROC_ULTRASPARC,
#else
	0xff,
#endif
#if defined(i386)
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#else
	0,
#endif
	FT_HBADRVR,
	0,
	OEM_DPT,
	OS_FREE_BSD,	/* XXX */
	CAP_ABOVE16MB,
	DEV_ALL,
	ADF_ALL_EATA,
	0,
	0,
	DPT_VERSION,
	DPT_REVISION,
	DPT_SUBREVISION,
	DPT_MONTH,
	DPT_DAY,
	DPT_YEAR,
	""		/* Will be filled later */
};

static void	dpt_ccb_abort(struct dpt_softc *, struct dpt_ccb *);
static void	dpt_ccb_done(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_ccb_map(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_ccb_poll(struct dpt_softc *, struct dpt_ccb *);
static void	dpt_ccb_unmap(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_cmd(struct dpt_softc *, struct dpt_ccb *, int, int);
static void	dpt_ctlrinfo(struct dpt_softc *, struct dpt_eata_ctlrinfo *);
static void	dpt_hba_inquire(struct dpt_softc *, struct eata_inquiry_data **);
static void	dpt_minphys(struct buf *);
static int	dpt_passthrough(struct dpt_softc *, struct eata_ucp *,
				struct lwp *);
static void	dpt_scsipi_request(struct scsipi_channel *,
				   scsipi_adapter_req_t, void *);
static void	dpt_shutdown(void *);
static void	dpt_sysinfo(struct dpt_softc *, struct dpt_sysinfo *);
static int	dpt_wait(struct dpt_softc *, u_int8_t, u_int8_t, int);

static inline struct dpt_ccb	*dpt_ccb_alloc(struct dpt_softc *);
static inline void		dpt_ccb_free(struct dpt_softc *, struct dpt_ccb *);

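/*
 * CCB freelist management.  The freelist is protected by splbio(), and
 * dpt_ccb_alloc() assumes it is never empty: dpt_init() advertises one
 * opening fewer than the number of CCBs to the midlayer, which should
 * leave a spare CCB for the driver's own (CCB_PRIVATE) commands.
 */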
static inline struct dpt_ccb *
dpt_ccb_alloc(struct dpt_softc *sc)
{
	struct dpt_ccb *ccb;
	int s;

	s = splbio();
	ccb = SLIST_FIRST(&sc->sc_ccb_free);
	SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
	splx(s);

	return (ccb);
}

static inline void
dpt_ccb_free(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	int s;

	ccb->ccb_flg = 0;
	ccb->ccb_savesp = NULL;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
	splx(s);
}

/*
 * Handle an interrupt from the HBA.
 */
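/*
 * Completion protocol: the HBA DMAs an EATA status packet (struct
 * eata_sp) to sc_stp and asserts HA_AUX_INTR.  Reading HA_STATUS
 * acknowledges the interrupt; sp_ccbid is reset to -1 once the packet
 * has been consumed, so that a stale packet can be distinguished from
 * a fresh one on the next iteration.
 */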
int
dpt_intr(void *cookie)
{
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sp *sp;
	volatile int junk;
	int forus;

	sc = cookie;
	sp = sc->sc_stp;
	forus = 0;

	for (;;) {
		/*
		 * HBA might have interrupted while we were dealing with the
		 * last completed command, since we ACK before we deal; keep
		 * polling.
		 */
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
			break;
		forus = 1;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);

		/* Might have looped before the HBA could clear HA_AUX_INTR. */
		if (sp->sp_ccbid == -1) {
			DELAY(50);

			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
				return (0);

			printf("%s: no status\n", sc->sc_dv.dv_xname);

			/* Re-sync DMA map */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    sc->sc_stpoff, sizeof(struct eata_sp),
			    BUS_DMASYNC_POSTREAD);
		}

		/* Make sure CCB ID from status packet is realistic. */
		if ((u_int)sp->sp_ccbid >= sc->sc_nccbs) {
			printf("%s: bogus status (returned CCB id %d)\n",
			    sc->sc_dv.dv_xname, sp->sp_ccbid);

			/* Ack the interrupt */
			sp->sp_ccbid = -1;
			junk = dpt_inb(sc, HA_STATUS);
			continue;
		}

		/* Sync up DMA map and cache cmd status. */
		ccb = sc->sc_ccbs + sp->sp_ccbid;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
		    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);

		ccb->ccb_hba_status = sp->sp_hba_status & 0x7f;
		ccb->ccb_scsi_status = sp->sp_scsi_status;
		if (ccb->ccb_savesp != NULL)
			memcpy(ccb->ccb_savesp, sp, sizeof(*sp));

		/*
		 * Ack the interrupt and process the CCB.  If this
		 * is a private CCB it's up to dpt_ccb_poll() to
		 * notice.
		 */
		sp->sp_ccbid = -1;
		ccb->ccb_flg |= CCB_INTR;
		junk = dpt_inb(sc, HA_STATUS);
		if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
			dpt_ccb_done(sc, ccb);
		else if ((ccb->ccb_flg & CCB_WAIT) != 0)
			wakeup(ccb);
	}

	return (forus);
}

/*
 * Initialize and attach the HBA.  This is the entry point from bus
 * specific probe-and-attach code.
 */
void
dpt_init(struct dpt_softc *sc, const char *intrstr)
{
	struct scsipi_adapter *adapt;
	struct scsipi_channel *chan;
	struct eata_inquiry_data *ei;
	int i, j, rv, rseg, maxchannel, maxtarget, mapsize;
	bus_dma_segment_t seg;
	struct eata_cfg *ec;
	struct dpt_ccb *ccb;
	char model[__arraycount(ei->ei_model) + __arraycount(ei->ei_suffix) + 1];
	char vendor[__arraycount(ei->ei_vendor) + 1];

	ec = &sc->sc_ec;
	snprintf(dpt_sig.dsDescription, sizeof(dpt_sig.dsDescription),
	    "NetBSD %s DPT driver", osrelease);

	/*
	 * Allocate the CCB/status packet/scratch DMA map and load.
	 */
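	/*
	 * The single DMA area is laid out as sc_nccbs CCBs, followed by
	 * one EATA status packet, followed by DPT_SCRATCH_SIZE bytes of
	 * scratch space; sc_stpoff and sc_scroff record the offsets of
	 * the latter two within the map.
	 */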
	sc->sc_nccbs =
	    min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
	sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
	sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
	mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
	    DPT_SCRATCH_SIZE + sizeof(struct eata_sp);

	if ((rv = bus_dmamem_alloc(sc->sc_dmat, mapsize,
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: unable to allocate CCBs, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
	    (caddr_t *)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error("%s: unable to map CCBs, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamap_create(sc->sc_dmat, mapsize,
	    mapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error("%s: unable to create CCB DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
	    sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: unable to load CCB DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	sc->sc_stp = (struct eata_sp *)((caddr_t)sc->sc_ccbs + sc->sc_stpoff);
	sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
	sc->sc_scr = (caddr_t)sc->sc_ccbs + sc->sc_scroff;
	sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
	sc->sc_stp->sp_ccbid = -1;

	/*
	 * Create the CCBs.
	 */
	SLIST_INIT(&sc->sc_ccb_free);
	memset(sc->sc_ccbs, 0, sizeof(struct dpt_ccb) * sc->sc_nccbs);

	for (i = 0, ccb = sc->sc_ccbs; i < sc->sc_nccbs; i++, ccb++) {
		rv = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER,
		    DPT_SG_SIZE, DPT_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (rv) {
			aprint_error("%s: can't create ccb dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			break;
		}

		ccb->ccb_id = i;
		ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
		    CCB_OFF(sc, ccb);
		SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
	}

	if (i == 0) {
		aprint_error("%s: unable to create CCBs\n", sc->sc_dv.dv_xname);
		return;
	} else if (i != sc->sc_nccbs) {
		aprint_error("%s: %d/%d CCBs created!\n", sc->sc_dv.dv_xname,
		    i, sc->sc_nccbs);
		sc->sc_nccbs = i;
	}

	/* Set shutdownhook before we start any device activity. */
	if (dpt_sdh == NULL)
		dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);

	/* Get the inquiry data from the HBA. */
	dpt_hba_inquire(sc, &ei);

	/*
	 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
	 * dpt0: interrupting at irq 10
	 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
	 */
	/*
	 * Copy out the vendor and model, stopping at the first space;
	 * check the index before dereferencing so that an unterminated
	 * field can't be read past its end.
	 */
	for (i = 0; i < __arraycount(ei->ei_vendor) && ei->ei_vendor[i] != ' ';
	    i++)
		vendor[i] = ei->ei_vendor[i];
	vendor[i] = '\0';

	for (i = 0; i < __arraycount(ei->ei_model) && ei->ei_model[i] != ' ';
	    i++)
		model[i] = ei->ei_model[i];
	for (j = 0; j < __arraycount(ei->ei_suffix) && ei->ei_suffix[j] != ' ';
	    i++, j++)
		model[i] = ei->ei_suffix[j];
	model[i] = '\0';

	/* Find the marketing name for the board. */
	for (i = 0; dpt_cname[i] != NULL; i += 2)
		if (memcmp(ei->ei_model + 2, dpt_cname[i], 4) == 0)
			break;

	aprint_normal("%s %s (%s)\n", vendor, dpt_cname[i + 1], model);

	if (intrstr != NULL)
		aprint_normal("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

	maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
	    EC_F3_MAX_CHANNEL_SHIFT;
	maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
	    EC_F3_MAX_TARGET_SHIFT;

	aprint_normal("%s: %d queued commands, %d channel(s), adapter on ID(s)",
	    sc->sc_dv.dv_xname, sc->sc_nccbs, maxchannel + 1);

	for (i = 0; i <= maxchannel; i++) {
		sc->sc_hbaid[i] = ec->ec_hba[3 - i];
		aprint_normal(" %d", sc->sc_hbaid[i]);
	}
	aprint_normal("\n");

	/*
	 * Reset the SCSI controller chip(s) and bus.  XXX Do we need to do
	 * this for each bus?
	 */
	if (dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_BUS_RESET))
		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);

	/* Fill in the scsipi_adapter. */
	adapt = &sc->sc_adapt;
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dv;
	adapt->adapt_nchannels = maxchannel + 1;
	adapt->adapt_openings = sc->sc_nccbs - 1;
	adapt->adapt_max_periph = sc->sc_nccbs - 1;
	adapt->adapt_request = dpt_scsipi_request;
	adapt->adapt_minphys = dpt_minphys;

	for (i = 0; i <= maxchannel; i++) {
		/* Fill in the scsipi_channel. */
		chan = &sc->sc_chans[i];
		memset(chan, 0, sizeof(*chan));
		chan->chan_adapter = adapt;
		chan->chan_bustype = &scsi_bustype;
		chan->chan_channel = i;
		chan->chan_ntargets = maxtarget + 1;
		chan->chan_nluns = ec->ec_maxlun + 1;
		chan->chan_id = sc->sc_hbaid[i];
		config_found(&sc->sc_dv, chan, scsiprint);
	}
}

/*
 * Read the EATA configuration from the HBA and perform some sanity checks.
 */
int
dpt_readcfg(struct dpt_softc *sc)
{
	struct eata_cfg *ec;
	int i, j, stat;
	u_int16_t *p;

	ec = &sc->sc_ec;

	/* Older firmware may puke if we talk to it too soon after reset. */
	dpt_outb(sc, HA_COMMAND, CP_RESET);
	DELAY(750000);

	for (i = 1000; i; i--) {
		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}

	if (i == 0) {
		printf("%s: HBA not ready after reset (hba status:%02x)\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	while ((((stat = dpt_inb(sc, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
		/* RAID drives still spinning up? */
		if (dpt_inb(sc, HA_ERROR) != 'D' ||
		    dpt_inb(sc, HA_ERROR + 1) != 'P' ||
		    dpt_inb(sc, HA_ERROR + 2) != 'T') {
			printf("%s: HBA not ready\n", sc->sc_dv.dv_xname);
			return (-1);
		}
	}

	/*
	 * Issue the read-config command and wait for the data to appear.
	 *
	 * Apparently certain firmware revisions won't DMA later on if we
	 * request the config data using PIO, but it makes it a lot easier
	 * as no DMA setup is required.
	 */
	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
	memset(ec, 0, sizeof(*ec));
	i = (offsetof(struct eata_cfg, ec_cfglen) +
	    sizeof(ec->ec_cfglen)) >> 1;
	p = (u_int16_t *)ec;

	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
		printf("%s: cfg data didn't appear (hba status:%02x)\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/* Read everything up to and including the length field. */
	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Clip the reported length to what the structure can hold. */
	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
	    - offsetof(struct eata_cfg, ec_cfglen)
	    - sizeof(ec->ec_cfglen)))
		i = sizeof(struct eata_cfg)
		    - offsetof(struct eata_cfg, ec_cfglen)
		    - sizeof(ec->ec_cfglen);

	j = i + offsetof(struct eata_cfg, ec_cfglen) +
	    sizeof(ec->ec_cfglen);
	i >>= 1;

	/* Read the remainder of the configuration data. */
	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		(void)bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Defaults for older firmware... */
	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;

	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
		printf("%s: HBA error\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
		printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
		printf("%s: ec_hba field invalid\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
		printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	return (0);
}

/*
 * Our `shutdownhook' to cleanly shut down the HBA.  The HBA must flush
 * all data from its cache and mark array groups as clean.
 *
 * XXX This doesn't always work (i.e., the HBA may still be flushing after
 * we tell root that it's safe to power off).
 */
static void
dpt_shutdown(void *cookie __unused)
{
	extern struct cfdriver dpt_cd;
	struct dpt_softc *sc;
	int i;

	printf("shutting down dpt devices...");

	for (i = 0; i < dpt_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&dpt_cd, i)) == NULL)
			continue;
		dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_POWEROFF_WARN);
	}

	delay(10000*1000);
	printf(" done\n");
}

/*
 * Send an EATA command to the HBA.
 */
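/*
 * The doorbell protocol: wait for HA_AUX_BUSY to clear, write the
 * physical address of the CCB (or 0 for an immediate command) one byte
 * at a time into the four HA_DMA_BASE registers, then write the command
 * code to HA_COMMAND.  Immediate commands take a sub-code via HA_ICMD
 * before the command code is written.
 */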
static int
dpt_cmd(struct dpt_softc *sc, struct dpt_ccb *ccb, int eatacmd, int icmd)
{
	u_int32_t pa;
	int i, s;

	s = splbio();

	for (i = 20000; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == 0) {
		splx(s);
		return (-1);
	}

	pa = (ccb != NULL ? ccb->ccb_ccbpa : 0);
	dpt_outb(sc, HA_DMA_BASE + 0, (pa      ) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 1, (pa >>  8) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 2, (pa >> 16) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 3, (pa >> 24) & 0xff);

	if (eatacmd == CP_IMMEDIATE)
		dpt_outb(sc, HA_ICMD, icmd);

	dpt_outb(sc, HA_COMMAND, eatacmd);

	splx(s);
	return (0);
}

/*
 * Wait for the HBA status register to reach a specific state.
 */
static int
dpt_wait(struct dpt_softc *sc, u_int8_t mask, u_int8_t state, int ms)
{

	for (ms *= 10; ms != 0; ms--) {
		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
			return (0);
		DELAY(100);
	}

	return (-1);
}

/*
 * Spin waiting for a command to finish.  The timeout value from the CCB
 * is used.  The CCB must be marked with CCB_PRIVATE, otherwise it will
 * be recycled before we get a look at it.
 */
static int
dpt_ccb_poll(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	int i, s;

#ifdef DEBUG
	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
		panic("dpt_ccb_poll: called for non-CCB_PRIVATE request");
#endif

	s = splbio();

	if ((ccb->ccb_flg & CCB_INTR) != 0) {
		splx(s);
		return (0);
	}

	for (i = ccb->ccb_timeout * 20; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
			dpt_intr(sc);
		if ((ccb->ccb_flg & CCB_INTR) != 0)
			break;
		DELAY(50);
	}

	splx(s);
	return (i == 0);
}

/*
 * We have a command which has been processed by the HBA, so now we look to
 * see how the operation went.  CCBs marked CCB_PRIVATE are not passed here
 * by dpt_intr().
 */
static void
dpt_ccb_done(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;

	xs = ccb->ccb_xs;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("dpt_ccb_done\n"));

	/*
	 * If we were a data transfer, unload the map that described the
	 * data buffer.
	 */
	if (xs->datalen != 0)
		dpt_ccb_unmap(sc, ccb);

	if (xs->error == XS_NOERROR) {
		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
			switch (ccb->ccb_hba_status) {
			case SP_HBA_ERROR_SEL_TO:
				xs->error = XS_SELTIMEOUT;
				break;
			case SP_HBA_ERROR_RESET:
				xs->error = XS_RESET;
				break;
			default:
				printf("%s: HBA status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_hba_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else if (ccb->ccb_scsi_status != SCSI_OK) {
			switch (ccb->ccb_scsi_status) {
			case SCSI_CHECK:
				memcpy(&xs->sense.scsi_sense, &ccb->ccb_sense,
				    sizeof(xs->sense.scsi_sense));
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
			case SCSI_QUEUE_FULL:
				xs->error = XS_BUSY;
				break;
			default:
				scsipi_printaddr(xs->xs_periph);
				printf("SCSI status %x\n",
				    ccb->ccb_scsi_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			xs->resid = 0;

		xs->status = ccb->ccb_scsi_status;
	}

	/* Free up the CCB and mark the command as done. */
	dpt_ccb_free(sc, ccb);
	scsipi_done(xs);
}

/*
 * The specified CCB has timed out; abort it.
 */
static void
dpt_ccb_abort(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	int s;

	xs = ccb->ccb_xs;
	periph = xs->xs_periph;

	scsipi_printaddr(periph);
	printf("timed out (status:%02x aux status:%02x)",
	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));

	s = splbio();

	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
		/* Abort timed out, reset the HBA */
		printf(" AGAIN, resetting HBA\n");
		dpt_outb(sc, HA_COMMAND, CP_RESET);
		DELAY(750000);
	} else {
		/* Abort the operation that has timed out */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
		ccb->ccb_flg |= CCB_ABORT;
		/* Start the abort */
		if (dpt_cmd(sc, ccb, CP_IMMEDIATE, CPI_SPEC_ABORT))
			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
	}

	splx(s);
}

/*
 * Map a data transfer.
 */
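/*
 * On success the EATA CP's data pointers are filled in: directly for a
 * single DMA segment, or through the CCB's scatter/gather list when the
 * transfer spans several segments.  On failure the xfer is completed
 * with an error and -1 is returned.
 */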
static int
dpt_ccb_map(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;
	bus_dmamap_t xfer;
	bus_dma_segment_t *ds;
	struct eata_sg *sg;
	struct eata_cp *cp;
	int rv, i;

	xs = ccb->ccb_xs;
	xfer = ccb->ccb_dmamap_xfer;
	cp = &ccb->ccb_eata_cp;

	rv = bus_dmamap_load(sc->sc_dmat, xfer, xs->data, xs->datalen, NULL,
	    ((xs->xs_control & XS_CTL_NOSLEEP) != 0 ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
	    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));

	switch (rv) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
		printf("%s: error %d loading map\n", sc->sc_dv.dv_xname, rv);
		break;
	}

	if (xs->error != XS_NOERROR) {
		dpt_ccb_free(sc, ccb);
		scsipi_done(xs);
		return (-1);
	}

	bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	/* Don't bother using scatter/gather for just 1 seg */
	if (xfer->dm_nsegs == 1) {
		cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
		cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
	} else {
		/*
		 * Load the hardware scatter/gather map with
		 * the contents of the DMA map.
		 */
		sg = ccb->ccb_sg;
		ds = xfer->dm_segs;
		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
			sg->sg_addr = htobe32(ds->ds_addr);
			sg->sg_len = htobe32(ds->ds_len);
		}
		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
		    sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
		cp->cp_ctl0 |= CP_C0_SCATTER;
	}

	return (0);
}

/*
 * Unmap a transfer.
 */
static void
dpt_ccb_unmap(struct dpt_softc *sc, struct dpt_ccb *ccb)
{

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_dmamap_xfer->dm_mapsize,
	    (ccb->ccb_eata_cp.cp_ctl0 & CP_C0_DATA_IN) != 0 ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
static void
dpt_minphys(struct buf *bp)
{

	if (bp->b_bcount > DPT_MAX_XFER)
		bp->b_bcount = DPT_MAX_XFER;
	minphys(bp);
}

/*
 * Start a SCSI command.
 */
static void
dpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		   void *arg)
{
	struct dpt_softc *sc;
	struct scsipi_xfer *xs;
	int flags;
	struct scsipi_periph *periph;
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	sc = (struct dpt_softc *)chan->chan_adapter->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

#ifdef DIAGNOSTIC
		/* Cmds must be no more than 12 bytes for us. */
		if (xs->cmdlen > 12) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif
		/*
		 * XXX We can't reset devices just yet.  Apparently some
		 * older firmware revisions don't even support it.
		 */
		if ((flags & XS_CTL_RESET) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}

		/*
		 * Get a CCB and fill it.
		 */
		ccb = dpt_ccb_alloc(sc);
		ccb->ccb_xs = xs;
		ccb->ccb_timeout = xs->timeout;

		cp = &ccb->ccb_eata_cp;
		memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
		cp->cp_ccbid = ccb->ccb_id;
		cp->cp_senselen = sizeof(ccb->ccb_sense);
		cp->cp_stataddr = htobe32(sc->sc_stppa);
		cp->cp_ctl0 = CP_C0_AUTO_SENSE;
		cp->cp_ctl1 = 0;
		cp->cp_ctl2 = 0;
		cp->cp_ctl3 = periph->periph_target << CP_C3_ID_SHIFT;
		cp->cp_ctl3 |= chan->chan_channel << CP_C3_CHANNEL_SHIFT;
		cp->cp_ctl4 = periph->periph_lun << CP_C4_LUN_SHIFT;
		cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;

		if ((flags & XS_CTL_DATA_IN) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_IN;
		if ((flags & XS_CTL_DATA_OUT) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_OUT;
		if (sc->sc_hbaid[chan->chan_channel] == periph->periph_target)
			cp->cp_ctl0 |= CP_C0_INTERPRET;

		/* Synchronous xfers mustn't write back through the cache. */
		if (xs->bp != NULL)
			if ((xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
				cp->cp_ctl2 |= CP_C2_NO_CACHE;

		cp->cp_senseaddr =
		    htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));

		if (xs->datalen != 0) {
			if (dpt_ccb_map(sc, ccb))
				break;
		} else {
			cp->cp_dataaddr = 0;
			cp->cp_datalen = 0;
		}

		/* Sync up CCB and status packet. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);

		/*
		 * Start the command.
		 */
		if ((xs->xs_control & XS_CTL_POLL) != 0)
			ccb->ccb_flg |= CCB_PRIVATE;

		if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0)) {
			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			if (xs->datalen != 0)
				dpt_ccb_unmap(sc, ccb);
			dpt_ccb_free(sc, ccb);
			break;
		}

		if ((xs->xs_control & XS_CTL_POLL) == 0)
			break;

		if (dpt_ccb_poll(sc, ccb)) {
			dpt_ccb_abort(sc, ccb);
			/* Wait for abort to complete... */
			if (dpt_ccb_poll(sc, ccb))
				dpt_ccb_abort(sc, ccb);
		}

		dpt_ccb_done(sc, ccb);
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported, since we allocate the maximum number of
		 * CCBs up front.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * This will be handled by the HBA itself, and we can't
		 * modify that (ditto for tagged queueing).
		 */
		break;
	}
}

/*
 * Get inquiry data from the adapter.
 */
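/*
 * The inquiry data is DMAed into the scratch area; the pointer handed
 * back through `ei' points into that area and is only valid until the
 * scratch space is next reused.
 */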
static void
dpt_hba_inquire(struct dpt_softc *sc, struct eata_inquiry_data **ei)
{
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	*ei = (struct eata_inquiry_data *)sc->sc_scr;

	/* Get a CCB and mark as private */
	ccb = dpt_ccb_alloc(sc);
	ccb->ccb_flg |= CCB_PRIVATE;
	ccb->ccb_timeout = 200;

	/* Put all the arguments into the CCB. */
	cp = &ccb->ccb_eata_cp;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = 0;
	cp->cp_stataddr = htobe32(sc->sc_stppa);
	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
	cp->cp_ctl1 = 0;
	cp->cp_ctl2 = 0;
	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;

	/* Put together the SCSI inquiry command. */
	memset(&cp->cp_cdb_cmd, 0, 12);
	cp->cp_cdb_cmd = INQUIRY;
	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);

	/* Sync up CCB, status packet and scratch area. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);

	/* Start the command and poll on completion. */
	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);

	if (dpt_ccb_poll(sc, ccb))
		panic("%s: inquiry timed out", sc->sc_dv.dv_xname);

	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
	    ccb->ccb_scsi_status != SCSI_OK)
		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
		    sc->sc_dv.dv_xname, ccb->ccb_hba_status,
		    ccb->ccb_scsi_status);

	/* Sync up the DMA map and free the CCB before returning. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
	dpt_ccb_free(sc, ccb);
}

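/*
 * Open the control device.  Succeeds if the unit exists; all real work
 * happens in dptioctl().
 */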
int
dptopen(dev_t dev, int flag __unused, int mode __unused, struct lwp *l __unused)
{

	if (device_lookup(&dpt_cd, minor(dev)) == NULL)
		return (ENXIO);

	return (0);
}

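/*
 * Handle control device ioctls: the driver signature, controller and
 * system information, blink-LED state, and user-level EATA commands.
 */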
int
dptioctl(dev_t dev, u_long cmd, caddr_t data, int flag __unused, struct lwp *l)
{
	struct dpt_softc *sc;
	int rv;

	sc = device_lookup(&dpt_cd, minor(dev));

	switch (cmd & 0xffff) {
	case DPT_SIGNATURE:
		memcpy(data, &dpt_sig, min(IOCPARM_LEN(cmd), sizeof(dpt_sig)));
		break;

	case DPT_CTRLINFO:
		dpt_ctlrinfo(sc, (struct dpt_eata_ctlrinfo *)data);
		break;

	case DPT_SYSINFO:
		dpt_sysinfo(sc, (struct dpt_sysinfo *)data);
		break;

	case DPT_BLINKLED:
		/*
		 * XXX Don't know how to get this from EATA boards.  I think
		 * it involves waiting for a "DPT" sequence from HA_ERROR
		 * and then reading one of the HA_ICMD registers.
		 */
		*(int *)data = 0;
		break;

	case DPT_EATAUSRCMD:
		if (securelevel > 1)
			return (EPERM);

		if (IOCPARM_LEN(cmd) < sizeof(struct eata_ucp)) {
			DPRINTF(("%s: ucp %lu vs %lu bytes\n",
			    sc->sc_dv.dv_xname, IOCPARM_LEN(cmd),
			    (unsigned long int)sizeof(struct eata_ucp)));
			return (EINVAL);
		}

		if (sc->sc_uactive++)
			tsleep(&sc->sc_uactive, PRIBIO, "dptslp", 0);

		rv = dpt_passthrough(sc, (struct eata_ucp *)data, l);

		sc->sc_uactive--;
		wakeup_one(&sc->sc_uactive);
		return (rv);

	default:
		DPRINTF(("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd));
		return (ENOTTY);
	}

	return (0);
}

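/*
 * Return EATA controller information for the DPT_CTRLINFO ioctl.
 */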
void
dpt_ctlrinfo(struct dpt_softc *sc, struct dpt_eata_ctlrinfo *info)
{

	memset(info, 0, sizeof(*info));
	info->id = sc->sc_hbaid[0];
	info->vect = sc->sc_isairq;
	info->base = sc->sc_isaport;
	info->qdepth = sc->sc_nccbs;
	info->sgsize = DPT_SG_SIZE * sizeof(struct eata_sg);
	info->heads = 16;
	info->sectors = 63;
	info->do_drive32 = 1;
	info->primary = 1;
	info->cpLength = sizeof(struct eata_cp);
	info->spLength = sizeof(struct eata_sp);
	info->drqNum = sc->sc_isadrq;
}

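/*
 * Return system information for the DPT_SYSINFO ioctl.  On i386 this
 * includes the drive types and memory sizes read from CMOS.
 */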
void
dpt_sysinfo(struct dpt_softc *sc, struct dpt_sysinfo *info)
{
#ifdef i386
	int i, j;
#endif

	memset(info, 0, sizeof(*info));

#ifdef i386
	/*
	 * Fetch the IDE drive types from CMOS.  Register 0x12 holds
	 * drive 0's type in the high nibble and drive 1's in the low
	 * nibble; a nibble of 0x0f means the real type lives in the
	 * extended registers 0x19/0x1a.
	 */
	outb(0x70, 0x12);
	i = inb(0x71);
	j = i >> 4;
	if (j == 0x0f) {
		outb(0x70, 0x19);
		j = inb(0x71);
	}
	info->drive0CMOS = j;

	j = i & 0x0f;
	if (j == 0x0f) {
		outb(0x70, 0x1a);
		j = inb(0x71);
	}
	info->drive1CMOS = j;
	info->processorFamily = dpt_sig.dsProcessorFamily;

	/*
	 * Get the conventional memory size from CMOS.
	 */
	outb(0x70, 0x16);
	j = inb(0x71);
	j <<= 8;
	outb(0x70, 0x15);
	j |= inb(0x71);
	info->conventionalMemSize = j;

	/*
	 * Get the extended memory size from CMOS.
	 */
	outb(0x70, 0x31);
	j = inb(0x71);
	j <<= 8;
	outb(0x70, 0x30);
	j |= inb(0x71);
	info->extendedMemSize = j;

	switch (cpu_class) {
	case CPUCLASS_386:
		info->processorType = PROC_386;
		break;
	case CPUCLASS_486:
		info->processorType = PROC_486;
		break;
	case CPUCLASS_586:
		info->processorType = PROC_PENTIUM;
		break;
	case CPUCLASS_686:
	default:
		info->processorType = PROC_SEXIUM;
		break;
	}

	info->flags = SI_CMOS_Valid | SI_BusTypeValid |
	    SI_MemorySizeValid | SI_NO_SmartROM;
#else
	info->flags = SI_BusTypeValid | SI_NO_SmartROM;
#endif

	info->busType = sc->sc_bustype;
}

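/*
 * Execute a user-supplied EATA command packet (DPT_EATAUSRCMD ioctl).
 * The user's CP is copied into a private CCB and any data transfer is
 * mapped through the CCB's scatter/gather list; the caller then sleeps
 * until dpt_intr() wakes it, after which status and sense data are
 * copied back out to userland.
 */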
int
dpt_passthrough(struct dpt_softc *sc, struct eata_ucp *ucp, struct lwp *l)
{
	struct dpt_ccb *ccb;
	struct eata_sp sp;
	struct eata_cp *cp;
	struct eata_sg *sg;
	bus_dmamap_t xfer = 0;		/* XXX: gcc */
	bus_dma_segment_t *ds;
	int datain = 0, s, rv = 0, i, uslen;	/* XXX: gcc */

	/*
	 * Get a CCB and fill it.
	 */
	ccb = dpt_ccb_alloc(sc);
	ccb->ccb_flg |= CCB_PRIVATE | CCB_WAIT;
	ccb->ccb_timeout = 0;
	ccb->ccb_savesp = &sp;

	cp = &ccb->ccb_eata_cp;
	memcpy(cp, ucp->ucp_cp, sizeof(ucp->ucp_cp));
	uslen = cp->cp_senselen;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
	cp->cp_stataddr = htobe32(sc->sc_stppa);

	/*
	 * Map data transfers.
	 */
	if (ucp->ucp_dataaddr && ucp->ucp_datalen) {
		xfer = ccb->ccb_dmamap_xfer;
		datain = ((cp->cp_ctl0 & CP_C0_DATA_IN) != 0);

		if (ucp->ucp_datalen > DPT_MAX_XFER) {
			DPRINTF(("%s: xfer too big\n", sc->sc_dv.dv_xname));
			dpt_ccb_free(sc, ccb);
			return (EFBIG);
		}
		rv = bus_dmamap_load(sc->sc_dmat, xfer,
		    ucp->ucp_dataaddr, ucp->ucp_datalen, l->l_proc,
		    BUS_DMA_WAITOK | BUS_DMA_STREAMING |
		    (datain ? BUS_DMA_READ : BUS_DMA_WRITE));
		if (rv != 0) {
			DPRINTF(("%s: map failed; %d\n", sc->sc_dv.dv_xname,
			    rv));
			dpt_ccb_free(sc, ccb);
			return (rv);
		}

		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
		    (datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));

		sg = ccb->ccb_sg;
		ds = xfer->dm_segs;
		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
			sg->sg_addr = htobe32(ds->ds_addr);
			sg->sg_len = htobe32(ds->ds_len);
		}
		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
		    sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
		cp->cp_ctl0 |= CP_C0_SCATTER;
	} else {
		cp->cp_dataaddr = 0;
		cp->cp_datalen = 0;
	}

	/*
	 * Start the command and sleep on completion.
	 */
	PHOLD(curlwp);	/* XXXJRT curlwp */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	s = splbio();
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
	tsleep(ccb, PWAIT, "dptucmd", 0);
	splx(s);
	PRELE(curlwp);	/* XXXJRT curlwp */

	/*
	 * Sync up the DMA map and copy out results.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);

	if (cp->cp_datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
		    (datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(sc->sc_dmat, xfer);
	}

	if (ucp->ucp_stataddr != NULL) {
		rv = copyout(&sp, ucp->ucp_stataddr, sizeof(sp));
		if (rv != 0) {
			DPRINTF(("%s: sp copyout() failed\n",
			    sc->sc_dv.dv_xname));
		}
	}
	if (rv == 0 && ucp->ucp_senseaddr != NULL) {
		i = min(uslen, sizeof(ccb->ccb_sense));
		rv = copyout(&ccb->ccb_sense, ucp->ucp_senseaddr, i);
		if (rv != 0) {
			DPRINTF(("%s: sense copyout() failed\n",
			    sc->sc_dv.dv_xname));
		}
	}

	ucp->ucp_hstatus = (u_int8_t)ccb->ccb_hba_status;
	ucp->ucp_tstatus = (u_int8_t)ccb->ccb_scsi_status;
	dpt_ccb_free(sc, ccb);
	return (rv);
}