1 /* $NetBSD: xy.c,v 1.25.2.1 2000/07/22 21:03:56 pk Exp $ */
2
3 /*
4 *
5 * Copyright (c) 1995 Charles D. Cranor
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Charles D. Cranor.
19 * 4. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 *
36 * x y . c x y l o g i c s 4 5 0 / 4 5 1 s m d d r i v e r
37 *
38 * author: Chuck Cranor <chuck (at) ccrc.wustl.edu>
39 * started: 14-Sep-95
40 * references: [1] Xylogics Model 753 User's Manual
41 * part number: 166-753-001, Revision B, May 21, 1988.
42 * "Your Partner For Performance"
43 * [2] other NetBSD disk device drivers
44 * [3] Xylogics Model 450 User's Manual
45 * part number: 166-017-001, Revision B, 1983.
46 * [4] Addendum to Xylogics Model 450 Disk Controller User's
47 * Manual, Jan. 1985.
48 * [5] The 451 Controller, Rev. B3, September 2, 1986.
49 * [6] David Jones <dej (at) achilles.net>'s unfinished 450/451 driver
50 *
51 */
52
53 #undef XYC_DEBUG /* full debug */
54 #undef XYC_DIAG /* extra sanity checks */
55 #if defined(DIAGNOSTIC) && !defined(XYC_DIAG)
56 #define XYC_DIAG /* link in with master DIAG option */
57 #endif
58
59 #include <sys/param.h>
60 #include <sys/proc.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/file.h>
64 #include <sys/stat.h>
65 #include <sys/ioctl.h>
66 #include <sys/buf.h>
67 #include <sys/uio.h>
68 #include <sys/malloc.h>
69 #include <sys/device.h>
70 #include <sys/disklabel.h>
71 #include <sys/disk.h>
72 #include <sys/syslog.h>
73 #include <sys/dkbad.h>
74 #include <sys/conf.h>
75
76 #include <vm/vm.h>
77 #include <vm/vm_kern.h>
78
79 #include <machine/bus.h>
80 #include <machine/intr.h>
81
82 #if defined(__sparc__) || defined(sun3)
83 #include <dev/sun/disklabel.h>
84 #endif
85
86 #include <dev/vme/vmereg.h>
87 #include <dev/vme/vmevar.h>
88
89 #include <dev/vme/xyreg.h>
90 #include <dev/vme/xyvar.h>
91 #include <dev/vme/xio.h>
92
93 #include "locators.h"
94
95 /*
96 * macros
97 */
98
99 /*
100 * XYC_GO: start iopb ADDR (DVMA addr in a u_long) on XYC
101 */
102 #define XYC_GO(XYC, ADDR) { \
103 (XYC)->xyc_addr_lo = ((ADDR) & 0xff); \
104 (ADDR) = ((ADDR) >> 8); \
105 (XYC)->xyc_addr_hi = ((ADDR) & 0xff); \
106 (ADDR) = ((ADDR) >> 8); \
107 (XYC)->xyc_reloc_lo = ((ADDR) & 0xff); \
108 (ADDR) = ((ADDR) >> 8); \
109 (XYC)->xyc_reloc_hi = (ADDR); \
110 (XYC)->xyc_csr = XYC_GBSY; /* go! */ \
111 }
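/*
 * illustrative example (hypothetical DVMA address, not taken from a real
 * configuration): with ADDR = 0x00fe4000 the macro writes addr_lo = 0x00,
 * addr_hi = 0x40, reloc_lo = 0xfe and reloc_hi = 0x00, then sets XYC_GBSY
 * to start the controller.  note that the macro shifts ADDR in place, so
 * the caller's copy of the address is consumed.
 */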
112
113 /*
114 * XYC_DONE: don't need IORQ, get error code and free (done after xyc_cmd)
115 */
116
117 #define XYC_DONE(SC,ER) { \
118 if ((ER) == XY_ERR_AOK) { \
119 (ER) = (SC)->ciorq->errno; \
120 (SC)->ciorq->mode = XY_SUB_FREE; \
121 wakeup((SC)->ciorq); \
122 } \
123 }
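/*
 * in other words: if the submit itself succeeded (ER == XY_ERR_AOK), pick up
 * the real completion code from the control iorq, mark that iorq free again,
 * and wake anyone sleeping on it in xyc_cmd; otherwise leave ER (the submit
 * error) alone and don't touch the control iorq.
 */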
124
125 /*
126 * XYC_ADVANCE: advance iorq's pointers by a number of sectors
127 */
128
129 #define XYC_ADVANCE(IORQ, N) { \
130 if (N) { \
131 (IORQ)->sectcnt -= (N); \
132 (IORQ)->blockno += (N); \
133 (IORQ)->dbuf += ((N)*XYFM_BPS); \
134 } \
135 }
136
137 /*
138 * note - addresses you can sleep on:
139 * [1] & of xy_softc's "state" (waiting for a chance to attach a drive)
140 * [2] & an iorq (waiting for an XY_SUB_WAIT iorq to finish)
141 */
142
143
144 /*
145 * function prototypes
146 * "xyc_*" functions are internal, all others are external interfaces
147 */
148
149 extern int pil_to_vme[]; /* from obio.c */
150
151 /* internals */
152 struct xy_iopb *xyc_chain __P((struct xyc_softc *, struct xy_iorq *));
153 int xyc_cmd __P((struct xyc_softc *, int, int, int, int, int, char *, int));
154 char *xyc_e2str __P((int));
155 int xyc_entoact __P((int));
156 int xyc_error __P((struct xyc_softc *, struct xy_iorq *,
157 struct xy_iopb *, int));
158 int xyc_ioctlcmd __P((struct xy_softc *, dev_t dev, struct xd_iocmd *));
159 void xyc_perror __P((struct xy_iorq *, struct xy_iopb *, int));
160 int xyc_piodriver __P((struct xyc_softc *, struct xy_iorq *));
161 int xyc_remove_iorq __P((struct xyc_softc *));
162 int xyc_reset __P((struct xyc_softc *, int, struct xy_iorq *, int,
163 struct xy_softc *));
164 inline void xyc_rqinit __P((struct xy_iorq *, struct xyc_softc *,
165 struct xy_softc *, int, u_long, int,
166 caddr_t, struct buf *));
167 void xyc_rqtopb __P((struct xy_iorq *, struct xy_iopb *, int, int));
168 void xyc_start __P((struct xyc_softc *, struct xy_iorq *));
169 int xyc_startbuf __P((struct xyc_softc *, struct xy_softc *, struct buf *));
170 int xyc_submit_iorq __P((struct xyc_softc *, struct xy_iorq *, int));
171 void xyc_tick __P((void *));
172 int xyc_unbusy __P((struct xyc *, int));
173 void xyc_xyreset __P((struct xyc_softc *, struct xy_softc *));
174 int xy_dmamem_alloc(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
175 int *, bus_size_t, caddr_t *, bus_addr_t *);
176 void xy_dmamem_free(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
177 int, bus_size_t, caddr_t);
178
179 /* machine interrupt hook */
180 int xycintr __P((void *));
181
182 /* autoconf */
183 int xycmatch __P((struct device *, struct cfdata *, void *));
184 void xycattach __P((struct device *, struct device *, void *));
185 int xymatch __P((struct device *, struct cfdata *, void *));
186 void xyattach __P((struct device *, struct device *, void *));
187 static int xyc_probe __P((void *, bus_space_tag_t, bus_space_handle_t));
188
189 static void xydummystrat __P((struct buf *));
190 int xygetdisklabel __P((struct xy_softc *, void *));
191
192 bdev_decl(xy);
193 cdev_decl(xy);
194
195 /*
196 * cfattach's: device driver interface to autoconfig
197 */
198
199 struct cfattach xyc_ca = {
200 sizeof(struct xyc_softc), xycmatch, xycattach
201 };
202
203 struct cfattach xy_ca = {
204 sizeof(struct xy_softc), xymatch, xyattach
205 };
206
207 extern struct cfdriver xy_cd;
208
209 struct xyc_attach_args { /* this is the "aux" args to xyattach */
210 int driveno; /* unit number */
211 int fullmode; /* submit mode */
212 int booting; /* are we booting or not? */
213 };
214
215 /*
216 * dkdriver
217 */
218
219 struct dkdriver xydkdriver = { xystrategy };
220
221 /*
222 * start: disk label fix code (XXX)
223 */
224
225 static void *xy_labeldata;
226
227 static void
228 xydummystrat(bp)
229 struct buf *bp;
230 {
231 if (bp->b_bcount != XYFM_BPS)
232 panic("xydummystrat");
233 bcopy(xy_labeldata, bp->b_data, XYFM_BPS);
234 bp->b_flags |= B_DONE;
235 bp->b_flags &= ~B_BUSY;
236 }
237
238 int
239 xygetdisklabel(xy, b)
240 struct xy_softc *xy;
241 void *b;
242 {
243 char *err;
244 #if defined(__sparc__) || defined(sun3)
245 struct sun_disklabel *sdl;
246 #endif
247
248 /* We already have the label data in `b'; setup for dummy strategy */
249 xy_labeldata = b;
250
251 /* Required parameter for readdisklabel() */
252 xy->sc_dk.dk_label->d_secsize = XYFM_BPS;
253
254 err = readdisklabel(MAKEDISKDEV(0, xy->sc_dev.dv_unit, RAW_PART),
255 xydummystrat,
256 xy->sc_dk.dk_label, xy->sc_dk.dk_cpulabel);
257 if (err) {
258 printf("%s: %s\n", xy->sc_dev.dv_xname, err);
259 return(XY_ERR_FAIL);
260 }
261
262 #if defined(__sparc__) || defined(sun3)
263 /* Ok, we have the label; fill in `pcyl' if there's SunOS magic */
264 sdl = (struct sun_disklabel *)xy->sc_dk.dk_cpulabel->cd_block;
265 if (sdl->sl_magic == SUN_DKMAGIC) {
266 xy->pcyl = sdl->sl_pcylinders;
267 } else
268 #endif
269 {
270 printf("%s: WARNING: no `pcyl' in disk label.\n",
271 xy->sc_dev.dv_xname);
272 xy->pcyl = xy->sc_dk.dk_label->d_ncylinders +
273 xy->sc_dk.dk_label->d_acylinders;
274 printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n",
275 xy->sc_dev.dv_xname, xy->pcyl);
276 }
277
278 xy->ncyl = xy->sc_dk.dk_label->d_ncylinders;
279 xy->acyl = xy->sc_dk.dk_label->d_acylinders;
280 xy->nhead = xy->sc_dk.dk_label->d_ntracks;
281 xy->nsect = xy->sc_dk.dk_label->d_nsectors;
282 xy->sectpercyl = xy->nhead * xy->nsect;
283 xy->sc_dk.dk_label->d_secsize = XYFM_BPS; /* not handled by
284 * sun->bsd */
285 return(XY_ERR_AOK);
286 }
287
288 /*
289 * end: disk label fix code (XXX)
290 */
291
292 /*
293 * Shorthand for allocating, mapping and loading a DMA buffer
294 */
295 int
296 xy_dmamem_alloc(tag, map, seg, nsegp, len, kvap, dmap)
297 bus_dma_tag_t tag;
298 bus_dmamap_t map;
299 bus_dma_segment_t *seg;
300 int *nsegp;
301 bus_size_t len;
302 caddr_t *kvap;
303 bus_addr_t *dmap;
304 {
305 int nseg;
306 int error;
307
308 if ((error = bus_dmamem_alloc(tag, len, 0, 0,
309 seg, 1, &nseg, BUS_DMA_NOWAIT)) != 0) {
310 return (error);
311 }
312
313 if ((error = bus_dmamap_load_raw(tag, map,
314 seg, nseg, len, BUS_DMA_NOWAIT)) != 0) {
315 bus_dmamem_free(tag, seg, nseg);
316 return (error);
317 }
318
319 if ((error = bus_dmamem_map(tag, seg, nseg,
320 len, kvap,
321 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
322 bus_dmamap_unload(tag, map);
323 bus_dmamem_free(tag, seg, nseg);
324 return (error);
325 }
326
327 *dmap = map->dm_segs[0].ds_addr;
328 *nsegp = nseg;
329 return (0);
330 }
331
332 void
333 xy_dmamem_free(tag, map, seg, nseg, len, kva)
334 bus_dma_tag_t tag;
335 bus_dmamap_t map;
336 bus_dma_segment_t *seg;
337 int nseg;
338 bus_size_t len;
339 caddr_t kva;
340 {
341
342 bus_dmamap_unload(tag, map);
343 bus_dmamem_unmap(tag, kva, len);
344 bus_dmamem_free(tag, seg, nseg);
345 }
346
347
348 /*
349 * a u t o c o n f i g f u n c t i o n s
350 */
351
352 /*
353 * xycmatch: determine if xyc is present or not. we do a
354 * soft reset to detect the xyc.
355 */
356 int
357 xyc_probe(arg, tag, handle)
358 void *arg;
359 bus_space_tag_t tag;
360 bus_space_handle_t handle;
361 {
362 struct xyc *xyc = (void *)handle; /* XXX */
363
364 return ((xyc_unbusy(xyc, XYC_RESETUSEC) != XY_ERR_FAIL) ? 0 : EIO);
365 }
366
367 int xycmatch(parent, cf, aux)
368 struct device *parent;
369 struct cfdata *cf;
370 void *aux;
371 {
372 struct vme_attach_args *va = aux;
373 vme_chipset_tag_t ct = va->va_vct;
374 vme_am_t mod;
375 int error;
376
377 mod = VME_AM_A16 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;
378 if (vme_space_alloc(ct, va->r[0].offset, sizeof(struct xyc), mod))
379 return (0);
380
381 error = vme_probe(ct, va->r[0].offset, sizeof(struct xyc),
382 mod, VME_D16, xyc_probe, 0);
383 vme_space_free(va->va_vct, va->r[0].offset, sizeof(struct xyc), mod);
384
385 return (error == 0);
386 }
387
388 /*
389 * xycattach: attach controller
390 */
391 void
392 xycattach(parent, self, aux)
393 struct device *parent, *self;
394 void *aux;
395
396 {
397 struct xyc_softc *xyc = (void *) self;
398 struct vme_attach_args *va = aux;
399 vme_chipset_tag_t ct = va->va_vct;
400 bus_space_tag_t bt;
401 bus_space_handle_t bh;
402 vme_intr_handle_t ih;
403 vme_am_t mod;
404 struct xyc_attach_args xa;
405 int lcv, res, error;
406 bus_dma_segment_t seg;
407 int rseg;
408 vme_mapresc_t resc;
409
410 /* get addressing and intr level stuff from autoconfig and load it
411 * into our xyc_softc. */
412
413 mod = VME_AM_A16 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;
414
415 if (vme_space_alloc(ct, va->r[0].offset, sizeof(struct xyc), mod))
416 panic("xyc: vme alloc");
417
418 if (vme_space_map(ct, va->r[0].offset, sizeof(struct xyc),
419 mod, VME_D16, 0, &bt, &bh, &resc) != 0)
420 panic("xyc: vme_map");
421
422 xyc->xyc = (struct xyc *) bh; /* XXX */
423 xyc->ipl = va->ilevel;
424 xyc->vector = va->ivector;
425 xyc->no_ols = 0; /* XXX should be from config */
426
427 for (lcv = 0; lcv < XYC_MAXDEV; lcv++)
428 xyc->sc_drives[lcv] = (struct xy_softc *) 0;
429
430 /*
431 * allocate and zero buffers
432 * check boundaries of the KVA's ... all IOPBs must reside in
433 * the same 64K region.
434 */
435
436 /* Get DMA handle for misc. transfers */
437 if ((error = vme_dmamap_create(
438 ct, /* VME chip tag */
439 MAXPHYS, /* size */
440 VME_AM_A24, /* address modifier */
441 VME_D16, /* data size */
442 0, /* swap */
443 1, /* nsegments */
444 MAXPHYS, /* maxsegsz */
445 0, /* boundary */
446 BUS_DMA_NOWAIT,
447 &xyc->auxmap)) != 0) {
448
449 printf("%s: DMA buffer map create error %d\n",
450 xyc->sc_dev.dv_xname, error);
451 return;
452 }
453
454 /* Get DMA handle for mapping iorq descriptors */
455 if ((error = vme_dmamap_create(
456 ct, /* VME chip tag */
457 XYC_MAXIOPB * sizeof(struct xy_iopb),
458 VME_AM_A24, /* address modifier */
459 VME_D16, /* data size */
460 0, /* swap */
461 1, /* nsegments */
462 XYC_MAXIOPB * sizeof(struct xy_iopb),
463 64*1024, /* boundary */
464 BUS_DMA_NOWAIT,
465 &xyc->iopmap)) != 0) {
466
467 printf("%s: DMA buffer map create error %d\n",
468 xyc->sc_dev.dv_xname, error);
469 return;
470 }
471
472 /* Get DMA buffer for iorq descriptors */
473 if ((error = xy_dmamem_alloc(xyc->dmatag, xyc->iopmap, &seg, &rseg,
474 XYC_MAXIOPB * sizeof(struct xy_iopb),
475 (caddr_t *)&xyc->iopbase,
476 (bus_addr_t *)&xyc->dvmaiopb)) != 0) {
477 printf("%s: DMA buffer alloc error %d\n",
478 xyc->sc_dev.dv_xname, error);
479 return;
480 }
481
482 bzero(xyc->iopbase, XYC_MAXIOPB * sizeof(struct xy_iopb));
483
484 xyc->reqs = (struct xy_iorq *)
485 malloc(XYC_MAXIOPB * sizeof(struct xy_iorq), M_DEVBUF, M_NOWAIT);
486 if (xyc->reqs == NULL)
487 panic("xyc malloc");
488 bzero(xyc->reqs, XYC_MAXIOPB * sizeof(struct xy_iorq));
489
490 /*
491 * init iorq to iopb pointers, and non-zero fields in the
492 * iopb which never change.
493 */
494
495 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) {
496 xyc->xy_chain[lcv] = NULL;
497 xyc->reqs[lcv].iopb = &xyc->iopbase[lcv];
498 xyc->reqs[lcv].dmaiopb = &xyc->dvmaiopb[lcv];
499 xyc->iopbase[lcv].asr = 1; /* always the same */
500 xyc->iopbase[lcv].eef = 1; /* always the same */
501 xyc->iopbase[lcv].ecm = XY_ECM; /* always the same */
502 xyc->iopbase[lcv].aud = 1; /* always the same */
503 xyc->iopbase[lcv].relo = 1; /* always the same */
504 xyc->iopbase[lcv].thro = XY_THRO;/* always the same */
505
506 if ((error = vme_dmamap_create(
507 ct, /* VME chip tag */
508 MAXPHYS, /* size */
509 VME_AM_A24, /* address modifier */
510 VME_D16, /* data size */
511 0, /* swap */
512 1, /* nsegments */
513 MAXPHYS, /* maxsegsz */
514 0, /* boundary */
515 BUS_DMA_NOWAIT,
516 &xyc->reqs[lcv].dmamap)) != 0) {
517
518 printf("%s: DMA buffer map create error %d\n",
519 xyc->sc_dev.dv_xname, error);
520 return;
521 }
522 }
523 xyc->ciorq = &xyc->reqs[XYC_CTLIOPB]; /* short hand name */
524 xyc->ciopb = &xyc->iopbase[XYC_CTLIOPB]; /* short hand name */
525 xyc->xy_hand = 0;
526
527 /* read controller parameters and ensure we have a 450/451 */
528
529 error = xyc_cmd(xyc, XYCMD_ST, 0, 0, 0, 0, 0, XY_SUB_POLL);
530 res = xyc->ciopb->ctyp;
531 XYC_DONE(xyc, error);
532 if (res != XYCT_450) {
533 if (error)
534 printf(": %s: ", xyc_e2str(error));
535 printf(": doesn't identify as a 450/451\n");
536 return;
537 }
538 printf(": Xylogics 450/451");
539 if (xyc->no_ols)
540 printf(" [OLS disabled]"); /* 450 doesn't overlap seek right */
541 printf("\n");
542 if (error) {
543 printf("%s: error: %s\n", xyc->sc_dev.dv_xname,
544 xyc_e2str(error));
545 return;
546 }
547 if ((xyc->xyc->xyc_csr & XYC_ADRM) == 0) {
548 printf("%s: 24 bit addressing turned off\n",
549 xyc->sc_dev.dv_xname);
550 printf("please set hardware jumpers JM1-JM2=in, JM3-JM4=out\n");
551 printf("to enable 24 bit mode and this driver\n");
552 return;
553 }
554
555 /* link in interrupt with higher level software */
556 vme_intr_map(ct, va->ivector, va->ilevel, &ih);
557 vme_intr_establish(ct, ih, IPL_BIO, xycintr, xyc);
558 evcnt_attach_dynamic(&xyc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
559 xyc->sc_dev.dv_xname, "intr");
560
561 callout_init(&xyc->sc_tick_ch);
562
563 /* now we must look for disks using autoconfig */
564 xa.fullmode = XY_SUB_POLL;
565 xa.booting = 1;
566
567 for (xa.driveno = 0; xa.driveno < XYC_MAXDEV; xa.driveno++)
568 (void) config_found(self, (void *) &xa, NULL);
569
570 /* start the watchdog clock */
571 callout_reset(&xyc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xyc);
572
573 }
574
575 /*
576 * xymatch: probe for disk.
577 *
578 * note: we almost always say disk is present. this allows us to
579 * spin up and configure a disk after the system is booted (we can
580 * call xyattach!).
581 */
582 int
583 xymatch(parent, cf, aux)
584 struct device *parent;
585 struct cfdata *cf;
586 void *aux;
587 {
588 struct xyc_attach_args *xa = aux;
589
590 /* looking for autoconf wildcard or exact match */
591
592 if (cf->cf_loc[XYCCF_DRIVE] != XYCCF_DRIVE_DEFAULT &&
593 cf->cf_loc[XYCCF_DRIVE] != xa->driveno)
594 return 0;
595
596 return 1;
597
598 }
599
600 /*
601 * xyattach: attach a disk. this can be called from autoconf and also
602 * from xyopen/xystrategy.
603 */
604 void
605 xyattach(parent, self, aux)
606 struct device *parent, *self;
607 void *aux;
608
609 {
610 struct xy_softc *xy = (void *) self, *oxy;
611 struct xyc_softc *xyc = (void *) parent;
612 struct xyc_attach_args *xa = aux;
613 int spt, mb, blk, lcv, fmode, s = 0, newstate;
614 struct dkbad *dkb;
615 int rseg, error;
616 bus_dma_segment_t seg;
617 caddr_t dmaddr;
618 caddr_t buf;
619
620 /*
621 * Always re-initialize the disk structure. We want statistics
622 * to start with a clean slate.
623 */
624 bzero(&xy->sc_dk, sizeof(xy->sc_dk));
625 xy->sc_dk.dk_driver = &xydkdriver;
626 xy->sc_dk.dk_name = xy->sc_dev.dv_xname;
627
628 /* if booting, init the xy_softc */
629
630 if (xa->booting) {
631 xy->state = XY_DRIVE_UNKNOWN; /* to start */
632 xy->flags = 0;
633 xy->parent = xyc;
634
635 /* init queue of waiting bufs */
636
637 BUFQ_INIT(&xy->xyq);
638
639 xy->xyrq = &xyc->reqs[xa->driveno];
640
641 }
642 xy->xy_drive = xa->driveno;
643 fmode = xa->fullmode;
644 xyc->sc_drives[xa->driveno] = xy;
645
646 /* if not booting, make sure we are the only process in the attach for
647 * this drive. if locked out, sleep on it. */
648
649 if (!xa->booting) {
650 s = splbio();
651 while (xy->state == XY_DRIVE_ATTACHING) {
652 if (tsleep(&xy->state, PRIBIO, "xyattach", 0)) {
653 splx(s);
654 return;
655 }
656 }
657 printf("%s at %s",
658 xy->sc_dev.dv_xname, xy->parent->sc_dev.dv_xname);
659 }
660
661 /* we now have control */
662 xy->state = XY_DRIVE_ATTACHING;
663 newstate = XY_DRIVE_UNKNOWN;
664
665 buf = NULL;
666 if ((error = xy_dmamem_alloc(xyc->dmatag, xyc->auxmap, &seg, &rseg,
667 XYFM_BPS,
668 (caddr_t *)&buf,
669 (bus_addr_t *)&dmaddr)) != 0) {
670 printf("%s: DMA buffer alloc error %d\n",
671 xyc->sc_dev.dv_xname, error);
672 return;
673 }
674
675 /* first try and reset the drive */
676 error = xyc_cmd(xyc, XYCMD_RST, 0, xy->xy_drive, 0, 0, 0, fmode);
677 XYC_DONE(xyc, error);
678 if (error == XY_ERR_DNRY) {
679 printf(" drive %d: off-line\n", xa->driveno);
680 goto done;
681 }
682 if (error) {
683 printf(": ERROR 0x%02x (%s)\n", error, xyc_e2str(error));
684 goto done;
685 }
686 printf(" drive %d: ready", xa->driveno);
687
688 /*
689 * now set drive parameters (to semi-bogus values) so we can read the
690 * disk label.
691 */
692 xy->pcyl = xy->ncyl = 1;
693 xy->acyl = 0;
694 xy->nhead = 1;
695 xy->nsect = 1;
696 xy->sectpercyl = 1;
697 for (lcv = 0; lcv < 126; lcv++) /* init empty bad144 table */
698 xy->dkb.bt_bad[lcv].bt_cyl =
699 xy->dkb.bt_bad[lcv].bt_trksec = 0xffff;
700
701 /* read disk label */
702 for (xy->drive_type = 0 ; xy->drive_type <= XYC_MAXDT ;
703 xy->drive_type++) {
704 error = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, 0, 1,
705 dmaddr, fmode);
706 XYC_DONE(xyc, error);
707 if (error == XY_ERR_AOK) break;
708 }
709
710 if (error != XY_ERR_AOK) {
711 printf("\n%s: reading disk label failed: %s\n",
712 xy->sc_dev.dv_xname, xyc_e2str(error));
713 goto done;
714 }
715 printf(" (drive type %d)\n", xy->drive_type);
716
717 newstate = XY_DRIVE_NOLABEL;
718
719 xy->hw_spt = spt = 0; /* XXX needed ? */
720 /* Attach the disk: must be before getdisklabel to malloc label */
721 disk_attach(&xy->sc_dk);
722
723 if (xygetdisklabel(xy, buf) != XY_ERR_AOK)
724 goto done;
725
726 /* inform the user of what is up */
727 printf("%s: <%s>, pcyl %d\n", xy->sc_dev.dv_xname,
728 buf, xy->pcyl);
729 mb = xy->ncyl * (xy->nhead * xy->nsect) / (1048576 / XYFM_BPS);
730 printf("%s: %dMB, %d cyl, %d head, %d sec, %d bytes/sec\n",
731 xy->sc_dev.dv_xname, mb, xy->ncyl, xy->nhead, xy->nsect,
732 XYFM_BPS);
733
734 /*
735 * 450/451 stupidity: the drive type is encoded into the format
736 * of the disk. the drive type in the IOPB must match the drive
737 * type in the format, or you will not be able to do I/O to the
738 * disk (you get header not found errors). if you have two drives
739 * of different sizes that have the same drive type in their
740 * formatting then you are out of luck.
741 *
742 * this problem was corrected in the 753/7053.
743 */
744
745 for (lcv = 0 ; lcv < XYC_MAXDEV ; lcv++) {
746 oxy = xyc->sc_drives[lcv];
747 if (oxy == NULL || oxy == xy) continue;
748 if (oxy->drive_type != xy->drive_type) continue;
749 if (xy->nsect != oxy->nsect || xy->pcyl != oxy->pcyl ||
750 xy->nhead != oxy->nhead) {
751 printf("%s: %s and %s must be the same size!\n",
752 xyc->sc_dev.dv_xname, xy->sc_dev.dv_xname,
753 oxy->sc_dev.dv_xname);
754 panic("xy drive size mismatch");
755 }
756 }
757
758
759 /* now set the real drive parameters! */
760
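	/*
	 * blk below is the absolute block number of the last sector on the
	 * drive (cylinder pcyl-1, head nhead-1, sector nsect-1); the SDS
	 * command uses it to set the drive size (see the error message).
	 */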
761 blk = (xy->nsect - 1) +
762 ((xy->nhead - 1) * xy->nsect) +
763 ((xy->pcyl - 1) * xy->nsect * xy->nhead);
764 error = xyc_cmd(xyc, XYCMD_SDS, 0, xy->xy_drive, blk, 0, 0, fmode);
765 XYC_DONE(xyc, error);
766 if (error) {
767 printf("%s: write drive size failed: %s\n",
768 xy->sc_dev.dv_xname, xyc_e2str(error));
769 goto done;
770 }
771 newstate = XY_DRIVE_ONLINE;
772
773 /*
774 * read bad144 table. this table resides on the first sector of the
775 * last track of the disk (i.e. second cyl of "acyl" area).
776 */
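	/*
	 * worked example with hypothetical geometry (not a real drive):
	 * ncyl=800, acyl=2, nhead=8, nsect=32 gives
	 * blk = (800+2-1)*(8*32) + (8-1)*32 = 205056 + 224 = 205280.
	 */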
777
778 blk = (xy->ncyl + xy->acyl - 1) * (xy->nhead * xy->nsect) +
779 /* last cyl */
780 (xy->nhead - 1) * xy->nsect; /* last head */
781 error = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, blk, 1,
782 dmaddr, fmode);
783 XYC_DONE(xyc, error);
784 if (error) {
785 printf("%s: reading bad144 failed: %s\n",
786 xy->sc_dev.dv_xname, xyc_e2str(error));
787 goto done;
788 }
789
790 /* check dkbad for sanity */
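	/*
	 * each bad144 entry gives the cylinder in bt_cyl and packs the head
	 * into the high byte and the sector into the low byte of bt_trksec;
	 * a slot that is all 0xffff (as initialized earlier in this function)
	 * is unused.
	 */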
791 dkb = (struct dkbad *) buf;
792 for (lcv = 0; lcv < 126; lcv++) {
793 if ((dkb->bt_bad[lcv].bt_cyl == 0xffff ||
794 dkb->bt_bad[lcv].bt_cyl == 0) &&
795 dkb->bt_bad[lcv].bt_trksec == 0xffff)
796 continue; /* blank */
797 if (dkb->bt_bad[lcv].bt_cyl >= xy->ncyl)
798 break;
799 if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xy->nhead)
800 break;
801 if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xy->nsect)
802 break;
803 }
804 if (lcv != 126) {
805 printf("%s: warning: invalid bad144 sector!\n",
806 xy->sc_dev.dv_xname);
807 } else {
808 bcopy(buf, &xy->dkb, XYFM_BPS);
809 }
810
811 done:
812 if (buf != NULL) {
813 xy_dmamem_free(xyc->dmatag, xyc->auxmap,
814 &seg, rseg, XYFM_BPS, buf);
815 }
816
817 xy->state = newstate;
818 if (!xa->booting) {
819 wakeup(&xy->state);
820 splx(s);
821 }
822 }
823
824 /*
825 * end of autoconfig functions
826 */
827
828 /*
829 * { b , c } d e v s w f u n c t i o n s
830 */
831
832 /*
833 * xyclose: close device
834 */
835 int
836 xyclose(dev, flag, fmt, p)
837 dev_t dev;
838 int flag, fmt;
839 struct proc *p;
840
841 {
842 struct xy_softc *xy = xy_cd.cd_devs[DISKUNIT(dev)];
843 int part = DISKPART(dev);
844
845 /* clear mask bits */
846
847 switch (fmt) {
848 case S_IFCHR:
849 xy->sc_dk.dk_copenmask &= ~(1 << part);
850 break;
851 case S_IFBLK:
852 xy->sc_dk.dk_bopenmask &= ~(1 << part);
853 break;
854 }
855 xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask;
856
857 return 0;
858 }
859
860 /*
861 * xydump: crash dump system
862 */
863 int
864 xydump(dev, blkno, va, size)
865 dev_t dev;
866 daddr_t blkno;
867 caddr_t va;
868 size_t size;
869 {
870 int unit, part;
871 struct xy_softc *xy;
872
873 unit = DISKUNIT(dev);
874 if (unit >= xy_cd.cd_ndevs)
875 return ENXIO;
876 part = DISKPART(dev);
877
878 xy = xy_cd.cd_devs[unit];
879
880 printf("%s%c: crash dump not supported (yet)\n", xy->sc_dev.dv_xname,
881 'a' + part);
882
883 return ENXIO;
884
885 /* outline: globals: "dumplo" == sector number of partition to start
886 * dump at (convert to physical sector with partition table)
887 * "dumpsize" == size of dump in clicks "physmem" == size of physical
888 * memory (clicks, ctob() to get bytes) (normal case: dumpsize ==
889 * physmem)
890 *
891 * dump a copy of physical memory to the dump device starting at sector
892 * "dumplo" in the swap partition (make sure > 0). map in pages as
893 * we go. use polled I/O.
894 *
895 * XXX how to handle NON_CONTIG? */
896
897 }
898
899 /*
900 * xyioctl: ioctls on XY drives. based on ioctl's of other netbsd disks.
901 */
902 int
903 xyioctl(dev, command, addr, flag, p)
904 dev_t dev;
905 u_long command;
906 caddr_t addr;
907 int flag;
908 struct proc *p;
909
910 {
911 struct xy_softc *xy;
912 struct xd_iocmd *xio;
913 int error, s, unit;
914
915 unit = DISKUNIT(dev);
916
917 if (unit >= xy_cd.cd_ndevs || (xy = xy_cd.cd_devs[unit]) == NULL)
918 return (ENXIO);
919
920 /* switch on ioctl type */
921
922 switch (command) {
923 case DIOCSBAD: /* set bad144 info */
924 if ((flag & FWRITE) == 0)
925 return EBADF;
926 s = splbio();
927 bcopy(addr, &xy->dkb, sizeof(xy->dkb));
928 splx(s);
929 return 0;
930
931 case DIOCGDINFO: /* get disk label */
932 bcopy(xy->sc_dk.dk_label, addr, sizeof(struct disklabel));
933 return 0;
934
935 case DIOCGPART: /* get partition info */
936 ((struct partinfo *) addr)->disklab = xy->sc_dk.dk_label;
937 ((struct partinfo *) addr)->part =
938 &xy->sc_dk.dk_label->d_partitions[DISKPART(dev)];
939 return 0;
940
941 case DIOCSDINFO: /* set disk label */
942 if ((flag & FWRITE) == 0)
943 return EBADF;
944 error = setdisklabel(xy->sc_dk.dk_label,
945 (struct disklabel *) addr, /* xy->sc_dk.dk_openmask : */ 0,
946 xy->sc_dk.dk_cpulabel);
947 if (error == 0) {
948 if (xy->state == XY_DRIVE_NOLABEL)
949 xy->state = XY_DRIVE_ONLINE;
950 }
951 return error;
952
953 case DIOCWLABEL: /* change write status of disk label */
954 if ((flag & FWRITE) == 0)
955 return EBADF;
956 if (*(int *) addr)
957 xy->flags |= XY_WLABEL;
958 else
959 xy->flags &= ~XY_WLABEL;
960 return 0;
961
962 case DIOCWDINFO: /* write disk label */
963 if ((flag & FWRITE) == 0)
964 return EBADF;
965 error = setdisklabel(xy->sc_dk.dk_label,
966 (struct disklabel *) addr, /* xy->sc_dk.dk_openmask : */ 0,
967 xy->sc_dk.dk_cpulabel);
968 if (error == 0) {
969 if (xy->state == XY_DRIVE_NOLABEL)
970 xy->state = XY_DRIVE_ONLINE;
971
972 /* Simulate opening partition 0 so write succeeds. */
973 xy->sc_dk.dk_openmask |= (1 << 0);
974 error = writedisklabel(MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART),
975 xystrategy, xy->sc_dk.dk_label,
976 xy->sc_dk.dk_cpulabel);
977 xy->sc_dk.dk_openmask =
978 xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask;
979 }
980 return error;
981
982 case DIOSXDCMD:
983 xio = (struct xd_iocmd *) addr;
984 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
985 return (error);
986 return (xyc_ioctlcmd(xy, dev, xio));
987
988 default:
989 return ENOTTY;
990 }
991 }
992
993 /*
994 * xyopen: open drive
995 */
996
997 int
998 xyopen(dev, flag, fmt, p)
999 dev_t dev;
1000 int flag, fmt;
1001 struct proc *p;
1002 {
1003 int unit, part;
1004 struct xy_softc *xy;
1005 struct xyc_attach_args xa;
1006
1007 /* first, could it be a valid target? */
1008
1009 unit = DISKUNIT(dev);
1010 if (unit >= xy_cd.cd_ndevs || (xy = xy_cd.cd_devs[unit]) == NULL)
1011 return (ENXIO);
1012 part = DISKPART(dev);
1013
1014 /* do we need to attach the drive? */
1015
1016 if (xy->state == XY_DRIVE_UNKNOWN) {
1017 xa.driveno = xy->xy_drive;
1018 xa.fullmode = XY_SUB_WAIT;
1019 xa.booting = 0;
1020 xyattach((struct device *) xy->parent,
1021 (struct device *) xy, &xa);
1022 if (xy->state == XY_DRIVE_UNKNOWN) {
1023 return (EIO);
1024 }
1025 }
1026 /* check for partition */
1027
1028 if (part != RAW_PART &&
1029 (part >= xy->sc_dk.dk_label->d_npartitions ||
1030 xy->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
1031 return (ENXIO);
1032 }
1033 /* set open masks */
1034
1035 switch (fmt) {
1036 case S_IFCHR:
1037 xy->sc_dk.dk_copenmask |= (1 << part);
1038 break;
1039 case S_IFBLK:
1040 xy->sc_dk.dk_bopenmask |= (1 << part);
1041 break;
1042 }
1043 xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask;
1044
1045 return 0;
1046 }
1047
1048 int
1049 xyread(dev, uio, flags)
1050 dev_t dev;
1051 struct uio *uio;
1052 int flags;
1053 {
1054
1055 return (physio(xystrategy, NULL, dev, B_READ, minphys, uio));
1056 }
1057
1058 int
1059 xywrite(dev, uio, flags)
1060 dev_t dev;
1061 struct uio *uio;
1062 int flags;
1063 {
1064
1065 return (physio(xystrategy, NULL, dev, B_WRITE, minphys, uio));
1066 }
1067
1068
1069 /*
1070 * xysize: return size of a partition for a dump
1071 */
1072
1073 int
1074 xysize(dev)
1075 dev_t dev;
1076
1077 {
1078 struct xy_softc *xysc;
1079 int unit, part, size, omask;
1080
1081 /* valid unit? */
1082 unit = DISKUNIT(dev);
1083 if (unit >= xy_cd.cd_ndevs || (xysc = xy_cd.cd_devs[unit]) == NULL)
1084 return (-1);
1085
1086 part = DISKPART(dev);
1087 omask = xysc->sc_dk.dk_openmask & (1 << part);
1088
1089 if (omask == 0 && xyopen(dev, 0, S_IFBLK, NULL) != 0)
1090 return (-1);
1091
1092 /* do it */
1093 if (xysc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
1094 size = -1; /* only give valid size for swap partitions */
1095 else
1096 size = xysc->sc_dk.dk_label->d_partitions[part].p_size *
1097 (xysc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
1098 if (omask == 0 && xyclose(dev, 0, S_IFBLK, NULL) != 0)
1099 return (-1);
1100 return (size);
1101 }
1102
1103 /*
1104 * xystrategy: buffering system interface to xy.
1105 */
1106
1107 void
1108 xystrategy(bp)
1109 struct buf *bp;
1110
1111 {
1112 struct xy_softc *xy;
1113 int s, unit;
1114 struct xyc_attach_args xa;
1115 struct disklabel *lp;
1116 daddr_t blkno;
1117
1118 unit = DISKUNIT(bp->b_dev);
1119
1120 /* check for live device */
1121
1122 if (unit >= xy_cd.cd_ndevs || (xy = xy_cd.cd_devs[unit]) == 0 ||
1123 bp->b_blkno < 0 ||
1124 (bp->b_bcount % xy->sc_dk.dk_label->d_secsize) != 0) {
1125 bp->b_error = EINVAL;
1126 goto bad;
1127 }
1128 /* do we need to attach the drive? */
1129
1130 if (xy->state == XY_DRIVE_UNKNOWN) {
1131 xa.driveno = xy->xy_drive;
1132 xa.fullmode = XY_SUB_WAIT;
1133 xa.booting = 0;
1134 xyattach((struct device *)xy->parent, (struct device *)xy, &xa);
1135 if (xy->state == XY_DRIVE_UNKNOWN) {
1136 bp->b_error = EIO;
1137 goto bad;
1138 }
1139 }
1140 if (xy->state != XY_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) {
1141 /* no I/O to unlabeled disks, unless raw partition */
1142 bp->b_error = EIO;
1143 goto bad;
1144 }
1145 /* short circuit zero length request */
1146
1147 if (bp->b_bcount == 0)
1148 goto done;
1149
1150 /* check bounds with label (disksubr.c). Determine the size of the
1151 * transfer, and make sure it is within the boundaries of the
1152 * partition. Adjust transfer if needed, and signal errors or early
1153 * completion. */
1154
1155 lp = xy->sc_dk.dk_label;
1156
1157 if (bounds_check_with_label(bp, lp,
1158 (xy->flags & XY_WLABEL) != 0) <= 0)
1159 goto done;
1160
1161 /*
1162 * Now convert the block number to absolute and put it in
1163 * terms of the device's logical block size.
1164 */
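	/*
	 * example (assuming XYFM_BPS is the usual 512 bytes, so d_secsize ==
	 * DEV_BSIZE): the divisor is 1 and blkno is simply b_blkno plus the
	 * partition's p_offset.
	 */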
1165 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
1166 if (DISKPART(bp->b_dev) != RAW_PART)
1167 blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;
1168
1169 bp->b_rawblkno = blkno;
1170
1171 /*
1172 * now we know we have a valid buf structure that we need to do I/O
1173 * on.
1174 */
1175 s = splbio(); /* protect the queues */
1176
1177 disksort_blkno(&xy->xyq, bp);
1178
1179 /* start 'em up */
1180
1181 xyc_start(xy->parent, NULL);
1182
1183 /* done! */
1184
1185 splx(s);
1186 return;
1187
1188 bad: /* tells upper layers we have an error */
1189 bp->b_flags |= B_ERROR;
1190 done: /* tells upper layers we are done with this
1191 * buf */
1192 bp->b_resid = bp->b_bcount;
1193 biodone(bp);
1194 }
1195 /*
1196 * end of {b,c}devsw functions
1197 */
1198
1199 /*
1200 * i n t e r r u p t f u n c t i o n
1201 *
1202 * xycintr: hardware interrupt.
1203 */
1204 int
1205 xycintr(v)
1206 void *v;
1207
1208 {
1209 struct xyc_softc *xycsc = v;
1210
1211 /* kick the event counter */
1212
1213 xycsc->sc_intrcnt.ev_count++;
1214
1215 /* remove as many done IOPBs as possible */
1216
1217 xyc_remove_iorq(xycsc);
1218
1219 /* start any iorq's already waiting */
1220
1221 xyc_start(xycsc, NULL);
1222
1223 return (1);
1224 }
1225 /*
1226 * end of interrupt function
1227 */
1228
1229 /*
1230 * i n t e r n a l f u n c t i o n s
1231 */
1232
1233 /*
1234 * xyc_rqinit: fill out the fields of an I/O request
1235 */
1236
1237 inline void
1238 xyc_rqinit(rq, xyc, xy, md, blk, cnt, db, bp)
1239 struct xy_iorq *rq;
1240 struct xyc_softc *xyc;
1241 struct xy_softc *xy;
1242 int md;
1243 u_long blk;
1244 int cnt;
1245 caddr_t db;
1246 struct buf *bp;
1247 {
1248 rq->xyc = xyc;
1249 rq->xy = xy;
1250 rq->ttl = XYC_MAXTTL + 10;
1251 rq->mode = md;
1252 rq->tries = rq->errno = rq->lasterror = 0;
1253 rq->blockno = blk;
1254 rq->sectcnt = cnt;
1255 rq->dbuf = db;
1256 rq->buf = bp;
1257 }
1258
1259 /*
1260 * xyc_rqtopb: load up an IOPB based on an iorq
1261 */
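/*
 * worked example of the block -> sector/head/cylinder conversion done below,
 * using hypothetical geometry (nsect=32, nhead=8): absolute block 1000 maps
 * to sect = 1000 % 32 = 8, head = (1000/32) % 8 = 7, cyl = (1000/32)/8 = 3.
 */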
1262
1263 void
1264 xyc_rqtopb(iorq, iopb, cmd, subfun)
1265 struct xy_iorq *iorq;
1266 struct xy_iopb *iopb;
1267 int cmd, subfun;
1268
1269 {
1270 u_long block, dp;
1271
1272 /* normal IOPB case, standard stuff */
1273
1274 /* chain bit handled later */
1275 iopb->ien = (XY_STATE(iorq->mode) == XY_SUB_POLL) ? 0 : 1;
1276 iopb->com = cmd;
1277 iopb->errno = 0;
1278 iopb->errs = 0;
1279 iopb->done = 0;
1280 if (iorq->xy) {
1281 iopb->unit = iorq->xy->xy_drive;
1282 iopb->dt = iorq->xy->drive_type;
1283 } else {
1284 iopb->unit = 0;
1285 iopb->dt = 0;
1286 }
1287 block = iorq->blockno;
1288 if (iorq->xy == NULL || block == 0) {
1289 iopb->sect = iopb->head = iopb->cyl = 0;
1290 } else {
1291 iopb->sect = block % iorq->xy->nsect;
1292 block = block / iorq->xy->nsect;
1293 iopb->head = block % iorq->xy->nhead;
1294 block = block / iorq->xy->nhead;
1295 iopb->cyl = block;
1296 }
1297 iopb->scnt = iorq->sectcnt;
1298 dp = (u_long) iorq->dbuf;
1299 if (iorq->dbuf == NULL) {
1300 iopb->dataa = 0;
1301 iopb->datar = 0;
1302 } else {
1303 iopb->dataa = (dp & 0xffff);
1304 iopb->datar = ((dp & 0xff0000) >> 16);
1305 }
1306 iopb->subfn = subfun;
1307 }
1308
1309
1310 /*
1311 * xyc_unbusy: wait for the xyc to go unbusy, or timeout.
1312 */
1313
1314 int
1315 xyc_unbusy(xyc, del)
1316
1317 struct xyc *xyc;
1318 int del;
1319
1320 {
1321 while (del-- > 0) {
1322 if ((xyc->xyc_csr & XYC_GBSY) == 0)
1323 break;
1324 DELAY(1);
1325 }
1326 return (del < 0 ? XY_ERR_FAIL : XY_ERR_AOK); /* del is -1 only on timeout */
1327 }
1328
1329 /*
1330 * xyc_cmd: front end for POLL'd and WAIT'd commands. Returns 0 or error.
1331 * note that NORM requests are handled separately.
1332 */
1333 int
1334 xyc_cmd(xycsc, cmd, subfn, unit, block, scnt, dptr, fullmode)
1335 struct xyc_softc *xycsc;
1336 int cmd, subfn, unit, block, scnt;
1337 char *dptr;
1338 int fullmode;
1339
1340 {
1341 int submode = XY_STATE(fullmode);
1342 struct xy_iorq *iorq = xycsc->ciorq;
1343 struct xy_iopb *iopb = xycsc->ciopb;
1344
1345 /*
1346 * is someone else using the control iopb? wait for it if we can
1347 */
1348 start:
1349 if (submode == XY_SUB_WAIT && XY_STATE(iorq->mode) != XY_SUB_FREE) {
1350 if (tsleep(iorq, PRIBIO, "xyc_cmd", 0))
1351 return(XY_ERR_FAIL);
1352 goto start;
1353 }
1354
1355 if (XY_STATE(iorq->mode) != XY_SUB_FREE) {
1356 DELAY(1000000); /* XY_SUB_POLL: steal the iorq */
1357 iorq->mode = XY_SUB_FREE;
1358 printf("%s: stole control iopb\n", xycsc->sc_dev.dv_xname);
1359 }
1360
1361 /* init iorq/iopb */
1362
1363 xyc_rqinit(iorq, xycsc,
1364 (unit == XYC_NOUNIT) ? NULL : xycsc->sc_drives[unit],
1365 fullmode, block, scnt, dptr, NULL);
1366
1367 /* load IOPB from iorq */
1368
1369 xyc_rqtopb(iorq, iopb, cmd, subfn);
1370
1371 /* submit it for processing */
1372
1373 xyc_submit_iorq(xycsc, iorq, fullmode); /* error code will be in iorq */
1374
1375 return(XY_ERR_AOK);
1376 }
1377
1378 /*
1379 * xyc_startbuf
1380 * start a buffer for running
1381 */
1382
1383 int
1384 xyc_startbuf(xycsc, xysc, bp)
1385 struct xyc_softc *xycsc;
1386 struct xy_softc *xysc;
1387 struct buf *bp;
1388
1389 {
1390 int partno, error;
1391 struct xy_iorq *iorq;
1392 struct xy_iopb *iopb;
1393 u_long block;
1394
1395 iorq = xysc->xyrq;
1396 iopb = iorq->iopb;
1397
1398 /* get buf */
1399
1400 if (bp == NULL)
1401 panic("xyc_startbuf null buf");
1402
1403 partno = DISKPART(bp->b_dev);
1404 #ifdef XYC_DEBUG
1405 printf("xyc_startbuf: %s%c: %s block %d\n", xysc->sc_dev.dv_xname,
1406 'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", bp->b_blkno);
1407 printf("xyc_startbuf: b_bcount %d, b_data 0x%x\n",
1408 bp->b_bcount, bp->b_data);
1409 #endif
1410
1411 /*
1412 * load request.
1413 *
1414 * note that iorq points to the buffer as mapped into DVMA space,
1415 * whereas the bp->b_data points to its non-DVMA mapping.
1416 */
1417
1418 block = bp->b_rawblkno;
1419
1420 error = bus_dmamap_load(xycsc->dmatag, iorq->dmamap,
1421 bp->b_data, bp->b_bcount, 0, BUS_DMA_NOWAIT);
1422 if (error != 0) {
1423 printf("%s: warning: cannot load DMA map\n",
1424 xycsc->sc_dev.dv_xname);
1425 return (XY_ERR_FAIL); /* XXX: need some sort of
1426 * call-back scheme here? */
1427 }
1428
1429 bus_dmamap_sync(xycsc->dmatag, iorq->dmamap, 0,
1430 iorq->dmamap->dm_mapsize, (bp->b_flags & B_READ)
1431 ? BUS_DMASYNC_PREREAD
1432 : BUS_DMASYNC_PREWRITE);
1433
1434 /* init iorq and load iopb from it */
1435 xyc_rqinit(iorq, xycsc, xysc, XY_SUB_NORM | XY_MODE_VERBO, block,
1436 bp->b_bcount / XYFM_BPS,
1437 (caddr_t)iorq->dmamap->dm_segs[0].ds_addr,
1438 bp);
1439
1440 xyc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XYCMD_RD : XYCMD_WR, 0);
1441
1442 /* Instrumentation. */
1443 disk_busy(&xysc->sc_dk);
1444
1445 return (XY_ERR_AOK);
1446 }
1447
1448
1449 /*
1450 * xyc_submit_iorq: submit an iorq for processing. returns XY_ERR_AOK
1451 * if ok. if it fails, it returns an error code. type is XY_SUB_*.
1452 *
1453 * note: caller frees iorq in all cases except NORM
1454 *
1455 * return value:
1456 * NORM: XY_AOK (req pending), XY_FAIL (couldn't submit request)
1457 * WAIT: XY_AOK (success), <error-code> (failed)
1458 * POLL: <same as WAIT>
1459 * NOQ : <same as NORM>
1460 *
1461 * there are three sources for i/o requests:
1462 * [1] xystrategy: normal block I/O, using "struct buf" system.
1463 * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts.
1464 * [3] open/ioctl: these are I/O requests done in the context of a process,
1465 * and the process should block until they are done.
1466 *
1467 * software state is stored in the iorq structure. each iorq has an
1468 * iopb structure. the hardware understands the iopb structure.
1469 * every command must go through an iopb. a 450 handles one iopb at a
1470 * time, whereas a 451 can take them in chains. [the 450 claims it
1471 * can handle chains, but it appears to be buggy...] iopbs are allocated
1472 * in DVMA space at boot up time. each disk gets one iopb, and the
1473 * controller gets one (for POLL and WAIT commands). what happens if
1474 * the iopb is busy? for i/o type [1], the buffers are queued at the
1475 * "buff" layer and picked up later by the interrupt routine. for case
1476 * [2] we can only be blocked if there is a WAIT type I/O request being
1477 * run. since this can only happen when we are crashing, we wait a sec
1478 * and then steal the IOPB. for case [3] the process can sleep
1479 * on the iorq free list until some iopbs are available.
1480 */
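/*
 * rough life cycle of a NORM request in this file: xystrategy() queues the
 * buf and calls xyc_start(); xyc_start() hands it to xyc_startbuf(), which
 * loads the DMA map and fills in the iorq/iopb; xyc_submit_iorq() and
 * xyc_chain() then build a chain and XYC_GO starts the controller.  when the
 * controller interrupts, xycintr() calls xyc_remove_iorq() to retire done
 * iopbs and biodone() the bufs, and xyc_start() launches anything that
 * queued up in the meantime.
 */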
1481
1482
1483 int
1484 xyc_submit_iorq(xycsc, iorq, type)
1485 struct xyc_softc *xycsc;
1486 struct xy_iorq *iorq;
1487 int type;
1488
1489 {
1490 struct xy_iopb *dmaiopb;
1491
1492 #ifdef XYC_DEBUG
1493 printf("xyc_submit_iorq(%s, addr=0x%x, type=%d)\n",
1494 xycsc->sc_dev.dv_xname, iorq, type);
1495 #endif
1496
1497 /* first check and see if controller is busy */
1498 if ((xycsc->xyc->xyc_csr & XYC_GBSY) != 0) {
1499 #ifdef XYC_DEBUG
1500 printf("xyc_submit_iorq: XYC not ready (BUSY)\n");
1501 #endif
1502 if (type == XY_SUB_NOQ)
1503 return (XY_ERR_FAIL); /* failed */
1504 switch (type) {
1505 case XY_SUB_NORM:
1506 return XY_ERR_AOK; /* success */
1507 case XY_SUB_WAIT:
1508 while (iorq->iopb->done == 0) {
1509 (void) tsleep(iorq, PRIBIO, "xyciorq", 0);
1510 }
1511 return (iorq->errno);
1512 case XY_SUB_POLL: /* steal controller */
1513 (void)xycsc->xyc->xyc_rsetup; /* RESET */
1514 if (xyc_unbusy(xycsc->xyc,XYC_RESETUSEC) == XY_ERR_FAIL)
1515 panic("xyc_submit_iorq: stuck xyc");
1516 printf("%s: stole controller\n",
1517 xycsc->sc_dev.dv_xname);
1518 break;
1519 default:
1520 panic("xyc_submit_iorq adding");
1521 }
1522 }
1523
1524 dmaiopb = xyc_chain(xycsc, iorq); /* build chain */
1525 if (dmaiopb == NULL) { /* nothing doing? */
1526 if (type == XY_SUB_NORM || type == XY_SUB_NOQ)
1527 return(XY_ERR_AOK);
1528 panic("xyc_submit_iorq: xyc_chain failed!\n");
1529 }
1530
1531 XYC_GO(xycsc->xyc, (u_long)dmaiopb);
1532
1533 /* command now running, wrap it up */
1534 switch (type) {
1535 case XY_SUB_NORM:
1536 case XY_SUB_NOQ:
1537 return (XY_ERR_AOK); /* success */
1538 case XY_SUB_WAIT:
1539 while (iorq->iopb->done == 0) {
1540 (void) tsleep(iorq, PRIBIO, "xyciorq", 0);
1541 }
1542 return (iorq->errno);
1543 case XY_SUB_POLL:
1544 return (xyc_piodriver(xycsc, iorq));
1545 default:
1546 panic("xyc_submit_iorq wrap up");
1547 }
1548 panic("xyc_submit_iorq");
1549 return 0; /* not reached */
1550 }
1551
1552
1553 /*
1554 * xyc_chain: build a chain. return dvma address of first element in
1555 * the chain. iorq != NULL: means we only want that item on the chain.
1556 */
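/*
 * chaining detail: a link is made by setting chen in the previous IOPB and
 * storing the low 16 bits of the next IOPB's DVMA address in nxtiopb (see
 * below); the upper bits come from the relocation register loaded by XYC_GO,
 * which is why all IOPBs must live in the same 64K DVMA segment (enforced by
 * the 64K boundary on the iopb DMA map in xycattach).
 */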
1557
1558 struct xy_iopb *
1559 xyc_chain(xycsc, iorq)
1560 struct xyc_softc *xycsc;
1561 struct xy_iorq *iorq;
1562
1563 {
1564 int togo, chain, hand;
1565
1566 bzero(xycsc->xy_chain, sizeof(xycsc->xy_chain));
1567
1568 /*
1569 * promote control IOPB to the top
1570 */
1571 if (iorq == NULL) {
1572 if ((XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_POLL ||
1573 XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_WAIT) &&
1574 xycsc->iopbase[XYC_CTLIOPB].done == 0)
1575 iorq = &xycsc->reqs[XYC_CTLIOPB];
1576 }
1577
1578 /*
1579 * special case: if iorq != NULL then we have a POLL or WAIT request.
1580 * we let these take priority and do them first.
1581 */
1582 if (iorq) {
1583 xycsc->xy_chain[0] = iorq;
1584 iorq->iopb->chen = 0;
1585 return(iorq->dmaiopb);
1586 }
1587
1588 /*
1589 * NORM case: do round robin and maybe chain (if allowed and possible)
1590 */
1591 chain = 0;
1592 hand = xycsc->xy_hand;
1593 xycsc->xy_hand = (xycsc->xy_hand + 1) % XYC_MAXIOPB;
1594
1595 for (togo = XYC_MAXIOPB; togo > 0;
1596 togo--, hand = (hand + 1) % XYC_MAXIOPB) {
1597 struct xy_iopb *iopb, *prev_iopb, *dmaiopb;
1598
1599 if (XY_STATE(xycsc->reqs[hand].mode) != XY_SUB_NORM ||
1600 xycsc->iopbase[hand].done)
1601 continue; /* not ready-for-i/o */
1602
1603 xycsc->xy_chain[chain] = &xycsc->reqs[hand];
1604 iopb = xycsc->xy_chain[chain]->iopb;
1605 iopb->chen = 0;
1606 if (chain != 0) {
1607 /* adding a link to a chain */
1608 prev_iopb = xycsc->xy_chain[chain-1]->iopb;
1609 prev_iopb->chen = 1;
1610 dmaiopb = xycsc->xy_chain[chain]->dmaiopb;
1611 prev_iopb->nxtiopb = ((u_long)dmaiopb) & 0xffff;
1612 } else {
1613 /* head of chain */
1614 iorq = xycsc->xy_chain[chain];
1615 }
1616 chain++;
1617
1618 /* quit if chaining dis-allowed */
1619 if (xycsc->no_ols)
1620 break;
1621 }
1622
1623 return(iorq ? iorq->dmaiopb : NULL);
1624 }
1625
1626 /*
1627 * xyc_piodriver
1628 *
1629 * programmed i/o driver. this function takes over the computer
1630 * and drains off the polled i/o request. it returns the status of the iorq
1631 * the caller is interested in.
1632 */
1633 int
1634 xyc_piodriver(xycsc, iorq)
1635 struct xyc_softc *xycsc;
1636 struct xy_iorq *iorq;
1637
1638 {
1639 int nreset = 0;
1640 int retval = 0;
1641 u_long res;
1642 #ifdef XYC_DEBUG
1643 printf("xyc_piodriver(%s, 0x%x)\n", xycsc->sc_dev.dv_xname, iorq);
1644 #endif
1645
1646 while (iorq->iopb->done == 0) {
1647
1648 res = xyc_unbusy(xycsc->xyc, XYC_MAXTIME);
1649
1650 /* we expect some progress soon */
1651 if (res == XY_ERR_FAIL && nreset >= 2) {
1652 xyc_reset(xycsc, 0, XY_RSET_ALL, XY_ERR_FAIL, 0);
1653 #ifdef XYC_DEBUG
1654 printf("xyc_piodriver: timeout\n");
1655 #endif
1656 return (XY_ERR_FAIL);
1657 }
1658 if (res == XY_ERR_FAIL) {
1659 if (xyc_reset(xycsc, 0,
1660 (nreset++ == 0) ? XY_RSET_NONE : iorq,
1661 XY_ERR_FAIL,
1662 0) == XY_ERR_FAIL)
1663 return (XY_ERR_FAIL); /* flushes all but POLL
1664 * requests, resets */
1665 continue;
1666 }
1667
1668 xyc_remove_iorq(xycsc); /* may resubmit request */
1669
1670 if (iorq->iopb->done == 0)
1671 xyc_start(xycsc, iorq);
1672 }
1673
1674 /* get return value */
1675
1676 retval = iorq->errno;
1677
1678 #ifdef XYC_DEBUG
1679 printf("xyc_piodriver: done, retval = 0x%x (%s)\n",
1680 iorq->errno, xyc_e2str(iorq->errno));
1681 #endif
1682
1683 /* start up any bufs that have queued */
1684
1685 xyc_start(xycsc, NULL);
1686
1687 return (retval);
1688 }
1689
1690 /*
1691 * xyc_xyreset: reset one drive. NOTE: assumes xyc was just reset.
1692 * we steal iopb[XYC_CTLIOPB] for this, but we put it back when we are done.
1693 */
1694 void
1695 xyc_xyreset(xycsc, xysc)
1696 struct xyc_softc *xycsc;
1697 struct xy_softc *xysc;
1698
1699 {
1700 struct xy_iopb tmpiopb;
1701 struct xy_iopb *iopb;
1702 int del;
1703
1704 iopb = xycsc->ciopb;
1705
1706 /* Save contents */
1707 bcopy(iopb, &tmpiopb, sizeof(struct xy_iopb));
1708
1709 iopb->chen = iopb->done = iopb->errs = 0;
1710 iopb->ien = 0;
1711 iopb->com = XYCMD_RST;
1712 iopb->unit = xysc->xy_drive;
1713
1714 XYC_GO(xycsc->xyc, (u_long)xycsc->ciorq->dmaiopb);
1715
1716 del = XYC_RESETUSEC;
1717 while (del > 0) {
1718 if ((xycsc->xyc->xyc_csr & XYC_GBSY) == 0)
1719 break;
1720 DELAY(1);
1721 del--;
1722 }
1723
1724 if (del <= 0 || iopb->errs) {
1725 printf("%s: off-line: %s\n", xycsc->sc_dev.dv_xname,
1726 xyc_e2str(iopb->errno));
1727 del = xycsc->xyc->xyc_rsetup;
1728 if (xyc_unbusy(xycsc->xyc, XYC_RESETUSEC) == XY_ERR_FAIL)
1729 panic("xyc_reset");
1730 } else {
1731 xycsc->xyc->xyc_csr = XYC_IPND; /* clear IPND */
1732 }
1733
1734 /* Restore contents */
1735 bcopy(&tmpiopb, iopb, sizeof(struct xy_iopb));
1736 }
1737
1738
1739 /*
1740 * xyc_reset: reset everything: requests are marked as errors except
1741 * a polled request (which is resubmitted)
1742 */
1743 int
1744 xyc_reset(xycsc, quiet, blastmode, error, xysc)
1745 struct xyc_softc *xycsc;
1746 int quiet, error;
1747 struct xy_iorq *blastmode;
1748 struct xy_softc *xysc;
1749
1750 {
1751 int del = 0, lcv, retval = XY_ERR_AOK;
1752
1753 /* soft reset hardware */
1754
1755 if (!quiet)
1756 printf("%s: soft reset\n", xycsc->sc_dev.dv_xname);
1757 del = xycsc->xyc->xyc_rsetup;
1758 del = xyc_unbusy(xycsc->xyc, XYC_RESETUSEC);
1759 if (del == XY_ERR_FAIL) {
1760 blastmode = XY_RSET_ALL; /* dead, flush all requests */
1761 retval = XY_ERR_FAIL;
1762 }
1763 if (xysc)
1764 xyc_xyreset(xycsc, xysc);
1765
1766 /* fix queues based on "blast-mode" */
1767
1768 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) {
1769 register struct xy_iorq *iorq = &xycsc->reqs[lcv];
1770
1771 if (XY_STATE(iorq->mode) != XY_SUB_POLL &&
1772 XY_STATE(iorq->mode) != XY_SUB_WAIT &&
1773 XY_STATE(iorq->mode) != XY_SUB_NORM)
1774 /* is it active? */
1775 continue;
1776
1777 if (blastmode == XY_RSET_ALL ||
1778 blastmode != iorq) {
1779 /* failed */
1780 iorq->errno = error;
1781 xycsc->iopbase[lcv].done = xycsc->iopbase[lcv].errs = 1;
1782 switch (XY_STATE(iorq->mode)) {
1783 case XY_SUB_NORM:
1784 iorq->buf->b_error = EIO;
1785 iorq->buf->b_flags |= B_ERROR;
1786 iorq->buf->b_resid = iorq->sectcnt * XYFM_BPS;
1787
1788 bus_dmamap_sync(xycsc->dmatag, iorq->dmamap, 0,
1789 iorq->dmamap->dm_mapsize,
1790 (iorq->buf->b_flags & B_READ)
1791 ? BUS_DMASYNC_POSTREAD
1792 : BUS_DMASYNC_POSTWRITE);
1793
1794 bus_dmamap_unload(xycsc->dmatag, iorq->dmamap);
1795
1796 BUFQ_REMOVE(&iorq->xy->xyq, iorq->buf);
1797 disk_unbusy(&xycsc->reqs[lcv].xy->sc_dk,
1798 (xycsc->reqs[lcv].buf->b_bcount -
1799 xycsc->reqs[lcv].buf->b_resid));
1800 biodone(iorq->buf);
1801 iorq->mode = XY_SUB_FREE;
1802 break;
1803 case XY_SUB_WAIT:
1804 wakeup(iorq);
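/* FALLTHROUGH */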
1805 case XY_SUB_POLL:
1806 iorq->mode =
1807 XY_NEWSTATE(iorq->mode, XY_SUB_DONE);
1808 break;
1809 }
1810
1811 } else {
1812
1813 /* resubmit, no need to do anything here */
1814 }
1815 }
1816
1817 /*
1818 * now, if stuff is waiting, start it.
1819 * since we just reset it should go
1820 */
1821 xyc_start(xycsc, NULL);
1822
1823 return (retval);
1824 }
1825
1826 /*
1827 * xyc_start: start waiting buffers
1828 */
1829
1830 void
1831 xyc_start(xycsc, iorq)
1832 struct xyc_softc *xycsc;
1833 struct xy_iorq *iorq;
1834
1835 {
1836 int lcv;
1837 struct xy_softc *xy;
1838
1839 if (iorq == NULL) {
1840 for (lcv = 0; lcv < XYC_MAXDEV ; lcv++) {
1841 if ((xy = xycsc->sc_drives[lcv]) == NULL) continue;
1842 if (BUFQ_FIRST(&xy->xyq) == NULL) continue;
1843 if (xy->xyrq->mode != XY_SUB_FREE) continue;
1844 xyc_startbuf(xycsc, xy, BUFQ_FIRST(&xy->xyq));
1845 }
1846 }
1847 xyc_submit_iorq(xycsc, iorq, XY_SUB_NOQ);
1848 }
1849
1850 /*
1851 * xyc_remove_iorq: remove "done" IOPB's.
1852 */
1853
1854 int
1855 xyc_remove_iorq(xycsc)
1856 struct xyc_softc *xycsc;
1857
1858 {
1859 int errno, rq, comm, errs;
1860 struct xyc *xyc = xycsc->xyc;
1861 u_long addr;
1862 struct xy_iopb *iopb;
1863 struct xy_iorq *iorq;
1864 struct buf *bp;
1865
1866 if (xyc->xyc_csr & XYC_DERR) {
1867 /*
1868 * DOUBLE ERROR: should never happen under normal use. This
1869 * error is so bad, you can't even tell which IOPB is bad, so
1870 * we dump them all.
1871 */
1872 errno = XY_ERR_DERR;
1873 printf("%s: DOUBLE ERROR!\n", xycsc->sc_dev.dv_xname);
1874 if (xyc_reset(xycsc, 0, XY_RSET_ALL, errno, 0) != XY_ERR_AOK) {
1875 printf("%s: soft reset failed!\n",
1876 xycsc->sc_dev.dv_xname);
1877 panic("xyc_remove_iorq: controller DEAD");
1878 }
1879 return (XY_ERR_AOK);
1880 }
1881
1882 /*
1883 * get iopb that is done, loop down the chain
1884 */
1885
1886 if (xyc->xyc_csr & XYC_ERR) {
1887 xyc->xyc_csr = XYC_ERR; /* clear error condition */
1888 }
1889 if (xyc->xyc_csr & XYC_IPND) {
1890 xyc->xyc_csr = XYC_IPND; /* clear interrupt */
1891 }
1892
1893 for (rq = 0; rq < XYC_MAXIOPB; rq++) {
1894 iorq = xycsc->xy_chain[rq];
1895 if (iorq == NULL) break; /* done ! */
1896 if (iorq->mode == 0 || XY_STATE(iorq->mode) == XY_SUB_DONE)
1897 continue; /* free, or done */
1898 iopb = iorq->iopb;
1899 if (iopb->done == 0)
1900 continue; /* not done yet */
1901
1902 comm = iopb->com;
1903 errs = iopb->errs;
1904
1905 if (errs)
1906 iorq->errno = iopb->errno;
1907 else
1908 iorq->errno = 0;
1909
1910 /* handle non-fatal errors */
1911
1912 if (errs &&
1913 xyc_error(xycsc, iorq, iopb, comm) == XY_ERR_AOK)
1914 continue; /* AOK: we resubmitted it */
1915
1916
1917 /* this iorq is now done (hasn't been restarted or anything) */
1918
1919 if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror)
1920 xyc_perror(iorq, iopb, 0);
1921
1922 /* now, if this was a read/write, check to make sure we got all the data
1923 * we needed. (this may not be the case if we got an error in
1924 * the middle of a multisector request). */
1925
1926 if ((iorq->mode & XY_MODE_B144) != 0 && errs == 0 &&
1927 (comm == XYCMD_RD || comm == XYCMD_WR)) {
1928 /* we just successfully processed a bad144 sector
1929 * note: if we are in bad 144 mode, the pointers have
1930 * been advanced already (see above) and are pointing
1931 * at the bad144 sector. to exit bad144 mode, we
1932 * must advance the pointers 1 sector and issue a new
1933 * request if there are still sectors left to process
1934 *
1935 */
1936 XYC_ADVANCE(iorq, 1); /* advance 1 sector */
1937
1938 /* exit b144 mode */
1939 iorq->mode = iorq->mode & (~XY_MODE_B144);
1940
1941 if (iorq->sectcnt) { /* more to go! */
1942 iorq->lasterror = iorq->errno = iopb->errno = 0;
1943 iopb->errs = iopb->done = 0;
1944 iorq->tries = 0;
1945 iopb->scnt = iorq->sectcnt;
1946 iopb->cyl = iorq->blockno /
1947 iorq->xy->sectpercyl;
1948 iopb->head =
1949 (iorq->blockno / iorq->xy->nsect) %
1950 iorq->xy->nhead;
1951 iopb->sect = iorq->blockno % iorq->xy->nsect; /* same conversion as xyc_rqtopb */
1952 addr = (u_long) iorq->dbuf;
1953 iopb->dataa = (addr & 0xffff);
1954 iopb->datar = ((addr & 0xff0000) >> 16);
1955 /* will resubmit at end */
1956 continue;
1957 }
1958 }
1959 /* final cleanup, totally done with this request */
1960
1961 switch (XY_STATE(iorq->mode)) {
1962 case XY_SUB_NORM:
1963 bp = iorq->buf;
1964 if (errs) {
1965 bp->b_error = EIO;
1966 bp->b_flags |= B_ERROR;
1967 bp->b_resid = iorq->sectcnt * XYFM_BPS;
1968 } else {
1969 bp->b_resid = 0; /* done */
1970 }
1971 bus_dmamap_sync(xycsc->dmatag, iorq->dmamap, 0,
1972 iorq->dmamap->dm_mapsize,
1973 (iorq->buf->b_flags & B_READ)
1974 ? BUS_DMASYNC_POSTREAD
1975 : BUS_DMASYNC_POSTWRITE);
1976
1977 bus_dmamap_unload(xycsc->dmatag, iorq->dmamap);
1978
1979 BUFQ_REMOVE(&iorq->xy->xyq, bp);
1980 disk_unbusy(&iorq->xy->sc_dk,
1981 (bp->b_bcount - bp->b_resid));
1982 iorq->mode = XY_SUB_FREE;
1983 biodone(bp);
1984 break;
1985 case XY_SUB_WAIT:
1986 iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE);
1987 wakeup(iorq);
1988 break;
1989 case XY_SUB_POLL:
1990 iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE);
1991 break;
1992 }
1993 }
1994
1995 return (XY_ERR_AOK);
1996 }
1997
1998 /*
1999 * xyc_perror: print error.
2000 * - if still_trying is true: we got an error, retried and got a
2001 * different error. in that case lasterror is the old error,
2002 * and errno is the new one.
2003 * - if still_trying is not true, then if we ever had an error it
2004 * is in lasterror. also, if iorq->errno == 0, then we recovered
2005 * from that error (otherwise iorq->errno == iorq->lasterror).
2006 */
2007 void
2008 xyc_perror(iorq, iopb, still_trying)
2009 struct xy_iorq *iorq;
2010 struct xy_iopb *iopb;
2011 int still_trying;
2012
2013 {
2014
2015 int error = iorq->lasterror;
2016
2017 printf("%s", (iorq->xy) ? iorq->xy->sc_dev.dv_xname
2018 : iorq->xyc->sc_dev.dv_xname);
2019 if (iorq->buf)
2020 printf("%c: ", 'a' + DISKPART(iorq->buf->b_dev));
2021 if (iopb->com == XYCMD_RD || iopb->com == XYCMD_WR)
2022 printf("%s %d/%d/%d: ",
2023 (iopb->com == XYCMD_RD) ? "read" : "write",
2024 iopb->cyl, iopb->head, iopb->sect);
2025 printf("%s", xyc_e2str(error));
2026
2027 if (still_trying)
2028 printf(" [still trying, new error=%s]", xyc_e2str(iorq->errno));
2029 else
2030 if (iorq->errno == 0)
2031 printf(" [recovered in %d tries]", iorq->tries);
2032
2033 printf("\n");
2034 }
2035
2036 /*
2037 * xyc_error: non-fatal error encountered... recover.
2038 * return AOK if resubmitted, return FAIL if this iopb is done
2039 */
2040 int
2041 xyc_error(xycsc, iorq, iopb, comm)
2042 struct xyc_softc *xycsc;
2043 struct xy_iorq *iorq;
2044 struct xy_iopb *iopb;
2045 int comm;
2046
2047 {
2048 int errno = iorq->errno;
2049 int erract = xyc_entoact(errno);
2050 int oldmode, advance;
2051 #ifdef __sparc__
2052 int i;
2053 #endif
2054
2055 if (erract == XY_ERA_RSET) { /* some errors require a reset */
2056 oldmode = iorq->mode;
2057 iorq->mode = XY_SUB_DONE | (~XY_SUB_MASK & oldmode);
2058 /* make xyc_start ignore us */
2059 xyc_reset(xycsc, 1, XY_RSET_NONE, errno, iorq->xy);
2060 iorq->mode = oldmode;
2061 }
2062 /* check for a read/write to a sector in the bad144 table; if bad, redirect
2063 * the request to the bad144 area */
2064
2065 if ((comm == XYCMD_RD || comm == XYCMD_WR) &&
2066 (iorq->mode & XY_MODE_B144) == 0) {
2067 advance = iorq->sectcnt - iopb->scnt;
2068 XYC_ADVANCE(iorq, advance);
2069 #ifdef __sparc__
2070 if ((i = isbad(&iorq->xy->dkb, iorq->blockno / iorq->xy->sectpercyl,
2071 (iorq->blockno / iorq->xy->nsect) % iorq->xy->nhead,
2072 iorq->blockno % iorq->xy->nsect)) != -1) {
2073 iorq->mode |= XY_MODE_B144; /* enter bad144 mode &
2074 * redirect */
2075 iopb->errno = iopb->done = iopb->errs = 0;
2076 iopb->scnt = 1;
2077 iopb->cyl = (iorq->xy->ncyl + iorq->xy->acyl) - 2;
2078 /* second to last acyl */
2079 i = iorq->xy->sectpercyl - 1 - i; /* follow bad144
2080 * standard */
2081 iopb->head = i / iorq->xy->nsect;
2082 iopb->sect = i % iorq->xy->nsect;
2083 /* will resubmit when we come out of remove_iorq */
2084 return (XY_ERR_AOK); /* recovered! */
2085 }
2086 #endif
2087 }
2088
2089 /*
2090 * it isn't a bad144 sector, so it must be a real error.  see if we
2091 * can retry it.
2092 */
2093 if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror)
2094 xyc_perror(iorq, iopb, 1); /* inform of error state
2095 * change */
2096 iorq->lasterror = errno;
2097
2098 if ((erract == XY_ERA_RSET || erract == XY_ERA_HARD)
2099 && iorq->tries < XYC_MAXTRIES) { /* retry? */
2100 iorq->tries++;
2101 iorq->errno = iopb->errno = iopb->done = iopb->errs = 0;
2102 /* will resubmit at end of remove_iorq */
2103 return (XY_ERR_AOK); /* recovered! */
2104 }
2105
2106 /* failed to recover from this error */
2107 return (XY_ERR_FAIL);
2108 }
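/*
 * worked example of the bad144 redirection above (all numbers assumed,
 * purely illustrative): for a drive with ncyl = 840, acyl = 2,
 * nhead = 19, nsect = 32 (sectpercyl = 608), if isbad() reports the
 * target sector as bad-sector table entry i = 3, the request is
 * re-aimed at a single replacement sector:
 *
 *	cyl  = (840 + 2) - 2 = 840	second to last cylinder
 *	i    = 608 - 1 - 3   = 604	count back from the end (bad144)
 *	head = 604 / 32      = 18
 *	sect = 604 % 32      = 28
 *
 * XY_MODE_B144 keeps the redirected request from being checked against
 * the table again.
 */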
2109
2110 /*
2111 * xyc_tick: make sure xy is still alive and ticking (err, kicking).
2112 */
2113 void
2114 xyc_tick(arg)
2115 void *arg;
2116
2117 {
2118 struct xyc_softc *xycsc = arg;
2119 int lcv, s, reset = 0;
2120
2121 /* reduce ttl for each request; if one reaches zero, reset the xyc */
2122 s = splbio();
2123 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) {
2124 if (xycsc->reqs[lcv].mode == 0 ||
2125 XY_STATE(xycsc->reqs[lcv].mode) == XY_SUB_DONE)
2126 continue;
2127 xycsc->reqs[lcv].ttl--;
2128 if (xycsc->reqs[lcv].ttl == 0)
2129 reset = 1;
2130 }
2131 if (reset) {
2132 printf("%s: watchdog timeout\n", xycsc->sc_dev.dv_xname);
2133 xyc_reset(xycsc, 0, XY_RSET_NONE, XY_ERR_FAIL, NULL);
2134 }
2135 splx(s);
2136
2137 /* until next time */
2138
2139 callout_reset(&xycsc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xycsc);
2140 }
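/*
 * note on the watchdog above: xyc_tick re-arms itself through
 * callout_reset(), so once sc_tick_ch has been primed (presumably by the
 * attach code earlier in this file) it fires every XYC_TICKCNT ticks.
 * a minimal sketch of that self-rearming pattern, with made-up names:
 *
 *	void
 *	mytick(arg)
 *		void *arg;
 *	{
 *		struct mysoftc *sc = arg;
 *
 *		... age outstanding requests, reset hardware if wedged ...
 *		callout_reset(&sc->sc_tick_ch, MYTICKCNT, mytick, sc);
 *	}
 */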
2141
2142 /*
2143 * xyc_ioctlcmd: this function provides a user level interface to the
2144 * controller via ioctl. this allows "format" programs to be written
2145 * in user space, and is also useful for some debugging. we return
2146 * an error code. called at user priority.
2147 *
2148 * XXX missing a few commands (see the 7053 driver for ideas)
2149 */
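/*
 * illustrative user-level sketch (not part of the driver): a format or
 * debug program fills in a struct xd_iocmd and hands it to the raw
 * device.  DIOSXDCMD is assumed here to be the ioctl defined for this
 * in xio.h; the device path and error handling are also illustrative:
 *
 *	struct xd_iocmd xio;
 *	char buf[XYFM_BPS];
 *	int fd = open("/dev/rxy0c", O_RDWR);
 *
 *	memset(&xio, 0, sizeof(xio));
 *	xio.cmd = XYCMD_RD;		read ...
 *	xio.block = 0;			... sector zero ...
 *	xio.sectcnt = 1;		... one sector's worth
 *	xio.dptr = buf;
 *	xio.dlen = sizeof(buf);		must equal sectcnt * XYFM_BPS
 *	if (ioctl(fd, DIOSXDCMD, &xio) == -1)
 *		err(1, "DIOSXDCMD");
 */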
2150 int
2151 xyc_ioctlcmd(xy, dev, xio)
2152 struct xy_softc *xy;
2153 dev_t dev;
2154 struct xd_iocmd *xio;
2155
2156 {
2157 int s, rqno, dummy = 0;
2158 caddr_t dvmabuf = NULL, buf = NULL;
2159 struct xyc_softc *xycsc;
2160 int rseg, error;
2161 bus_dma_segment_t seg;
2162
2163 /* check sanity of requested command */
2164
2165 switch (xio->cmd) {
2166
2167 case XYCMD_NOP: /* no op: everything should be zero */
2168 if (xio->subfn || xio->dptr || xio->dlen ||
2169 xio->block || xio->sectcnt)
2170 return (EINVAL);
2171 break;
2172
2173 case XYCMD_RD: /* read / write sectors (up to XD_IOCMD_MAXS) */
2174 case XYCMD_WR:
2175 if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS ||
2176 xio->sectcnt * XYFM_BPS != xio->dlen || xio->dptr == NULL)
2177 return (EINVAL);
2178 break;
2179
2180 case XYCMD_SK: /* seek: doesn't seem useful to export this */
2181 return (EINVAL);
2182
2185 default:
2186 return (EINVAL); /* ??? */
2187 }
2188
2189 xycsc = xy->parent;
2190
2191 /* create DVMA buffer for request if needed */
2192 if (xio->dlen) {
2193 if ((error = xy_dmamem_alloc(xycsc->dmatag, xycsc->auxmap,
2194 &seg, &rseg,
2195 xio->dlen, &buf,
2196 (bus_addr_t *)&dvmabuf)) != 0) {
2197 return (error);
2198 }
2199
2200 if (xio->cmd == XYCMD_WR) {
2201 if ((error = copyin(xio->dptr, buf, xio->dlen)) != 0) {
2202 bus_dmamem_unmap(xycsc->dmatag, buf, xio->dlen);
2203 bus_dmamem_free(xycsc->dmatag, &seg, rseg);
2204 return (error);
2205 }
2206 }
2207 }
2208 /* do it! */
2209
2210 error = 0;
2211 s = splbio();
2212 rqno = xyc_cmd(xycsc, xio->cmd, xio->subfn, xy->xy_drive, xio->block,
2213 xio->sectcnt, dvmabuf, XY_SUB_WAIT);
2214 if (rqno == XY_ERR_FAIL) {
2215 error = EIO;
2216 goto done;
2217 }
2218 xio->errno = xycsc->ciorq->errno;
2219 xio->tries = xycsc->ciorq->tries;
2220 XYC_DONE(xycsc, dummy);
2221
2222 if (xio->cmd == XYCMD_RD)
2223 error = copyout(buf, xio->dptr, xio->dlen);
2224
2225 done:
2226 splx(s);
2227 if (dvmabuf) {
2228 xy_dmamem_free(xycsc->dmatag, xycsc->auxmap, &seg, rseg,
2229 xio->dlen, buf);
2230 }
2231 return (error);
2232 }
2233
2234 /*
2235 * xyc_e2str: convert error code number into an error string
2236 */
2237 char *
2238 xyc_e2str(no)
2239 int no;
2240 {
2241 switch (no) {
2242 case XY_ERR_FAIL:
2243 return ("Software fatal error");
2244 case XY_ERR_DERR:
2245 return ("DOUBLE ERROR");
2246 case XY_ERR_AOK:
2247 return ("Successful completion");
2248 case XY_ERR_IPEN:
2249 return("Interrupt pending");
2250 case XY_ERR_BCFL:
2251 return("Busy conflict");
2252 case XY_ERR_TIMO:
2253 return("Operation timeout");
2254 case XY_ERR_NHDR:
2255 return("Header not found");
2256 case XY_ERR_HARD:
2257 return("Hard ECC error");
2258 case XY_ERR_ICYL:
2259 return("Illegal cylinder address");
2260 case XY_ERR_ISEC:
2261 return("Illegal sector address");
2262 case XY_ERR_SMAL:
2263 return("Last sector too small");
2264 case XY_ERR_SACK:
2265 return("Slave ACK error (non-existent memory)");
2266 case XY_ERR_CHER:
2267 return("Cylinder and head/header error");
2268 case XY_ERR_SRTR:
2269 return("Auto-seek retry successful");
2270 case XY_ERR_WPRO:
2271 return("Write-protect error");
2272 case XY_ERR_UIMP:
2273 return("Unimplemented command");
2274 case XY_ERR_DNRY:
2275 return("Drive not ready");
2276 case XY_ERR_SZER:
2277 return("Sector count zero");
2278 case XY_ERR_DFLT:
2279 return("Drive faulted");
2280 case XY_ERR_ISSZ:
2281 return("Illegal sector size");
2282 case XY_ERR_SLTA:
2283 return("Self test A");
2284 case XY_ERR_SLTB:
2285 return("Self test B");
2286 case XY_ERR_SLTC:
2287 return("Self test C");
2288 case XY_ERR_SOFT:
2289 return("Soft ECC error");
2290 case XY_ERR_SFOK:
2291 return("Soft ECC error recovered");
2292 case XY_ERR_IHED:
2293 return("Illegal head");
2294 case XY_ERR_DSEQ:
2295 return("Disk sequencer error");
2296 case XY_ERR_SEEK:
2297 return("Seek error");
2298 default:
2299 return ("Unknown error");
2300 }
2301 }
2302
2303 int
2304 xyc_entoact(errno)
2306 int errno;
2307
2308 {
2309 switch (errno) {
2310 case XY_ERR_FAIL: case XY_ERR_DERR: case XY_ERR_IPEN:
2311 case XY_ERR_BCFL: case XY_ERR_ICYL: case XY_ERR_ISEC:
2312 case XY_ERR_UIMP: case XY_ERR_SZER: case XY_ERR_ISSZ:
2313 case XY_ERR_SLTA: case XY_ERR_SLTB: case XY_ERR_SLTC:
2314 case XY_ERR_IHED: case XY_ERR_SACK: case XY_ERR_SMAL:
2315
2316 return(XY_ERA_PROG); /* program error ! */
2317
2318 case XY_ERR_TIMO: case XY_ERR_NHDR: case XY_ERR_HARD:
2319 case XY_ERR_DNRY: case XY_ERR_CHER: case XY_ERR_SEEK:
2320 case XY_ERR_SOFT:
2321
2322 return(XY_ERA_HARD); /* hard error, retry */
2323
2324 case XY_ERR_DFLT: case XY_ERR_DSEQ:
2325
2326 return(XY_ERA_RSET); /* hard error reset */
2327
2328 case XY_ERR_SRTR: case XY_ERR_SFOK: case XY_ERR_AOK:
2329
2330 return(XY_ERA_SOFT); /* an FYI error */
2331
2332 case XY_ERR_WPRO:
2333
2334 return(XY_ERA_WPRO); /* write protect */
2335 }
2336
2337 return(XY_ERA_PROG); /* ??? */
2338 }
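/*
 * example (informational only) of how the two tables above work together
 * in the error paths earlier in this file: classify an error code, then
 * print its human-readable form.
 *
 *	if (xyc_entoact(XY_ERR_HARD) == XY_ERA_HARD)
 *		printf("retryable error: %s\n", xyc_e2str(XY_ERR_HARD));
 *
 * prints "retryable error: Hard ECC error".
 */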
2339