1 /*	$NetBSD: xy.c,v 1.84 2024/12/21 17:40:11 tsutsui Exp $	*/
2
3 /*
4 * Copyright (c) 1995 Charles D. Cranor
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 /*
29 *
30 * x y . c x y l o g i c s 4 5 0 / 4 5 1 s m d d r i v e r
31 *
32 * author: Chuck Cranor <chuck@netbsd>
33 * id: &Id: xy.c,v 1.1 1995/09/25 20:35:14 chuck Exp &
34 * started: 14-Sep-95
35 * references: [1] Xylogics Model 753 User's Manual
36 * part number: 166-753-001, Revision B, May 21, 1988.
37 * "Your Partner For Performance"
38 * [2] other NetBSD disk device drivers
39 * [3] Xylogics Model 450 User's Manual
40 * part number: 166-017-001, Revision B, 1983.
41 * [4] Addendum to Xylogics Model 450 Disk Controller User's
42 * Manual, Jan. 1985.
43 * [5] The 451 Controller, Rev. B3, September 2, 1986.
44 * [6] David Jones <dej (at) achilles.net>'s unfinished 450/451 driver
45 *
46 */
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: xy.c,v 1.84 2024/12/21 17:40:11 tsutsui Exp $");
50
51 #undef XYC_DEBUG /* full debug */
52 #undef XYC_DIAG /* extra sanity checks */
53 #if defined(DIAGNOSTIC) && !defined(XYC_DIAG)
54 #define XYC_DIAG /* link in with master DIAG option */
55 #endif
56
57 #include <sys/param.h>
58 #include <sys/proc.h>
59 #include <sys/systm.h>
60 #include <sys/kernel.h>
61 #include <sys/file.h>
62 #include <sys/stat.h>
63 #include <sys/ioctl.h>
64 #include <sys/buf.h>
65 #include <sys/bufq.h>
66 #include <sys/uio.h>
67 #include <sys/kmem.h>
68 #include <sys/device.h>
69 #include <sys/disklabel.h>
70 #include <sys/disk.h>
71 #include <sys/syslog.h>
72 #include <sys/dkbad.h>
73 #include <sys/conf.h>
74 #include <sys/kauth.h>
75
76 #include <uvm/uvm_extern.h>
77
78 #include <dev/sun/disklabel.h>
79
80 #include <machine/autoconf.h>
81 #include <machine/dvma.h>
82
83 #include <sun3/dev/xyreg.h>
84 #include <sun3/dev/xyvar.h>
85 #include <sun3/dev/xio.h>
86
87 #include "ioconf.h"
88 #include "locators.h"
89
90 /*
91 * Print a complaint when no xy children were specified
92 * in the config file. Better than a link error...
93 *
94 * XXX: Some folks say this driver should be split in two,
95 * but that seems pointless with ONLY one type of child.
96 */
97 #include "xy.h"
98 #if NXY == 0
99 #error "xyc but no xy?"
100 #endif
101
102 /*
103 * macros
104 */
105
106 /*
107 * XYC_GO: start iopb ADDR (DVMA addr in a u_long) on XYC
108 */
109 #define XYC_GO(XYC, ADDR) \
110 do { \
111 (XYC)->xyc_addr_lo = ((ADDR) & 0xff); \
112 (ADDR) = ((ADDR) >> 8); \
113 (XYC)->xyc_addr_hi = ((ADDR) & 0xff); \
114 (ADDR) = ((ADDR) >> 8); \
115 (XYC)->xyc_reloc_lo = ((ADDR) & 0xff); \
116 (ADDR) = ((ADDR) >> 8); \
117 (XYC)->xyc_reloc_hi = (ADDR); \
118 (XYC)->xyc_csr = XYC_GBSY; /* go! */ \
119 } while (/* CONSTCOND */ 0)
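/*
 * e.g. (address chosen purely for illustration): for an iopb at DVMA
 * address 0x00fe1234, XYC_GO loads xyc_addr_lo = 0x34, xyc_addr_hi = 0x12,
 * xyc_reloc_lo = 0xfe and xyc_reloc_hi = 0x00 (the 24-bit address split
 * into bytes, lowest first), then sets XYC_GBSY to start the controller
 * on that iopb.
 */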
120
121 /*
122 * XYC_DONE: don't need IORQ, get error code and free (done after xyc_cmd)
123 */
124
125 #define XYC_DONE(SC,ER) \
126 do { \
127 if ((ER) == XY_ERR_AOK) { \
128 (ER) = (SC)->ciorq->errno; \
129 (SC)->ciorq->mode = XY_SUB_FREE; \
130 wakeup((SC)->ciorq); \
131 } \
132 } while (/* CONSTCOND */ 0)
133
134 /*
135 * XYC_ADVANCE: advance iorq's pointers by a number of sectors
136 */
137
138 #define XYC_ADVANCE(IORQ, N) \
139 do { \
140 if (N) { \
141 (IORQ)->sectcnt -= (N); \
142 (IORQ)->blockno += (N); \
143 (IORQ)->dbuf += ((N) * XYFM_BPS); \
144 } \
145 } while (/* CONSTCOND */ 0)
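/*
 * e.g. XYC_ADVANCE(iorq, 2) skips past two sectors that have already been
 * transferred: sectcnt drops by 2, blockno advances by 2, and dbuf moves
 * forward 2 * XYFM_BPS bytes.
 */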
146
147 /*
148 * note - addresses you can sleep on:
149 * [1] & of xy_softc's "state" (waiting for a chance to attach a drive)
150 * [2] & an iorq (waiting for an XY_SUB_WAIT iorq to finish)
151 */
152
153
154 /*
155 * function prototypes
156 * "xyc_*" functions are internal, all others are external interfaces
157 */
158
159 /* internals */
160 struct xy_iopb *xyc_chain(struct xyc_softc *, struct xy_iorq *);
161 int xyc_cmd(struct xyc_softc *, int, int, int, int, int, char *, int);
162 const char *xyc_e2str(int);
163 int xyc_entoact(int);
164 int xyc_error(struct xyc_softc *, struct xy_iorq *, struct xy_iopb *, int);
165 int xyc_ioctlcmd(struct xy_softc *, dev_t dev, struct xd_iocmd *);
166 void xyc_perror(struct xy_iorq *, struct xy_iopb *, int);
167 int xyc_piodriver(struct xyc_softc *, struct xy_iorq *);
168 int xyc_remove_iorq(struct xyc_softc *);
169 int xyc_reset(struct xyc_softc *, int, struct xy_iorq *, int,
170 struct xy_softc *);
171 inline void xyc_rqinit(struct xy_iorq *, struct xyc_softc *, struct xy_softc *,
172 int, u_long, int, void *, struct buf *);
173 void xyc_rqtopb(struct xy_iorq *, struct xy_iopb *, int, int);
174 void xyc_start(struct xyc_softc *, struct xy_iorq *);
175 int xyc_startbuf(struct xyc_softc *, struct xy_softc *, struct buf *);
176 int xyc_submit_iorq(struct xyc_softc *, struct xy_iorq *, int);
177 void xyc_tick(void *);
178 int xyc_unbusy(struct xyc *, int);
179 void xyc_xyreset(struct xyc_softc *, struct xy_softc *);
180
181 /* machine interrupt hook */
182 int xycintr(void *);
183
184 /* autoconf */
185 static int xycmatch(device_t, cfdata_t, void *);
186 static void xycattach(device_t, device_t, void *);
187 static int xyc_print(void *, const char *);
188
189 static int xymatch(device_t, cfdata_t, void *);
190 static void xyattach(device_t, device_t, void *);
191 static void xy_init(struct xy_softc *);
192
193 static void xydummystrat(struct buf *);
194 int xygetdisklabel(struct xy_softc *, void *);
195
196 /*
197 * cfattach's: device driver interface to autoconfig
198 */
199
200 CFATTACH_DECL_NEW(xyc, sizeof(struct xyc_softc),
201 xycmatch, xycattach, NULL, NULL);
202
203 CFATTACH_DECL_NEW(xy, sizeof(struct xy_softc),
204 xymatch, xyattach, NULL, NULL);
205
206 struct xyc_attach_args { /* this is the "aux" args to xyattach */
207 int driveno; /* unit number */
208 };
209
210 static dev_type_open(xyopen);
211 static dev_type_close(xyclose);
212 static dev_type_read(xyread);
213 static dev_type_write(xywrite);
214 static dev_type_ioctl(xyioctl);
215 static dev_type_strategy(xystrategy);
216 static dev_type_dump(xydump);
217 static dev_type_size(xysize);
218
219 const struct bdevsw xy_bdevsw = {
220 .d_open = xyopen,
221 .d_close = xyclose,
222 .d_strategy = xystrategy,
223 .d_ioctl = xyioctl,
224 .d_dump = xydump,
225 .d_psize = xysize,
226 .d_discard = nodiscard,
227 .d_flag = D_DISK
228 };
229
230 const struct cdevsw xy_cdevsw = {
231 .d_open = xyopen,
232 .d_close = xyclose,
233 .d_read = xyread,
234 .d_write = xywrite,
235 .d_ioctl = xyioctl,
236 .d_stop = nostop,
237 .d_tty = notty,
238 .d_poll = nopoll,
239 .d_mmap = nommap,
240 .d_kqfilter = nokqfilter,
241 .d_discard = nodiscard,
242 .d_flag = D_DISK
243 };
244
245 /*
246 * dkdriver
247 */
248
249 struct dkdriver xydkdriver = {
250 .d_strategy = xystrategy
251 };
252
253 /*
254 * start: disk label fix code (XXX)
255 */
256
257 static void *xy_labeldata;
258
259 static void
260 xydummystrat(struct buf *bp)
261 {
262
263 if (bp->b_bcount != XYFM_BPS)
264 panic("%s: b_bcount", __func__);
265 memcpy(bp->b_data, xy_labeldata, XYFM_BPS);
266 bp->b_oflags |= BO_DONE;
267 bp->b_cflags &= ~BC_BUSY;
268 }
269
270 int
271 xygetdisklabel(struct xy_softc *xy, void *b)
272 {
273 const char *err;
274 struct sun_disklabel *sdl;
275
276 /* We already have the label data in `b'; setup for dummy strategy */
277 xy_labeldata = b;
278
279 /* Required parameter for readdisklabel() */
280 xy->sc_dk.dk_label->d_secsize = XYFM_BPS;
281
282 err = readdisklabel(MAKEDISKDEV(0, device_unit(xy->sc_dev), RAW_PART),
283 xydummystrat, xy->sc_dk.dk_label, xy->sc_dk.dk_cpulabel);
284 if (err) {
285 printf("%s: %s\n", device_xname(xy->sc_dev), err);
286 return XY_ERR_FAIL;
287 }
288
289 /* Ok, we have the label; fill in `pcyl' if there's SunOS magic */
290 sdl = (struct sun_disklabel *)xy->sc_dk.dk_cpulabel->cd_block;
291 if (sdl->sl_magic == SUN_DKMAGIC)
292 xy->pcyl = sdl->sl_pcyl;
293 else {
294 printf("%s: WARNING: no `pcyl' in disk label.\n",
295 device_xname(xy->sc_dev));
296 xy->pcyl = xy->sc_dk.dk_label->d_ncylinders +
297 xy->sc_dk.dk_label->d_acylinders;
298 printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n",
299 device_xname(xy->sc_dev), xy->pcyl);
300 }
301
302 xy->ncyl = xy->sc_dk.dk_label->d_ncylinders;
303 xy->acyl = xy->sc_dk.dk_label->d_acylinders;
304 xy->nhead = xy->sc_dk.dk_label->d_ntracks;
305 xy->nsect = xy->sc_dk.dk_label->d_nsectors;
306 xy->sectpercyl = xy->nhead * xy->nsect;
307 xy->sc_dk.dk_label->d_secsize = XYFM_BPS; /* not handled by
308 * sun->bsd */
309 return XY_ERR_AOK;
310 }
311
312 /*
313 * end: disk label fix code (XXX)
314 */
315
316 /*
317 * a u t o c o n f i g f u n c t i o n s
318 */
319
320 /*
321 * xycmatch: determine if xyc is present or not. we do a
322 * soft reset to detect the xyc.
323 */
324 static int
325 xycmatch(device_t parent, cfdata_t cf, void *aux)
326 {
327 struct confargs *ca = aux;
328
329 /* No default VME address. */
330 if (ca->ca_paddr == -1)
331 return 0;
332
333 /* Make sure something is there... */
334 if (bus_peek(ca->ca_bustype, ca->ca_paddr + 5, 1) == -1)
335 return 0;
336
337 /* Default interrupt priority. */
338 if (ca->ca_intpri == -1)
339 ca->ca_intpri = 2;
340
341 return 1;
342 }
343
344 /*
345 * xycattach: attach controller
346 */
347 static void
348 xycattach(device_t parent, device_t self, void *aux)
349 {
350 struct xyc_softc *xyc = device_private(self);
351 struct confargs *ca = aux;
352 struct xyc_attach_args xa;
353 int lcv, err, res, pbsz;
354 void *tmp, *tmp2;
355 u_long ultmp;
356
357 /* get addressing and intr level stuff from autoconfig and load it
358 * into our xyc_softc. */
359
360 xyc->sc_dev = self;
361 xyc->xyc = (struct xyc *)bus_mapin(ca->ca_bustype, ca->ca_paddr,
362 sizeof(struct xyc));
363 xyc->bustype = ca->ca_bustype;
364 xyc->ipl = ca->ca_intpri;
365 xyc->vector = ca->ca_intvec;
366 xyc->no_ols = 0; /* XXX should be from config */
367
368 for (lcv = 0; lcv < XYC_MAXDEV; lcv++)
369 xyc->sc_drives[lcv] = NULL;
370
371 /*
372 * allocate and zero buffers
373 * check boundaries of the KVA's ... all IOPBs must reside in
374 * the same 64K region.
375 */
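	/*
	 * worked example (sizes made up for illustration): if dvma_malloc()
	 * returned KVA 0x0ffff000 and pbsz were 0x1c00, the start would lie
	 * in 64K segment 0x0fff0000 but start+pbsz in segment 0x10000000;
	 * the comparison below catches that and the allocation is retried.
	 */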
376
377 pbsz = XYC_MAXIOPB * sizeof(struct xy_iopb);
378 tmp = tmp2 = (struct xy_iopb *)dvma_malloc(pbsz); /* KVA */
379 ultmp = (u_long)tmp;
380 if ((ultmp & 0xffff0000) != ((ultmp + pbsz) & 0xffff0000)) {
381 tmp = (struct xy_iopb *)dvma_malloc(pbsz); /* retry! */
382 dvma_free(tmp2, pbsz);
383 ultmp = (u_long) tmp;
384 if ((ultmp & 0xffff0000) != ((ultmp + pbsz) & 0xffff0000)) {
385 aprint_error(": can't alloc IOPB mem in 64K\n");
386 return;
387 }
388 }
389 memset(tmp, 0, pbsz);
390 xyc->iopbase = tmp;
391 xyc->dvmaiopb =
392 (struct xy_iopb *)dvma_kvtopa(xyc->iopbase, xyc->bustype);
393 xyc->reqs = kmem_zalloc(XYC_MAXIOPB * sizeof(struct xy_iorq),
394 KM_SLEEP);
395
396 /*
397 * init iorq to iopb pointers, and non-zero fields in the
398 * iopb which never change.
399 */
400
401 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) {
402 xyc->xy_chain[lcv] = NULL;
403 xyc->reqs[lcv].iopb = &xyc->iopbase[lcv];
404 xyc->iopbase[lcv].asr = 1; /* always the same */
405 xyc->iopbase[lcv].eef = 1; /* always the same */
406 xyc->iopbase[lcv].ecm = XY_ECM; /* always the same */
407 xyc->iopbase[lcv].aud = 1; /* always the same */
408 xyc->iopbase[lcv].relo = 1; /* always the same */
409 xyc->iopbase[lcv].thro = XY_THRO;/* always the same */
410 }
411 xyc->ciorq = &xyc->reqs[XYC_CTLIOPB]; /* short hand name */
412 xyc->ciopb = &xyc->iopbase[XYC_CTLIOPB]; /* short hand name */
413 xyc->xy_hand = 0;
414
415	/* read controller parameters and ensure we have a 450/451 */
416
417 err = xyc_cmd(xyc, XYCMD_ST, 0, 0, 0, 0, 0, XY_SUB_POLL);
418 res = xyc->ciopb->ctyp;
419 XYC_DONE(xyc, err);
420 if (res != XYCT_450) {
421 if (err)
422 aprint_error(": %s: ", xyc_e2str(err));
423 aprint_error(": doesn't identify as a 450/451\n");
424 return;
425 }
426 aprint_normal(": Xylogics 450/451");
427 if (xyc->no_ols)
428		/* the 450 doesn't do overlapped seeks right */
429 aprint_normal(" [OLS disabled]");
430 aprint_normal("\n");
431 if (err) {
432 aprint_error_dev(self, "error: %s\n", xyc_e2str(err));
433 return;
434 }
435 if ((xyc->xyc->xyc_csr & XYC_ADRM) == 0) {
436 aprint_error_dev(self, "24 bit addressing turned off\n");
437 printf("please set hardware jumpers JM1-JM2=in, JM3-JM4=out\n");
438 printf("to enable 24 bit mode and this driver\n");
439 return;
440 }
441
442 /* link in interrupt with higher level software */
443 isr_add_vectored(xycintr, xyc, ca->ca_intpri, ca->ca_intvec);
444 evcnt_attach_dynamic(&xyc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
445 device_xname(self), "intr");
446
447 callout_init(&xyc->sc_tick_ch, 0);
448
449 /* now we must look for disks using autoconfig */
450 for (xa.driveno = 0; xa.driveno < XYC_MAXDEV; xa.driveno++)
451 (void)config_found(self, (void *)&xa, xyc_print, CFARGS_NONE);
452
453 /* start the watchdog clock */
454 callout_reset(&xyc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xyc);
455 }
456
457 static int
458 xyc_print(void *aux, const char *name)
459 {
460 struct xyc_attach_args *xa = aux;
461
462 if (name != NULL)
463 aprint_normal("%s: ", name);
464
465 if (xa->driveno != -1)
466 aprint_normal(" drive %d", xa->driveno);
467
468 return UNCONF;
469 }
470
471 /*
472 * xymatch: probe for disk.
473 *
474 * note: we almost always say disk is present. this allows us to
475 * spin up and configure a disk after the system is booted (we can
476 * call xyattach!). Also, wire down the relationship between the
477 * xy* and xyc* devices, to simplify boot device identification.
478 */
479 static int
480 xymatch(device_t parent, cfdata_t cf, void *aux)
481 {
482 struct xyc_attach_args *xa = aux;
483 int xy_unit;
484
485 /* Match only on the "wired-down" controller+disk. */
486 xy_unit = device_unit(parent) * 2 + xa->driveno;
487 if (cf->cf_unit != xy_unit)
488 return 0;
489
490 return 1;
491 }
492
493 /*
494 * xyattach: attach a disk.
495 */
496 static void
497 xyattach(device_t parent, device_t self, void *aux)
498 {
499 struct xy_softc *xy = device_private(self);
500 struct xyc_softc *xyc = device_private(parent);
501 struct xyc_attach_args *xa = aux;
502
503 xy->sc_dev = self;
504 aprint_normal("\n");
505
506 /*
507 * Always re-initialize the disk structure. We want statistics
508 * to start with a clean slate.
509 */
510 memset(&xy->sc_dk, 0, sizeof(xy->sc_dk));
511 disk_init(&xy->sc_dk, device_xname(self), &xydkdriver);
512
513 xy->state = XY_DRIVE_UNKNOWN; /* to start */
514 xy->flags = 0;
515 xy->parent = xyc;
516
517 /* init queue of waiting bufs */
518 bufq_alloc(&xy->xyq, "disksort", BUFQ_SORT_RAWBLOCK);
519 xy->xyrq = &xyc->reqs[xa->driveno];
520
521 xy->xy_drive = xa->driveno;
522 xyc->sc_drives[xa->driveno] = xy;
523
524 /* Do init work common to attach and open. */
525 xy_init(xy);
526 }
527
528 /*
529 * end of autoconfig functions
530 */
531
532 /*
533 * Initialize a disk. This can be called from both autoconf and
534 * also from xyopen/xystrategy.
535 */
536 static void
537 xy_init(struct xy_softc *xy)
538 {
539 struct xyc_softc *xyc;
540 struct dkbad *dkb;
541 void *dvmabuf;
542 int err, spt, mb, blk, lcv, fullmode, newstate;
543
544 xyc = xy->parent;
545 xy->state = XY_DRIVE_ATTACHING;
546 newstate = XY_DRIVE_UNKNOWN;
547 fullmode = (cold) ? XY_SUB_POLL : XY_SUB_WAIT;
548 dvmabuf = dvma_malloc(XYFM_BPS);
549
550 /* first try and reset the drive */
551
552 err = xyc_cmd(xyc, XYCMD_RST, 0, xy->xy_drive, 0, 0, 0, fullmode);
553 XYC_DONE(xyc, err);
554 if (err == XY_ERR_DNRY) {
555 printf("%s: drive %d: off-line\n",
556 device_xname(xy->sc_dev), xy->xy_drive);
557 goto done;
558 }
559 if (err) {
560 printf("%s: ERROR 0x%02x (%s)\n",
561 device_xname(xy->sc_dev), err, xyc_e2str(err));
562 goto done;
563 }
564 printf("%s: drive %d ready",
565 device_xname(xy->sc_dev), xy->xy_drive);
566
567 /*
568 * now set drive parameters (to semi-bogus values) so we can read the
569 * disk label.
570 */
571 xy->pcyl = xy->ncyl = 1;
572 xy->acyl = 0;
573 xy->nhead = 1;
574 xy->nsect = 1;
575 xy->sectpercyl = 1;
576 for (lcv = 0; lcv < 126; lcv++) /* init empty bad144 table */
577 xy->dkb.bt_bad[lcv].bt_cyl =
578 xy->dkb.bt_bad[lcv].bt_trksec = 0xffff;
579
580 /* read disk label */
581 for (xy->drive_type = 0; xy->drive_type <= XYC_MAXDT;
582 xy->drive_type++) {
583 err = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, 0, 1,
584 dvmabuf, fullmode);
585 XYC_DONE(xyc, err);
586 if (err == XY_ERR_AOK)
587 break;
588 }
589
590 if (err != XY_ERR_AOK) {
591 printf("%s: reading disk label failed: %s\n",
592 device_xname(xy->sc_dev), xyc_e2str(err));
593 goto done;
594 }
595 printf("%s: drive type %d\n",
596 device_xname(xy->sc_dev), xy->drive_type);
597
598 newstate = XY_DRIVE_NOLABEL;
599
600 xy->hw_spt = spt = 0; /* XXX needed ? */
601 /* Attach the disk: must be before getdisklabel to malloc label */
602 disk_attach(&xy->sc_dk);
603
604 if (xygetdisklabel(xy, dvmabuf) != XY_ERR_AOK)
605 goto done;
606
607 /* inform the user of what is up */
608 printf("%s: <%s>, pcyl %d\n",
609 device_xname(xy->sc_dev),
610 (char *)dvmabuf, xy->pcyl);
611 mb = xy->ncyl * (xy->nhead * xy->nsect) / (1048576 / XYFM_BPS);
612 printf("%s: %dMB, %d cyl, %d head, %d sec\n",
613 device_xname(xy->sc_dev), mb, xy->ncyl, xy->nhead, xy->nsect);
614
615 /*
616 * 450/451 stupidity: the drive type is encoded into the format
617 * of the disk. the drive type in the IOPB must match the drive
618 * type in the format, or you will not be able to do I/O to the
619 * disk (you get header not found errors). if you have two drives
620 * of different sizes that have the same drive type in their
621 * formatting then you are out of luck.
622 *
623 * this problem was corrected in the 753/7053.
624 */
625
626 for (lcv = 0 ; lcv < XYC_MAXDEV ; lcv++) {
627 struct xy_softc *oxy;
628
629 oxy = xyc->sc_drives[lcv];
630 if (oxy == NULL || oxy == xy)
631 continue;
632 if (oxy->drive_type != xy->drive_type)
633 continue;
634 if (xy->nsect != oxy->nsect || xy->pcyl != oxy->pcyl ||
635 xy->nhead != oxy->nhead) {
636 printf("%s: %s and %s must be the same size!\n",
637 device_xname(xyc->sc_dev),
638 device_xname(xy->sc_dev),
639 device_xname(oxy->sc_dev));
640 panic("xy drive size mismatch");
641 }
642 }
643
644
645 /* now set the real drive parameters! */
646 blk = (xy->nsect - 1) +
647 ((xy->nhead - 1) * xy->nsect) +
648 ((xy->pcyl - 1) * xy->nsect * xy->nhead);
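	/*
	 * e.g. for an assumed geometry of nsect=32, nhead=9, pcyl=1272:
	 * blk = 31 + 8*32 + 1271*32*9 = 366335, the C/H/S-linearized
	 * address of the very last sector on the drive.
	 */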
649 err = xyc_cmd(xyc, XYCMD_SDS, 0, xy->xy_drive, blk, 0, 0, fullmode);
650 XYC_DONE(xyc, err);
651 if (err) {
652 printf("%s: write drive size failed: %s\n",
653 device_xname(xy->sc_dev), xyc_e2str(err));
654 goto done;
655 }
656 newstate = XY_DRIVE_ONLINE;
657
658 /*
659 * read bad144 table. this table resides on the first sector of the
660 * last track of the disk (i.e. second cyl of "acyl" area).
661 */
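	/*
	 * e.g. with the same assumed geometry (nsect=32, nhead=9, ncyl=1270,
	 * acyl=2): blk = 1271*(9*32) + 8*32 = 366304, the first sector of
	 * the last track on the disk.
	 */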
662 blk = (xy->ncyl + xy->acyl - 1) * (xy->nhead * xy->nsect) +
663 /* last cyl */
664 (xy->nhead - 1) * xy->nsect; /* last head */
665 err = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, blk, 1,
666 dvmabuf, fullmode);
667 XYC_DONE(xyc, err);
668 if (err) {
669 printf("%s: reading bad144 failed: %s\n",
670 device_xname(xy->sc_dev), xyc_e2str(err));
671 goto done;
672 }
673
674 /* check dkbad for sanity */
675 dkb = (struct dkbad *)dvmabuf;
676 for (lcv = 0; lcv < 126; lcv++) {
677 if ((dkb->bt_bad[lcv].bt_cyl == 0xffff ||
678 dkb->bt_bad[lcv].bt_cyl == 0) &&
679 dkb->bt_bad[lcv].bt_trksec == 0xffff)
680 continue; /* blank */
681 if (dkb->bt_bad[lcv].bt_cyl >= xy->ncyl)
682 break;
683 if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xy->nhead)
684 break;
685 if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xy->nsect)
686 break;
687 }
688 if (lcv != 126) {
689 printf("%s: warning: invalid bad144 sector!\n",
690 device_xname(xy->sc_dev));
691 } else {
692 memcpy(&xy->dkb, dvmabuf, XYFM_BPS);
693 }
694
695 done:
696 xy->state = newstate;
697 dvma_free(dvmabuf, XYFM_BPS);
698 }
699
700 /*
701 * { b , c } d e v s w f u n c t i o n s
702 */
703
704 /*
705 * xyclose: close device
706 */
707 static int
708 xyclose(dev_t dev, int flag, int fmt, struct lwp *l)
709 {
710 struct xy_softc *xy = device_lookup_private(&xy_cd, DISKUNIT(dev));
711 int part = DISKPART(dev);
712
713 /* clear mask bits */
714
715 switch (fmt) {
716 case S_IFCHR:
717 xy->sc_dk.dk_copenmask &= ~(1 << part);
718 break;
719 case S_IFBLK:
720 xy->sc_dk.dk_bopenmask &= ~(1 << part);
721 break;
722 }
723 xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask;
724
725 return 0;
726 }
727
728 /*
729 * xydump: crash dump system
730 */
731 static int
732 xydump(dev_t dev, daddr_t blkno, void *va, size_t sz)
733 {
734 int unit, part;
735 struct xy_softc *xy;
736
737 unit = DISKUNIT(dev);
738 part = DISKPART(dev);
739
740 xy = device_lookup_private(&xy_cd, unit);
741 if (xy == NULL)
742 return ENXIO;
743
744 printf("%s%c: crash dump not supported (yet)\n",
745 device_xname(xy->sc_dev), 'a' + part);
746
747 return ENXIO;
748
749 /* outline: globals: "dumplo" == sector number of partition to start
750 * dump at (convert to physical sector with partition table)
751 * "dumpsize" == size of dump in clicks "physmem" == size of physical
752 * memory (clicks, ctob() to get bytes) (normal case: dumpsize ==
753 * physmem)
754 *
755 * dump a copy of physical memory to the dump device starting at sector
756 * "dumplo" in the swap partition (make sure > 0). map in pages as
757 * we go. use polled I/O.
758 *
759 * XXX how to handle NON_CONTIG?
760 */
761 }
762
763 static enum kauth_device_req
764 xy_getkauthreq(u_char cmd)
765 {
766 enum kauth_device_req req;
767
768 switch (cmd) {
769 case XYCMD_WR:
770 case XYCMD_WTH:
771 case XYCMD_WFM:
772 case XYCMD_WRH:
773 req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_WRITE;
774 break;
775
776 case XYCMD_RD:
777 case XYCMD_RTH:
778 case XYCMD_RDH:
779 req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_READ;
780 break;
781
782 case XYCMD_RDS:
783 case XYCMD_MBD:
784 req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_READCONF;
785 break;
786
787 case XYCMD_RST:
788 case XYCMD_SDS:
789 case XYCMD_MBL:
790 req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_WRITECONF;
791 break;
792
793 case XYCMD_NOP:
794 case XYCMD_SK:
795 case XYCMD_ST:
796 case XYCMD_R:
797 default:
798 req = 0;
799 break;
800 }
801
802 return req;
803 }
804
805 /*
806 * xyioctl: ioctls on XY drives. based on ioctl's of other netbsd disks.
807 */
808 static int
809 xyioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
810 {
811 struct xy_softc *xy;
812 struct xd_iocmd *xio;
813 int error, s, unit;
814
815 unit = DISKUNIT(dev);
816
817 xy = device_lookup_private(&xy_cd, unit);
818 if (xy == NULL)
819 return ENXIO;
820
821 error = disk_ioctl(&xy->sc_dk, dev, cmd, addr, flag, l);
822 if (error != EPASSTHROUGH)
823 return error;
824
825 /* switch on ioctl type */
826
827 switch (cmd) {
828 case DIOCSBAD: /* set bad144 info */
829 if ((flag & FWRITE) == 0)
830 return EBADF;
831 s = splbio();
832 memcpy(&xy->dkb, addr, sizeof(xy->dkb));
833 splx(s);
834 return 0;
835
836 case DIOCSDINFO: /* set disk label */
837 if ((flag & FWRITE) == 0)
838 return EBADF;
839 error = setdisklabel(xy->sc_dk.dk_label,
840 (struct disklabel *)addr, /* xy->sc_dk.dk_openmask : */ 0,
841 xy->sc_dk.dk_cpulabel);
842 if (error == 0) {
843 if (xy->state == XY_DRIVE_NOLABEL)
844 xy->state = XY_DRIVE_ONLINE;
845 }
846 return error;
847
848 case DIOCWLABEL: /* change write status of disk label */
849 if ((flag & FWRITE) == 0)
850 return EBADF;
851 if (*(int *)addr)
852 xy->flags |= XY_WLABEL;
853 else
854 xy->flags &= ~XY_WLABEL;
855 return 0;
856
857 case DIOCWDINFO: /* write disk label */
858 if ((flag & FWRITE) == 0)
859 return EBADF;
860 error = setdisklabel(xy->sc_dk.dk_label,
861 (struct disklabel *)addr, /* xy->sc_dk.dk_openmask : */ 0,
862 xy->sc_dk.dk_cpulabel);
863 if (error == 0) {
864 if (xy->state == XY_DRIVE_NOLABEL)
865 xy->state = XY_DRIVE_ONLINE;
866
867 /* Simulate opening partition 0 so write succeeds. */
868 xy->sc_dk.dk_openmask |= (1 << 0);
869 error = writedisklabel(MAKEDISKDEV(major(dev),
870 DISKUNIT(dev), RAW_PART),
871 xystrategy, xy->sc_dk.dk_label,
872 xy->sc_dk.dk_cpulabel);
873 xy->sc_dk.dk_openmask =
874 xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask;
875 }
876 return error;
877
878 case DIOSXDCMD: {
879 enum kauth_device_req req;
880
881 xio = (struct xd_iocmd *)addr;
882 req = xy_getkauthreq(xio->cmd);
883 if ((error = kauth_authorize_device_passthru(l->l_cred,
884 dev, req, xio)) != 0)
885 return error;
886 return xyc_ioctlcmd(xy, dev, xio);
887 }
888
889 default:
890 return ENOTTY;
891 }
892 }
893
894 /*
895 * xyopen: open drive
896 */
897 static int
898 xyopen(dev_t dev, int flag, int fmt, struct lwp *l)
899 {
900 int err, unit, part, s;
901 struct xy_softc *xy;
902
903 /* first, could it be a valid target? */
904 unit = DISKUNIT(dev);
905 xy = device_lookup_private(&xy_cd, unit);
906 if (xy == NULL)
907 return ENXIO;
908 part = DISKPART(dev);
909 err = 0;
910
911 /*
912	 * If some other process is doing the init, sleep.
913 */
914 s = splbio();
915 while (xy->state == XY_DRIVE_ATTACHING) {
916 if (tsleep(&xy->state, PRIBIO, "xyopen", 0)) {
917 err = EINTR;
918 goto done;
919 }
920 }
921 /* Do we need to init the drive? */
922 if (xy->state == XY_DRIVE_UNKNOWN) {
923 xy_init(xy);
924 wakeup(&xy->state);
925 }
926 /* Was the init successful? */
927 if (xy->state == XY_DRIVE_UNKNOWN) {
928 err = EIO;
929 goto done;
930 }
931
932 /* check for partition */
933 if (part != RAW_PART &&
934 (part >= xy->sc_dk.dk_label->d_npartitions ||
935 xy->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
936 err = ENXIO;
937 goto done;
938 }
939
940 /* set open masks */
941 switch (fmt) {
942 case S_IFCHR:
943 xy->sc_dk.dk_copenmask |= (1 << part);
944 break;
945 case S_IFBLK:
946 xy->sc_dk.dk_bopenmask |= (1 << part);
947 break;
948 }
949 xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask;
950
951 done:
952 splx(s);
953 return err;
954 }
955
956 static int
957 xyread(dev_t dev, struct uio *uio, int flags)
958 {
959
960 return physio(xystrategy, NULL, dev, B_READ, minphys, uio);
961 }
962
963 static int
964 xywrite(dev_t dev, struct uio *uio, int flags)
965 {
966
967 return physio(xystrategy, NULL, dev, B_WRITE, minphys, uio);
968 }
969
970
971 /*
972 * xysize: return size of a partition for a dump
973 */
974
975 static int
976 xysize(dev_t dev)
977 {
978 struct xy_softc *xysc;
979 int unit, part, size, omask;
980
981 /* valid unit? */
982 unit = DISKUNIT(dev);
983 xysc = device_lookup_private(&xy_cd, unit);
984 if (xysc == NULL)
985 return -1;
986
987 part = DISKPART(dev);
988 omask = xysc->sc_dk.dk_openmask & (1 << part);
989
990 if (omask == 0 && xyopen(dev, 0, S_IFBLK, NULL) != 0)
991 return -1;
992
993 /* do it */
994 if (xysc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
995 size = -1; /* only give valid size for swap partitions */
996 else
997 size = xysc->sc_dk.dk_label->d_partitions[part].p_size *
998 (xysc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
999 if (omask == 0 && xyclose(dev, 0, S_IFBLK, NULL) != 0)
1000 return -1;
1001 return size;
1002 }
1003
1004 /*
1005 * xystrategy: buffering system interface to xy.
1006 */
1007 static void
1008 xystrategy(struct buf *bp)
1009 {
1010 struct xy_softc *xy;
1011 int s, unit;
1012 struct disklabel *lp;
1013 daddr_t blkno;
1014
1015 unit = DISKUNIT(bp->b_dev);
1016
1017 /* check for live device */
1018
1019 xy = device_lookup_private(&xy_cd, unit);
1020 if (xy == NULL ||
1021 bp->b_blkno < 0 ||
1022 (bp->b_bcount % xy->sc_dk.dk_label->d_secsize) != 0) {
1023 bp->b_error = EINVAL;
1024 goto done;
1025 }
1026
1027 /* There should always be an open first. */
1028 if (xy->state == XY_DRIVE_UNKNOWN) {
1029 bp->b_error = EIO;
1030 goto done;
1031 }
1032 if (xy->state != XY_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) {
1033 /* no I/O to unlabeled disks, unless raw partition */
1034 bp->b_error = EIO;
1035 goto done;
1036 }
1037 /* short circuit zero length request */
1038
1039 if (bp->b_bcount == 0)
1040 goto done;
1041
1042 /* check bounds with label (disksubr.c). Determine the size of the
1043 * transfer, and make sure it is within the boundaries of the
1044 * partition. Adjust transfer if needed, and signal errors or early
1045 * completion. */
1046
1047 lp = xy->sc_dk.dk_label;
1048
1049 if (bounds_check_with_label(&xy->sc_dk, bp,
1050 (xy->flags & XY_WLABEL) != 0) <= 0)
1051 goto done;
1052
1053 /*
1054 * Now convert the block number to absolute and put it in
1055 * terms of the device's logical block size.
1056 */
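	/*
	 * e.g. (assuming d_secsize == XYFM_BPS == DEV_BSIZE == 512): a
	 * request at b_blkno 2048 on a partition with p_offset 10240
	 * ends up with b_rawblkno 12288.
	 */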
1057 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
1058 if (DISKPART(bp->b_dev) != RAW_PART)
1059 blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;
1060
1061 bp->b_rawblkno = blkno;
1062
1063 /*
1064 * now we know we have a valid buf structure that we need to do I/O
1065 * on.
1066 */
1067
1068 s = splbio(); /* protect the queues */
1069
1070 bufq_put(xy->xyq, bp); /* XXX disksort_cylinder */
1071
1072 /* start 'em up */
1073
1074 xyc_start(xy->parent, NULL);
1075
1076 /* done! */
1077
1078 splx(s);
1079 return;
1080
1081 done:
1082 /* tells upper layers we are done with this buf */
1083 bp->b_resid = bp->b_bcount;
1084 biodone(bp);
1085 }
1086 /*
1087 * end of {b,c}devsw functions
1088 */
1089
1090 /*
1091 * i n t e r r u p t f u n c t i o n
1092 *
1093 * xycintr: hardware interrupt.
1094 */
1095 int
1096 xycintr(void *v)
1097 {
1098 struct xyc_softc *xycsc = v;
1099
1100 /* kick the event counter */
1101 xycsc->sc_intrcnt.ev_count++;
1102
1103 /* remove as many done IOPBs as possible */
1104 xyc_remove_iorq(xycsc);
1105
1106 /* start any iorq's already waiting */
1107 xyc_start(xycsc, NULL);
1108
1109 return 1;
1110 }
1111 /*
1112 * end of interrupt function
1113 */
1114
1115 /*
1116 * i n t e r n a l f u n c t i o n s
1117 */
1118
1119 /*
1120 * xyc_rqinit: fill out the fields of an I/O request
1121 */
1122
1123 inline void
1124 xyc_rqinit(struct xy_iorq *rq, struct xyc_softc *xyc, struct xy_softc *xy,
1125 int md, u_long blk, int cnt, void *db, struct buf *bp)
1126 {
1127
1128 rq->xyc = xyc;
1129 rq->xy = xy;
1130 rq->ttl = XYC_MAXTTL + 10;
1131 rq->mode = md;
1132 rq->tries = rq->errno = rq->lasterror = 0;
1133 rq->blockno = blk;
1134 rq->sectcnt = cnt;
1135 rq->dbuf = rq->dbufbase = db;
1136 rq->buf = bp;
1137 }
1138
1139 /*
1140 * xyc_rqtopb: load up an IOPB based on an iorq
1141 */
1142
1143 void
1144 xyc_rqtopb(struct xy_iorq *iorq, struct xy_iopb *iopb, int cmd, int subfun)
1145 {
1146 u_long block, dp;
1147
1148 /* normal IOPB case, standard stuff */
1149
1150 /* chain bit handled later */
1151 iopb->ien = (XY_STATE(iorq->mode) == XY_SUB_POLL) ? 0 : 1;
1152 iopb->com = cmd;
1153 iopb->errno = 0;
1154 iopb->errs = 0;
1155 iopb->done = 0;
1156 if (iorq->xy) {
1157 iopb->unit = iorq->xy->xy_drive;
1158 iopb->dt = iorq->xy->drive_type;
1159 } else {
1160 iopb->unit = 0;
1161 iopb->dt = 0;
1162 }
1163 block = iorq->blockno;
1164 if (iorq->xy == NULL || block == 0) {
1165 iopb->sect = iopb->head = iopb->cyl = 0;
1166 } else {
1167 iopb->sect = block % iorq->xy->nsect;
1168 block = block / iorq->xy->nsect;
1169 iopb->head = block % iorq->xy->nhead;
1170 block = block / iorq->xy->nhead;
1171 iopb->cyl = block;
1172 }
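	/*
	 * e.g. (made-up geometry): block 12345 on a drive with nsect=32 and
	 * nhead=9 becomes sect=25, head=7, cyl=42, since
	 * 42*9*32 + 7*32 + 25 == 12345.
	 */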
1173 iopb->scnt = iorq->sectcnt;
1174 if (iorq->dbuf == NULL) {
1175 iopb->dataa = 0;
1176 iopb->datar = 0;
1177 } else {
1178 dp = dvma_kvtopa(iorq->dbuf, iorq->xyc->bustype);
1179 iopb->dataa = (dp & 0xffff);
1180 iopb->datar = ((dp & 0xff0000) >> 16);
1181 }
1182 iopb->subfn = subfun;
1183 }
1184
1185
1186 /*
1187 * xyc_unbusy: wait for the xyc to go unbusy, or timeout.
1188 */
1189
1190 int
1191 xyc_unbusy(struct xyc *xyc, int del)
1192 {
1193
1194 while (del-- > 0) {
1195 if ((xyc->xyc_csr & XYC_GBSY) == 0)
1196 break;
1197 DELAY(1);
1198 }
1199 return del == 0 ? XY_ERR_FAIL : XY_ERR_AOK;
1200 }
1201
1202 /*
1203 * xyc_cmd: front end for POLL'd and WAIT'd commands. Returns 0 or error.
1204 * note that NORM requests are handled separately.
1205 */
1206 int
1207 xyc_cmd(struct xyc_softc *xycsc, int cmd, int subfn, int unit, int block,
1208 int scnt, char *dptr, int fullmode)
1209 {
1210 struct xy_iorq *iorq = xycsc->ciorq;
1211 struct xy_iopb *iopb = xycsc->ciopb;
1212 int submode = XY_STATE(fullmode);
1213
1214 /*
1215	 * is someone else using the control iopb?  wait for it if we can
1216 */
1217 start:
1218 if (submode == XY_SUB_WAIT && XY_STATE(iorq->mode) != XY_SUB_FREE) {
1219 if (tsleep(iorq, PRIBIO, "xyc_cmd", 0))
1220 return XY_ERR_FAIL;
1221 goto start;
1222 }
1223
1224 if (XY_STATE(iorq->mode) != XY_SUB_FREE) {
1225 DELAY(1000000); /* XY_SUB_POLL: steal the iorq */
1226 iorq->mode = XY_SUB_FREE;
1227 printf("%s: stole control iopb\n", device_xname(xycsc->sc_dev));
1228 }
1229
1230 /* init iorq/iopb */
1231
1232 xyc_rqinit(iorq, xycsc,
1233 (unit == XYC_NOUNIT) ? NULL : xycsc->sc_drives[unit],
1234 fullmode, block, scnt, dptr, NULL);
1235
1236 /* load IOPB from iorq */
1237
1238 xyc_rqtopb(iorq, iopb, cmd, subfn);
1239
1240 /* submit it for processing */
1241
1242 xyc_submit_iorq(xycsc, iorq, fullmode); /* error code will be in iorq */
1243
1244 return XY_ERR_AOK;
1245 }
1246
1247 /*
1248 * xyc_startbuf
1249 * start a buffer for running
1250 */
1251
1252 int
1253 xyc_startbuf(struct xyc_softc *xycsc, struct xy_softc *xysc, struct buf *bp)
1254 {
1255 struct xy_iorq *iorq;
1256 struct xy_iopb *iopb;
1257 u_long block;
1258 void *dbuf;
1259
1260 iorq = xysc->xyrq;
1261 iopb = iorq->iopb;
1262
1263 /* get buf */
1264
1265 if (bp == NULL)
1266 panic("%s null buf", __func__);
1267
1268 #ifdef XYC_DEBUG
1269 int partno = DISKPART(bp->b_dev);
1270 printf("%s: %s%c: %s block %d\n", __func__, device_xname(xysc->sc_dev),
1271 'a' + partno, (bp->b_flags & B_READ) ? "read" : "write",
1272 (int)bp->b_blkno);
1273 printf("xyc_startbuf: b_bcount %d, b_data 0x%x\n",
1274 bp->b_bcount, bp->b_data);
1275 #endif
1276
1277 /*
1278 * load request.
1279 *
1280 * also, note that there are two kinds of buf structures, those with
1281 * B_PHYS set and those without B_PHYS. if B_PHYS is set, then it is
1282 * a raw I/O (to a cdevsw) and we are doing I/O directly to the users'
1283 * buffer which has already been mapped into DVMA space. (Not on sun3)
1284 * However, if B_PHYS is not set, then the buffer is a normal system
1285 * buffer which does *not* live in DVMA space. In that case we call
1286 * dvma_mapin to map it into DVMA space so we can do the DMA to it.
1287 *
1288 * in cases where we do a dvma_mapin, note that iorq points to the
1289	 * buffer as mapped into DVMA space, whereas the bp->b_data points
1290 * to its non-DVMA mapping.
1291 *
1292 * XXX - On the sun3, B_PHYS does NOT mean the buffer is mapped
1293 * into dvma space, only that it was remapped into the kernel.
1294 * We ALWAYS have to remap the kernel buf into DVMA space.
1295 * (It is done inexpensively, using whole segments!)
1296 */
1297
1298 block = bp->b_rawblkno;
1299
1300 dbuf = dvma_mapin(bp->b_data, bp->b_bcount, 0);
1301 if (dbuf == NULL) { /* out of DVMA space */
1302 printf("%s: warning: out of DVMA space\n",
1303 device_xname(xycsc->sc_dev));
1304 return XY_ERR_FAIL; /* XXX: need some sort of
1305 * call-back scheme here? */
1306 }
1307
1308 /* init iorq and load iopb from it */
1309
1310 xyc_rqinit(iorq, xycsc, xysc, XY_SUB_NORM | XY_MODE_VERBO, block,
1311 bp->b_bcount / XYFM_BPS, dbuf, bp);
1312
1313 xyc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XYCMD_RD : XYCMD_WR, 0);
1314
1315 /* Instrumentation. */
1316 disk_busy(&xysc->sc_dk);
1317
1318 return XY_ERR_AOK;
1319 }
1320
1321
1322 /*
1323 * xyc_submit_iorq: submit an iorq for processing. returns XY_ERR_AOK
1324  * if ok.  if it fails it returns an error code.  type is XY_SUB_*.
1325 *
1326 * note: caller frees iorq in all cases except NORM
1327 *
1328 * return value:
1329 * NORM: XY_AOK (req pending), XY_FAIL (couldn't submit request)
1330 * WAIT: XY_AOK (success), <error-code> (failed)
1331 * POLL: <same as WAIT>
1332 * NOQ : <same as NORM>
1333 *
1334 * there are three sources for i/o requests:
1335 * [1] xystrategy: normal block I/O, using "struct buf" system.
1336 * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts.
1337 * [3] open/ioctl: these are I/O requests done in the context of a process,
1338 * and the process should block until they are done.
1339 *
1340 * software state is stored in the iorq structure. each iorq has an
1341 * iopb structure. the hardware understands the iopb structure.
1342 * every command must go through an iopb. a 450 handles one iopb at a
1343  * time, whereas a 451 can take them in chains.  [the 450 claims it
1344  * can handle chains, but it appears to be buggy...]  iopbs are allocated
1345 * in DVMA space at boot up time. each disk gets one iopb, and the
1346 * controller gets one (for POLL and WAIT commands). what happens if
1347 * the iopb is busy? for i/o type [1], the buffers are queued at the
1348 * "buff" layer and * picked up later by the interrupt routine. for case
1349 * [2] we can only be blocked if there is a WAIT type I/O request being
1350 * run. since this can only happen when we are crashing, we wait a sec
1351 * and then steal the IOPB. for case [3] the process can sleep
1352 * on the iorq free list until some iopbs are available.
1353 */
1354
1355 int
1356 xyc_submit_iorq(struct xyc_softc *xycsc, struct xy_iorq *iorq, int type)
1357 {
1358 struct xy_iopb *iopb;
1359 u_long iopbaddr;
1360
1361 #ifdef XYC_DEBUG
1362 printf("%s(%s, addr=0x%x, type=%d)\n", __func__,
1363 device_xname(xycsc->sc_dev), iorq, type);
1364 #endif
1365
1366 /* first check and see if controller is busy */
1367 if ((xycsc->xyc->xyc_csr & XYC_GBSY) != 0) {
1368 #ifdef XYC_DEBUG
1369 printf("%s: XYC not ready (BUSY)\n", __func__);
1370 #endif
1371 if (type == XY_SUB_NOQ)
1372 return XY_ERR_FAIL; /* failed */
1373 switch (type) {
1374 case XY_SUB_NORM:
1375 return XY_ERR_AOK; /* success */
1376 case XY_SUB_WAIT:
1377 while (iorq->iopb->done == 0) {
1378 (void)tsleep(iorq, PRIBIO, "xyciorq", 0);
1379 }
1380 return (iorq->errno);
1381 case XY_SUB_POLL: /* steal controller */
1382 iopbaddr = xycsc->xyc->xyc_rsetup; /* RESET */
1383 if (xyc_unbusy(xycsc->xyc, XYC_RESETUSEC) ==
1384 XY_ERR_FAIL)
1385 panic("%s: stuck xyc", __func__);
1386 printf("%s: stole controller\n",
1387 device_xname(xycsc->sc_dev));
1388 break;
1389 default:
1390 panic("%s adding", __func__);
1391 }
1392 }
1393
1394 iopb = xyc_chain(xycsc, iorq); /* build chain */
1395 if (iopb == NULL) { /* nothing doing? */
1396 if (type == XY_SUB_NORM || type == XY_SUB_NOQ)
1397 return XY_ERR_AOK;
1398 panic("xyc_submit_iorq: xyc_chain failed!");
1399 }
1400 iopbaddr = dvma_kvtopa(iopb, xycsc->bustype);
1401
1402 XYC_GO(xycsc->xyc, iopbaddr);
1403
1404 /* command now running, wrap it up */
1405 switch (type) {
1406 case XY_SUB_NORM:
1407 case XY_SUB_NOQ:
1408 return XY_ERR_AOK; /* success */
1409 case XY_SUB_WAIT:
1410 while (iorq->iopb->done == 0) {
1411 (void)tsleep(iorq, PRIBIO, "xyciorq", 0);
1412 }
1413 return iorq->errno;
1414 case XY_SUB_POLL:
1415 return xyc_piodriver(xycsc, iorq);
1416 default:
1417 panic("%s wrap up", __func__);
1418 }
1419 panic("%s impossible", __func__);
1420 return 0; /* not reached */
1421 }
1422
1423
1424 /*
1425  * xyc_chain: build a chain.  return a pointer to the chain's first iopb
1426  * (NULL if empty).  iorq != NULL: means we only want that item on the chain.
1427 */
1428
1429 struct xy_iopb *
1430 xyc_chain(struct xyc_softc *xycsc, struct xy_iorq *iorq)
1431 {
1432 int togo, chain, hand;
1433 struct xy_iopb *iopb, *prev_iopb;
1434
1435 memset(xycsc->xy_chain, 0, sizeof(xycsc->xy_chain));
1436
1437 /*
1438 * promote control IOPB to the top
1439 */
1440 if (iorq == NULL) {
1441 if ((XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_POLL ||
1442 XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_WAIT) &&
1443 xycsc->iopbase[XYC_CTLIOPB].done == 0)
1444 iorq = &xycsc->reqs[XYC_CTLIOPB];
1445 }
1446
1447 /*
1448 * special case: if iorq != NULL then we have a POLL or WAIT request.
1449 * we let these take priority and do them first.
1450 */
1451 if (iorq) {
1452 xycsc->xy_chain[0] = iorq;
1453 iorq->iopb->chen = 0;
1454 return iorq->iopb;
1455 }
1456
1457 /*
1458 * NORM case: do round robin and maybe chain (if allowed and possible)
1459 */
1460
1461 chain = 0;
1462 hand = xycsc->xy_hand;
1463 xycsc->xy_hand = (xycsc->xy_hand + 1) % XYC_MAXIOPB;
1464
1465 for (togo = XYC_MAXIOPB ; togo > 0 ;
1466 togo--, hand = (hand + 1) % XYC_MAXIOPB) {
1467
1468 if (XY_STATE(xycsc->reqs[hand].mode) != XY_SUB_NORM ||
1469 xycsc->iopbase[hand].done)
1470 continue; /* not ready-for-i/o */
1471
1472 xycsc->xy_chain[chain] = &xycsc->reqs[hand];
1473 iopb = xycsc->xy_chain[chain]->iopb;
1474 iopb->chen = 0;
1475 if (chain != 0) { /* adding a link to a chain? */
1476 prev_iopb = xycsc->xy_chain[chain-1]->iopb;
1477 prev_iopb->chen = 1;
1478 prev_iopb->nxtiopb = 0xffff &
1479 dvma_kvtopa(iopb, xycsc->bustype);
1480 } else { /* head of chain */
1481 iorq = xycsc->xy_chain[chain];
1482 }
1483 chain++;
1484 if (xycsc->no_ols)
1485 break; /* quit if chaining dis-allowed */
1486 }
1487 return iorq ? iorq->iopb : NULL;
1488 }
1489
1490 /*
1491 * xyc_piodriver
1492 *
1493 * programmed i/o driver. this function takes over the computer
1494 * and drains off the polled i/o request. it returns the status of the iorq
1495  * the caller is interested in.
1496 */
1497 int
1498 xyc_piodriver(struct xyc_softc *xycsc, struct xy_iorq *iorq)
1499 {
1500 int nreset = 0;
1501 int retval = 0;
1502 u_long res;
1503
1504 #ifdef XYC_DEBUG
1505 printf("%s(%s, 0x%x)\n", __func__, device_xname(xycsc->sc_dev), iorq);
1506 #endif
1507
1508 while (iorq->iopb->done == 0) {
1509
1510 res = xyc_unbusy(xycsc->xyc, XYC_MAXTIME);
1511
1512 /* we expect some progress soon */
1513 if (res == XY_ERR_FAIL && nreset >= 2) {
1514 xyc_reset(xycsc, 0, XY_RSET_ALL, XY_ERR_FAIL, 0);
1515 #ifdef XYC_DEBUG
1516 printf("%s: timeout\n", __func__);
1517 #endif
1518 return XY_ERR_FAIL;
1519 }
1520 if (res == XY_ERR_FAIL) {
1521 if (xyc_reset(xycsc, 0,
1522 (nreset++ == 0) ? XY_RSET_NONE : iorq,
1523 XY_ERR_FAIL, 0) == XY_ERR_FAIL)
1524 return XY_ERR_FAIL; /* flushes all but POLL
1525 * requests, resets */
1526 continue;
1527 }
1528
1529 xyc_remove_iorq(xycsc); /* may resubmit request */
1530
1531 if (iorq->iopb->done == 0)
1532 xyc_start(xycsc, iorq);
1533 }
1534
1535 /* get return value */
1536
1537 retval = iorq->errno;
1538
1539 #ifdef XYC_DEBUG
1540 printf("%s: done, retval = 0x%x (%s)\n", __func__,
1541 iorq->errno, xyc_e2str(iorq->errno));
1542 #endif
1543
1544 /* start up any bufs that have queued */
1545
1546 xyc_start(xycsc, NULL);
1547
1548 return retval;
1549 }
1550
1551 /*
1552 * xyc_xyreset: reset one drive. NOTE: assumes xyc was just reset.
1553 * we steal iopb[XYC_CTLIOPB] for this, but we put it back when we are done.
1554 */
1555 void
1556 xyc_xyreset(struct xyc_softc *xycsc, struct xy_softc *xysc)
1557 {
1558 struct xy_iopb tmpiopb;
1559 u_long addr;
1560 int del;
1561 memcpy(&tmpiopb, xycsc->ciopb, sizeof(tmpiopb));
1562 xycsc->ciopb->chen = xycsc->ciopb->done = xycsc->ciopb->errs = 0;
1563 xycsc->ciopb->ien = 0;
1564 xycsc->ciopb->com = XYCMD_RST;
1565 xycsc->ciopb->unit = xysc->xy_drive;
1566 addr = dvma_kvtopa(xycsc->ciopb, xycsc->bustype);
1567
1568 XYC_GO(xycsc->xyc, addr);
1569
1570 del = XYC_RESETUSEC;
1571 while (del > 0) {
1572 if ((xycsc->xyc->xyc_csr & XYC_GBSY) == 0)
1573 break;
1574 DELAY(1);
1575 del--;
1576 }
1577
1578 if (del <= 0 || xycsc->ciopb->errs) {
1579 printf("%s: off-line: %s\n", device_xname(xycsc->sc_dev),
1580 xyc_e2str(xycsc->ciopb->errno));
1581 del = xycsc->xyc->xyc_rsetup;
1582 if (xyc_unbusy(xycsc->xyc, XYC_RESETUSEC) == XY_ERR_FAIL)
1583 panic("%s", __func__);
1584 } else {
1585 xycsc->xyc->xyc_csr = XYC_IPND; /* clear IPND */
1586 }
1587 memcpy(xycsc->ciopb, &tmpiopb, sizeof(tmpiopb));
1588 }
1589
1590
1591 /*
1592 * xyc_reset: reset everything: requests are marked as errors except
1593 * a polled request (which is resubmitted)
1594 */
1595 int
1596 xyc_reset(struct xyc_softc *xycsc, int quiet, struct xy_iorq *blastmode,
1597 int error, struct xy_softc *xysc)
1598 {
1599 int del = 0, lcv, retval = XY_ERR_AOK;
1600 struct xy_iorq *iorq;
1601
1602 /* soft reset hardware */
1603
1604 if (quiet == 0)
1605 printf("%s: soft reset\n", device_xname(xycsc->sc_dev));
1606 del = xycsc->xyc->xyc_rsetup;
1607 del = xyc_unbusy(xycsc->xyc, XYC_RESETUSEC);
1608 if (del == XY_ERR_FAIL) {
1609 blastmode = XY_RSET_ALL; /* dead, flush all requests */
1610 retval = XY_ERR_FAIL;
1611 }
1612 if (xysc)
1613 xyc_xyreset(xycsc, xysc);
1614
1615 /* fix queues based on "blast-mode" */
1616
1617 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) {
1618 iorq = &xycsc->reqs[lcv];
1619
1620 if (XY_STATE(iorq->mode) != XY_SUB_POLL &&
1621 XY_STATE(iorq->mode) != XY_SUB_WAIT &&
1622 XY_STATE(iorq->mode) != XY_SUB_NORM)
1623 /* is it active? */
1624 continue;
1625
1626 if (blastmode == XY_RSET_ALL ||
1627 blastmode != iorq) {
1628 /* failed */
1629 iorq->errno = error;
1630 xycsc->iopbase[lcv].done = xycsc->iopbase[lcv].errs = 1;
1631 switch (XY_STATE(iorq->mode)) {
1632 case XY_SUB_NORM:
1633 iorq->buf->b_error = EIO;
1634 iorq->buf->b_resid = iorq->sectcnt * XYFM_BPS;
1635 /* Sun3: map/unmap regardless of B_PHYS */
1636 dvma_mapout(iorq->dbufbase,
1637 iorq->buf->b_bcount);
1638 (void)bufq_get(iorq->xy->xyq);
1639 disk_unbusy(&iorq->xy->sc_dk,
1640 (iorq->buf->b_bcount - iorq->buf->b_resid),
1641 (iorq->buf->b_flags & B_READ));
1642 biodone(iorq->buf);
1643 iorq->mode = XY_SUB_FREE;
1644 break;
1645 case XY_SUB_WAIT:
1646 wakeup(iorq);
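				/* FALLTHROUGH */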
1647 case XY_SUB_POLL:
1648 iorq->mode =
1649 XY_NEWSTATE(iorq->mode, XY_SUB_DONE);
1650 break;
1651 }
1652
1653 } else {
1654
1655 /* resubmit, no need to do anything here */
1656 }
1657 }
1658
1659 /*
1660 * now, if stuff is waiting, start it.
1661 * since we just reset it should go
1662 */
1663 xyc_start(xycsc, NULL);
1664
1665 return retval;
1666 }
1667
1668 /*
1669 * xyc_start: start waiting buffers
1670 */
1671
1672 void
1673 xyc_start(struct xyc_softc *xycsc, struct xy_iorq *iorq)
1674 {
1675 int lcv;
1676 struct xy_softc *xy;
1677
1678 if (iorq == NULL) {
1679 for (lcv = 0; lcv < XYC_MAXDEV ; lcv++) {
1680 if ((xy = xycsc->sc_drives[lcv]) == NULL)
1681 continue;
1682 if (bufq_peek(xy->xyq) == NULL)
1683 continue;
1684 if (xy->xyrq->mode != XY_SUB_FREE)
1685 continue;
1686 xyc_startbuf(xycsc, xy, bufq_peek(xy->xyq));
1687 }
1688 }
1689 xyc_submit_iorq(xycsc, iorq, XY_SUB_NOQ);
1690 }
1691
1692 /*
1693 * xyc_remove_iorq: remove "done" IOPB's.
1694 */
1695
1696 int
1697 xyc_remove_iorq(struct xyc_softc *xycsc)
1698 {
1699 int errno, rq, comm, errs;
1700 struct xyc *xyc = xycsc->xyc;
1701 u_long addr;
1702 struct xy_iopb *iopb;
1703 struct xy_iorq *iorq;
1704 struct buf *bp;
1705
1706 if (xyc->xyc_csr & XYC_DERR) {
1707 /*
1708 * DOUBLE ERROR: should never happen under normal use. This
1709 * error is so bad, you can't even tell which IOPB is bad, so
1710 * we dump them all.
1711 */
1712 errno = XY_ERR_DERR;
1713 printf("%s: DOUBLE ERROR!\n", device_xname(xycsc->sc_dev));
1714 if (xyc_reset(xycsc, 0, XY_RSET_ALL, errno, 0) != XY_ERR_AOK) {
1715 printf("%s: soft reset failed!\n",
1716 device_xname(xycsc->sc_dev));
1717 panic("%s: controller DEAD", __func__);
1718 }
1719 return XY_ERR_AOK;
1720 }
1721
1722 /*
1723 * get iopb that is done, loop down the chain
1724 */
1725
1726 if (xyc->xyc_csr & XYC_ERR) {
1727 xyc->xyc_csr = XYC_ERR; /* clear error condition */
1728 }
1729 if (xyc->xyc_csr & XYC_IPND) {
1730 xyc->xyc_csr = XYC_IPND; /* clear interrupt */
1731 }
1732
1733 for (rq = 0; rq < XYC_MAXIOPB; rq++) {
1734 iorq = xycsc->xy_chain[rq];
1735 if (iorq == NULL) break; /* done ! */
1736 if (iorq->mode == 0 || XY_STATE(iorq->mode) == XY_SUB_DONE)
1737 continue; /* free, or done */
1738 iopb = iorq->iopb;
1739 if (iopb->done == 0)
1740 continue; /* not done yet */
1741
1742 comm = iopb->com;
1743 errs = iopb->errs;
1744
1745 if (errs)
1746 iorq->errno = iopb->errno;
1747 else
1748 iorq->errno = 0;
1749
1750 /* handle non-fatal errors */
1751
1752 if (errs &&
1753 xyc_error(xycsc, iorq, iopb, comm) == XY_ERR_AOK)
1754 continue; /* AOK: we resubmitted it */
1755
1756
1757 /* this iorq is now done (hasn't been restarted or anything) */
1758
1759 if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror)
1760 xyc_perror(iorq, iopb, 0);
1761
1762 /* now, if read/write check to make sure we got all the data
1763 * we needed. (this may not be the case if we got an error in
1764 * the middle of a multisector request). */
1765
1766 if ((iorq->mode & XY_MODE_B144) != 0 && errs == 0 &&
1767 (comm == XYCMD_RD || comm == XYCMD_WR)) {
1768 /* we just successfully processed a bad144 sector
1769 * note: if we are in bad 144 mode, the pointers have
1770 * been advanced already (see above) and are pointing
1771 * at the bad144 sector. to exit bad144 mode, we
1772 * must advance the pointers 1 sector and issue a new
1773 * request if there are still sectors left to process
1774 *
1775 */
1776 XYC_ADVANCE(iorq, 1); /* advance 1 sector */
1777
1778 /* exit b144 mode */
1779 iorq->mode = iorq->mode & (~XY_MODE_B144);
1780
1781 if (iorq->sectcnt) { /* more to go! */
1782 iorq->lasterror = iorq->errno = iopb->errno = 0;
1783 iopb->errs = iopb->done = 0;
1784 iorq->tries = 0;
1785 iopb->scnt = iorq->sectcnt;
1786 iopb->cyl =
1787 iorq->blockno / iorq->xy->sectpercyl;
1788				iopb->head =
1789				    (iorq->blockno / iorq->xy->nsect) %
1790				    iorq->xy->nhead;
1791				iopb->sect = iorq->blockno % iorq->xy->nsect;
1792 addr = dvma_kvtopa(iorq->dbuf, xycsc->bustype);
1793 iopb->dataa = (addr & 0xffff);
1794 iopb->datar = ((addr & 0xff0000) >> 16);
1795				/* will resubmit at end */
1796 continue;
1797 }
1798 }
1799 /* final cleanup, totally done with this request */
1800
1801 switch (XY_STATE(iorq->mode)) {
1802 case XY_SUB_NORM:
1803 bp = iorq->buf;
1804 if (errs) {
1805 bp->b_error = EIO;
1806 bp->b_resid = iorq->sectcnt * XYFM_BPS;
1807 } else {
1808 bp->b_resid = 0; /* done */
1809 }
1810 /* Sun3: map/unmap regardless of B_PHYS */
1811 dvma_mapout(iorq->dbufbase, iorq->buf->b_bcount);
1812 (void)bufq_get(iorq->xy->xyq);
1813 disk_unbusy(&iorq->xy->sc_dk,
1814 (bp->b_bcount - bp->b_resid),
1815 (bp->b_flags & B_READ));
1816 iorq->mode = XY_SUB_FREE;
1817 biodone(bp);
1818 break;
1819 case XY_SUB_WAIT:
1820 iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE);
1821 wakeup(iorq);
1822 break;
1823 case XY_SUB_POLL:
1824 iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE);
1825 break;
1826 }
1827 }
1828
1829 return XY_ERR_AOK;
1830 }
1831
1832 /*
1833 * xyc_perror: print error.
1834 * - if still_trying is true: we got an error, retried and got a
1835 * different error. in that case lasterror is the old error,
1836 * and errno is the new one.
1837 * - if still_trying is not true, then if we ever had an error it
1838 * is in lasterror. also, if iorq->errno == 0, then we recovered
1839 * from that error (otherwise iorq->errno == iorq->lasterror).
1840 */
1841 void
1842 xyc_perror(struct xy_iorq *iorq, struct xy_iopb *iopb, int still_trying)
1843 {
1844 int error = iorq->lasterror;
1845
1846 printf("%s", (iorq->xy) ? device_xname(iorq->xy->sc_dev)
1847 : device_xname(iorq->xyc->sc_dev));
1848 if (iorq->buf)
1849 printf("%c: ", 'a' + (char)DISKPART(iorq->buf->b_dev));
1850 if (iopb->com == XYCMD_RD || iopb->com == XYCMD_WR)
1851 printf("%s %d/%d/%d: ",
1852 (iopb->com == XYCMD_RD) ? "read" : "write",
1853 iopb->cyl, iopb->head, iopb->sect);
1854 printf("%s", xyc_e2str(error));
1855
1856 if (still_trying)
1857 printf(" [still trying, new error=%s]", xyc_e2str(iorq->errno));
1858 else
1859 if (iorq->errno == 0)
1860 printf(" [recovered in %d tries]", iorq->tries);
1861
1862 printf("\n");
1863 }
1864
1865 /*
1866 * xyc_error: non-fatal error encountered... recover.
1867 * return AOK if resubmitted, return FAIL if this iopb is done
1868 */
1869 int
1870 xyc_error(struct xyc_softc *xycsc, struct xy_iorq *iorq, struct xy_iopb *iopb,
1871 int comm)
1872 {
1873 int errno = iorq->errno;
1874 int erract = xyc_entoact(errno);
1875 int oldmode, advance, i;
1876
1877 if (erract == XY_ERA_RSET) { /* some errors require a reset */
1878 oldmode = iorq->mode;
1879 iorq->mode = XY_SUB_DONE | (~XY_SUB_MASK & oldmode);
1880 /* make xyc_start ignore us */
1881 xyc_reset(xycsc, 1, XY_RSET_NONE, errno, iorq->xy);
1882 iorq->mode = oldmode;
1883 }
1884 /* check for read/write to a sector in bad144 table if bad: redirect
1885 * request to bad144 area */
1886
1887 if ((comm == XYCMD_RD || comm == XYCMD_WR) &&
1888 (iorq->mode & XY_MODE_B144) == 0) {
1889 advance = iorq->sectcnt - iopb->scnt;
1890 XYC_ADVANCE(iorq, advance);
1891 if ((i = isbad(&iorq->xy->dkb,
1892 iorq->blockno / iorq->xy->sectpercyl,
1893 (iorq->blockno / iorq->xy->nsect) % iorq->xy->nhead,
1894 iorq->blockno % iorq->xy->nsect)) != -1) {
1895 iorq->mode |= XY_MODE_B144; /* enter bad144 mode &
1896 * redirect */
1897 iopb->errno = iopb->done = iopb->errs = 0;
1898 iopb->scnt = 1;
1899 iopb->cyl = (iorq->xy->ncyl + iorq->xy->acyl) - 2;
1900 /* second to last acyl */
1901 i = iorq->xy->sectpercyl - 1 - i; /* follow bad144
1902 * standard */
1903 iopb->head = i / iorq->xy->nhead;
1904 iopb->sect = i % iorq->xy->nhead;
1905 /* will resubmit when we come out of remove_iorq */
1906 return XY_ERR_AOK; /* recovered! */
1907 }
1908 }
1909
1910 /*
1911 * it isn't a bad144 sector, must be real error! see if we can retry
1912 * it?
1913 */
1914 if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror)
1915 xyc_perror(iorq, iopb, 1); /* inform of error state
1916 * change */
1917 iorq->lasterror = errno;
1918
1919 if ((erract == XY_ERA_RSET || erract == XY_ERA_HARD)
1920 && iorq->tries < XYC_MAXTRIES) { /* retry? */
1921 iorq->tries++;
1922 iorq->errno = iopb->errno = iopb->done = iopb->errs = 0;
1923 /* will resubmit at end of remove_iorq */
1924 return XY_ERR_AOK; /* recovered! */
1925 }
1926
1927 /* failed to recover from this error */
1928 return XY_ERR_FAIL;
1929 }
1930
1931 /*
1932 * xyc_tick: make sure xy is still alive and ticking (err, kicking).
1933 */
1934 void
1935 xyc_tick(void *arg)
1936 {
1937 struct xyc_softc *xycsc = arg;
1938 int lcv, s, reset = 0;
1939
1940	/* reduce ttl for each request; if one goes to zero, reset the xyc */
1941 s = splbio();
1942 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) {
1943 if (xycsc->reqs[lcv].mode == 0 ||
1944 XY_STATE(xycsc->reqs[lcv].mode) == XY_SUB_DONE)
1945 continue;
1946 xycsc->reqs[lcv].ttl--;
1947 if (xycsc->reqs[lcv].ttl == 0)
1948 reset = 1;
1949 }
1950 if (reset) {
1951 printf("%s: watchdog timeout\n", device_xname(xycsc->sc_dev));
1952 xyc_reset(xycsc, 0, XY_RSET_NONE, XY_ERR_FAIL, NULL);
1953 }
1954 splx(s);
1955
1956 /* until next time */
1957
1958 callout_reset(&xycsc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xycsc);
1959 }
1960
1961 /*
1962 * xyc_ioctlcmd: this function provides a user level interface to the
1963 * controller via ioctl. this allows "format" programs to be written
1964 * in user code, and is also useful for some debugging. we return
1965 * an error code. called at user priority.
1966 *
1967 * XXX missing a few commands (see the 7053 driver for ideas)
1968 */
1969 int
1970 xyc_ioctlcmd(struct xy_softc *xy, dev_t dev, struct xd_iocmd *xio)
1971 {
1972 int s, err, rqno;
1973 void *dvmabuf = NULL;
1974 struct xyc_softc *xycsc;
1975
1976 /* check sanity of requested command */
1977
1978 switch (xio->cmd) {
1979
1980 case XYCMD_NOP: /* no op: everything should be zero */
1981 if (xio->subfn || xio->dptr || xio->dlen ||
1982 xio->block || xio->sectcnt)
1983 return EINVAL;
1984 break;
1985
1986 case XYCMD_RD: /* read / write sectors (up to XD_IOCMD_MAXS) */
1987 case XYCMD_WR:
1988 if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS ||
1989 xio->sectcnt * XYFM_BPS != xio->dlen || xio->dptr == NULL)
1990 return EINVAL;
1991 break;
1992
1993 case XYCMD_SK: /* seek: doesn't seem useful to export this */
1994 return EINVAL;
1995 break;
1996
1997 default:
1998 return EINVAL;/* ??? */
1999 }
2000
2001 /* create DVMA buffer for request if needed */
2002
2003 if (xio->dlen) {
2004 dvmabuf = dvma_malloc(xio->dlen);
2005 if (xio->cmd == XYCMD_WR) {
2006 err = copyin(xio->dptr, dvmabuf, xio->dlen);
2007 if (err) {
2008 dvma_free(dvmabuf, xio->dlen);
2009 return err;
2010 }
2011 }
2012 }
2013 /* do it! */
2014
2015 err = 0;
2016 xycsc = xy->parent;
2017 s = splbio();
2018 rqno = xyc_cmd(xycsc, xio->cmd, xio->subfn, xy->xy_drive, xio->block,
2019 xio->sectcnt, dvmabuf, XY_SUB_WAIT);
2020 if (rqno == XY_ERR_FAIL) {
2021 err = EIO;
2022 goto done;
2023 }
2024 xio->errno = xycsc->ciorq->errno;
2025 xio->tries = xycsc->ciorq->tries;
2026 XYC_DONE(xycsc, err);
2027
2028 if (xio->cmd == XYCMD_RD)
2029 err = copyout(dvmabuf, xio->dptr, xio->dlen);
2030
2031 done:
2032 splx(s);
2033 if (dvmabuf)
2034 dvma_free(dvmabuf, xio->dlen);
2035 return err;
2036 }
2037
2038 /*
2039 * xyc_e2str: convert error code number into an error string
2040 */
2041 const char *
2042 xyc_e2str(int no)
2043 {
2044 switch (no) {
2045 case XY_ERR_FAIL:
2046 return "Software fatal error";
2047 case XY_ERR_DERR:
2048 return "DOUBLE ERROR";
2049 case XY_ERR_AOK:
2050 return "Successful completion";
2051 case XY_ERR_IPEN:
2052 return "Interrupt pending";
2053 case XY_ERR_BCFL:
2054 return "Busy conflict";
2055 case XY_ERR_TIMO:
2056 return "Operation timeout";
2057 case XY_ERR_NHDR:
2058 return "Header not found";
2059 case XY_ERR_HARD:
2060 return "Hard ECC error";
2061 case XY_ERR_ICYL:
2062 return "Illegal cylinder address";
2063 case XY_ERR_ISEC:
2064 return "Illegal sector address";
2065 case XY_ERR_SMAL:
2066 return "Last sector too small";
2067 case XY_ERR_SACK:
2068 return "Slave ACK error (non-existent memory)";
2069 case XY_ERR_CHER:
2070 return "Cylinder and head/header error";
2071 case XY_ERR_SRTR:
2072 return "Auto-seek retry successful";
2073 case XY_ERR_WPRO:
2074 return "Write-protect error";
2075 case XY_ERR_UIMP:
2076 return "Unimplemented command";
2077 case XY_ERR_DNRY:
2078 return "Drive not ready";
2079 case XY_ERR_SZER:
2080 return "Sector count zero";
2081 case XY_ERR_DFLT:
2082 return "Drive faulted";
2083 case XY_ERR_ISSZ:
2084 return "Illegal sector size";
2085 case XY_ERR_SLTA:
2086 return "Self test A";
2087 case XY_ERR_SLTB:
2088 return "Self test B";
2089 case XY_ERR_SLTC:
2090 return "Self test C";
2091 case XY_ERR_SOFT:
2092 return "Soft ECC error";
2093 case XY_ERR_SFOK:
2094 return "Soft ECC error recovered";
2095 case XY_ERR_IHED:
2096 return "Illegal head";
2097 case XY_ERR_DSEQ:
2098 return "Disk sequencer error";
2099 case XY_ERR_SEEK:
2100 return "Seek error";
2101 default:
2102 return "Unknown error";
2103 }
2104 }
2105
2106 int
2107 xyc_entoact(int errno)
2108 {
2109
2110 switch (errno) {
2111 case XY_ERR_FAIL:
2112 case XY_ERR_DERR:
2113 case XY_ERR_IPEN:
2114 case XY_ERR_BCFL:
2115 case XY_ERR_ICYL:
2116 case XY_ERR_ISEC:
2117 case XY_ERR_UIMP:
2118 case XY_ERR_SZER:
2119 case XY_ERR_ISSZ:
2120 case XY_ERR_SLTA:
2121 case XY_ERR_SLTB:
2122 case XY_ERR_SLTC:
2123 case XY_ERR_IHED:
2124 case XY_ERR_SACK:
2125 case XY_ERR_SMAL:
2126 return XY_ERA_PROG; /* program error ! */
2127
2128 case XY_ERR_TIMO:
2129 case XY_ERR_NHDR:
2130 case XY_ERR_HARD:
2131 case XY_ERR_DNRY:
2132 case XY_ERR_CHER:
2133 case XY_ERR_SEEK:
2134 case XY_ERR_SOFT:
2135 return XY_ERA_HARD; /* hard error, retry */
2136
2137 case XY_ERR_DFLT:
2138 case XY_ERR_DSEQ:
2139 return XY_ERA_RSET; /* hard error reset */
2140
2141 case XY_ERR_SRTR:
2142 case XY_ERR_SFOK:
2143 case XY_ERR_AOK:
2144 return XY_ERA_SOFT; /* an FYI error */
2145
2146 case XY_ERR_WPRO:
2147 return XY_ERA_WPRO; /* write protect */
2148 }
2149
2150 return XY_ERA_PROG; /* ??? */
2151 }
2152