/*	$NetBSD: ld_iop.c,v 1.6 2001/03/20 13:01:49 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */

#include "opt_i2o.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <machine/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>
#define	LD_IOP_TIMEOUT		30*1000		/* command timeout, in ms */

#define	LD_IOP_CLAIMED		0x01		/* we hold a claim on the target */
#define	LD_IOP_NEW_EVTMASK	0x02		/* event mask update acknowledged */

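/*
 * Per-device state.  We register two initiators with iop(4): one for
 * block I/O commands and one (utility) for asynchronous event
 * notifications from the device.
 */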
struct ld_iop_softc {
	struct	ld_softc sc_ld;
	struct	iop_initiator sc_ii;
	struct	iop_initiator sc_eventii;
	int	sc_flags;
};

static void	ld_iop_adjqparam(struct device *, int);
static void	ld_iop_attach(struct device *, struct device *, void *);
static int	ld_iop_detach(struct device *, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *);
static void	ld_iop_intr(struct device *, struct iop_msg *, void *);
static void	ld_iop_intr_event(struct device *, struct iop_msg *, void *);
static int	ld_iop_match(struct device *, struct cfdata *, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static void	ld_iop_unconfig(struct ld_iop_softc *, int);

struct cfattach ld_iop_ca = {
	sizeof(struct ld_iop_softc),
	ld_iop_match,
	ld_iop_attach,
	ld_iop_detach
};

#ifdef I2OVERBOSE
static const char * const ld_iop_errors[] = {
	"success",
	"media error",
	"access error",
	"device failure",
	"device not ready",
	"media not present",
	"media locked",
	"media failure",
	"protocol failure",
	"bus failure",
	"access violation",
	"media write protected",
	"device reset",
	"volume changed, waiting for acknowledgement",
	"timeout",
};
#endif

static int
ld_iop_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

static void
ld_iop_attach(struct device *parent, struct device *self, void *aux)
{
	struct iop_attach_args *ia;
	struct ld_softc *ld;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv, evreg, enable;
	char *typestr, *fixedstr;
	u_int cachesz;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
			struct	i2o_param_rbs_operation op;
		} p;
	} param /* XXX gcc __attribute__ ((__packed__)) */;

	sc = (struct ld_iop_softc *)self;
	ld = &sc->sc_ld;
	iop = (struct iop_softc *)parent;
	ia = (struct iop_attach_args *)aux;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_adjqparam = ld_iop_adjqparam;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_ii);

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_DISCARD | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_eventii);

	rv = iop_util_eventreg(iop, &sc->sc_eventii,
	    I2O_EVENT_GEN_EVENT_MASK_MODIFIED |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n", self->dv_xname);
		goto bad;
	}
	evreg = 1;

	/*
	 * Start out with one queued command.  The `iop' driver will adjust
	 * the queue parameters once we're up and running.
	 */
	ld->sc_maxqueuecnt = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_dump = ld_iop_dump;
	ld->sc_flush = ld_iop_flush;
	ld->sc_start = ld_iop_start;

	/* Say what the device is. */
	printf(":");
	iop_print_ident(iop, ia->ia_tid);

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	rv = iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);
	sc->sc_flags = rv ? 0 : LD_IOP_CLAIMED;

	rv = iop_param_op(iop, ia->ia_tid, NULL, 0, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param));
	if (rv != 0) {
		printf("%s: unable to get parameters (0x%04x; %d)\n",
		    ld->sc_dv.dv_xname, I2O_PARAM_RBS_DEVICE_INFO, rv);
		goto bad;
	}

	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	/*
	 * Build synthetic geometry: 63 sectors per track, with the head
	 * count scaled up as the capacity grows, BIOS translation style.
	 * For example, a 4194304 sector unit (2GB at 512 bytes/sector)
	 * gets 64 heads and 4194304 / (64 * 63) = 1040 cylinders.
	 */
	if (ld->sc_secperunit <= 528 * 2048)		/* 528MB */
		ld->sc_nheads = 16;
	else if (ld->sc_secperunit <= 1024 * 2048)	/* 1GB */
		ld->sc_nheads = 32;
	else if (ld->sc_secperunit <= 21504 * 2048)	/* 21GB */
		ld->sc_nheads = 64;
	else if (ld->sc_secperunit <= 43008 * 2048)	/* 42GB */
		ld->sc_nheads = 128;
	else
		ld->sc_nheads = 255;

	ld->sc_nsectors = 63;
	ld->sc_ncylinders = ld->sc_secperunit /
	    (ld->sc_nheads * ld->sc_nsectors);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "CD-ROM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVEABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags = LDF_REMOVEABLE; */
		fixedstr = "removable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf(" %s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown.
	 */
	rv = iop_param_op(iop, ia->ia_tid, NULL, 0,
	    I2O_PARAM_RBS_CACHE_CONTROL, &param, sizeof(param));
	if (rv != 0) {
		printf("%s: unable to get parameters (0x%04x; %d)\n",
		    ld->sc_dv.dv_xname, I2O_PARAM_RBS_CACHE_CONTROL, rv);
		goto bad;
	}

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %dkB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 30 seconds.
	 */
	rv = iop_param_op(iop, ia->ia_tid, NULL, 0, I2O_PARAM_RBS_OPERATION,
	    &param, sizeof(param));
	if (rv != 0) {
		printf("%s: unable to get parameters (0x%04x; %d)\n",
		    ld->sc_dv.dv_xname, I2O_PARAM_RBS_OPERATION, rv);
		goto bad;
	}

	param.p.op.timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	param.p.op.rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	param.p.op.rwvtimeout = 0;

	rv = iop_param_op(iop, ia->ia_tid, NULL, 1, I2O_PARAM_RBS_OPERATION,
	    &param, sizeof(param));
#ifdef notdef
	/*
	 * Intel RAID adapters don't like the above, but do post a
	 * `parameter changed' event.  Perhaps we're doing something
	 * wrong...
	 */
	if (rv != 0) {
		printf("%s: unable to set parameters (0x%04x; %d)\n",
		    ld->sc_dv.dv_xname, I2O_PARAM_RBS_OPERATION, rv);
		goto bad;
	}
#endif

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		printf("%s: device not yet supported\n", self->dv_xname);

	ldattach(ld);
	return;

 bad:
	ld_iop_unconfig(sc, evreg);
}

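/*
 * Undo the work of ld_iop_attach(): release any claim on the target and
 * unregister both initiators.  `evreg' is non-zero if we managed to
 * register for event notifications.
 */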
static void
ld_iop_unconfig(struct ld_iop_softc *sc, int evreg)
{
	struct iop_softc *iop;
	int s;

	iop = (struct iop_softc *)sc->sc_ld.sc_dv.dv_parent;

	if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);

	if (evreg) {
		/*
		 * Mask off events, and wait up to 5 seconds for a reply.
		 * Note that some adapters won't reply to this (XXX We
		 * should check the event capabilities).
		 */
		sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
		iop_util_eventreg(iop, &sc->sc_eventii,
		    I2O_EVENT_GEN_EVENT_MASK_MODIFIED);
		s = splbio();
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			tsleep(&sc->sc_eventii, PRIBIO, "ld_iopevt", hz * 5);
		splx(s);
#ifdef I2ODEBUG
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			printf("%s: didn't reply to event unregister\n",
			    sc->sc_ld.sc_dv.dv_xname);
#endif
	}

	iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

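/*
 * Detach from the device: abort outstanding requests, detach the ld(4)
 * instance, then release our claim and initiator registrations.
 */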
static int
ld_iop_detach(struct device *self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = (struct ld_iop_softc *)self;
	iop = (struct iop_softc *)self->dv_parent;

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register our initiators. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		ld_iop_unconfig(sc, 1);

	return (0);
}

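/*
 * Start a transfer: build an RBS block read/write message for `bp' and
 * post it to the IOP.  Called by the ld(4) mid-layer; completion is
 * reported via ld_iop_intr().
 */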
static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mf;
	u_int rv, flags, write;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;

	im = iop_msg_alloc(iop, &sc->sc_ii, 0);
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mf = (struct i2o_rbs_block_read *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = flags | (1 << 16);		/* flags & time multiplier */
	mf->datasize = bp->b_bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	/* Map the data transfer and enqueue the command. */
	rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
	if (rv == 0) {
		if ((rv = iop_msg_post(iop, im, mb, 0)) != 0) {
			iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
		}
	} else
		iop_msg_free(iop, im);	/* don't leak the message on map failure */
	return (rv);
}

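/*
 * Write `blkcnt' blocks to the device (kernel crash dump path).  The
 * message is posted with IM_POLL and completion is polled for, since
 * interrupts are typically disabled at dump time.
 */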
static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mf;
	int rv, bcount;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;
	im = iop_msg_alloc(iop, &sc->sc_ii, IM_POLL);

	mf = (struct i2o_rbs_block_write *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_BLOCK_WRITE);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mf->datasize = bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, mb, data, bcount, 1)) != 0) {
		iop_msg_free(iop, im);
		return (rv);
	}

	rv = iop_msg_post(iop, im, mb, LD_IOP_TIMEOUT * 2);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	return (rv);
}

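/*
 * Flush the DDM's cache, waiting up to 60 seconds for the flush to
 * complete.
 */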
static int
ld_iop_flush(struct ld_softc *ld)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush mf;
	int rv;

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;
	im = iop_msg_alloc(iop, &sc->sc_ii, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_CACHE_FLUSH);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = 1 << 16;			/* time multiplier */

	/*
	 * XXX Ancient disks will return an error here.  Also, we shouldn't
	 * be polling on completion while the system is running.
	 */
	rv = iop_msg_post(iop, im, &mf, LD_IOP_TIMEOUT * 2);
	iop_msg_free(iop, im);
	return (rv);
}

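/*
 * Handle a reply to a block read/write: report any error, then hand the
 * completed transfer back to the ld(4) mid-layer.
 */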
static void
ld_iop_intr(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int err, detail;
#ifdef I2OVERBOSE
	const char *errstr;
#endif

	rb = reply;
	bp = im->im_dvcontext;
	sc = (struct ld_iop_softc *)dv;
	iop = (struct iop_softc *)dv->dv_parent;

	err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);

	if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
#ifdef I2OVERBOSE
		if (detail >= sizeof(ld_iop_errors) / sizeof(ld_iop_errors[0]))
			errstr = "<unknown>";
		else
			errstr = ld_iop_errors[detail];
		printf("%s: error 0x%04x: %s\n", dv->dv_xname, detail, errstr);
#else
		printf("%s: error 0x%04x\n", dv->dv_xname, detail);
#endif
		err = 1;
	}

	if (err) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	lddone(&sc->sc_ld, bp);
}

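/*
 * Handle an event notification from the device.  An event-mask-modified
 * reply wakes up ld_iop_unconfig(); anything else is simply reported.
 */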
static void
ld_iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct ld_iop_softc *sc;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	sc = (struct ld_iop_softc *)dv;

	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED) {
		sc->sc_flags |= LD_IOP_NEW_EVTMASK;
		wakeup(&sc->sc_eventii);
#ifndef I2ODEBUG
		return;
#endif
	}

	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

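/*
 * Callback from iop(4): adjust the maximum number of commands that we
 * will queue with the IOP at once.
 */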
static void
ld_iop_adjqparam(struct device *dv, int mpi)
{
	struct iop_softc *iop;

	/*
	 * AMI controllers seem to lose the plot if you hand off lots of
	 * queued commands.
	 */
	iop = (struct iop_softc *)dv->dv_parent;
	if (le16toh(I2O_ORG_AMI) == iop->sc_status.orgid && mpi > 64)
		mpi = 64;

	ldadjqparam((struct ld_softc *)dv, mpi);
}