/*	$NetBSD: ld_iop.c,v 1.5 2001/02/06 12:22:24 ad Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */

#include "opt_i2o.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <machine/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopvar.h>

#define	LD_IOP_MAXQUEUECNT	64		/* XXX */
#define	LD_IOP_TIMEOUT		10*1000*1000

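/*
 * Per-unit softc.  The generic ld(4) softc must be the first member, as
 * the code below casts between ld_softc and ld_iop_softc.  One initiator
 * handle (sc_ii) is used for block I/O, a second (sc_eventii) for
 * asynchronous event notification from the device.
 */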
struct ld_iop_softc {
	struct	ld_softc sc_ld;
	struct	iop_initiator sc_ii;
	struct	iop_initiator sc_eventii;
	int	sc_claimed;
	u_int	sc_tid;
};

static void	ld_iop_attach(struct device *, struct device *, void *);
static int	ld_iop_detach(struct device *, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *);
static void	ld_iop_intr(struct device *, struct iop_msg *, void *);
static void	ld_iop_intr_event(struct device *, struct iop_msg *, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static int	ld_iop_match(struct device *, struct cfdata *, void *);

struct cfattach ld_iop_ca = {
	sizeof(struct ld_iop_softc),
	ld_iop_match,
	ld_iop_attach,
	ld_iop_detach
};

#ifdef I2OVERBOSE
static const char *ld_iop_errors[] = {
	"success",
	"media error",
	"failure communicating with device",
	"device failure",
	"device is not ready",
	"media not present",
	"media locked by another user",
	"media failure",
	"failure communicating to device",
	"device bus failure",
	"device locked by another user",
	"device write protected",
	"device reset",
	"volume has changed, waiting for acknowledgement",
};
#endif

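/*
 * Match any device that the IOP reports as belonging to the random block
 * storage class; finer-grained checks are made at attach time.
 */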
static int
ld_iop_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

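/*
 * Attach: register with the IOP, claim the device, read its identity,
 * geometry and cache parameters, set command timeouts, and hand off to
 * ldattach().  Only fixed direct-access devices are enabled for I/O.
 */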
static void
ld_iop_attach(struct device *parent, struct device *self, void *aux)
{
	struct iop_attach_args *ia;
	struct ld_softc *ld;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv, evreg, enable;
	char ident[64 + 1], *typestr, *fixedstr;
	u_int cachesz;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
			struct	i2o_param_device_identity di;
			struct	i2o_param_rbs_operation op;
		} p;
	} param;

	sc = (struct ld_iop_softc *)self;
	ld = &sc->sc_ld;
	iop = (struct iop_softc *)parent;
	ia = (struct iop_attach_args *)aux;
	sc->sc_tid = ia->ia_tid;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	if (iop_initiator_register(iop, &sc->sc_ii) != 0) {
		printf("%s: unable to register initiator\n", self->dv_xname);
		return;
	}

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_DISCARD | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	if (iop_initiator_register(iop, &sc->sc_eventii) != 0) {
		printf("%s: unable to register initiator\n", self->dv_xname);
		goto bad;
	}
	if (iop_util_eventreg(iop, &sc->sc_eventii, 0xffffffff)) {
		printf("%s: unable to register for events\n", self->dv_xname);
		goto bad;
	}
	evreg = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_maxqueuecnt = LD_IOP_MAXQUEUECNT;
	ld->sc_dump = ld_iop_dump;
	ld->sc_flush = ld_iop_flush;
	ld->sc_start = ld_iop_start;

	/* Say what the device is. */
	printf(": ");
	if (iop_param_op(iop, ia->ia_tid, 0, I2O_PARAM_DEVICE_IDENTITY, &param,
	    sizeof(param)) == 0) {
		iop_strvis(iop, param.p.di.vendorinfo,
		    sizeof(param.p.di.vendorinfo), ident, sizeof(ident));
		printf("<%s, ", ident);
		iop_strvis(iop, param.p.di.productinfo,
		    sizeof(param.p.di.productinfo), ident, sizeof(ident));
		printf("%s, ", ident);
		iop_strvis(iop, param.p.di.revlevel,
		    sizeof(param.p.di.revlevel), ident, sizeof(ident));
		printf("%s> ", ident);
	}

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	sc->sc_claimed = !iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);

	rv = iop_param_op(iop, ia->ia_tid, 0, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param));
	if (rv != 0) {
		printf("%s: unable to get parameters (0x%04x; %d)\n",
		    ld->sc_dv.dv_xname, I2O_PARAM_RBS_DEVICE_INFO, rv);
		goto bad;
	}

	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	/* Build synthetic geometry. */
	if (ld->sc_secperunit <= 528 * 2048)		/* 528MB */
		ld->sc_nheads = 16;
	else if (ld->sc_secperunit <= 1024 * 2048)	/* 1GB */
		ld->sc_nheads = 32;
	else if (ld->sc_secperunit <= 21504 * 2048)	/* 21GB */
		ld->sc_nheads = 64;
	else if (ld->sc_secperunit <= 43008 * 2048)	/* 42GB */
		ld->sc_nheads = 128;
	else
		ld->sc_nheads = 255;

	ld->sc_nsectors = 63;
	ld->sc_ncylinders = ld->sc_secperunit /
	    (ld->sc_nheads * ld->sc_nsectors);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "cdrom";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVEABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags = LDF_REMOVEABLE; */
		fixedstr = "removeable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf("%s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown, as it is still valid to do so.
	 */
	rv = iop_param_op(iop, ia->ia_tid, 0, I2O_PARAM_RBS_CACHE_CONTROL,
	    &param, sizeof(param));
	if (rv != 0) {
		printf("%s: unable to get parameters (0x%04x; %d)\n",
		    ld->sc_dv.dv_xname, I2O_PARAM_RBS_CACHE_CONTROL, rv);
		goto bad;
	}

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %dkB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 10 seconds.
	 */
	rv = iop_param_op(iop, ia->ia_tid, 0, I2O_PARAM_RBS_OPERATION,
	    &param, sizeof(param));
	if (rv != 0) {
		printf("%s: unable to get parameters (0x%04x; %d)\n",
		    ld->sc_dv.dv_xname, I2O_PARAM_RBS_OPERATION, rv);
		goto bad;
	}

	param.p.op.timeoutbase = htole32(LD_IOP_TIMEOUT);
	param.p.op.rwvtimeoutbase = htole32(LD_IOP_TIMEOUT);
	param.p.op.rwvtimeout = 0;

	rv = iop_param_op(iop, ia->ia_tid, 1, I2O_PARAM_RBS_OPERATION,
	    &param, sizeof(param));
	if (rv != 0) {
		printf("%s: unable to set parameters (0x%04x; %d)\n",
		    ld->sc_dv.dv_xname, I2O_PARAM_RBS_OPERATION, rv);
		goto bad;
	}

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		printf("%s: device not yet supported\n", self->dv_xname);

	ldattach(ld);
	return;

 bad:
	if (sc->sc_claimed)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);
	if (evreg)
		iop_util_eventreg(iop, &sc->sc_eventii, 0);
	if (sc->sc_eventii.ii_intr != NULL)
		iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

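/*
 * Detach: abort requests still queued with the IOP (in-progress requests
 * are allowed to complete), release our claim on the target, and
 * unregister both initiators.
 */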
static int
ld_iop_detach(struct device *self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = (struct ld_iop_softc *)self;

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	iop = (struct iop_softc *)self->dv_parent;

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register us as an initiator. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0) {
		if (sc->sc_claimed) {
			rv = iop_util_claim(iop, &sc->sc_ii, 1,
			    I2O_UTIL_CLAIM_PRIMARY_USER);
			if (rv != 0)
				return (rv);
		}
		iop_util_eventreg(iop, &sc->sc_eventii, 0);
		iop_initiator_unregister(iop, &sc->sc_eventii);
		iop_initiator_unregister(iop, &sc->sc_ii);
	}

	return (0);
}

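/*
 * Queue a block read or write on behalf of ld(4).  Completion is reported
 * asynchronously through ld_iop_intr().
 */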
static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mb;
	int rv, flags, write;
	u_int64_t ba;

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;

	im = NULL;
	if ((rv = iop_msg_alloc(iop, &sc->sc_ii, &im, IM_NOWAIT)) != 0)
		goto bad;
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mb = (struct i2o_rbs_block_read *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mb->msgfunc = I2O_MSGFUNC(sc->sc_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mb->msgictx = sc->sc_ii.ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = flags | (1 << 16);		/* flags & time multiplier */
	mb->datasize = bp->b_bcount;
	mb->lowoffset = (u_int32_t)ba;
	mb->highoffset = (u_int32_t)(ba >> 32);

	/* Map the data transfer. */
	if ((rv = iop_msg_map(iop, im, bp->b_data, bp->b_bcount, write)) != 0)
		goto bad;

	/* Enqueue the command. */
	iop_msg_enqueue(iop, im, 0);
	return (0);

 bad:
	if (im != NULL)
		iop_msg_free(iop, &sc->sc_ii, im);
	return (rv);
}

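/*
 * Crash dump entry point: write blocks synchronously.  The message is
 * allocated with IM_NOWAIT | IM_NOINTR and submitted with iop_msg_send()
 * using a 5000ms timeout, so the write completes (or fails) before return.
 */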
static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mb;
	int rv, bcount;
	u_int64_t ba;

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;

	rv = iop_msg_alloc(iop, &sc->sc_ii, &im, IM_NOWAIT | IM_NOINTR);
	if (rv != 0)
		return (rv);

	mb = (struct i2o_rbs_block_write *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mb->msgfunc = I2O_MSGFUNC(sc->sc_tid, I2O_RBS_BLOCK_WRITE);
	mb->msgictx = sc->sc_ii.ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mb->datasize = bcount;
	mb->lowoffset = (u_int32_t)ba;
	mb->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, data, bcount, 1)) != 0) {
		iop_msg_free(iop, &sc->sc_ii, im);
		return (rv);
	}

	rv = (iop_msg_send(iop, im, 5000) != 0 ? EIO : 0);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, &sc->sc_ii, im);
	return (rv);
}

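/*
 * Flush the device's write cache.  Registered as ld->sc_flush, so the
 * ld(4) midlayer can call it e.g. at shutdown; see the cache probe in
 * ld_iop_attach() above.
 */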
static int
ld_iop_flush(struct ld_softc *ld)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush *mb;
	int rv;

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;

	rv = iop_msg_alloc(iop, &sc->sc_ii, &im, IM_NOWAIT | IM_NOINTR);
	if (rv != 0)
		return (rv);

	mb = (struct i2o_rbs_cache_flush *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mb->msgfunc = I2O_MSGFUNC(sc->sc_tid, I2O_RBS_CACHE_FLUSH);
	mb->msgictx = sc->sc_ii.ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = 1 << 16;			/* time multiplier */

	rv = iop_msg_send(iop, im, 10000);
	iop_msg_free(iop, &sc->sc_ii, im);
	return (rv);
}

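/*
 * Completion handler for block read/write messages queued by
 * ld_iop_start().  Map the I2O reply status onto the buf and hand it back
 * to ld(4) via lddone().
 */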
static void
ld_iop_intr(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
#ifdef I2OVERBOSE
	int detail;
	const char *errstr;
#endif

	rb = reply;
	bp = im->im_dvcontext;
	sc = (struct ld_iop_softc *)dv;
	iop = (struct iop_softc *)dv->dv_parent;

#ifdef I2OVERBOSE
	if (rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
		if (detail >= sizeof(ld_iop_errors) / sizeof(ld_iop_errors[0]))
			errstr = "unknown error";
		else
			errstr = ld_iop_errors[detail];
		printf("%s: %s\n", dv->dv_xname, errstr);
#else
	if (rb->reqstatus != I2O_STATUS_SUCCESS) {
#endif
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
#ifndef notyet
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = 0;
#else
	}
	bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);
#endif

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, &sc->sc_ii, im);
	lddone(&sc->sc_ld, bp);
}

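/*
 * Handler for asynchronous event notifications, enabled with
 * iop_util_eventreg() at attach time.  For now, events are only logged.
 */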
static void
ld_iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;
	event = le32toh(rb->event);

#ifndef I2ODEBUG
	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED)
		return;
#endif

	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}