/*	$NetBSD: ld_iop.c,v 1.26 2007/10/19 11:59:44 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_iop.c,v 1.26 2007/10/19 11:59:44 ad Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <sys/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>
#define	LD_IOP_TIMEOUT		30*1000

#define	LD_IOP_CLAIMED		0x01	/* we hold a claim on the device */
#define	LD_IOP_NEW_EVTMASK	0x02	/* event mask change acknowledged */

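/*
 * Per-unit softc.  The generic ld(4) softc must remain the first member,
 * since the ld_softc pointers handed back to us are cast to ld_iop_softc.
 * One initiator handles command replies; a second handles event
 * notifications from the device.
 */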
struct ld_iop_softc {
	struct	ld_softc sc_ld;
	struct	iop_initiator sc_ii;
	struct	iop_initiator sc_eventii;
	int	sc_flags;
};

static void	ld_iop_adjqparam(struct device *, int);
static void	ld_iop_attach(struct device *, struct device *, void *);
static int	ld_iop_detach(struct device *, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *);
static void	ld_iop_intr(struct device *, struct iop_msg *, void *);
static void	ld_iop_intr_event(struct device *, struct iop_msg *, void *);
static int	ld_iop_match(struct device *, struct cfdata *, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static void	ld_iop_unconfig(struct ld_iop_softc *, int);

CFATTACH_DECL(ld_iop, sizeof(struct ld_iop_softc),
    ld_iop_match, ld_iop_attach, ld_iop_detach, NULL);

static const char * const ld_iop_errors[] = {
	"success",
	"media error",
	"access error",
	"device failure",
	"device not ready",
	"media not present",
	"media locked",
	"media failure",
	"protocol failure",
	"bus failure",
	"access violation",
	"media write protected",
	"device reset",
	"volume changed, waiting for acknowledgement",
	"timeout",
};
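
/*
 * The table above is indexed by the detailed status code from an RBS
 * reply; out-of-range codes are reported as "<unknown>" (see
 * ld_iop_intr()).
 */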

static int
ld_iop_match(struct device *parent, struct cfdata *match,
    void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

static void
ld_iop_attach(struct device *parent, struct device *self, void *aux)
{
	struct iop_attach_args *ia;
	struct ld_softc *ld;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv, evreg, enable;
	const char *typestr, *fixedstr;
	u_int cachesz;
	u_int32_t timeoutbase, rwvtimeoutbase, rwvtimeout;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
		} p;
	} __attribute__ ((__packed__)) param;

	sc = device_private(self);
	ld = &sc->sc_ld;
	iop = device_private(parent);
	ia = (struct iop_attach_args *)aux;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_adjqparam = ld_iop_adjqparam;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_ii);

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_eventii);

	rv = iop_util_eventreg(iop, &sc->sc_eventii,
	    I2O_EVENT_GEN_EVENT_MASK_MODIFIED |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
174 printf("%s: unable to register for events", self->dv_xname);
		goto bad;
	}
	evreg = 1;

	/*
	 * Start out with one queued command.  The `iop' driver will adjust
	 * the queue parameters once we're up and running.
	 */
	ld->sc_maxqueuecnt = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_dump = ld_iop_dump;
	ld->sc_flush = ld_iop_flush;
	ld->sc_start = ld_iop_start;

	/* Say what the device is. */
	printf(":");
	iop_print_ident(iop, ia->ia_tid);

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	rv = iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);
	sc->sc_flags = rv ? 0 : LD_IOP_CLAIMED;

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "CD-ROM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags = LDF_REMOVABLE; */
		fixedstr = "removable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf(" %s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown.
	 */
	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_CACHE_CONTROL,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %dkB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 30 seconds.
	 */
	timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeout = 0;

	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &timeoutbase, sizeof(timeoutbase),
	    I2O_PARAM_RBS_OPERATION_timeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeoutbase, sizeof(rwvtimeoutbase),
	    I2O_PARAM_RBS_OPERATION_rwvtimeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeout, sizeof(rwvtimeout),
	    I2O_PARAM_RBS_OPERATION_rwvtimeout);

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		printf("%s: device not yet supported\n", self->dv_xname);

	ldattach(ld);
	return;

 bad:
	ld_iop_unconfig(sc, evreg);
}

static void
ld_iop_unconfig(struct ld_iop_softc *sc, int evreg)
{
	struct iop_softc *iop;

	iop = (struct iop_softc *)device_parent(&sc->sc_ld.sc_dv);

	if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);

	if (evreg) {
		/*
		 * Mask off events, and wait up to 5 seconds for a reply.
		 * Note that some adapters won't reply to this (XXX We
		 * should check the event capabilities).
		 */
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
		mutex_spin_exit(&iop->sc_intrlock);

		iop_util_eventreg(iop, &sc->sc_eventii,
		    I2O_EVENT_GEN_EVENT_MASK_MODIFIED);

		mutex_spin_enter(&iop->sc_intrlock);
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			cv_timedwait(&sc->sc_eventii.ii_cv,
			    &iop->sc_intrlock, hz * 5);
		mutex_spin_exit(&iop->sc_intrlock);
	}

	iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

static int
ld_iop_detach(struct device *self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = device_private(self);
	iop = device_private(device_parent(self));

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register our initiators. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		ld_iop_unconfig(sc, 1);

	return (0);
}

static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mf;
	u_int rv, flags, write;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

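	/*
	 * The generic ld(4) softc is the first member of ld_iop_softc, so
	 * the cast below is safe.
	 */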
	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)device_parent(&ld->sc_dv);

	im = iop_msg_alloc(iop, 0);
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mf = (struct i2o_rbs_block_read *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
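	/*
	 * The low 16 bits of `flags' carry the control flags; the upper 16
	 * bits hold the time multiplier, here 1 (i.e. the timeout base
	 * programmed at attach time is used unscaled).
	 */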
	mf->flags = flags | (1 << 16);		/* flags & time multiplier */
	mf->datasize = bp->b_bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	/* Map the data transfer and enqueue the command. */
	rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
	if (rv == 0) {
		if ((rv = iop_post(iop, mb)) != 0) {
			iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
		}
	}
	return (rv);
}

static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mf;
	int rv, bcount;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)device_parent(&ld->sc_dv);
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;
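
	/* The dump path can't sleep, so poll for the reply. */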
	im = iop_msg_alloc(iop, IM_POLL);

	mf = (struct i2o_rbs_block_write *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_BLOCK_WRITE);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mf->datasize = bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, mb, data, bcount, 1, NULL)) != 0) {
		iop_msg_free(iop, im);
		return (rv);
	}

	rv = iop_msg_post(iop, im, mb, LD_IOP_TIMEOUT * 2);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	return (rv);
}

static int
ld_iop_flush(struct ld_softc *ld)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush mf;
	int rv;

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)device_parent(&ld->sc_dv);
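
	/* Unlike the dump path, flush may block waiting for the reply. */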
	im = iop_msg_alloc(iop, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_CACHE_FLUSH);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = 1 << 16;			/* time multiplier */

	/* Ancient disks will return an error here. */
	rv = iop_msg_post(iop, im, &mf, LD_IOP_TIMEOUT * 2);
	iop_msg_free(iop, im);
	return (rv);
}

static void
ld_iop_intr(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int err, detail;
	const char *errstr;

	rb = reply;
	bp = im->im_dvcontext;
	sc = (struct ld_iop_softc *)dv;
	iop = (struct iop_softc *)device_parent(dv);

	err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);

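	/*
	 * A reply with I2O_MSGFLAGS_FAIL set is a transport-level failure
	 * and carries no usable detail code; otherwise check the request
	 * status and decode the detailed status.
	 */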
	if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
		if (detail >= __arraycount(ld_iop_errors))
			errstr = "<unknown>";
		else
			errstr = ld_iop_errors[detail];
		printf("%s: error 0x%04x: %s\n", dv->dv_xname, detail, errstr);
		err = 1;
	}

	if (err) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	lddone(&sc->sc_ld, bp);
}

static void
ld_iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	sc = (struct ld_iop_softc *)dv;

	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED) {
		iop = device_private(device_parent(dv));
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags |= LD_IOP_NEW_EVTMASK;
		cv_broadcast(&sc->sc_eventii.ii_cv);
		mutex_spin_exit(&iop->sc_intrlock);
		return;
	}

	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

static void
ld_iop_adjqparam(struct device *dv, int mpi)
{
	struct iop_softc *iop;

	/*
	 * AMI controllers seem to lose the plot if you hand off lots of
	 * queued commands.
	 */
	iop = (struct iop_softc *)device_parent(dv);
	if (le16toh(I2O_ORG_AMI) == iop->sc_status.orgid && mpi > 64)
		mpi = 64;

	ldadjqparam((struct ld_softc *)dv, mpi);
}