/*	$NetBSD: ld_iop.c,v 1.32 2008/09/09 12:45:39 tron Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_iop.c,v 1.32 2008/09/09 12:45:39 tron Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <sys/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>

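/*
 * Command timeout in milliseconds (i.e. 30 seconds).  This is the
 * poll/wait timeout handed to iop_msg_post() and, scaled by 1000, the
 * timeout base programmed into the DDM in ld_iop_attach().
 */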
#define	LD_IOP_TIMEOUT		30*1000

#define	LD_IOP_CLAIMED		0x01
#define	LD_IOP_NEW_EVTMASK	0x02

struct ld_iop_softc {
	struct	ld_softc sc_ld;		/* generic ld(4) softc; must be first */
	struct	iop_initiator sc_ii;	/* initiator for block I/O requests */
	struct	iop_initiator sc_eventii; /* initiator for event notifications */
	int	sc_flags;		/* LD_IOP_* flags, above */
};

static void	ld_iop_adjqparam(device_t, int);
static void	ld_iop_attach(device_t, device_t, void *);
static int	ld_iop_detach(device_t, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *, int);
static void	ld_iop_intr(device_t, struct iop_msg *, void *);
static void	ld_iop_intr_event(device_t, struct iop_msg *, void *);
static int	ld_iop_match(device_t, cfdata_t, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static void	ld_iop_unconfig(struct ld_iop_softc *, int);

CFATTACH_DECL_NEW(ld_iop, sizeof(struct ld_iop_softc),
    ld_iop_match, ld_iop_attach, ld_iop_detach, NULL);

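/*
 * Textual descriptions of the RBS detailed status codes, indexed by the
 * `detail' field of the reply in ld_iop_intr().
 */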
static const char * const ld_iop_errors[] = {
	"success",
	"media error",
	"access error",
	"device failure",
	"device not ready",
	"media not present",
	"media locked",
	"media failure",
	"protocol failure",
	"bus failure",
	"access violation",
	"media write protected",
	"device reset",
	"volume changed, waiting for acknowledgement",
	"timeout",
};

static int
ld_iop_match(device_t parent, cfdata_t match, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

static void
ld_iop_attach(device_t parent, device_t self, void *aux)
{
	struct iop_attach_args *ia = aux;
	struct ld_iop_softc *sc = device_private(self);
	struct iop_softc *iop = device_private(parent);
	struct ld_softc *ld = &sc->sc_ld;
	int rv, evreg, enable;
	const char *typestr, *fixedstr;
	u_int cachesz;
	u_int32_t timeoutbase, rwvtimeoutbase, rwvtimeout;
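	/*
	 * Scratch area for parameter group reads: iop_field_get_all()
	 * deposits the operation and read-result headers followed by the
	 * requested parameter group, hence the union of the two groups
	 * fetched below.
	 */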
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
		} p;
	} __packed param;

	ld->sc_dv = self;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_adjqparam = ld_iop_adjqparam;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_ii);

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_eventii);

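	/*
	 * Ask the IOP to deliver the generic events we care about; they
	 * arrive via ld_iop_intr_event().
	 */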
	rv = iop_util_eventreg(iop, &sc->sc_eventii,
	    I2O_EVENT_GEN_EVENT_MASK_MODIFIED |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(self, "unable to register for events\n");
		goto bad;
	}
	evreg = 1;

	/*
	 * Start out with one queued command.  The `iop' driver will adjust
	 * the queue parameters once we're up and running.
	 */
	ld->sc_maxqueuecnt = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_dump = ld_iop_dump;
	ld->sc_flush = ld_iop_flush;
	ld->sc_start = ld_iop_start;

	/* Say what the device is. */
	printf(":");
	iop_print_ident(iop, ia->ia_tid);

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	rv = iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);
	sc->sc_flags = rv ? 0 : LD_IOP_CLAIMED;

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

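	/* The device reports its capacity in bytes; convert to sectors. */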
	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "CD-ROM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags = LDF_REMOVABLE; */
		fixedstr = "removable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf(" %s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown.
	 */
	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_CACHE_CONTROL,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %dkB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 30 seconds.
	 */
	timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeout = 0;

	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &timeoutbase, sizeof(timeoutbase),
	    I2O_PARAM_RBS_OPERATION_timeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeoutbase, sizeof(rwvtimeoutbase),
	    I2O_PARAM_RBS_OPERATION_rwvtimeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeout, sizeof(rwvtimeout),
	    I2O_PARAM_RBS_OPERATION_rwvtimeout);

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		aprint_error_dev(self, "device not yet supported\n");

	ldattach(ld);
	return;

 bad:
	ld_iop_unconfig(sc, evreg);
}

static void
ld_iop_unconfig(struct ld_iop_softc *sc, int evreg)
{
	struct iop_softc *iop;

	iop = device_private(device_parent(sc->sc_ld.sc_dv));

	if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);

	if (evreg) {
		/*
		 * Mask off events, and wait up to 5 seconds for a reply.
		 * Note that some adapters won't reply to this (XXX We
		 * should check the event capabilities).
		 */
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
		mutex_spin_exit(&iop->sc_intrlock);

		iop_util_eventreg(iop, &sc->sc_eventii,
		    I2O_EVENT_GEN_EVENT_MASK_MODIFIED);

		mutex_spin_enter(&iop->sc_intrlock);
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			cv_timedwait(&sc->sc_eventii.ii_cv,
			    &iop->sc_intrlock, hz * 5);
		mutex_spin_exit(&iop->sc_intrlock);
	}

	iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

static int
ld_iop_detach(device_t self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = device_private(self);
	iop = device_private(device_parent(self));

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register our initiators. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		ld_iop_unconfig(sc, 1);

	return (0);
}

static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mf;
	u_int rv, flags, write;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = device_private(device_parent(ld->sc_dv));

	im = iop_msg_alloc(iop, 0);
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mf = (struct i2o_rbs_block_read *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = flags | (1 << 16);	/* cache flags; time multiplier of 1 */
	mf->datasize = bp->b_bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	/* Map the data transfer and enqueue the command. */
	rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
	if (rv == 0) {
		if ((rv = iop_post(iop, mb)) != 0) {
			iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
		}
	}
	return (rv);
}

static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mf;
	int rv, bcount;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

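	/*
	 * Crash dumps can't rely on interrupts or on sleeping, so allocate
	 * a pollable message (IM_POLL) and post it synchronously below.
	 */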
	sc = (struct ld_iop_softc *)ld;
	iop = device_private(device_parent(ld->sc_dv));
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;
	im = iop_msg_alloc(iop, IM_POLL);

	mf = (struct i2o_rbs_block_write *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_BLOCK_WRITE);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mf->datasize = bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, mb, data, bcount, 1, NULL)) != 0) {
		iop_msg_free(iop, im);
		return (rv);
	}

	rv = iop_msg_post(iop, im, mb, LD_IOP_TIMEOUT * 2);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	return (rv);
}

static int
ld_iop_flush(struct ld_softc *ld, int flags)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush mf;
	int rv;

	sc = (struct ld_iop_softc *)ld;
	iop = device_private(device_parent(ld->sc_dv));
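	/*
	 * No data transfer is involved, so the message frame can live on
	 * the stack.  IM_WAIT lets us sleep until the reply arrives.
	 */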
	im = iop_msg_alloc(iop, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_CACHE_FLUSH);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = 1 << 16;			/* time multiplier */

	/* Ancient disks will return an error here. */
	rv = iop_msg_post(iop, im, &mf, LD_IOP_TIMEOUT * 2);
	iop_msg_free(iop, im);
	return (rv);
}

static void
ld_iop_intr(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int err, detail;
	const char *errstr;

	rb = reply;
	bp = im->im_dvcontext;
	sc = device_private(dv);
	iop = device_private(device_parent(dv));

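	/*
	 * A message-level failure means the IOP couldn't deliver the
	 * message at all; otherwise, check the request status for a
	 * device-level error.
	 */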
	err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);

	if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
		if (detail >= __arraycount(ld_iop_errors))
			errstr = "<unknown>";
		else
			errstr = ld_iop_errors[detail];
		aprint_error_dev(dv, "error 0x%04x: %s\n", detail, errstr);
		err = 1;
	}

	if (err) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	lddone(&sc->sc_ld, bp);
}

static void
ld_iop_intr_event(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	sc = device_private(dv);

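	/*
	 * An event-mask-modified reply completes the handshake with
	 * ld_iop_unconfig(), which may be sleeping on ii_cv.
	 */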
	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED) {
		iop = device_private(device_parent(dv));
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags |= LD_IOP_NEW_EVTMASK;
		cv_broadcast(&sc->sc_eventii.ii_cv);
		mutex_spin_exit(&iop->sc_intrlock);
		return;
	}

	printf("%s: event 0x%08x received\n", device_xname(dv), event);
}

static void
ld_iop_adjqparam(device_t dv, int mpi)
{
	struct ld_iop_softc *sc = device_private(dv);
	struct iop_softc *iop = device_private(device_parent(dv));
	struct ld_softc *ld = &sc->sc_ld;

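	/* `mpi' is the new maximum number of commands the IOP will queue
	 * for this initiator; pass it through to ld(4). */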
	/*
	 * AMI controllers seem to lose the plot if you hand off lots of
	 * queued commands.
	 */
	if (le16toh(I2O_ORG_AMI) == iop->sc_status.orgid && mpi > 64)
		mpi = 64;

	ldadjqparam(ld, mpi);
}