mpt_netbsd.c revision 1.4
      1 /*	$NetBSD: mpt_netbsd.c,v 1.4 2003/04/16 23:17:30 thorpej Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2003 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*
     39  * Copyright (c) 2000, 2001 by Greg Ansley
     40  * Partially derived from Matt Jacob's ISP driver.
     41  *
     42  * Redistribution and use in source and binary forms, with or without
     43  * modification, are permitted provided that the following conditions
     44  * are met:
     45  * 1. Redistributions of source code must retain the above copyright
     46  *    notice immediately at the beginning of the file, without modification,
     47  *    this list of conditions, and the following disclaimer.
     48  * 2. The name of the author may not be used to endorse or promote products
     49  *    derived from this software without specific prior written permission.
     50  *
     51  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     52  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     53  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     54  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
     55  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     56  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     57  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     58  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     59  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     60  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     61  * SUCH DAMAGE.
     62  */
     63 /*
     64  * Additional Copyright (c) 2002 by Matthew Jacob under same license.
     65  */
     66 
     67 /*
     68  * mpt_netbsd.c:
     69  *
     70  * NetBSD-specific routines for LSI Fusion adapters.  Includes some
     71  * bus_dma glue, and SCSIPI glue.
     72  *
     73  * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
     74  * Wasabi Systems, Inc.
     75  */
     76 
     77 #include <dev/ic/mpt.h>			/* pulls in all headers */
     78 
     79 #include <machine/stdarg.h>		/* for mpt_prt() */
     80 
     81 static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
     82 static void	mpt_timeout(void *);
     83 static void	mpt_done(mpt_softc_t *, uint32_t);
     84 static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
     85 static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
     86 static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
     87 static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
     88 static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);
     89 
     90 static void	mpt_scsipi_request(struct scsipi_channel *,
     91 		    scsipi_adapter_req_t, void *);
     92 static void	mpt_minphys(struct buf *);
     93 
     94 void
     95 mpt_scsipi_attach(mpt_softc_t *mpt)
     96 {
     97 	struct scsipi_adapter *adapt = &mpt->sc_adapter;
     98 	struct scsipi_channel *chan = &mpt->sc_channel;
     99 	int maxq;
    100 
    101 	mpt->bus = 0;		/* XXX ?? */
    102 
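         	/* Queue depth: lesser of IOC credits and request pool size. */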
    103 	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
    104 	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
    105 
    106 	/* Fill in the scsipi_adapter. */
    107 	memset(adapt, 0, sizeof(*adapt));
    108 	adapt->adapt_dev = &mpt->sc_dev;
    109 	adapt->adapt_nchannels = 1;
    110 	adapt->adapt_openings = maxq;
    111 	adapt->adapt_max_periph = maxq;
    112 	adapt->adapt_request = mpt_scsipi_request;
    113 	adapt->adapt_minphys = mpt_minphys;
    114 
    115 	/* Fill in the scsipi_channel. */
    116 	memset(chan, 0, sizeof(*chan));
    117 	chan->chan_adapter = adapt;
    118 	chan->chan_bustype = &scsi_bustype;
    119 	chan->chan_channel = 0;
    120 	chan->chan_flags = 0;
    121 	chan->chan_nluns = 8;
    122 	if (mpt->is_fc) {
    123 		chan->chan_ntargets = 256;
    124 		chan->chan_id = 256;
    125 	} else {
    126 		chan->chan_ntargets = 16;
    127 		chan->chan_id = mpt->mpt_ini_id;
    128 	}
    129 
    130 	(void) config_found(&mpt->sc_dev, &mpt->sc_channel, scsiprint);
    131 }
    132 
    133 int
    134 mpt_dma_mem_alloc(mpt_softc_t *mpt)
    135 {
    136 	bus_dma_segment_t reply_seg, request_seg;
    137 	int reply_rseg, request_rseg;
    138 	bus_addr_t pptr, end;
    139 	caddr_t vptr;
    140 	size_t len;
    141 	int error, i;
    142 
    143 	/* Check if we have already allocated the reply memory. */
    144 	if (mpt->reply != NULL)
    145 		return (0);
    146 
    147 	/*
    148 	 * Allocate the request pool.  This isn't really DMA'd memory,
     149 	 * but this is a convenient place to allocate it.
    150 	 */
    151 	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
    152 	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
    153 	if (mpt->request_pool == NULL) {
    154 		aprint_error("%s: unable to allocate request pool\n",
    155 		    mpt->sc_dev.dv_xname);
    156 		return (ENOMEM);
    157 	}
    158 
    159 	/*
    160 	 * Allocate DMA resources for reply buffers.
    161 	 */
    162 	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
    163 	    &reply_seg, 1, &reply_rseg, 0);
    164 	if (error) {
    165 		aprint_error("%s: unable to allocate reply area, error = %d\n",
    166 		    mpt->sc_dev.dv_xname, error);
    167 		goto fail_0;
    168 	}
    169 
    170 	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
    171 	    (caddr_t *) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
    172 	if (error) {
    173 		aprint_error("%s: unable to map reply area, error = %d\n",
    174 		    mpt->sc_dev.dv_xname, error);
    175 		goto fail_1;
    176 	}
    177 
    178 	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
    179 	    0, 0, &mpt->reply_dmap);
    180 	if (error) {
    181 		aprint_error("%s: unable to create reply DMA map, error = %d\n",
    182 		    mpt->sc_dev.dv_xname, error);
    183 		goto fail_2;
    184 	}
    185 
    186 	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
    187 	    PAGE_SIZE, NULL, 0);
    188 	if (error) {
    189 		aprint_error("%s: unable to load reply DMA map, error = %d\n",
    190 		    mpt->sc_dev.dv_xname, error);
    191 		goto fail_3;
    192 	}
    193 	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;
    194 
    195 	/*
    196 	 * Allocate DMA resources for request buffers.
    197 	 */
    198 	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
    199 	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
    200 	if (error) {
    201 		aprint_error("%s: unable to allocate request area, "
    202 		    "error = %d\n", mpt->sc_dev.dv_xname, error);
    203 		goto fail_4;
    204 	}
    205 
    206 	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
    207 	    MPT_REQ_MEM_SIZE(mpt), (caddr_t *) &mpt->request, 0);
    208 	if (error) {
    209 		aprint_error("%s: unable to map request area, error = %d\n",
    210 		    mpt->sc_dev.dv_xname, error);
    211 		goto fail_5;
    212 	}
    213 
    214 	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
    215 	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
    216 	if (error) {
    217 		aprint_error("%s: unable to create request DMA map, "
    218 		    "error = %d\n", mpt->sc_dev.dv_xname, error);
    219 		goto fail_6;
    220 	}
    221 
    222 	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
    223 	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
    224 	if (error) {
    225 		aprint_error("%s: unable to load request DMA map, error = %d\n",
    226 		    mpt->sc_dev.dv_xname, error);
    227 		goto fail_7;
    228 	}
    229 	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;
    230 
    231 	pptr = mpt->request_phys;
    232 	vptr = (caddr_t) mpt->request;
    233 	end = pptr + MPT_REQ_MEM_SIZE(mpt);
    234 
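         	/*
         	 * Carve the request area into per-request chunks; the last
         	 * MPT_SENSE_SIZE bytes of each chunk hold that request's
         	 * sense buffer.
         	 */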
    235 	for (i = 0; pptr < end; i++) {
    236 		request_t *req = &mpt->request_pool[i];
    237 		req->index = i;
    238 
    239 		/* Store location of Request Data */
    240 		req->req_pbuf = pptr;
    241 		req->req_vbuf = vptr;
    242 
    243 		pptr += MPT_REQUEST_AREA;
    244 		vptr += MPT_REQUEST_AREA;
    245 
    246 		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
    247 		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);
    248 
    249 		error = bus_dmamap_create(mpt->sc_dmat, MAXBSIZE,
    250 		    MPT_SGL_MAX, MAXBSIZE, 0, 0, &req->dmap);
    251 		if (error) {
    252 			aprint_error("%s: unable to create req %d DMA map, "
    253 			    "error = %d\n", mpt->sc_dev.dv_xname, i, error);
    254 			goto fail_8;
    255 		}
    256 	}
    257 
    258 	return (0);
    259 
    260  fail_8:
    261 	for (--i; i >= 0; i--) {
    262 		request_t *req = &mpt->request_pool[i];
    263 		if (req->dmap != NULL)
    264 			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
    265 	}
    266 	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
    267  fail_7:
    268 	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
    269  fail_6:
     270 	bus_dmamem_unmap(mpt->sc_dmat, (caddr_t)mpt->request, MPT_REQ_MEM_SIZE(mpt));
    271  fail_5:
    272 	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
    273  fail_4:
    274 	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
    275  fail_3:
    276 	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
    277  fail_2:
    278 	bus_dmamem_unmap(mpt->sc_dmat, (caddr_t)mpt->reply, PAGE_SIZE);
    279  fail_1:
    280 	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
    281  fail_0:
    282 	free(mpt->request_pool, M_DEVBUF);
    283 
    284 	mpt->reply = NULL;
    285 	mpt->request = NULL;
    286 	mpt->request_pool = NULL;
    287 
    288 	return (error);
    289 }
    290 
    291 int
    292 mpt_intr(void *arg)
    293 {
    294 	mpt_softc_t *mpt = arg;
    295 	int nrepl = 0;
    296 	uint32_t reply;
    297 
    298 	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
    299 		return (0);
    300 
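         	/*
         	 * Drain the reply FIFO; each entry is either a request
         	 * context or the address of a reply frame from the IOC.
         	 */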
    301 	reply = mpt_pop_reply_queue(mpt);
    302 	while (reply != MPT_REPLY_EMPTY) {
    303 		nrepl++;
    304 		if (mpt->verbose > 1) {
    305 			if ((reply & MPT_CONTEXT_REPLY) != 0) {
    306 				/* Address reply; IOC has something to say */
    307 				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
    308 			} else {
    309 				/* Context reply; all went well */
    310 				mpt_prt(mpt, "context %u reply OK", reply);
    311 			}
    312 		}
    313 		mpt_done(mpt, reply);
    314 		reply = mpt_pop_reply_queue(mpt);
    315 	}
    316 	return (nrepl != 0);
    317 }
    318 
    319 void
    320 mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
    321 {
    322 	va_list ap;
    323 
    324 	printf("%s: ", mpt->sc_dev.dv_xname);
    325 	va_start(ap, fmt);
    326 	vprintf(fmt, ap);
    327 	va_end(ap);
    328 	printf("\n");
    329 }
    330 
    331 static int
    332 mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
    333 {
    334 
    335 	/* Timeouts are in msec, so we loop in 1000usec cycles */
    336 	while (count) {
    337 		mpt_intr(mpt);
    338 		if (xs->xs_status & XS_STS_DONE)
    339 			return (0);
    340 		delay(1000);		/* only happens in boot, so ok */
    341 		count--;
    342 	}
    343 	return (1);
    344 }
    345 
    346 static void
    347 mpt_timeout(void *arg)
    348 {
    349 	request_t *req = arg;
    350 	struct scsipi_xfer *xs = req->xfer;
    351 	struct scsipi_periph *periph = xs->xs_periph;
    352 	mpt_softc_t *mpt =
    353 	    (void *) periph->periph_channel->chan_adapter->adapt_dev;
    354 	uint32_t oseq;
    355 	int s;
    356 
    357 	scsipi_printaddr(periph);
    358 	printf("command timeout\n");
    359 
    360 	s = splbio();
    361 
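         	/*
         	 * Snapshot the sequence number; if mpt_intr() completes this
         	 * request, the sequence will change and we know it finished.
         	 */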
    362 	oseq = req->sequence;
    363 	mpt->timeouts++;
    364 	if (mpt_intr(mpt)) {
    365 		if (req->sequence != oseq) {
    366 			mpt_prt(mpt, "recovered from command timeout");
    367 			splx(s);
    368 			return;
    369 		}
    370 	}
    371 	mpt_prt(mpt,
    372 	    "timeout on request index = 0x%x, seq = 0x%08x",
    373 	    req->index, req->sequence);
    374 	mpt_check_doorbell(mpt);
    375 	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
    376 	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
    377 	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
    378 	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
    379 	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
    380 	if (mpt->verbose > 1)
    381 		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);
    382 
    383 	xs->error = XS_TIMEOUT;
    384 	scsipi_done(xs);
    385 
    386 	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
    387 	req->xfer = NULL;
    388 	mpt_free_request(mpt, req);
    389 
    390 	splx(s);
    391 }
    392 
    393 static void
    394 mpt_done(mpt_softc_t *mpt, uint32_t reply)
    395 {
    396 	struct scsipi_xfer *xs;
    397 	struct scsipi_periph *periph;
    398 	int index;
    399 	request_t *req;
    400 	MSG_REQUEST_HEADER *mpt_req;
    401 	MSG_SCSI_IO_REPLY *mpt_reply;
    402 
    403 	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
    404 		/* context reply (ok) */
    405 		mpt_reply = NULL;
    406 		index = reply & MPT_CONTEXT_MASK;
    407 	} else {
    408 		/* address reply (error) */
    409 
    410 		/* XXX BUS_DMASYNC_POSTREAD XXX */
    411 		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
    412 		if (mpt->verbose > 1) {
    413 			uint32_t *pReply = (uint32_t *) mpt_reply;
    414 
    415 			mpt_prt(mpt, "Address Reply (index %u):",
    416 			    mpt_reply->MsgContext & 0xffff);
    417 			mpt_prt(mpt, "%08x %08x %08x %08x",
    418 			    pReply[0], pReply[1], pReply[2], pReply[3]);
    419 			mpt_prt(mpt, "%08x %08x %08x %08x",
    420 			    pReply[4], pReply[5], pReply[6], pReply[7]);
    421 			mpt_prt(mpt, "%08x %08x %08x %08x",
    422 			    pReply[8], pReply[9], pReply[10], pReply[11]);
    423 		}
    424 		index = mpt_reply->MsgContext;
    425 	}
    426 
    427 	/*
    428 	 * Address reply with MessageContext high bit set.
    429 	 * This is most likely a notify message, so we try
    430 	 * to process it, then free it.
    431 	 */
    432 	if (__predict_false((index & 0x80000000) != 0)) {
    433 		if (mpt_reply != NULL)
    434 			mpt_ctlop(mpt, mpt_reply, reply);
    435 		else
    436 			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
    437 		return;
    438 	}
    439 
    440 	/* Did we end up with a valid index into the table? */
    441 	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
    442 		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
    443 		return;
    444 	}
    445 
    446 	req = &mpt->request_pool[index];
    447 
    448 	/* Make sure memory hasn't been trashed. */
    449 	if (__predict_false(req->index != index)) {
    450 		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
    451 		return;
    452 	}
    453 
    454 	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
    455 	mpt_req = req->req_vbuf;
    456 
    457 	/* Short cut for task management replies; nothing more for us to do. */
    458 	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
    459 		if (mpt->verbose > 1)
    460 			mpt_prt(mpt, "mpt_done: TASK MGMT");
    461 		goto done;
    462 	}
    463 
    464 	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
    465 		goto done;
    466 
    467 	/*
    468 	 * At this point, it had better be a SCSI I/O command, but don't
    469 	 * crash if it isn't.
    470 	 */
    471 	if (__predict_false(mpt_req->Function !=
    472 			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
    473 		if (mpt->verbose > 1)
    474 			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
    475 			    mpt_req->Function, index);
    476 		goto done;
    477 	}
    478 
    479 	/* Recover scsipi_xfer from the request structure. */
    480 	xs = req->xfer;
    481 
    482 	/* Can't have a SCSI command without a scsipi_xfer. */
    483 	if (__predict_false(xs == NULL)) {
    484 		mpt_prt(mpt,
    485 		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
    486 		    req->index, req->sequence);
    487 		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
    488 		mpt_prt(mpt, "mpt_request:");
    489 		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);
    490 
    491 		if (mpt_reply != NULL) {
    492 			mpt_prt(mpt, "mpt_reply:");
    493 			mpt_print_reply(mpt_reply);
    494 		} else {
    495 			mpt_prt(mpt, "context reply: 0x%08x", reply);
    496 		}
    497 		goto done;
    498 	}
    499 
    500 	callout_stop(&xs->xs_callout);
    501 
    502 	periph = xs->xs_periph;
    503 
    504 	/*
    505 	 * If we were a data transfer, unload the map that described
    506 	 * the data buffer.
    507 	 */
    508 	if (__predict_true(xs->datalen != 0)) {
    509 		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
    510 		    req->dmap->dm_mapsize,
    511 		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
    512 						      : BUS_DMASYNC_POSTWRITE);
    513 		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
    514 	}
    515 
    516 	if (__predict_true(mpt_reply == NULL)) {
    517 		/*
    518 		 * Context reply; report that the command was
    519 		 * successful!
    520 		 *
    521 		 * Also report the xfer mode, if necessary.
    522 		 */
    523 		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
    524 			if ((mpt->mpt_report_xfer_mode &
    525 			     (1 << periph->periph_target)) != 0)
    526 				mpt_get_xfer_mode(mpt, periph);
    527 		}
    528 		xs->error = XS_NOERROR;
    529 		xs->status = SCSI_OK;
    530 		xs->resid = 0;
    531 		scsipi_done(xs);
    532 		goto done;
    533 	}
    534 
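         	/* Address reply: map the IOC status onto a scsipi error code. */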
    535 	xs->status = mpt_reply->SCSIStatus;
    536 	switch (mpt_reply->IOCStatus) {
    537 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
    538 		xs->error = XS_DRIVER_STUFFUP;
    539 		break;
    540 
    541 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
    542 		/*
    543 		 * Yikes!  Tagged queue full comes through this path!
    544 		 *
    545 		 * So we'll change it to a status error and anything
    546 		 * that returns status should probably be a status
    547 		 * error as well.
    548 		 */
    549 		xs->resid = xs->datalen - mpt_reply->TransferCount;
    550 		if (mpt_reply->SCSIState &
    551 		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
    552 			xs->error = XS_DRIVER_STUFFUP;
    553 			break;
    554 		}
    555 		/* FALLTHROUGH */
    556 	case MPI_IOCSTATUS_SUCCESS:
    557 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
    558 		switch (xs->status) {
    559 		case SCSI_OK:
    560 			/* Report the xfer mode, if necessary. */
    561 			if ((mpt->mpt_report_xfer_mode &
    562 			     (1 << periph->periph_target)) != 0)
    563 				mpt_get_xfer_mode(mpt, periph);
    564 			xs->resid = 0;
    565 			break;
    566 
    567 		case SCSI_CHECK:
    568 			xs->error = XS_SENSE;
    569 			break;
    570 
    571 		case SCSI_BUSY:
    572 		case SCSI_QUEUE_FULL:
    573 			xs->error = XS_BUSY;
    574 			break;
    575 
    576 		default:
    577 			scsipi_printaddr(periph);
    578 			printf("invalid status code %d\n", xs->status);
    579 			xs->error = XS_DRIVER_STUFFUP;
    580 			break;
    581 		}
    582 		break;
    583 
    584 	case MPI_IOCSTATUS_BUSY:
    585 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
    586 		xs->error = XS_RESOURCE_SHORTAGE;
    587 		break;
    588 
    589 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
    590 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
    591 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
    592 		xs->error = XS_SELTIMEOUT;
    593 		break;
    594 
    595 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
    596 		xs->error = XS_DRIVER_STUFFUP;
    597 		break;
    598 
    599 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
    600 		/* XXX What should we do here? */
    601 		break;
    602 
    603 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
    604 		/* XXX */
    605 		xs->error = XS_DRIVER_STUFFUP;
    606 		break;
    607 
    608 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
    609 		/* XXX */
    610 		xs->error = XS_DRIVER_STUFFUP;
    611 		break;
    612 
    613 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
    614 		/* XXX This is a bus-reset */
    615 		xs->error = XS_DRIVER_STUFFUP;
    616 		break;
    617 
    618 	default:
    619 		/* XXX unrecognized HBA error */
    620 		xs->error = XS_DRIVER_STUFFUP;
    621 		break;
    622 	}
    623 
    624 	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
    625 		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
    626 		    sizeof(xs->sense.scsi_sense));
    627 	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
    628 		/*
    629 		 * This will cause the scsipi layer to issue
    630 		 * a REQUEST SENSE.
    631 		 */
    632 		if (xs->status == SCSI_CHECK)
    633 			xs->error = XS_BUSY;
    634 	}
    635 
    636 	scsipi_done(xs);
    637  done:
     638 	/* If the IOC is done with this request, free it up. */
    639 	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
    640 		mpt_free_request(mpt, req);
    641 
    642 	/* If address reply, give the buffer back to the IOC. */
    643 	if (mpt_reply != NULL)
    644 		mpt_free_reply(mpt, (reply << 1));
    645 }
    646 
    647 static void
    648 mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
    649 {
    650 	struct scsipi_periph *periph = xs->xs_periph;
    651 	request_t *req;
    652 	MSG_SCSI_IO_REQUEST *mpt_req;
    653 	int error, s;
    654 
    655 	s = splbio();
    656 	req = mpt_get_request(mpt);
    657 	if (__predict_false(req == NULL)) {
    658 		/* This should happen very infrequently. */
    659 		xs->error = XS_RESOURCE_SHORTAGE;
    660 		scsipi_done(xs);
    661 		splx(s);
    662 		return;
    663 	}
    664 	splx(s);
    665 
    666 	/* Link the req and the scsipi_xfer. */
    667 	req->xfer = xs;
    668 
    669 	/* Now we build the command for the IOC */
    670 	mpt_req = req->req_vbuf;
    671 	memset(mpt_req, 0, sizeof(*mpt_req));
    672 
    673 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
    674 	mpt_req->Bus = mpt->bus;
    675 
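         	/* Tell the IOC how much sense data we can accept. */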
    676 	mpt_req->SenseBufferLength =
    677 	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
    678 	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;
    679 
    680 	/*
    681 	 * We use the message context to find the request structure when
    682 	 * we get the command completion interrupt from the IOC.
    683 	 */
    684 	mpt_req->MsgContext = req->index;
    685 
    686 	/* Which physical device to do the I/O on. */
    687 	mpt_req->TargetID = periph->periph_target;
    688 	mpt_req->LUN[1] = periph->periph_lun;
    689 
    690 	/* Set the direction of the transfer. */
    691 	if (xs->xs_control & XS_CTL_DATA_IN)
    692 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
    693 	else if (xs->xs_control & XS_CTL_DATA_OUT)
    694 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
    695 	else
    696 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
    697 
    698 	/* Set the queue behavior. */
    699 	if (__predict_true(mpt->is_fc ||
    700 			   (mpt->mpt_tag_enable &
    701 			    (1 << periph->periph_target)))) {
    702 		switch (XS_CTL_TAGTYPE(xs)) {
    703 		case XS_CTL_HEAD_TAG:
    704 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
    705 			break;
    706 
    707 #if 0	/* XXX */
    708 		case XS_CTL_ACA_TAG:
    709 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
    710 			break;
    711 #endif
    712 
    713 		case XS_CTL_ORDERED_TAG:
    714 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
    715 			break;
    716 
    717 		case XS_CTL_SIMPLE_TAG:
    718 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
    719 			break;
    720 
    721 		default:
    722 			if (mpt->is_fc)
    723 				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
    724 			else
    725 				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
    726 			break;
    727 		}
    728 	} else
    729 		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
    730 
    731 	if (__predict_false(mpt->is_fc == 0 &&
    732 			    (mpt->mpt_disc_enable &
    733 			     (1 << periph->periph_target)) == 0))
    734 		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
    735 
    736 	/* Copy the SCSI command block into place. */
    737 	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);
    738 
    739 	mpt_req->CDBLength = xs->cmdlen;
    740 	mpt_req->DataLength = xs->datalen;
    741 	mpt_req->SenseBufferLowAddr = req->sense_pbuf;
    742 
    743 	/*
    744 	 * Map the DMA transfer.
    745 	 */
    746 	if (xs->datalen) {
    747 		SGE_SIMPLE32 *se;
    748 
    749 		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
    750 		    xs->datalen, NULL,
    751 		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
    752 						       : BUS_DMA_WAITOK) |
    753 		    BUS_DMA_STREAMING |
    754 		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
    755 						       : BUS_DMA_WRITE));
    756 		switch (error) {
    757 		case 0:
    758 			break;
    759 
    760 		case ENOMEM:
    761 		case EAGAIN:
    762 			xs->error = XS_RESOURCE_SHORTAGE;
    763 			goto out_bad;
    764 
    765 		default:
    766 			xs->error = XS_DRIVER_STUFFUP;
    767 			mpt_prt(mpt, "error %d loading DMA map", error);
    768  out_bad:
    769 			s = splbio();
    770 			mpt_free_request(mpt, req);
    771 			scsipi_done(xs);
    772 			splx(s);
    773 			return;
    774 		}
    775 
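         		/*
         		 * More segments than fit in the request frame itself;
         		 * chain additional SG lists within the request area.
         		 */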
    776 		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
    777 			int seg, i, nleft = req->dmap->dm_nsegs;
    778 			uint32_t flags;
    779 			SGE_CHAIN32 *ce;
    780 
    781 			seg = 0;
    782 
    783 			mpt_req->DataLength = xs->datalen;
    784 			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
    785 			if (xs->xs_control & XS_CTL_DATA_OUT)
    786 				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
    787 
    788 			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
    789 			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
    790 			     i++, se++, seg++) {
    791 				uint32_t tf;
    792 
    793 				memset(se, 0, sizeof(*se));
    794 				se->Address = req->dmap->dm_segs[seg].ds_addr;
    795 				MPI_pSGE_SET_LENGTH(se,
    796 				    req->dmap->dm_segs[seg].ds_len);
    797 				tf = flags;
    798 				if (i == MPT_NSGL_FIRST(mpt) - 2)
    799 					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
    800 				MPI_pSGE_SET_FLAGS(se, tf);
    801 				nleft--;
    802 			}
    803 
    804 			/*
    805 			 * Tell the IOC where to find the first chain element.
    806 			 */
    807 			mpt_req->ChainOffset =
    808 			    ((char *)se - (char *)mpt_req) >> 2;
    809 
    810 			/*
    811 			 * Until we're finished with all segments...
    812 			 */
    813 			while (nleft) {
    814 				int ntodo;
    815 
    816 				/*
    817 				 * Construct the chain element that points to
    818 				 * the next segment.
    819 				 */
    820 				ce = (SGE_CHAIN32 *) se++;
    821 				if (nleft > MPT_NSGL(mpt)) {
    822 					ntodo = MPT_NSGL(mpt) - 1;
    823 					ce->NextChainOffset = (MPT_RQSL(mpt) -
    824 					    sizeof(SGE_SIMPLE32)) >> 2;
    825 				} else {
    826 					ntodo = nleft;
    827 					ce->NextChainOffset = 0;
    828 				}
    829 				ce->Length = ntodo * sizeof(SGE_SIMPLE32);
    830 				ce->Address = req->req_pbuf +
    831 				    ((char *)se - (char *)mpt_req);
    832 				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
    833 				for (i = 0; i < ntodo; i++, se++, seg++) {
    834 					uint32_t tf;
    835 
    836 					memset(se, 0, sizeof(*se));
    837 					se->Address =
    838 					    req->dmap->dm_segs[seg].ds_addr;
    839 					MPI_pSGE_SET_LENGTH(se,
    840 					    req->dmap->dm_segs[seg].ds_len);
    841 					tf = flags;
    842 					if (i == ntodo - 1) {
    843 						tf |=
    844 						    MPI_SGE_FLAGS_LAST_ELEMENT;
    845 						if (ce->NextChainOffset == 0) {
    846 							tf |=
    847 						    MPI_SGE_FLAGS_END_OF_LIST |
    848 						    MPI_SGE_FLAGS_END_OF_BUFFER;
    849 						}
    850 					}
    851 					MPI_pSGE_SET_FLAGS(se, tf);
    852 					nleft--;
    853 				}
    854 			}
    855 			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
    856 			    req->dmap->dm_mapsize,
     857 			    (xs->xs_control & XS_CTL_DATA_IN) ?
     858 			    BUS_DMASYNC_PREREAD :
     859 			    BUS_DMASYNC_PREWRITE);
    860 		} else {
    861 			int i;
    862 			uint32_t flags;
    863 
    864 			mpt_req->DataLength = xs->datalen;
    865 			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
    866 			if (xs->xs_control & XS_CTL_DATA_OUT)
    867 				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
    868 
    869 			/* Copy the segments into our SG list. */
    870 			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
    871 			for (i = 0; i < req->dmap->dm_nsegs;
    872 			     i++, se++) {
    873 				uint32_t tf;
    874 
    875 				memset(se, 0, sizeof(*se));
    876 				se->Address = req->dmap->dm_segs[i].ds_addr;
    877 				MPI_pSGE_SET_LENGTH(se,
    878 				    req->dmap->dm_segs[i].ds_len);
    879 				tf = flags;
    880 				if (i == req->dmap->dm_nsegs - 1) {
    881 					tf |=
    882 					    MPI_SGE_FLAGS_LAST_ELEMENT |
    883 					    MPI_SGE_FLAGS_END_OF_BUFFER |
    884 					    MPI_SGE_FLAGS_END_OF_LIST;
    885 				}
    886 				MPI_pSGE_SET_FLAGS(se, tf);
    887 			}
    888 			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
    889 			    req->dmap->dm_mapsize,
     890 			    (xs->xs_control & XS_CTL_DATA_IN) ?
     891 			    BUS_DMASYNC_PREREAD :
     892 			    BUS_DMASYNC_PREWRITE);
    893 		}
    894 	} else {
    895 		/*
    896 		 * No data to transfer; just make a single simple SGL
    897 		 * with zero length.
    898 		 */
    899 		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
    900 		memset(se, 0, sizeof(*se));
    901 		MPI_pSGE_SET_FLAGS(se,
    902 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
    903 		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
    904 	}
    905 
    906 	if (mpt->verbose > 1)
    907 		mpt_print_scsi_io_request(mpt_req);
    908 
    909 	s = splbio();
    910 	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
    911 		callout_reset(&xs->xs_callout,
    912 		    mstohz(xs->timeout), mpt_timeout, req);
    913 	mpt_send_cmd(mpt, req);
    914 	splx(s);
    915 
    916 	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
    917 		return;
    918 
    919 	/*
    920 	 * If we can't use interrupts, poll on completion.
    921 	 */
    922 	if (mpt_poll(mpt, xs, xs->timeout))
    923 		mpt_timeout(req);
    924 }
    925 
    926 static void
    927 mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
    928 {
    929 	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;
    930 
    931 	if (mpt->is_fc) {
    932 		/*
    933 		 * SCSI transport settings don't make any sense for
    934 		 * Fibre Channel; silently ignore the request.
    935 		 */
    936 		return;
    937 	}
    938 
    939 	/*
    940 	 * Always allow disconnect; we don't have a way to disable
    941 	 * it right now, in any case.
    942 	 */
    943 	mpt->mpt_disc_enable |= (1 << xm->xm_target);
    944 
    945 	if (xm->xm_mode & PERIPH_CAP_TQING)
    946 		mpt->mpt_tag_enable |= (1 << xm->xm_target);
    947 	else
    948 		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);
    949 
    950 	tmp = mpt->mpt_dev_page1[xm->xm_target];
    951 
    952 	/*
    953 	 * Set the wide/narrow parameter for the target.
    954 	 */
    955 	if (xm->xm_mode & PERIPH_CAP_WIDE16)
    956 		tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
    957 	else
    958 		tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
    959 
    960 	/*
    961 	 * Set the synchronous parameters for the target.
    962 	 *
    963 	 * XXX If we request sync transfers, we just go ahead and
    964 	 * XXX request the maximum available.  We need finer control
    965 	 * XXX in order to implement Domain Validation.
    966 	 */
    967 	tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
    968 	    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
    969 	    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
    970 	    MPI_SCSIDEVPAGE1_RP_IU);
    971 	if (xm->xm_mode & PERIPH_CAP_SYNC) {
    972 		int factor, offset, np;
    973 
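         		/*
         		 * Request the fastest sync period factor and largest
         		 * offset the port reports in Port Page 0.
         		 */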
    974 		factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
    975 		offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
    976 		np = 0;
    977 		if (factor < 0x9) {
    978 			/* Ultra320 */
    979 			np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
    980 		}
    981 		if (factor < 0xa) {
    982 			/* at least Ultra160 */
    983 			np |= MPI_SCSIDEVPAGE1_RP_DT;
    984 		}
    985 		np |= (factor << 8) | (offset << 16);
    986 		tmp.RequestedParameters |= np;
    987 	}
    988 
    989 	if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
    990 		mpt_prt(mpt, "unable to write Device Page 1");
    991 		return;
    992 	}
    993 
    994 	if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
    995 		mpt_prt(mpt, "unable to read back Device Page 1");
    996 		return;
    997 	}
    998 
    999 	mpt->mpt_dev_page1[xm->xm_target] = tmp;
   1000 	if (mpt->verbose > 1) {
   1001 		mpt_prt(mpt,
   1002 		    "SPI Target %d Page 1: RequestedParameters %x Config %x",
   1003 		    xm->xm_target,
   1004 		    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
   1005 		    mpt->mpt_dev_page1[xm->xm_target].Configuration);
   1006 	}
   1007 
   1008 	/*
   1009 	 * Make a note that we should perform an async callback at the
   1010 	 * end of the next successful command completion to report the
   1011 	 * negotiated transfer mode.
   1012 	 */
   1013 	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
   1014 }
   1015 
   1016 static void
   1017 mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
   1018 {
   1019 	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
   1020 	struct scsipi_xfer_mode xm;
   1021 	int period, offset;
   1022 
   1023 	tmp = mpt->mpt_dev_page0[periph->periph_target];
   1024 	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
   1025 		mpt_prt(mpt, "unable to read Device Page 0");
   1026 		return;
   1027 	}
   1028 
   1029 	if (mpt->verbose > 1) {
   1030 		mpt_prt(mpt,
   1031 		    "SPI Tgt %d Page 0: NParms %x Information %x",
   1032 		    periph->periph_target,
   1033 		    tmp.NegotiatedParameters, tmp.Information);
   1034 	}
   1035 
   1036 	xm.xm_target = periph->periph_target;
   1037 	xm.xm_mode = 0;
   1038 
   1039 	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
   1040 		xm.xm_mode |= PERIPH_CAP_WIDE16;
   1041 
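         	/* A zero offset means async; otherwise report the sync parameters. */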
   1042 	period = (tmp.NegotiatedParameters >> 8) & 0xff;
   1043 	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
   1044 	if (offset) {
   1045 		xm.xm_period = period;
   1046 		xm.xm_offset = offset;
   1047 		xm.xm_mode |= PERIPH_CAP_SYNC;
   1048 	}
   1049 
   1050 	/*
   1051 	 * Tagged queueing is all controlled by us; there is no
   1052 	 * other setting to query.
   1053 	 */
   1054 	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
   1055 		xm.xm_mode |= PERIPH_CAP_TQING;
   1056 
   1057 	/*
   1058 	 * We're going to deliver the async event, so clear the marker.
   1059 	 */
   1060 	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);
   1061 
   1062 	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
   1063 }
   1064 
   1065 static void
   1066 mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
   1067 {
   1068 	MSG_DEFAULT_REPLY *dmsg = vmsg;
   1069 
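         	/*
         	 * Internal (non-I/O) replies land here: events, event acks,
         	 * port enable completions, and config page replies.
         	 */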
   1070 	switch (dmsg->Function) {
   1071 	case MPI_FUNCTION_EVENT_NOTIFICATION:
   1072 		mpt_event_notify_reply(mpt, vmsg);
   1073 		mpt_free_reply(mpt, (reply << 1));
   1074 		break;
   1075 
   1076 	case MPI_FUNCTION_EVENT_ACK:
   1077 		mpt_free_reply(mpt, (reply << 1));
   1078 		break;
   1079 
   1080 	case MPI_FUNCTION_PORT_ENABLE:
   1081 	    {
   1082 		MSG_PORT_ENABLE_REPLY *msg = vmsg;
   1083 		int index = msg->MsgContext & ~0x80000000;
   1084 		if (mpt->verbose > 1)
   1085 			mpt_prt(mpt, "enable port reply index %d", index);
   1086 		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
   1087 			request_t *req = &mpt->request_pool[index];
   1088 			req->debug = REQ_DONE;
   1089 		}
   1090 		mpt_free_reply(mpt, (reply << 1));
   1091 		break;
   1092 	    }
   1093 
   1094 	case MPI_FUNCTION_CONFIG:
   1095 	    {
   1096 		MSG_CONFIG_REPLY *msg = vmsg;
   1097 		int index = msg->MsgContext & ~0x80000000;
   1098 		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
   1099 			request_t *req = &mpt->request_pool[index];
   1100 			req->debug = REQ_DONE;
   1101 			req->sequence = reply;
   1102 		} else
   1103 			mpt_free_reply(mpt, (reply << 1));
   1104 		break;
   1105 	    }
   1106 
   1107 	default:
   1108 		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
   1109 	}
   1110 }
   1111 
   1112 static void
   1113 mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
   1114 {
   1115 
   1116 	switch (msg->Event) {
   1117 	case MPI_EVENT_LOG_DATA:
   1118 	    {
   1119 		int i;
   1120 
    1121 		/* Some error occurred that the Fusion wants logged. */
   1122 		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
   1123 		mpt_prt(mpt, "EvtLogData: Event Data:");
   1124 		for (i = 0; i < msg->EventDataLength; i++) {
   1125 			if ((i % 4) == 0)
   1126 				printf("%s:\t", mpt->sc_dev.dv_xname);
   1127 			printf("0x%08x%c", msg->Data[i],
   1128 			    ((i % 4) == 3) ? '\n' : ' ');
   1129 		}
   1130 		if ((i % 4) != 0)
   1131 			printf("\n");
   1132 		break;
   1133 	    }
   1134 
   1135 	case MPI_EVENT_UNIT_ATTENTION:
   1136 		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
   1137 		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
   1138 		break;
   1139 
   1140 	case MPI_EVENT_IOC_BUS_RESET:
   1141 		/* We generated a bus reset. */
   1142 		mpt_prt(mpt, "IOC Bus Reset Port %d",
   1143 		    (msg->Data[0] >> 8) & 0xff);
   1144 		break;
   1145 
   1146 	case MPI_EVENT_EXT_BUS_RESET:
   1147 		/* Someone else generated a bus reset. */
   1148 		mpt_prt(mpt, "External Bus Reset");
   1149 		/*
   1150 		 * These replies don't return EventData like the MPI
   1151 		 * spec says they do.
   1152 		 */
   1153 		/* XXX Send an async event? */
   1154 		break;
   1155 
   1156 	case MPI_EVENT_RESCAN:
   1157 		/*
    1158 		 * In general, this means a device has been added
   1159 		 * to the loop.
   1160 		 */
   1161 		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
   1162 		/* XXX Send an async event? */
   1163 		break;
   1164 
   1165 	case MPI_EVENT_LINK_STATUS_CHANGE:
   1166 		mpt_prt(mpt, "Port %d: Link state %s",
   1167 		    (msg->Data[1] >> 8) & 0xff,
   1168 		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
   1169 		break;
   1170 
   1171 	case MPI_EVENT_LOOP_STATE_CHANGE:
   1172 		switch ((msg->Data[0] >> 16) & 0xff) {
   1173 		case 0x01:
   1174 			mpt_prt(mpt,
   1175 			    "Port %d: FC Link Event: LIP(%02x,%02x) "
   1176 			    "(Loop Initialization)",
   1177 			    (msg->Data[1] >> 8) & 0xff,
   1178 			    (msg->Data[0] >> 8) & 0xff,
   1179 			    (msg->Data[0]     ) & 0xff);
   1180 			switch ((msg->Data[0] >> 8) & 0xff) {
   1181 			case 0xf7:
   1182 				if ((msg->Data[0] & 0xff) == 0xf7)
   1183 					mpt_prt(mpt, "\tDevice needs AL_PA");
   1184 				else
   1185 					mpt_prt(mpt, "\tDevice %02x doesn't "
   1186 					    "like FC performance",
   1187 					    msg->Data[0] & 0xff);
   1188 				break;
   1189 
   1190 			case 0xf8:
   1191 				if ((msg->Data[0] & 0xff) == 0xf7)
   1192 					mpt_prt(mpt, "\tDevice detected loop "
   1193 					    "failure before acquiring AL_PA");
   1194 				else
   1195 					mpt_prt(mpt, "\tDevice %02x detected "
   1196 					    "loop failure",
   1197 					    msg->Data[0] & 0xff);
   1198 				break;
   1199 
   1200 			default:
   1201 				mpt_prt(mpt, "\tDevice %02x requests that "
   1202 				    "device %02x reset itself",
   1203 				    msg->Data[0] & 0xff,
   1204 				    (msg->Data[0] >> 8) & 0xff);
   1205 				break;
   1206 			}
   1207 			break;
   1208 
   1209 		case 0x02:
   1210 			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
   1211 			    "(Loop Port Enable)",
   1212 			    (msg->Data[1] >> 8) & 0xff,
   1213 			    (msg->Data[0] >> 8) & 0xff,
   1214 			    (msg->Data[0]     ) & 0xff);
   1215 			break;
   1216 
   1217 		case 0x03:
   1218 			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
   1219 			    "(Loop Port Bypass)",
   1220 			    (msg->Data[1] >> 8) & 0xff,
   1221 			    (msg->Data[0] >> 8) & 0xff,
   1222 			    (msg->Data[0]     ) & 0xff);
   1223 			break;
   1224 
   1225 		default:
   1226 			mpt_prt(mpt, "Port %d: FC Link Event: "
   1227 			    "Unknown event (%02x %02x %02x)",
   1228 			    (msg->Data[1] >>  8) & 0xff,
   1229 			    (msg->Data[0] >> 16) & 0xff,
   1230 			    (msg->Data[0] >>  8) & 0xff,
   1231 			    (msg->Data[0]      ) & 0xff);
   1232 			break;
   1233 		}
   1234 		break;
   1235 
   1236 	case MPI_EVENT_LOGOUT:
   1237 		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
   1238 		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
   1239 		break;
   1240 
   1241 	case MPI_EVENT_EVENT_CHANGE:
   1242 		/*
   1243 		 * This is just an acknowledgement of our
   1244 		 * mpt_send_event_request().
   1245 		 */
   1246 		break;
   1247 
   1248 	default:
   1249 		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
   1250 		break;
   1251 	}
   1252 
   1253 	if (msg->AckRequired) {
   1254 		MSG_EVENT_ACK *ackp;
   1255 		request_t *req;
   1256 
   1257 		if ((req = mpt_get_request(mpt)) == NULL) {
   1258 			/* XXX XXX XXX XXXJRT */
   1259 			panic("mpt_event_notify_reply: unable to allocate "
   1260 			    "request structure");
   1261 		}
   1262 
   1263 		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
   1264 		memset(ackp, 0, sizeof(*ackp));
   1265 		ackp->Function = MPI_FUNCTION_EVENT_ACK;
   1266 		ackp->Event = msg->Event;
   1267 		ackp->EventContext = msg->EventContext;
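         		/* The high bit marks an internal request; see mpt_done(). */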
   1268 		ackp->MsgContext = req->index | 0x80000000;
   1269 		mpt_check_doorbell(mpt);
   1270 		mpt_send_cmd(mpt, req);
   1271 	}
   1272 }
   1273 
   1274 /* XXXJRT mpt_bus_reset() */
   1275 
   1276 /*****************************************************************************
   1277  * SCSI interface routines
   1278  *****************************************************************************/
   1279 
   1280 static void
   1281 mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
   1282     void *arg)
   1283 {
   1284 	struct scsipi_adapter *adapt = chan->chan_adapter;
   1285 	mpt_softc_t *mpt = (void *) adapt->adapt_dev;
   1286 
   1287 	switch (req) {
   1288 	case ADAPTER_REQ_RUN_XFER:
   1289 		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
   1290 		return;
   1291 
   1292 	case ADAPTER_REQ_GROW_RESOURCES:
   1293 		/* Not supported. */
   1294 		return;
   1295 
   1296 	case ADAPTER_REQ_SET_XFER_MODE:
   1297 		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
   1298 		return;
   1299 	}
   1300 }
   1301 
   1302 static void
   1303 mpt_minphys(struct buf *bp)
   1304 {
   1305 
   1306 /*
   1307  * Subtract one from the SGL limit, since we need an extra one to handle
    1308  * a non-page-aligned transfer.
   1309  */
   1310 #define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)
   1311 
   1312 	if (bp->b_bcount > MPT_MAX_XFER)
   1313 		bp->b_bcount = MPT_MAX_XFER;
   1314 	minphys(bp);
   1315 }
   1316