/* $NetBSD: mpt_netbsd.c,v 1.3 2003/04/16 23:16:41 thorpej Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 */

#include <dev/ic/mpt.h>			/* pulls in all headers */

#include <machine/stdarg.h>		/* for mpt_prt() */

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);

void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

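	/*
	 * The IOC tells us how many commands it is willing to accept
	 * at once (its global credits); never advertise more openings
	 * than that, nor more than the request pool we allocate in
	 * mpt_dma_mem_alloc().
	 */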
	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq;
	adapt->adapt_max_periph = maxq;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	if (mpt->is_fc) {
		chan->chan_ntargets = 256;
		chan->chan_id = 256;
	} else {
		chan->chan_ntargets = 16;
		chan->chan_id = mpt->mpt_ini_id;
	}

	(void) config_found(&mpt->sc_dev, &mpt->sc_channel, scsiprint);
}

int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	caddr_t vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error("%s: unable to allocate request pool\n",
		    mpt->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error("%s: unable to allocate reply area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (caddr_t *) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error("%s: unable to map reply area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error("%s: unable to create reply DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error("%s: unable to load reply DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error("%s: unable to allocate request area, "
		    "error = %d\n", mpt->sc_dev.dv_xname, error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (caddr_t *) &mpt->request, 0);
	if (error) {
		aprint_error("%s: unable to map request area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error("%s: unable to create request DMA map, "
		    "error = %d\n", mpt->sc_dev.dv_xname, error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error("%s: unable to load request DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (caddr_t) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

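	/*
	 * Carve the request area into MPT_REQUEST_AREA-sized slices, one
	 * per request in the pool.  The tail MPT_SENSE_SIZE bytes of each
	 * slice serve as that request's autosense buffer.
	 */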
	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXBSIZE,
		    MPT_SGL_MAX, MAXBSIZE, 0, 0, &req->dmap);
		if (error) {
			aprint_error("%s: unable to create req %d DMA map, "
			    "error = %d\n", mpt->sc_dev.dv_xname, i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (caddr_t)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (caddr_t)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", mpt->sc_dev.dv_xname);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt =
	    (void *) periph->periph_channel->chan_adapter->adapt_dev;
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	splx(s);
}

static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    mpt_reply->MsgContext & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = mpt_reply->MsgContext;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		scsipi_done(xs);
		goto done;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (mpt_reply->IOCStatus) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - mpt_reply->TransferCount;
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

	scsipi_done(xs);
 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));
}

static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = req->index;

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;
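	/*
	 * LUN[] is the 8-byte SCSI LUN field from the MPI spec; placing
	 * the LUN number in byte 1 corresponds to single-level
	 * (peripheral-style) LUN addressing.
	 */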

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true(mpt->is_fc ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_fc)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_fc == 0 &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = xs->datalen;
	mpt_req->SenseBufferLowAddr = req->sense_pbuf;
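	/*
	 * Autosense data is DMA'd by the IOC into the per-request sense
	 * slice set up in mpt_dma_mem_alloc().
	 */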

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

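			/*
			 * The request frame has room for MPT_NSGL_FIRST
			 * SGL slots.  Fill all but the last with simple
			 * elements; the final slot becomes a chain element
			 * pointing at the rest of the list, which is built
			 * further along in the same request buffer.
			 */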
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[seg].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;
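			/*
			 * (The offset is expressed in 32-bit words, hence
			 * the shift by 2.)
			 */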

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
				}
				ce->Length = ntodo * sizeof(SGE_SIMPLE32);
				ce->Address = req->req_pbuf +
				    ((char *)se - (char *)mpt_req);
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address =
					    req->dmap->dm_segs[seg].ds_addr;
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
							    MPI_SGE_FLAGS_END_OF_LIST |
							    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD
			    : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[i].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD
			    : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout)) {
		mpt_timeout(req);
		if (mpt_poll(mpt, xs, xs->timeout))
			mpt_timeout(req);
	}
}

static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	if (mpt->is_fc) {
		/*
		 * SCSI transport settings don't make any sense for
		 * Fibre Channel; silently ignore the request.
		 */
		return;
	}

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	tmp = mpt->mpt_dev_page1[xm->xm_target];

	/*
	 * Set the wide/narrow parameter for the target.
	 */
	if (xm->xm_mode & PERIPH_CAP_WIDE16)
		tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
	else
		tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

	/*
	 * Set the synchronous parameters for the target.
	 *
	 * XXX If we request sync transfers, we just go ahead and
	 * XXX request the maximum available.  We need finer control
	 * XXX in order to implement Domain Validation.
	 */
	tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
	    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
	    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
	    MPI_SCSIDEVPAGE1_RP_IU);
	if (xm->xm_mode & PERIPH_CAP_SYNC) {
		int factor, offset, np;

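		/*
		 * Port Page 0 Capabilities carries the adapter's limits:
		 * minimum sync period factor in bits 8-15 and maximum
		 * sync offset in bits 16-23.  Request those limits, and
		 * enable DT/IU/QAS if the adapter is fast enough to use
		 * them.
		 */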
		factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
		offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
		np = 0;
		if (factor < 0x9) {
			/* Ultra320 */
			np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
		}
		if (factor < 0xa) {
			/* at least Ultra160 */
			np |= MPI_SCSIDEVPAGE1_RP_DT;
		}
		np |= (factor << 8) | (offset << 16);
		tmp.RequestedParameters |= np;
	}

	if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to write Device Page 1");
		return;
	}

	if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read back Device Page 1");
		return;
	}

	mpt->mpt_dev_page1[xm->xm_target] = tmp;
	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Target %d Page 1: RequestedParameters %x Config %x",
		    xm->xm_target,
		    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
		    mpt->mpt_dev_page1[xm->xm_target].Configuration);
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

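	/*
	 * NegotiatedParameters packs the sync period factor into bits
	 * 8-15 and the sync offset into bits 16-23; a zero offset means
	 * the target is running asynchronously.
	 */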
	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	{
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	}

	case MPI_FUNCTION_CONFIG:
	{
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	}

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (msg->Event) {
	case MPI_EVENT_LOG_DATA:
	{
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", mpt->sc_dev.dv_xname);
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	}

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >>  8) & 0xff,
			    (msg->Data[0]      ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = req->index | 0x80000000;
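		/*
		 * The high bit of the context marks this as a control
		 * operation, so its completion is routed to mpt_ctlop()
		 * rather than the normal SCSI I/O path in mpt_done().
		 */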
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = (void *) adapt->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

	/*
	 * Subtract one from the SGL limit, since we need an extra one to handle
	 * a non-page-aligned transfer.
	 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}