/* $NetBSD: mpt_netbsd.c,v 1.1 2003/04/16 22:03:00 thorpej Exp $ */

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters. Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 */

#include <dev/ic/mpt.h>			/* pulls in all headers */

#include <machine/stdarg.h>		/* for mpt_prt() */

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);

void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

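	/*
	 * Limit the number of command openings to whichever is smaller:
	 * the request credits the IOC granted us or our own request pool.
	 */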
	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq;
	adapt->adapt_max_periph = maxq;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	if (mpt->is_fc) {
		chan->chan_ntargets = 256;
		chan->chan_id = 256;
	} else {
		chan->chan_ntargets = 16;
		chan->chan_id = mpt->mpt_ini_id;
	}

	(void) config_found(&mpt->sc_dev, &mpt->sc_channel, scsiprint);
}

int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	caddr_t vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool. This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error("%s: unable to allocate request pool\n",
		    mpt->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error("%s: unable to allocate reply area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (caddr_t *) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error("%s: unable to map reply area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error("%s: unable to create reply DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error("%s: unable to load reply DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error("%s: unable to allocate request area, "
		    "error = %d\n", mpt->sc_dev.dv_xname, error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (caddr_t *) &mpt->request,
	    BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error("%s: unable to map request area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error("%s: unable to create request DMA map, "
		    "error = %d\n", mpt->sc_dev.dv_xname, error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error("%s: unable to load request DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

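	/*
	 * Carve the request area into per-request slices: each request_t
	 * is handed MPT_REQUEST_AREA bytes, and the final MPT_SENSE_SIZE
	 * bytes of each slice serve as that request's sense buffer.
	 */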
	pptr = mpt->request_phys;
	vptr = (caddr_t) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXBSIZE,
		    MPT_SGL_MAX, MAXBSIZE, 0, 0, &req->dmap);
		if (error) {
			aprint_error("%s: unable to create req %d DMA map, "
			    "error = %d\n", mpt->sc_dev.dv_xname, i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	/* Unmap the full request area (it was mapped with this size above). */
	bus_dmamem_unmap(mpt->sc_dmat, (caddr_t)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (caddr_t)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

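	/* The interrupt is ours only if the IOC says a reply is waiting. */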
	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

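	/*
	 * Drain the reply FIFO. Each entry is either a context reply
	 * (the index of a completed request) or an address reply
	 * (a reply frame the IOC wrote into host memory).
	 */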
	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", mpt->sc_dev.dv_xname);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt =
	    (void *) periph->periph_channel->chan_adapter->adapt_dev;
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

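	/*
	 * Snapshot the request's sequence number, then run the interrupt
	 * handler by hand; if the sequence changes, the command actually
	 * completed and the timeout was spurious.
	 */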
	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	splx(s);
}

static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    mpt_reply->MsgContext & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = mpt_reply->MsgContext;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		scsipi_done(xs);
		goto done;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (mpt_reply->IOCStatus) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes! Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - mpt_reply->TransferCount;
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

	scsipi_done(xs);
 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
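	/*
	 * The reply FIFO hands us the frame's physical address shifted
	 * right one bit, so shift it back before returning the frame.
	 */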
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));
}

static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = req->index;

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true(mpt->is_fc ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_fc)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_fc == 0 &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = xs->datalen;
	mpt_req->SenseBufferLowAddr = req->sense_pbuf;

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

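			/*
			 * More segments than fit in the request frame:
			 * lay down the first MPT_NSGL_FIRST - 1 simple
			 * elements inline, then describe the remainder
			 * with chain elements that point further into
			 * this same request buffer.
			 */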
			seg = 0;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[seg].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
				}
				ce->Length = ntodo * sizeof(SGE_SIMPLE32);
				ce->Address = req->req_pbuf +
				    ((char *)se - (char *)mpt_req);
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address =
					    req->dmap->dm_segs[seg].ds_addr;
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
							    MPI_SGE_FLAGS_END_OF_LIST |
							    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[i].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout)) {
		mpt_timeout(req);
		if (mpt_poll(mpt, xs, xs->timeout))
			mpt_timeout(req);
	}
}

static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	if (mpt->is_fc) {
		/*
		 * SCSI transport settings don't make any sense for
		 * Fibre Channel; silently ignore the request.
		 */
		return;
	}

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	tmp = mpt->mpt_dev_page1[xm->xm_target];

	/*
	 * Set the wide/narrow parameter for the target.
	 */
	if (xm->xm_mode & PERIPH_CAP_WIDE16)
		tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
	else
		tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

	/*
	 * Set the synchronous parameters for the target.
	 *
	 * XXX If we request sync transfers, we just go ahead and
	 * XXX request the maximum available. We need finer control
	 * XXX in order to implement Domain Validation.
	 */
	tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
	    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
	    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
	    MPI_SCSIDEVPAGE1_RP_IU);
	if (xm->xm_mode & PERIPH_CAP_SYNC) {
		int factor, offset, np;

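		/*
		 * Port Page 0 advertises the best (minimum) sync period
		 * factor in byte 1 of Capabilities and the maximum offset
		 * in byte 2; request those outright.
		 */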
		factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
		offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
		np = 0;
		if (factor < 0x9) {
			/* Ultra320 */
			np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
		}
		if (factor < 0xa) {
			/* at least Ultra160 */
			np |= MPI_SCSIDEVPAGE1_RP_DT;
		}
		np |= (factor << 8) | (offset << 16);
		tmp.RequestedParameters |= np;
	}

	if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to write Device Page 1");
		return;
	}

	if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read back Device Page 1");
		return;
	}

	mpt->mpt_dev_page1[xm->xm_target] = tmp;
	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Target %d Page 1: RequestedParameters %x Config %x",
		    xm->xm_target,
		    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
		    mpt->mpt_dev_page1[xm->xm_target].Configuration);
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

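	/*
	 * The negotiated sync period factor is in byte 1 of
	 * NegotiatedParameters and the offset in byte 2; a zero offset
	 * means the target is running asynchronously.
	 */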
	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
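		/*
		 * A config reply completes a synchronous config-page
		 * request: mark the request done and stash the reply
		 * FIFO value so the waiter can locate and release the
		 * reply frame itself.
		 */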
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (msg->Event) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", mpt->sc_dev.dv_xname);
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

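		/*
		 * The IOC wants an explicit EVENT_ACK for this
		 * notification. Build one in a spare request frame; the
		 * high bit in MsgContext marks it as a driver control
		 * request rather than a SCSI I/O.
		 */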
		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = req->index | 0x80000000;
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = (void *) adapt->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

	/*
	 * Subtract one from the SGL limit, since we need an extra one to handle
	 * a non-page-aligned transfer.
	 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}