/*	$NetBSD: mpt_netbsd.c,v 1.14.14.2 2012/02/04 17:05:56 bouyer Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.14.14.2 2012/02/04 17:05:56 bouyer Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */

#include <machine/stdarg.h>		/* for mpt_prt() */

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);

void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

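	/*
	 * The IOC grants us mpt_global_credits concurrent requests at
	 * init time; never queue more than the lesser of that and the
	 * request structures we actually allocated.
	 */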
	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use */
	adapt->adapt_max_periph = maxq - 2;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	(void) config_found(&mpt->sc_dev, &mpt->sc_channel, scsiprint);
}

int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(&mpt->sc_dev, "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to allocate reply area, error = %d\n",
		    error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to map reply area, error = %d\n",
		    error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to create reply DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to load reply DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to allocate request area, "
		    "error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to map request area, error = %d\n",
		    error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to create request DMA map, "
		    "error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev, "unable to load request DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

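	/*
	 * Carve the request area into MPT_REQUEST_AREA-byte slots, one
	 * per request in the pool.  The tail MPT_SENSE_SIZE bytes of
	 * each slot double as that request's autosense buffer, which is
	 * why no separate sense-buffer allocation is made.
	 */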
	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(&mpt->sc_dev, "unable to create req %d DMA map, "
			    "error = %d\n", i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

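	/*
	 * Drain the reply post FIFO; popping it yields MPT_REPLY_EMPTY
	 * once no completions remain, so keep dispatching until we see
	 * that sentinel.
	 */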
	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(&mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt =
	    (void *) periph->periph_channel->chan_adapter->adapt_dev;
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

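	/*
	 * Before declaring the command dead, service any pending
	 * completions; if that changes this request's sequence number,
	 * the command actually finished and the timeout merely raced
	 * with the completion interrupt.
	 */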
	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}

static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    le32toh(mpt_reply->MsgContext) & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = le32toh(mpt_reply->MsgContext);
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus)) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
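	/*
	 * (The FIFO entry holds the reply frame's physical address
	 * shifted right one bit, with the high bit marking it as an
	 * address reply; shifting left restores the address we return.)
	 */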
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);
}

static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
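	/*
	 * The autosense buffer lives in the DMA-visible tail of this
	 * request's slot (carved out in mpt_dma_mem_alloc), so its bus
	 * address is always valid and needs no per-command mapping.
	 */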
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
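			/*
			 * (ChainOffset is expressed in 32-bit words, hence
			 * the byte offset from the start of the request
			 * frame is shifted right by 2.)
			 */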
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
					    * sizeof(SGE_SIMPLE32));
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = htole16(ntodo
					    * sizeof(SGE_SIMPLE32));
				}
				ce->Address = htole32(req->req_pbuf +
				    ((char *)se - (char *)mpt_req));
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address = htole32(
					    req->dmap->dm_segs[seg].ds_addr);
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
							    MPI_SGE_FLAGS_END_OF_LIST |
							    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					se->FlagsLength =
					    htole32(se->FlagsLength);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
							BUS_DMASYNC_PREREAD
						      : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[i].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
							BUS_DMASYNC_PREREAD
						      : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	if (mpt->is_scsi) {
		/*
		 * SCSI transport settings only make any sense for
		 * SCSI
		 */

		tmp = mpt->mpt_dev_page1[xm->xm_target];

		/*
		 * Set the wide/narrow parameter for the target.
		 */
		if (xm->xm_mode & PERIPH_CAP_WIDE16)
			tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
		else
			tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

		/*
		 * Set the synchronous parameters for the target.
		 *
		 * XXX If we request sync transfers, we just go ahead and
		 * XXX request the maximum available.  We need finer control
		 * XXX in order to implement Domain Validation.
		 */
		tmp.RequestedParameters &=
		    ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
		      MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
		      MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
		      MPI_SCSIDEVPAGE1_RP_IU);
		if (xm->xm_mode & PERIPH_CAP_SYNC) {
			int factor, offset, np;

			factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
			offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
			np = 0;
			if (factor < 0x9) {
				/* Ultra320 */
				np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
			}
			if (factor < 0xa) {
				/* at least Ultra160 */
				np |= MPI_SCSIDEVPAGE1_RP_DT;
			}
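			/*
			 * Pack the sync parameters the same way the IOC
			 * reports them: period factor in bits 8-15 and
			 * offset in bits 16-23, mirroring the Capabilities
			 * fields read above.
			 */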
			np |= (factor << 8) | (offset << 16);
			tmp.RequestedParameters |= np;
		}

		host2mpt_config_page_scsi_device_1(&tmp);
		if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to write Device Page 1");
			return;
		}

		if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to read back Device Page 1");
			return;
		}

		mpt2host_config_page_scsi_device_1(&tmp);
		mpt->mpt_dev_page1[xm->xm_target] = tmp;
		if (mpt->verbose > 1) {
			mpt_prt(mpt,
			    "SPI Target %d Page 1: RequestedParameters %x Config %x",
			    xm->xm_target,
			    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
			    mpt->mpt_dev_page1[xm->xm_target].Configuration);
		}
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	host2mpt_config_page_scsi_device_0(&tmp);
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}
	mpt2host_config_page_scsi_device_0(&tmp);

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	{
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
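		/*
		 * Driver-internal commands carry the high bit in their
		 * MsgContext (set in mpt_event_notify_reply() and checked
		 * in mpt_done()); mask it off to recover the pool index.
		 */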
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	}

	case MPI_FUNCTION_CONFIG:
	{
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	}

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (le32toh(msg->Event)) {
	case MPI_EVENT_LOG_DATA:
	{
		int i;

		/* Some error occurred that the Fusion wants logged. */
1136 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
1137 mpt_prt(mpt, "EvtLogData: Event Data:");
1138 for (i = 0; i < msg->EventDataLength; i++) {
1139 if ((i % 4) == 0)
1140 printf("%s:\t", device_xname(&mpt->sc_dev));
1141 printf("0x%08x%c", msg->Data[i],
1142 ((i % 4) == 3) ? '\n' : ' ');
1143 }
1144 if ((i % 4) != 0)
1145 printf("\n");
1146 break;
1147 }
1148
1149 case MPI_EVENT_UNIT_ATTENTION:
1150 mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
1151 (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
1152 break;
1153
1154 case MPI_EVENT_IOC_BUS_RESET:
1155 /* We generated a bus reset. */
1156 mpt_prt(mpt, "IOC Bus Reset Port %d",
1157 (msg->Data[0] >> 8) & 0xff);
1158 break;
1159
1160 case MPI_EVENT_EXT_BUS_RESET:
1161 /* Someone else generated a bus reset. */
1162 mpt_prt(mpt, "External Bus Reset");
1163 /*
1164 * These replies don't return EventData like the MPI
1165 * spec says they do.
1166 */
1167 /* XXX Send an async event? */
1168 break;
1169
1170 case MPI_EVENT_RESCAN:
1171 /*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%0x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	case MPI_EVENT_QUEUE_FULL:
		/* This can get a little chatty */
		if (mpt->verbose > 0)
			mpt_prt(mpt, "Queue Full Event");
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
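		/*
		 * Tag the ack as a driver-internal command: the high bit
		 * in MsgContext routes its completion through mpt_ctlop()
		 * rather than the normal SCSI I/O path (see the
		 * 0x80000000 check in mpt_done()).
		 */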
		ackp->MsgContext = htole32(req->index | 0x80000000);
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = (void *) adapt->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

	/*
	 * Subtract one from the SGL limit, since we need an extra one to handle
	 * a non-page-aligned transfer.
	 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}