/*	$NetBSD: mpt_netbsd.c,v 1.16.6.2 2012/04/05 21:33:26 mrg Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.16.6.2 2012/04/05 21:33:26 mrg Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);

/*
 * XXX - this assumes the device_private() of the attachment starts with
 * a struct mpt_softc, so we can use the return value of device_private()
 * straight without any offset.
 */
#define	DEV_TO_MPT(DEV)	device_private(DEV)

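/*
 * mpt_scsipi_attach:
 *
 *	Hook the controller up to the SCSIPI mid-layer: fill in the
 *	scsipi_adapter and scsipi_channel structures and attach the
 *	child scsibus.  The opening count is derived from the IOC's
 *	global credits, minus two requests reserved for driver use.
 */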
void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use */
	adapt->adapt_max_periph = maxq - 2;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	(void) config_found(mpt->sc_dev, &mpt->sc_channel, scsiprint);
}

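/*
 * mpt_dma_mem_alloc:
 *
 *	Allocate the global request pool plus the DMA-able memory for
 *	the reply queue (one page) and the request/sense area, then
 *	carve the request area up into per-request slots.  Returns 0
 *	on success or an errno, unwinding any partial allocations.
 */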
int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate reply area, error = %d\n", error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map reply area, error = %d\n", error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create reply DMA map, error = %d\n", error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load reply DMA map, error = %d\n", error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request area, error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map request area, error = %d\n", error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create request DMA map, error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load request DMA map, error = %d\n", error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

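	/*
	 * Carve the request area into MPT_MAX_REQUESTS(mpt) slots of
	 * MPT_REQUEST_AREA bytes each.  The last MPT_SENSE_SIZE bytes
	 * of each slot hold that request's autosense buffer, so the
	 * request message and its sense data share one slot.
	 */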
	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(mpt->sc_dev,
			    "unable to create req %d DMA map, error = %d\n",
			    i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

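/*
 * mpt_intr:
 *
 *	Interrupt handler.  If the IOC has replies pending, drain the
 *	reply FIFO and hand each reply to mpt_done().  Returns non-zero
 *	if the interrupt was ours (i.e. at least one reply was handled).
 */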
int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) &
	    MPT_INTR_REPLY_READY) == 0)
		return (0);

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

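/*
 * mpt_prt:
 *
 *	printf() wrapper that prefixes each message with the device
 *	name and appends a newline.
 */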
void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

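/*
 * mpt_poll:
 *
 *	Poll for completion of a scsipi_xfer without interrupts, for
 *	up to `count' milliseconds.  Returns 0 if the xfer completed,
 *	non-zero if it timed out.
 */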
static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

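/*
 * mpt_timeout:
 *
 *	Callout fired when a command has been outstanding for too long.
 *	Runs the interrupt handler once in case the completion was merely
 *	lost; if the request is still outstanding, dump diagnostic state,
 *	free the request, and fail the xfer with XS_TIMEOUT.
 */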
static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt = DEV_TO_MPT(
	    periph->periph_channel->chan_adapter->adapt_dev);
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}

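/*
 * mpt_done:
 *
 *	Process a single reply from the IOC.  A context reply carries
 *	only the request index and indicates success; an address reply
 *	points at a reply frame describing an error or other condition.
 *	Completion status is translated into scsipi error codes and
 *	the xfer is finished with scsipi_done().
 */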
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    le32toh(mpt_reply->MsgContext) & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = le32toh(mpt_reply->MsgContext);
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			    (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus)) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			    (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}
 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);
}

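/*
 * mpt_run_xfer:
 *
 *	Start a scsipi_xfer: allocate a request, build the SCSI I/O
 *	request message (including the scatter/gather list for any
 *	data transfer), arm the timeout callout, and hand the request
 *	to the IOC.  If the xfer is marked XS_CTL_POLL, poll for
 *	completion instead of relying on interrupts.
 */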
static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |=
				    MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

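		/*
		 * Build the scatter/gather list.  If the whole transfer
		 * fits in the SGEs embedded in the request message, emit
		 * simple elements only; otherwise fill all but the last
		 * embedded slot, then continue the list with chain
		 * elements that live further along in the same request
		 * slot.
		 */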
		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
					    * sizeof(SGE_SIMPLE32));
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = htole16(ntodo
					    * sizeof(SGE_SIMPLE32));
				}
				ce->Address = htole32(req->req_pbuf +
				    ((char *)se - (char *)mpt_req));
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address = htole32(
					    req->dmap->dm_segs[seg].ds_addr);
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
							    MPI_SGE_FLAGS_END_OF_LIST |
							    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					se->FlagsLength =
					    htole32(se->FlagsLength);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs; i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[i].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

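/*
 * mpt_set_xfer_mode:
 *
 *	Handle an ADAPTER_REQ_SET_XFER_MODE request from the SCSIPI
 *	mid-layer: record the tagged-queueing setting and, for parallel
 *	SCSI controllers, update Device Page 1 with the requested
 *	wide/sync parameters.
 */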
static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	if (mpt->is_scsi) {
		/*
		 * SCSI transport settings only make any sense for
		 * SCSI
		 */
		tmp = mpt->mpt_dev_page1[xm->xm_target];

		/*
		 * Set the wide/narrow parameter for the target.
		 */
		if (xm->xm_mode & PERIPH_CAP_WIDE16)
			tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
		else
			tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

		/*
		 * Set the synchronous parameters for the target.
		 *
		 * XXX If we request sync transfers, we just go ahead and
		 * XXX request the maximum available.  We need finer control
		 * XXX in order to implement Domain Validation.
		 */
		tmp.RequestedParameters &=
		    ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
		      MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
		      MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
		      MPI_SCSIDEVPAGE1_RP_IU);
		if (xm->xm_mode & PERIPH_CAP_SYNC) {
			int factor, offset, np;

			factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
			offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
			np = 0;
			if (factor < 0x9) {
				/* Ultra320 */
				np |= MPI_SCSIDEVPAGE1_RP_QAS |
				    MPI_SCSIDEVPAGE1_RP_IU;
			}
			if (factor < 0xa) {
				/* at least Ultra160 */
				np |= MPI_SCSIDEVPAGE1_RP_DT;
			}
			np |= (factor << 8) | (offset << 16);
			tmp.RequestedParameters |= np;
		}

		host2mpt_config_page_scsi_device_1(&tmp);
		if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to write Device Page 1");
			return;
		}

		if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to read back Device Page 1");
			return;
		}

		mpt2host_config_page_scsi_device_1(&tmp);
		mpt->mpt_dev_page1[xm->xm_target] = tmp;
		if (mpt->verbose > 1) {
			mpt_prt(mpt,
			    "SPI Target %d Page 1: RequestedParameters %x "
			    "Config %x", xm->xm_target,
			    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
			    mpt->mpt_dev_page1[xm->xm_target].Configuration);
		}
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

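/*
 * mpt_get_xfer_mode:
 *
 *	Read back Device Page 0 for a target and report the negotiated
 *	wide/sync/tag settings to the SCSIPI mid-layer via an
 *	ASYNC_EVENT_XFER_MODE event.
 */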
static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	host2mpt_config_page_scsi_device_0(&tmp);
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}
	mpt2host_config_page_scsi_device_0(&tmp);

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

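/*
 * mpt_ctlop:
 *
 *	Handle a control-operation reply (an address reply whose
 *	MsgContext has the high bit set): event notifications, event
 *	ACKs, port enable, and configuration replies.
 */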
static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

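/*
 * mpt_event_notify_reply:
 *
 *	Decode and log an asynchronous event notification from the IOC.
 *	If the IOC requires it, acknowledge the event with an EVENT_ACK
 *	message.
 */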
static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (le32toh(msg->Event)) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", device_xname(mpt->sc_dev));
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]      ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%02x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	case MPI_EVENT_QUEUE_FULL:
		/* This can get a little chatty */
		if (mpt->verbose > 0)
			mpt_prt(mpt, "Queue Full Event");
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = htole32(req->index | 0x80000000);
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

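/*
 * mpt_scsipi_request:
 *
 *	Entry point for requests from the SCSIPI mid-layer.  Dispatches
 *	xfer execution and transfer-mode changes; resource growth is
 *	not supported.
 */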
static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = DEV_TO_MPT(adapt->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

	/*
	 * Subtract one from the SGL limit, since we need an extra one to
	 * handle a non-page-aligned transfer.
	 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}