/*	$NetBSD: mpt_netbsd.c,v 1.18.2.2 2012/11/20 03:02:06 tls Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.18.2.2 2012/11/20 03:02:06 tls Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);

/*
 * XXX - this assumes the device_private() of the attachment starts with
 * a struct mpt_softc, so we can use the return value of device_private()
 * straight without any offset.
 */
#define	DEV_TO_MPT(DEV)	device_private(DEV)

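/*
 * mpt_scsipi_attach:
 *
 *	Attach the SCSIPI glue for this IOC: fill in the scsipi_adapter
 *	and scsipi_channel and hand the channel to autoconfiguration.
 *	The openings advertised to scsipi are bounded by both the IOC's
 *	global credits and our own request pool, minus two requests held
 *	back for driver use.
 */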
void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use */
	adapt->adapt_max_periph = maxq - 2;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	if (mpt->is_sas) {
		chan->chan_bustype = &scsi_sas_bustype;
	} else if (mpt->is_fc) {
		chan->chan_bustype = &scsi_fc_bustype;
	} else {
		chan->chan_bustype = &scsi_bustype;
	}
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	(void) config_found(mpt->sc_dev, &mpt->sc_channel, scsiprint);
}

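/*
 * mpt_dma_mem_alloc:
 *
 *	Allocate the per-IOC memory: the host-only request bookkeeping
 *	pool, one page of reply frames, the DMA-safe request area, and a
 *	data-transfer DMA map for each request.  Anything allocated
 *	before a failure is unwound in reverse order.
 */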
int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate reply area, error = %d\n", error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map reply area, error = %d\n", error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create reply DMA map, error = %d\n", error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load reply DMA map, error = %d\n", error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request area, error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map request area, error = %d\n", error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create request DMA map, error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load request DMA map, error = %d\n", error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

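	/*
	 * Carve the request area into MPT_REQUEST_AREA-sized slots, one
	 * per request.  Each slot holds the MPI request message itself;
	 * the last MPT_SENSE_SIZE bytes of the slot are set aside for
	 * that request's autosense data, which is why sense_pbuf and
	 * sense_vbuf point just below the already-advanced pptr/vptr.
	 */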
	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat,
		    MPT_SGL_MAX * PAGE_SIZE,
		    MPT_SGL_MAX,
		    MPT_SGL_MAX * PAGE_SIZE,
		    0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(mpt->sc_dev,
			    "unable to create req %d DMA map, error = %d\n",
			    i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

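/*
 * mpt_intr:
 *
 *	Interrupt handler.  If the IOC has posted replies, drain its
 *	reply queue completely, handing each reply to mpt_done().
 *	Returns non-zero iff at least one reply was processed, so
 *	shared-interrupt dispatch can tell whether the interrupt was
 *	ours.
 */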
int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

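/*
 * mpt_timeout:
 *
 *	Command watchdog.  Before declaring the request dead, run the
 *	interrupt handler once more: if that changes the request's
 *	sequence number, the completion was merely sitting unprocessed
 *	in the reply queue and we can return without error.
 */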
static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt = DEV_TO_MPT(
	    periph->periph_channel->chan_adapter->adapt_dev);
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}

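/*
 * mpt_done:
 *
 *	Completion dispatch.  The IOC hands back either a context reply
 *	(MPT_CONTEXT_REPLY clear), whose value directly encodes the
 *	index of the successfully completed request, or an address
 *	reply, which locates a reply frame in the reply area (converted
 *	with MPT_REPLY_PTOV) carrying full error status.
 */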
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    le32toh(mpt_reply->MsgContext) & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = le32toh(mpt_reply->MsgContext);
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus)) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);
}

static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

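		/*
		 * Build the scatter/gather list.  If the transfer fits
		 * in the MPT_NSGL_FIRST() simple elements available in
		 * the request frame itself, emit a single simple list;
		 * otherwise fill the frame and chain further SGE blocks
		 * located later in the same request slot.
		 */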
		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
					    * sizeof(SGE_SIMPLE32));
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = htole16(ntodo
					    * sizeof(SGE_SIMPLE32));
				}
				ce->Address = htole32(req->req_pbuf +
				    ((char *)se - (char *)mpt_req));
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address = htole32(
					    req->dmap->dm_segs[seg].ds_addr);
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
							    MPI_SGE_FLAGS_END_OF_LIST |
							    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					se->FlagsLength =
					    htole32(se->FlagsLength);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[i].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

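/*
 * mpt_set_xfer_mode:
 *
 *	Apply a scsipi transfer-mode request.  Tagged queueing is
 *	tracked purely in the softc; for parallel SCSI the wide/sync/DT
 *	parameters are additionally negotiated by rewriting SPI Device
 *	Page 1 for the target.
 */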
static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	if (mpt->is_scsi) {
		/*
		 * SCSI transport settings only make any sense for
		 * SCSI.
		 */

		tmp = mpt->mpt_dev_page1[xm->xm_target];

		/*
		 * Set the wide/narrow parameter for the target.
		 */
		if (xm->xm_mode & PERIPH_CAP_WIDE16)
			tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
		else
			tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

		/*
		 * Set the synchronous parameters for the target.
		 *
		 * XXX If we request sync transfers, we just go ahead and
		 * XXX request the maximum available.  We need finer control
		 * XXX in order to implement Domain Validation.
		 */
		tmp.RequestedParameters &=
		    ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
		      MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
		      MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
		      MPI_SCSIDEVPAGE1_RP_IU);
		if (xm->xm_mode & PERIPH_CAP_SYNC) {
			int factor, offset, np;

			factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
			offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
			np = 0;
			if (factor < 0x9) {
				/* Ultra320 */
				np |= MPI_SCSIDEVPAGE1_RP_QAS |
				    MPI_SCSIDEVPAGE1_RP_IU;
			}
			if (factor < 0xa) {
				/* at least Ultra160 */
				np |= MPI_SCSIDEVPAGE1_RP_DT;
			}
			np |= (factor << 8) | (offset << 16);
			tmp.RequestedParameters |= np;
		}

		host2mpt_config_page_scsi_device_1(&tmp);
		if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to write Device Page 1");
			return;
		}

		if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to read back Device Page 1");
			return;
		}

		mpt2host_config_page_scsi_device_1(&tmp);
		mpt->mpt_dev_page1[xm->xm_target] = tmp;
		if (mpt->verbose > 1) {
			mpt_prt(mpt,
			    "SPI Target %d Page 1: RequestedParameters %x "
			    "Config %x", xm->xm_target,
			    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
			    mpt->mpt_dev_page1[xm->xm_target].Configuration);
		}
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	host2mpt_config_page_scsi_device_0(&tmp);
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}
	mpt2host_config_page_scsi_device_0(&tmp);

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

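/*
 * mpt_ctlop:
 *
 *	Handle a control-path (non-I/O) address reply: event
 *	notifications and acks, port-enable and configuration replies.
 *	Replies fully consumed here are returned to the IOC's reply
 *	free queue; a configuration reply is instead parked in the
 *	waiting request's sequence field for its originator to pick up.
 */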
static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

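/*
 * mpt_event_notify_reply:
 *
 *	Decode an asynchronous event notification from the IOC and log
 *	it.  If the IOC marks the event as requiring acknowledgement,
 *	allocate a request and send an EVENT_ACK back.
 */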
static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (le32toh(msg->Event)) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", device_xname(mpt->sc_dev));
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]      ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%0x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	case MPI_EVENT_QUEUE_FULL:
		/* This can get a little chatty */
		if (mpt->verbose > 0)
			mpt_prt(mpt, "Queue Full Event");
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = htole32(req->index | 0x80000000);
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = DEV_TO_MPT(adapt->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{
	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}