/*	$NetBSD: mpt_netbsd.c,v 1.17.2.1 2012/11/22 17:19:56 riz Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.17.2.1 2012/11/22 17:19:56 riz Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);

void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use. */
	adapt->adapt_max_periph = maxq - 2;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	if (mpt->is_sas) {
		chan->chan_bustype = &scsi_sas_bustype;
	} else if (mpt->is_fc) {
		chan->chan_bustype = &scsi_fc_bustype;
	} else {
		chan->chan_bustype = &scsi_bustype;
	}
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	(void) config_found(&mpt->sc_dev, &mpt->sc_channel, scsiprint);
}

int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to allocate reply area, error = %d\n", error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to map reply area, error = %d\n", error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to create reply DMA map, error = %d\n", error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to load reply DMA map, error = %d\n", error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to allocate request area, error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to map request area, error = %d\n", error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to create request DMA map, error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to load request DMA map, error = %d\n", error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

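	/*
	 * Carve the request area into per-command slots.  Each request
	 * gets an MPT_REQUEST_AREA-sized chunk of the DMA-able region;
	 * the last MPT_SENSE_SIZE bytes of each chunk are set aside for
	 * that command's autosense data, so one contiguous allocation
	 * backs both the request frame and its sense buffer.
	 */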
	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(&mpt->sc_dev,
			    "unable to create req %d DMA map, error = %d\n",
			    i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

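	/*
	 * Drain the reply post FIFO.  Each word popped is either a
	 * context reply (our request index; the command completed
	 * cleanly) or, with the high bit set, an address reply locating
	 * a full reply frame in the reply area; mpt_done() tells the
	 * two apart and finishes the command either way.
	 */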
	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(&mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt =
	    (void *) periph->periph_channel->chan_adapter->adapt_dev;
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

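	/*
	 * Before declaring the command dead, snapshot its sequence
	 * number and run the interrupt handler once: if the completion
	 * was merely sitting unserviced in the reply queue, the
	 * sequence number will have moved on and we can bail out.
	 */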
	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}

static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

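	/*
	 * A context reply carries our MsgContext (the request pool
	 * index) directly in the FIFO word, so there is no reply frame
	 * to inspect; an address reply locates a reply frame holding
	 * the full error details for the command.
	 */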
	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    le32toh(mpt_reply->MsgContext) & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = le32toh(mpt_reply->MsgContext);
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus)) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

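	/*
	 * Address replies come off the FIFO as the reply frame's
	 * physical address shifted right one bit (with the high bit
	 * set), so shift it back before returning the frame.
	 */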
	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);
}

static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |=
				    MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |=
				    MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

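		/*
		 * Build the scatter/gather list.  If every segment fits
		 * in the request frame's inline SGL (MPT_NSGL_FIRST
		 * elements), emit one simple list.  Otherwise place
		 * MPT_NSGL_FIRST - 1 simple elements inline, then chain
		 * elements that point at further SGE lists laid out in
		 * the remainder of this request's buffer.
		 */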
		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
					    * sizeof(SGE_SIMPLE32));
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = htole16(ntodo
					    * sizeof(SGE_SIMPLE32));
				}
				ce->Address = htole32(req->req_pbuf +
				    ((char *)se - (char *)mpt_req));
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address = htole32(
					    req->dmap->dm_segs[seg].ds_addr);
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
						    MPI_SGE_FLAGS_END_OF_LIST |
						  MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					se->FlagsLength =
					    htole32(se->FlagsLength);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs; i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[i].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	if (mpt->is_scsi) {
		/*
		 * SCSI transport settings only make any sense for
		 * SCSI.
		 */

		tmp = mpt->mpt_dev_page1[xm->xm_target];

		/*
		 * Set the wide/narrow parameter for the target.
		 */
		if (xm->xm_mode & PERIPH_CAP_WIDE16)
			tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
		else
			tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

		/*
		 * Set the synchronous parameters for the target.
		 *
		 * XXX If we request sync transfers, we just go ahead and
		 * XXX request the maximum available.  We need finer control
		 * XXX in order to implement Domain Validation.
		 */
		tmp.RequestedParameters &=
		    ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
		      MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
		      MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
		      MPI_SCSIDEVPAGE1_RP_IU);
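		/*
		 * The minimum sync period factor lives in byte 1 and
		 * the maximum sync offset in byte 2 of
		 * RequestedParameters (the fields masked off above);
		 * both values are taken from the limits the port
		 * reported in Port Page 0.
		 */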
		if (xm->xm_mode & PERIPH_CAP_SYNC) {
			int factor, offset, np;

			factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
			offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
			np = 0;
			if (factor < 0x9) {
				/* Ultra320 */
				np |= MPI_SCSIDEVPAGE1_RP_QAS |
				    MPI_SCSIDEVPAGE1_RP_IU;
			}
			if (factor < 0xa) {
				/* at least Ultra160 */
				np |= MPI_SCSIDEVPAGE1_RP_DT;
			}
			np |= (factor << 8) | (offset << 16);
			tmp.RequestedParameters |= np;
		}

		host2mpt_config_page_scsi_device_1(&tmp);
		if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to write Device Page 1");
			return;
		}

		if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to read back Device Page 1");
			return;
		}

		mpt2host_config_page_scsi_device_1(&tmp);
		mpt->mpt_dev_page1[xm->xm_target] = tmp;
		if (mpt->verbose > 1) {
			mpt_prt(mpt,
			    "SPI Target %d Page 1: RequestedParameters %x Config %x",
			    xm->xm_target,
			    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
			    mpt->mpt_dev_page1[xm->xm_target].Configuration);
		}
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	host2mpt_config_page_scsi_device_0(&tmp);
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}
	mpt2host_config_page_scsi_device_0(&tmp);

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

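	/*
	 * NegotiatedParameters uses the same byte layout as the
	 * requested parameters: the sync period factor in byte 1 and
	 * the sync offset in byte 2.  An offset of zero means the
	 * target is running asynchronously.
	 */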
	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

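	/*
	 * Replies land here when mpt_done() sees the high bit set in
	 * the message context; the driver tags all of its internal
	 * (non-I/O) requests with 0x80000000 so their completions can
	 * be told apart from SCSI commands.
	 */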
	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (le32toh(msg->Event)) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", device_xname(&mpt->sc_dev));
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

1182
1183 case MPI_EVENT_LINK_STATUS_CHANGE:
1184 mpt_prt(mpt, "Port %d: Link state %s",
1185 (msg->Data[1] >> 8) & 0xff,
1186 (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
1187 break;
1188
1189 case MPI_EVENT_LOOP_STATE_CHANGE:
1190 switch ((msg->Data[0] >> 16) & 0xff) {
1191 case 0x01:
1192 mpt_prt(mpt,
1193 "Port %d: FC Link Event: LIP(%02x,%02x) "
1194 "(Loop Initialization)",
1195 (msg->Data[1] >> 8) & 0xff,
1196 (msg->Data[0] >> 8) & 0xff,
1197 (msg->Data[0] ) & 0xff);
1198 switch ((msg->Data[0] >> 8) & 0xff) {
1199 case 0xf7:
1200 if ((msg->Data[0] & 0xff) == 0xf7)
1201 mpt_prt(mpt, "\tDevice needs AL_PA");
1202 else
1203 mpt_prt(mpt, "\tDevice %02x doesn't "
1204 "like FC performance",
1205 msg->Data[0] & 0xff);
1206 break;
1207
1208 case 0xf8:
1209 if ((msg->Data[0] & 0xff) == 0xf7)
1210 mpt_prt(mpt, "\tDevice detected loop "
1211 "failure before acquiring AL_PA");
1212 else
1213 mpt_prt(mpt, "\tDevice %02x detected "
1214 "loop failure",
1215 msg->Data[0] & 0xff);
1216 break;
1217
1218 default:
1219 mpt_prt(mpt, "\tDevice %02x requests that "
1220 "device %02x reset itself",
1221 msg->Data[0] & 0xff,
1222 (msg->Data[0] >> 8) & 0xff);
1223 break;
1224 }
1225 break;
1226
1227 case 0x02:
1228 mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
1229 "(Loop Port Enable)",
1230 (msg->Data[1] >> 8) & 0xff,
1231 (msg->Data[0] >> 8) & 0xff,
1232 (msg->Data[0] ) & 0xff);
1233 break;
1234
1235 case 0x03:
1236 mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
1237 "(Loop Port Bypass)",
1238 (msg->Data[1] >> 8) & 0xff,
1239 (msg->Data[0] >> 8) & 0xff,
1240 (msg->Data[0] ) & 0xff);
1241 break;
1242
1243 default:
1244 mpt_prt(mpt, "Port %d: FC Link Event: "
1245 "Unknown event (%02x %02x %02x)",
1246 (msg->Data[1] >> 8) & 0xff,
1247 (msg->Data[0] >> 16) & 0xff,
1248 (msg->Data[0] >> 8) & 0xff,
1249 (msg->Data[0] ) & 0xff);
1250 break;
1251 }
1252 break;
1253
1254 case MPI_EVENT_LOGOUT:
1255 mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
1256 (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
1257 break;
1258
1259 case MPI_EVENT_EVENT_CHANGE:
1260 /*
1261 * This is just an acknowledgement of our
1262 * mpt_send_event_request().
1263 */
1264 break;
1265
1266 case MPI_EVENT_SAS_PHY_LINK_STATUS:
1267 switch ((msg->Data[0] >> 12) & 0x0f) {
1268 case 0x00:
1269 mpt_prt(mpt, "Phy %d: Link Status Unknown",
1270 msg->Data[0] & 0xff);
1271 break;
1272 case 0x01:
1273 mpt_prt(mpt, "Phy %d: Link Disabled",
1274 msg->Data[0] & 0xff);
1275 break;
1276 case 0x02:
1277 mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
1278 msg->Data[0] & 0xff);
1279 break;
1280 case 0x03:
1281 mpt_prt(mpt, "Phy %d: SATA OOB Complete",
1282 msg->Data[0] & 0xff);
1283 break;
1284 case 0x08:
1285 mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
1286 msg->Data[0] & 0xff);
1287 break;
1288 case 0x09:
1289 mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
1290 msg->Data[0] & 0xff);
1291 break;
1292 default:
1293 mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
1294 "Unknown event (%0x)",
1295 msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
1296 }
1297 break;
1298
1299 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
1300 case MPI_EVENT_SAS_DISCOVERY:
1301 /* ignore these events for now */
1302 break;
1303
1304 case MPI_EVENT_QUEUE_FULL:
1305 /* This can get a little chatty */
1306 if (mpt->verbose > 0)
1307 mpt_prt(mpt, "Queue Full Event");
1308 break;
1309
1310 default:
1311 mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
1312 break;
1313 }
1314
1315 if (msg->AckRequired) {
1316 MSG_EVENT_ACK *ackp;
1317 request_t *req;
1318
1319 if ((req = mpt_get_request(mpt)) == NULL) {
1320 /* XXX XXX XXX XXXJRT */
1321 panic("mpt_event_notify_reply: unable to allocate "
1322 "request structure");
1323 }
1324
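		/*
		 * Build the EVENT_ACK in the request's frame.  The
		 * context is tagged with the high bit so that
		 * mpt_done() routes the completion to mpt_ctlop()
		 * rather than treating it as a SCSI command.
		 */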
		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = htole32(req->index | 0x80000000);
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = (void *) adapt->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

	/*
	 * Subtract one from the SGL limit, since we need an extra one
	 * to handle a non-page-aligned transfer.
	 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}