/* $NetBSD: mpt_netbsd.c,v 1.18.2.3 2014/08/20 00:03:38 tls Exp $ */

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.18.2.3 2014/08/20 00:03:38 tls Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */
#include <sys/scsiio.h>

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_restart(mpt_softc_t *, request_t *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static int	mpt_drain_queue(mpt_softc_t *);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);
static void	mpt_bus_reset(mpt_softc_t *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);
static int	mpt_ioctl(struct scsipi_channel *, u_long, void *, int,
		    struct proc *);

void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use */
	adapt->adapt_max_periph = maxq - 2;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;
	adapt->adapt_ioctl = mpt_ioctl;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	if (mpt->is_sas) {
		chan->chan_bustype = &scsi_sas_bustype;
	} else if (mpt->is_fc) {
		chan->chan_bustype = &scsi_fc_bustype;
	} else {
		chan->chan_bustype = &scsi_bustype;
	}
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	/*
	 * Save the output of the config so we can rescan the bus in case
	 * of errors.
	 */
	mpt->sc_scsibus_dv = config_found(mpt->sc_dev, &mpt->sc_channel,
	    scsiprint);
}

int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
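	/*
	 * (Note: with M_WAITOK, malloc(9) sleeps until memory is
	 * available rather than returning NULL, so the failure check
	 * below is purely defensive.)
	 */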
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate reply area, error = %d\n", error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map reply area, error = %d\n", error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create reply DMA map, error = %d\n", error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load reply DMA map, error = %d\n", error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request area, error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map request area, error = %d\n", error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create request DMA map, error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load request DMA map, error = %d\n", error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);
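
	/*
	 * Carve the request area into per-request chunks.  Each request
	 * gets MPT_REQUEST_AREA bytes; the request message lives at the
	 * front of the chunk and the last MPT_SENSE_SIZE bytes hold the
	 * sense data the IOC writes back for that request.
	 */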
	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat,
		    MPT_SGL_MAX * PAGE_SIZE,
		    MPT_SGL_MAX,
		    MPT_SGL_MAX * PAGE_SIZE,
		    0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(mpt->sc_dev,
			    "unable to create req %d DMA map, error = %d\n",
			    i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) &
	    MPT_INTR_REPLY_READY) == 0)
		return (0);

	nrepl = mpt_drain_queue(mpt);
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	mpt_softc_t *mpt;
	uint32_t oseq;
	int s, nrepl = 0;

	if (req->xfer == NULL) {
		printf("mpt_timeout: NULL xfer for request index 0x%x, "
		    "sequence 0x%x\n", req->index, req->sequence);
		return;
	}
	xs = req->xfer;
	periph = xs->xs_periph;
	mpt = device_private(periph->periph_channel->chan_adapter->adapt_dev);
	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt->success++;
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}

	/*
	 * Ensure the IOC is really done giving us data since it appears
	 * it can sometimes fail to give us interrupts under heavy load.
	 */
	nrepl = mpt_drain_queue(mpt);
	if (nrepl) {
		mpt_prt(mpt, "mpt_timeout: recovered %d commands", nrepl);
	}

	if (req->sequence != oseq) {
		mpt->success++;
		splx(s);
		return;
	}

	mpt_prt(mpt, "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	xs->error = XS_TIMEOUT;
	splx(s);
	mpt_restart(mpt, req);
}

static void
mpt_restart(mpt_softc_t *mpt, request_t *req0)
{
	int i, s, nreq;
	request_t *req;
	struct scsipi_xfer *xs;

	/* first, reset the IOC, leaving stopped so all requests are idle */
	if (mpt_soft_reset(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed");
		/*
		 * Don't try a hard reset since this mangles the PCI
		 * configuration registers.
		 */
		return;
	}

	/* Freeze the channel so scsipi doesn't queue more commands. */
	scsipi_channel_freeze(&mpt->sc_channel, 1);

	/* Return all pending requests to scsipi and de-allocate them. */
	s = splbio();
	nreq = 0;
	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
		req = &mpt->request_pool[i];
		xs = req->xfer;
		if (xs != NULL) {
			if (xs->datalen != 0)
				bus_dmamap_unload(mpt->sc_dmat, req->dmap);
			req->xfer = NULL;
			callout_stop(&xs->xs_callout);
			if (req != req0) {
				nreq++;
				xs->error = XS_REQUEUE;
			}
			scsipi_done(xs);
			/*
			 * mpt_init() below will re-initialize the
			 * request pool anyway, but free the request
			 * here as well to keep its state consistent.
			 */
			mpt_free_request(mpt, req);
		}
	}
	splx(s);
	if (nreq > 0)
		mpt_prt(mpt, "re-queued %d requests", nreq);

	/* Re-initialize the IOC (which restarts it). */
	if (mpt_init(mpt, MPT_DB_INIT_HOST) == 0)
		mpt_prt(mpt, "restart succeeded");
	/* else error message already printed */

	/* Thaw the channel, causing scsipi to re-queue the commands. */
	scsipi_channel_thaw(&mpt->sc_channel, 1);
}

static int
mpt_drain_queue(mpt_softc_t *mpt)
{
	int nrepl = 0;
	uint32_t reply;

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl);
}

static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;
	int restart = 0;	/* nonzero if we need to restart the IOC */

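	/*
	 * The IOC posts two kinds of reply: a "context" reply, which is
	 * just the MsgContext of the completed request (the normal,
	 * successful case), and an "address" reply, which points at a
	 * full reply frame when the IOC has more to tell us (errors,
	 * control operations, and the like).
	 */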
	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt_reply != NULL) {
			if (mpt->verbose > 1) {
				uint32_t *pReply = (uint32_t *) mpt_reply;

				mpt_prt(mpt, "Address Reply (index %u):",
				    le32toh(mpt_reply->MsgContext) & 0xffff);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[0],
				    pReply[1], pReply[2], pReply[3]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[4],
				    pReply[5], pReply[6], pReply[7]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[8],
				    pReply[9], pReply[10], pReply[11]);
			}
			index = le32toh(mpt_reply->MsgContext);
		} else
			index = reply & MPT_CONTEXT_MASK;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "%s: index 0x%x, NULL reply", __func__,
			    index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "%s: invalid index (0x%x) in reply", __func__,
		    index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "%s: corrupted request_t (0x%x)", __func__,
		    index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: TASK MGMT", __func__);
		KASSERT(req == mpt->mngt_req);
		mpt->mngt_req = NULL;
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
	    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: unknown Function 0x%x (0x%x)",
			    __func__, mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "%s: no scsipi_xfer, index = 0x%x, seq = 0x%08x", __func__,
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
		    : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			    (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC overrun!", __func__);
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			    (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI residual mismatch!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		mpt_prt(mpt, "%s: IOC SCSI task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI task failed!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI bus reset!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		/*
		 * FreeBSD and Linux indicate this is a phase error between
		 * the IOC and the drive itself.  When this happens, the IOC
		 * becomes unhappy and stops processing all transactions.
		 * Call mpt_timeout, which knows how to get the IOC back
		 * on its feet.
		 */
		mpt_prt(mpt, "%s: IOC indicates protocol error -- "
		    "recovering...", __func__);
		xs->error = XS_TIMEOUT;
		restart = 1;

		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC returned unknown code: 0x%x", __func__,
		    le16toh(mpt_reply->IOCStatus));
		restart = 1;
		break;
	}

	if (mpt_reply != NULL) {
		if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
			memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
			    sizeof(xs->sense.scsi_sense));
		} else if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_AUTOSENSE_FAILED) {
			/*
			 * This will cause the scsipi layer to issue
			 * a REQUEST SENSE.
			 */
			if (xs->status == SCSI_CHECK)
				xs->error = XS_BUSY;
		}
	}

 done:
	if (mpt_reply != NULL && le16toh(mpt_reply->IOCStatus) &
	    MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		mpt_prt(mpt, "%s: IOC has error - logging...", __func__);
		mpt_ctlop(mpt, mpt_reply, reply);
	}

	/* If IOC done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
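	/*
	 * (The reply queue entry holds the reply frame's physical
	 * address shifted right by one bit, so shift it back before
	 * freeing.)
	 */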
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);

	if (restart) {
		mpt_prt(mpt, "%s: IOC fatal error: restarting...", __func__);
		mpt_restart(mpt, NULL);
	}
}

static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
	    (mpt->mpt_tag_enable &
	    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |=
				    MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |=
				    MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
	    (mpt->mpt_disc_enable &
	    (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
		    : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
		    : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;
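
			/*
			 * The transfer needs more scatter/gather elements
			 * than fit in the request frame itself.  Lay out a
			 * chained SGL: the first MPT_NSGL_FIRST(mpt) - 1
			 * simple elements go inline in the request,
			 * followed by chain elements pointing at further
			 * SGE lists that live later in the same request
			 * buffer.
			 */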
			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
					    * sizeof(SGE_SIMPLE32));
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = htole16(ntodo
					    * sizeof(SGE_SIMPLE32));
				}
				ce->Address = htole32(req->req_pbuf +
				    ((char *)se - (char *)mpt_req));
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address = htole32(
					    req->dmap->dm_segs[seg].ds_addr);
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
						    MPI_SGE_FLAGS_END_OF_LIST |
						    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					se->FlagsLength =
					    htole32(se->FlagsLength);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[i].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	if (xs->timeout == 0) {
		mpt_prt(mpt, "mpt_run_xfer: no timeout specified for "
		    "request: 0x%x", req->index);
		xs->timeout = 500;
	}

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	if (mpt->is_scsi) {
		/*
		 * SCSI transport settings only make any sense for
		 * parallel SCSI.
		 */

		tmp = mpt->mpt_dev_page1[xm->xm_target];

		/*
		 * Set the wide/narrow parameter for the target.
		 */
		if (xm->xm_mode & PERIPH_CAP_WIDE16)
			tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
		else
			tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

		/*
		 * Set the synchronous parameters for the target.
		 *
		 * XXX If we request sync transfers, we just go ahead and
		 * XXX request the maximum available.  We need finer control
		 * XXX in order to implement Domain Validation.
		 */
		tmp.RequestedParameters &=
		    ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
		    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
		    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
		    MPI_SCSIDEVPAGE1_RP_IU);
		if (xm->xm_mode & PERIPH_CAP_SYNC) {
			int factor, offset, np;

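			/*
			 * Port Page 0 Capabilities packs the minimum sync
			 * period factor in byte 1 and the maximum sync
			 * offset in byte 2; RequestedParameters uses the
			 * same layout, so the values can be copied over
			 * directly below.
			 */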
			factor = (mpt->mpt_port_page0.Capabilities >> 8)
			    & 0xff;
			offset = (mpt->mpt_port_page0.Capabilities >> 16)
			    & 0xff;
			np = 0;
			if (factor < 0x9) {
				/* Ultra320 */
				np |= MPI_SCSIDEVPAGE1_RP_QAS |
				    MPI_SCSIDEVPAGE1_RP_IU;
			}
			if (factor < 0xa) {
				/* at least Ultra160 */
				np |= MPI_SCSIDEVPAGE1_RP_DT;
			}
			np |= (factor << 8) | (offset << 16);
			tmp.RequestedParameters |= np;
		}

		host2mpt_config_page_scsi_device_1(&tmp);
		if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to write Device Page 1");
			return;
		}

		if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to read back Device Page 1");
			return;
		}

		mpt2host_config_page_scsi_device_1(&tmp);
		mpt->mpt_dev_page1[xm->xm_target] = tmp;
		if (mpt->verbose > 1) {
			mpt_prt(mpt,
			    "SPI Target %d Page 1: RequestedParameters %x "
			    "Config %x", xm->xm_target,
			    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
			    mpt->mpt_dev_page1[xm->xm_target].Configuration);
		}
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	host2mpt_config_page_scsi_device_0(&tmp);
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}
	mpt2host_config_page_scsi_device_0(&tmp);

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
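
	/*
	 * NegotiatedParameters uses the same byte layout as the
	 * requested parameters above: sync period factor in byte 1,
	 * offset in byte 2.  A zero offset means the target is
	 * running asynchronously.
	 */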
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	{
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	}

	case MPI_FUNCTION_CONFIG:
	{
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	}

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (le32toh(msg->Event)) {
	case MPI_EVENT_LOG_DATA:
	{
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x",
		    msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", device_xname(mpt->sc_dev));
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	}

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%0x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	case MPI_EVENT_QUEUE_FULL:
		/* This can get a little chatty */
		if (mpt->verbose > 0)
			mpt_prt(mpt, "Queue Full Event");
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
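		/*
		 * Set the high bit in the context so mpt_done() routes
		 * this completion to mpt_ctlop() instead of treating it
		 * as an ordinary SCSI I/O request.
		 */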
		ackp->MsgContext = htole32(req->index | 0x80000000);
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

static void
mpt_bus_reset(mpt_softc_t *mpt)
{
	request_t *req;
	MSG_SCSI_TASK_MGMT *mngt_req;
	int s;

	s = splbio();
	if (mpt->mngt_req) {
		/* request already queued; can't do more */
		splx(s);
		return;
	}
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		mpt_prt(mpt, "no mngt request");
		splx(s);
		return;
	}
	mpt->mngt_req = req;
	splx(s);
	mngt_req = req->req_vbuf;
	memset(mngt_req, 0, sizeof(*mngt_req));
	mngt_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
	mngt_req->Bus = mpt->bus;
	mngt_req->TargetID = 0;
	mngt_req->ChainOffset = 0;
	mngt_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
	mngt_req->Reserved1 = 0;
	mngt_req->MsgFlags =
	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0;
	mngt_req->MsgContext = req->index;
	mngt_req->TaskMsgContext = 0;
	s = splbio();
	mpt_send_handshake_cmd(mpt, sizeof(*mngt_req), mngt_req);
	splx(s);
}

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = device_private(adapt->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{
	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}

static int
mpt_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
    int flag, struct proc *p)
{
	mpt_softc_t *mpt;
	int s;

	mpt = device_private(chan->chan_adapter->adapt_dev);
	switch (cmd) {
	case SCBUSIORESET:
		mpt_bus_reset(mpt);
		s = splbio();
		mpt_intr(mpt);
		splx(s);
		return (0);
	default:
		return (ENOTTY);
	}
}