/*	$NetBSD: mpt_netbsd.c,v 1.20.2.1 2014/08/10 06:54:52 tls Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.20.2.1 2014/08/10 06:54:52 tls Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */
#include <sys/scsiio.h>

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_restart(mpt_softc_t *, request_t *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static int	mpt_drain_queue(mpt_softc_t *);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);
static void	mpt_bus_reset(mpt_softc_t *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);
static int	mpt_ioctl(struct scsipi_channel *, u_long, void *, int,
		    struct proc *);

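/*
 * Attach glue: fill in the scsipi_adapter and scsipi_channel and hand
 * the channel to config_found() so scsipi probes the bus.  Note that
 * two of the IOC's request credits are held back from adapt_openings
 * for the driver's own use (e.g. task management and control
 * operations).
 */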
void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use */
	adapt->adapt_max_periph = maxq - 2;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;
	adapt->adapt_ioctl = mpt_ioctl;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	if (mpt->is_sas) {
		chan->chan_bustype = &scsi_sas_bustype;
	} else if (mpt->is_fc) {
		chan->chan_bustype = &scsi_fc_bustype;
	} else {
		chan->chan_bustype = &scsi_bustype;
	}
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	/*
	 * Save the output of the config so we can rescan the bus in case of
	 * errors.
	 */
	mpt->sc_scsibus_dv = config_found(mpt->sc_dev, &mpt->sc_channel,
	    scsiprint);
}

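/*
 * Allocate the DMA-able memory the driver needs: one page of reply
 * frames, plus a contiguous request area carved into MPT_REQUEST_AREA
 * byte chunks, one per request.  The last MPT_SENSE_SIZE bytes of each
 * chunk double as that request's autosense buffer, which is why
 * sense_pbuf/sense_vbuf below point just short of the next chunk.
 * Each request also gets its own DMA map (up to MPT_SGL_MAX segments)
 * for data transfers.
 */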
int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate reply area, error = %d\n", error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map reply area, error = %d\n", error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create reply DMA map, error = %d\n", error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load reply DMA map, error = %d\n", error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request area, error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map request area, error = %d\n", error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create request DMA map, error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load request DMA map, error = %d\n", error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(mpt->sc_dev,
			    "unable to create req %d DMA map, error = %d\n",
			    i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

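/*
 * Interrupt handler: claim the interrupt only if the IOC says a reply
 * is ready, then drain the reply queue.
 */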
int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

	nrepl = mpt_drain_queue(mpt);
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

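/*
 * Command timeout handler.  Before declaring a command dead we poke
 * the interrupt handler and then drain the reply queue by hand, since
 * the IOC appears to drop interrupts under heavy load; if the
 * request's sequence number changed, the command actually completed
 * and we merely recovered it.  Only if it is still outstanding do we
 * declare a timeout and restart the IOC.
 */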
static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	mpt_softc_t *mpt;
	uint32_t oseq;
	int s, nrepl = 0;

	if (req->xfer == NULL) {
		printf("mpt_timeout: NULL xfer for request index 0x%x, "
		    "sequence 0x%x\n", req->index, req->sequence);
		return;
	}
	xs = req->xfer;
	periph = xs->xs_periph;
	mpt = device_private(periph->periph_channel->chan_adapter->adapt_dev);
	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt->success++;
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}

	/*
	 * Ensure the IOC is really done giving us data since it appears it can
	 * sometimes fail to give us interrupts under heavy load.
	 */
	nrepl = mpt_drain_queue(mpt);
	if (nrepl) {
		mpt_prt(mpt, "mpt_timeout: recovered %d commands", nrepl);
	}

	if (req->sequence != oseq) {
		mpt->success++;
		splx(s);
		return;
	}

	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	xs->error = XS_TIMEOUT;
	splx(s);
	mpt_restart(mpt, req);
}

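/*
 * Restart the IOC after a fatal error or timeout: soft-reset the chip
 * (a hard reset would mangle the PCI configuration registers), freeze
 * the scsipi channel, hand every pending xfer except the faulting one
 * (req0) back to scsipi for requeueing, re-run mpt_init(), and thaw
 * the channel so the requeued commands are resubmitted.
 */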
static void
mpt_restart(mpt_softc_t *mpt, request_t *req0)
{
	int i, s, nreq;
	request_t *req;
	struct scsipi_xfer *xs;

	/* First, reset the IOC, leaving it stopped so all requests are idle. */
	if (mpt_soft_reset(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed");
		/*
		 * Don't try a hard reset since this mangles the PCI
		 * configuration registers.
		 */
		return;
	}

	/* Freeze the channel so scsipi doesn't queue more commands. */
	scsipi_channel_freeze(&mpt->sc_channel, 1);

	/* Return all pending requests to scsipi and de-allocate them. */
	s = splbio();
	nreq = 0;
	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
		req = &mpt->request_pool[i];
		xs = req->xfer;
		if (xs != NULL) {
			if (xs->datalen != 0)
				bus_dmamap_unload(mpt->sc_dmat, req->dmap);
			req->xfer = NULL;
			callout_stop(&xs->xs_callout);
			if (req != req0) {
				nreq++;
				xs->error = XS_REQUEUE;
			}
			scsipi_done(xs);
			/*
			 * mpt_init() below will reinitialize the request
			 * pool anyway, but release the request here too
			 * so its bookkeeping stays consistent.
			 */
			mpt_free_request(mpt, req);
		}
	}
	splx(s);
	if (nreq > 0)
		mpt_prt(mpt, "re-queued %d requests", nreq);

	/* Re-initialize the IOC (which restarts it). */
	if (mpt_init(mpt, MPT_DB_INIT_HOST) == 0)
		mpt_prt(mpt, "restart succeeded");
	/* else error message already printed */

	/* Thaw the channel, causing scsipi to re-queue the commands. */
	scsipi_channel_thaw(&mpt->sc_channel, 1);
}

static int
mpt_drain_queue(mpt_softc_t *mpt)
{
	int nrepl = 0;
	uint32_t reply;

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl);
}

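/*
 * Complete a single reply from the IOC.  A "context reply" (high bit
 * clear) carries only the request index and implies success; an
 * "address reply" (high bit set) identifies a full reply frame, which
 * MPT_REPLY_PTOV() maps back to a virtual address so IOCStatus and
 * the SCSI status can be examined.  Address reply frames are returned
 * to the IOC with mpt_free_reply() when we are done with them.
 */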
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;
	int restart = 0;	/* nonzero if we need to restart the IOC */

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt_reply != NULL) {
			if (mpt->verbose > 1) {
				uint32_t *pReply = (uint32_t *) mpt_reply;

				mpt_prt(mpt, "Address Reply (index %u):",
				    le32toh(mpt_reply->MsgContext) & 0xffff);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[0],
				    pReply[1], pReply[2], pReply[3]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[4],
				    pReply[5], pReply[6], pReply[7]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[8],
				    pReply[9], pReply[10], pReply[11]);
			}
			index = le32toh(mpt_reply->MsgContext);
		} else
			index = reply & MPT_CONTEXT_MASK;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "%s: index 0x%x, NULL reply", __func__,
			    index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "%s: invalid index (0x%x) in reply", __func__,
		    index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "%s: corrupted request_t (0x%x)", __func__,
		    index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: TASK MGMT", __func__);
		KASSERT(req == mpt->mngt_req);
		mpt->mngt_req = NULL;
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
	    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: unknown Function 0x%x (0x%x)",
			    __func__, mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "%s: no scsipi_xfer, index = 0x%x, seq = 0x%08x", __func__,
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
		    : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			    (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC overrun!", __func__);
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			    (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI residual mismatch!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		mpt_prt(mpt, "%s: IOC SCSI task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI task failed!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI bus reset!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		/*
		 * FreeBSD and Linux indicate this is a phase error between
		 * the IOC and the drive itself.  When this happens, the IOC
		 * becomes unhappy and stops processing all transactions.
		 * Call mpt_timeout which knows how to get the IOC back
		 * on its feet.
		 */
		mpt_prt(mpt, "%s: IOC indicates protocol error -- "
		    "recovering...", __func__);
		xs->error = XS_TIMEOUT;
		restart = 1;

		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC returned unknown code: 0x%x", __func__,
		    le16toh(mpt_reply->IOCStatus));
		restart = 1;
		break;
	}

	if (mpt_reply != NULL) {
		if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
			memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
			    sizeof(xs->sense.scsi_sense));
		} else if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_AUTOSENSE_FAILED) {
			/*
			 * This will cause the scsipi layer to issue
			 * a REQUEST SENSE.
			 */
			if (xs->status == SCSI_CHECK)
				xs->error = XS_BUSY;
		}
	}

 done:
	if (mpt_reply != NULL && le16toh(mpt_reply->IOCStatus) &
	    MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		mpt_prt(mpt, "%s: IOC has error - logging...", __func__);
		mpt_ctlop(mpt, mpt_reply, reply);
	}

	/* If IOC done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);

	if (restart) {
		mpt_prt(mpt, "%s: IOC fatal error: restarting...", __func__);
		mpt_restart(mpt, NULL);
	}
}

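/*
 * Build and submit one SCSI I/O request.  The request's pool index is
 * stored in MsgContext so mpt_done() can recover the request_t from
 * the completion.  Data is described with 32-bit simple SGEs; when a
 * transfer needs more than MPT_NSGL_FIRST() elements, the rest are
 * strung off chain elements that live in the same request frame
 * (message offsets are in 32-bit words, hence the ">> 2" below).
 */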
static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
	    (mpt->mpt_tag_enable &
	     (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
	    (mpt->mpt_disc_enable &
	     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
		    : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
		    : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
					    * sizeof(SGE_SIMPLE32));
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = htole16(ntodo
					    * sizeof(SGE_SIMPLE32));
				}
				ce->Address = htole32(req->req_pbuf +
				    ((char *)se - (char *)mpt_req));
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address = htole32(
					    req->dmap->dm_segs[seg].ds_addr);
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
							    MPI_SGE_FLAGS_END_OF_LIST |
							    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					se->FlagsLength =
					    htole32(se->FlagsLength);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[i].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	if (xs->timeout == 0) {
		mpt_prt(mpt,
		    "mpt_run_xfer: no timeout specified for request: 0x%x",
		    req->index);
		xs->timeout = 500;
	}

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

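/*
 * Apply a transfer mode requested by scsipi.  Tagged queueing and
 * disconnect are tracked in per-target bitmasks.  For parallel SCSI
 * the wide/sync/DT/QAS/IU parameters are also written to the target's
 * SCSI Device Page 1 and read back; the sync period factor and offset
 * come straight from Port Page 0, i.e. we simply ask for the fastest
 * the adapter supports (see the XXX about Domain Validation below).
 */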
static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	if (mpt->is_scsi) {
		/*
		 * SCSI transport settings only make any sense for
		 * parallel SCSI.
		 */

		tmp = mpt->mpt_dev_page1[xm->xm_target];

		/*
		 * Set the wide/narrow parameter for the target.
		 */
		if (xm->xm_mode & PERIPH_CAP_WIDE16)
			tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
		else
			tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

		/*
		 * Set the synchronous parameters for the target.
		 *
		 * XXX If we request sync transfers, we just go ahead and
		 * XXX request the maximum available.  We need finer control
		 * XXX in order to implement Domain Validation.
		 */
		tmp.RequestedParameters &=
		    ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
		    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
		    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
		    MPI_SCSIDEVPAGE1_RP_IU);
		if (xm->xm_mode & PERIPH_CAP_SYNC) {
			int factor, offset, np;

			factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
			offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
			np = 0;
			if (factor < 0x9) {
				/* Ultra320 */
				np |= MPI_SCSIDEVPAGE1_RP_QAS |
				    MPI_SCSIDEVPAGE1_RP_IU;
			}
			if (factor < 0xa) {
				/* at least Ultra160 */
				np |= MPI_SCSIDEVPAGE1_RP_DT;
			}
			np |= (factor << 8) | (offset << 16);
			tmp.RequestedParameters |= np;
		}

		host2mpt_config_page_scsi_device_1(&tmp);
		if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to write Device Page 1");
			return;
		}

		if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to read back Device Page 1");
			return;
		}

		mpt2host_config_page_scsi_device_1(&tmp);
		mpt->mpt_dev_page1[xm->xm_target] = tmp;
		if (mpt->verbose > 1) {
			mpt_prt(mpt,
			    "SPI Target %d Page 1: RequestedParameters %x Config %x",
			    xm->xm_target,
			    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
			    mpt->mpt_dev_page1[xm->xm_target].Configuration);
		}
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	host2mpt_config_page_scsi_device_0(&tmp);
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}
	mpt2host_config_page_scsi_device_0(&tmp);

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

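/*
 * Handle a control operation reply, i.e. a reply whose MsgContext has
 * the high bit set: event notifications, event ACKs, port enable and
 * configuration replies.  Most of these only need their reply frame
 * handed back to the IOC.
 */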
static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (le32toh(msg->Event)) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", device_xname(mpt->sc_dev));
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
1448 "Unknown event (%0x)",
1449 msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	case MPI_EVENT_QUEUE_FULL:
		/* This can get a little chatty */
		if (mpt->verbose > 0)
			mpt_prt(mpt, "Queue Full Event");
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", le32toh(msg->Event));
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = htole32(req->index | 0x80000000);
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

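/*
 * Issue a SCSI bus reset as a task-management request.  Only one
 * management request may be outstanding at a time (mpt->mngt_req),
 * and task management goes out through the doorbell handshake rather
 * than the normal request queue; mpt_done() clears mngt_req when the
 * reply comes back.
 */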
static void
mpt_bus_reset(mpt_softc_t *mpt)
{
	request_t *req;
	MSG_SCSI_TASK_MGMT *mngt_req;
	int s;

	s = splbio();
	if (mpt->mngt_req) {
		/* request already queued; can't do more */
		splx(s);
		return;
	}
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		mpt_prt(mpt, "no mngt request");
		splx(s);
		return;
	}
	mpt->mngt_req = req;
	splx(s);
	mngt_req = req->req_vbuf;
	memset(mngt_req, 0, sizeof(*mngt_req));
	mngt_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
	mngt_req->Bus = mpt->bus;
	mngt_req->TargetID = 0;
	mngt_req->ChainOffset = 0;
	mngt_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
	mngt_req->Reserved1 = 0;
	mngt_req->MsgFlags =
	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0;
	mngt_req->MsgContext = htole32(req->index);
	mngt_req->TaskMsgContext = 0;
	s = splbio();
	mpt_send_handshake_cmd(mpt, sizeof(*mngt_req), mngt_req);
	splx(s);
}

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = device_private(adapt->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

	/*
	 * Subtract one from the SGL limit, since we need an extra one to handle
	 * a non-page-aligned transfer.
	 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}

static int
mpt_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
    int flag, struct proc *p)
{
	mpt_softc_t *mpt;
	int s;

	mpt = device_private(chan->chan_adapter->adapt_dev);
	switch (cmd) {
	case SCBUSIORESET:
		mpt_bus_reset(mpt);
		s = splbio();
		mpt_intr(mpt);
		splx(s);
		return (0);
	default:
		return (ENOTTY);
	}
}