/*	$NetBSD: mpt_netbsd.c,v 1.20 2014/04/01 23:57:54 buhrow Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.20 2014/04/01 23:57:54 buhrow Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */
#include <sys/scsiio.h>

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_restart(mpt_softc_t *, request_t *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static int	mpt_drain_queue(mpt_softc_t *);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);
static void	mpt_bus_reset(mpt_softc_t *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);
static int	mpt_ioctl(struct scsipi_channel *, u_long, void *, int,
		    struct proc *);

/*
 * XXX - this assumes the device_private() of the attachment starts with
 * a struct mpt_softc, so we can use the return value of device_private()
 * directly, without applying any offset.
 */
#define	DEV_TO_MPT(DEV)	device_private(DEV)

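/*
 * mpt_scsipi_attach:
 *
 *	Attach the SCSIPI layer to an initialized Fusion controller:
 *	fill in the scsipi_adapter and scsipi_channel structures and
 *	attach the child scsibus.  Two request slots are held back
 *	for driver (control) use.
 */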
void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq - 2;	/* reserve 2 for driver use */
	adapt->adapt_max_periph = maxq - 2;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;
	adapt->adapt_ioctl = mpt_ioctl;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	if (mpt->is_sas) {
		chan->chan_bustype = &scsi_sas_bustype;
	} else if (mpt->is_fc) {
		chan->chan_bustype = &scsi_fc_bustype;
	} else {
		chan->chan_bustype = &scsi_bustype;
	}
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	/*
	 * Save the attached scsibus device so we can rescan the bus
	 * in case of errors.
	 */
	mpt->sc_scsibus_dv = config_found(mpt->sc_dev, &mpt->sc_channel,
	    scsiprint);
}

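/*
 * mpt_dma_mem_alloc:
 *
 *	Allocate the request pool and the DMA-able memory for the reply
 *	and request queues.  Each request slot gets MPT_REQUEST_AREA bytes
 *	of the request area; the last MPT_SENSE_SIZE bytes of each slot
 *	serve as that request's sense buffer.  Returns 0 on success, or
 *	an error number after unwinding all allocations on failure.
 */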
int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate reply area, error = %d\n", error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map reply area, error = %d\n", error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create reply DMA map, error = %d\n", error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load reply DMA map, error = %d\n", error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request area, error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map request area, error = %d\n", error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create request DMA map, error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load request DMA map, error = %d\n", error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(mpt->sc_dev,
			    "unable to create req %d DMA map, error = %d\n",
			    i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

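/*
 * mpt_intr:
 *
 *	Interrupt handler.  Returns non-zero if the interrupt was ours,
 *	i.e. if at least one reply was pending and has been drained.
 */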
int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) &
	    MPT_INTR_REPLY_READY) == 0)
		return (0);

	nrepl = mpt_drain_queue(mpt);
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

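/*
 * mpt_poll:
 *
 *	Poll for completion of a transfer for up to `count' milliseconds.
 *	Returns 0 if the transfer completed, non-zero on timeout.
 */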
static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

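/*
 * mpt_timeout:
 *
 *	Command timeout handler, scheduled via callout.  First tries to
 *	recover by draining the reply queue; if the request still hasn't
 *	completed, marks the transfer as timed out and restarts the IOC.
 */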
static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	mpt_softc_t *mpt;
	uint32_t oseq;
	int s, nrepl = 0;

	if (req->xfer == NULL) {
		printf("mpt_timeout: NULL xfer for request index 0x%x, "
		    "sequence 0x%x\n", req->index, req->sequence);
		return;
	}
	xs = req->xfer;
	periph = xs->xs_periph;
	mpt = DEV_TO_MPT(periph->periph_channel->chan_adapter->adapt_dev);
	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt->success++;
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}

	/*
	 * Ensure the IOC is really done giving us data, since it appears
	 * it can sometimes fail to give us interrupts under heavy load.
	 */
	nrepl = mpt_drain_queue(mpt);
	if (nrepl) {
		mpt_prt(mpt, "mpt_timeout: recovered %d commands", nrepl);
	}

	if (req->sequence != oseq) {
		mpt->success++;
		splx(s);
		return;
	}

	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	xs->error = XS_TIMEOUT;
	splx(s);
	mpt_restart(mpt, req);
}

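/*
 * mpt_restart:
 *
 *	Reset and re-initialize the IOC after a fatal error or timeout.
 *	All pending requests are returned to scsipi (requeued, except
 *	for `req0', whose error status has been set by the caller) and
 *	the channel is frozen while the IOC is brought back up.
 */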
static void
mpt_restart(mpt_softc_t *mpt, request_t *req0)
{
	int i, s, nreq;
	request_t *req;
	struct scsipi_xfer *xs;

	/* First, reset the IOC, leaving it stopped so all requests are idle. */
	if (mpt_soft_reset(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed");
		/*
		 * Don't try a hard reset, since that mangles the PCI
		 * configuration registers.
		 */
		return;
	}

	/* Freeze the channel so scsipi doesn't queue more commands. */
	scsipi_channel_freeze(&mpt->sc_channel, 1);

	/* Return all pending requests to scsipi and de-allocate them. */
	s = splbio();
	nreq = 0;
	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
		req = &mpt->request_pool[i];
		xs = req->xfer;
		if (xs != NULL) {
			if (xs->datalen != 0)
				bus_dmamap_unload(mpt->sc_dmat, req->dmap);
			req->xfer = NULL;
			callout_stop(&xs->xs_callout);
			if (req != req0) {
				nreq++;
				xs->error = XS_REQUEUE;
			}
			scsipi_done(xs);
			/*
			 * We don't strictly need to mpt_free_request() here,
			 * since mpt_init() below will free all requests
			 * anyway.
			 */
			mpt_free_request(mpt, req);
		}
	}
	splx(s);
	if (nreq > 0)
		mpt_prt(mpt, "re-queued %d requests", nreq);

	/* Re-initialize the IOC (which restarts it). */
	if (mpt_init(mpt, MPT_DB_INIT_HOST) == 0)
		mpt_prt(mpt, "restart succeeded");
	/* else error message already printed */

	/* Thaw the channel, causing scsipi to re-queue the commands. */
	scsipi_channel_thaw(&mpt->sc_channel, 1);
}

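/*
 * mpt_drain_queue:
 *
 *	Pop and process all pending replies from the IOC's reply queue.
 *	Returns the number of replies handled.
 */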
static int
mpt_drain_queue(mpt_softc_t *mpt)
{
	int nrepl = 0;
	uint32_t reply;

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl);
}

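/*
 * mpt_done:
 *
 *	Process a single reply from the IOC: map the reply back to its
 *	request, translate the IOC/SCSI status into scsipi terms, and
 *	complete the transfer.  Fatal IOC errors cause a restart via
 *	mpt_restart().
 */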
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;
	int restart = 0;	/* non-zero if we need to restart the IOC */

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    le32toh(mpt_reply->MsgContext) & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = le32toh(mpt_reply->MsgContext);
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		KASSERT(req == mpt->mngt_req);
		mpt->mngt_req = NULL;
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "mpt_done: IOC overrun!");
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "mpt_done: IOC SCSI residual mismatch!");
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		mpt_prt(mpt, "mpt_done: IOC SCSI task terminated!");
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "mpt_done: IOC SCSI task failed!");
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "mpt_done: IOC task terminated!");
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "mpt_done: IOC SCSI bus reset!");
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		/*
		 * FreeBSD and Linux indicate this is a phase error between
		 * the IOC and the drive itself.  When this happens, the IOC
		 * becomes unhappy and stops processing all transactions.
		 * Restart the IOC, which knows how to get it back on its
		 * feet.
		 */
		mpt_prt(mpt,
		    "mpt_done: IOC indicates protocol error -- recovering...");
		xs->error = XS_TIMEOUT;
		restart = 1;

		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "mpt_done: IOC returned unknown code: 0x%x",
		    le16toh(mpt_reply->IOCStatus));
		restart = 1;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

 done:
	if (mpt_reply != NULL && (le16toh(mpt_reply->IOCStatus) &
	    MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) {
		mpt_prt(mpt, "mpt_done: IOC has error - logging...");
		mpt_ctlop(mpt, mpt_reply, reply);
	}

	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);

	if (restart) {
		mpt_prt(mpt, "mpt_done: IOC fatal error: restarting...");
		mpt_restart(mpt, NULL);
	}
}

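/*
 * mpt_run_xfer:
 *
 *	Build and queue a SCSI I/O request for the IOC: allocate a
 *	request slot, fill in the MSG_SCSI_IO_REQUEST, construct the
 *	SG list (chaining if the transfer needs more than
 *	MPT_NSGL_FIRST segments), and send the command.  Polls for
 *	completion when interrupts cannot be used.
 */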
static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |=
				    MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |=
				    MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
					    * sizeof(SGE_SIMPLE32));
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = htole16(ntodo
					    * sizeof(SGE_SIMPLE32));
				}
				ce->Address = htole32(req->req_pbuf +
				    ((char *)se - (char *)mpt_req));
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address = htole32(
					    req->dmap->dm_segs[seg].ds_addr);
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
							    MPI_SGE_FLAGS_END_OF_LIST |
							    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					se->FlagsLength =
					    htole32(se->FlagsLength);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[i].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	if (xs->timeout == 0) {
		mpt_prt(mpt, "mpt_run_xfer: no timeout specified for "
		    "request: 0x%x", req->index);
		xs->timeout = 500;
	}

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll for completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

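/*
 * mpt_set_xfer_mode:
 *
 *	Handle an ADAPTER_REQ_SET_XFER_MODE request from scsipi: record
 *	the tagging preference and, on parallel SCSI, write the
 *	wide/sync settings to SPI Device Page 1.  A report of the
 *	negotiated mode is scheduled for the next successful command
 *	completion.
 */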
static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	if (mpt->is_scsi) {
		/*
		 * SCSI transport settings only make sense for
		 * parallel SCSI.
		 */

		tmp = mpt->mpt_dev_page1[xm->xm_target];

		/*
		 * Set the wide/narrow parameter for the target.
		 */
		if (xm->xm_mode & PERIPH_CAP_WIDE16)
			tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
		else
			tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

		/*
		 * Set the synchronous parameters for the target.
		 *
		 * XXX If we request sync transfers, we just go ahead and
		 * XXX request the maximum available.  We need finer control
		 * XXX in order to implement Domain Validation.
		 */
		tmp.RequestedParameters &=
		    ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
		      MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
		      MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
		      MPI_SCSIDEVPAGE1_RP_IU);
		if (xm->xm_mode & PERIPH_CAP_SYNC) {
			int factor, offset, np;

			factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
			offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
			np = 0;
			if (factor < 0x9) {
				/* Ultra320 */
				np |= MPI_SCSIDEVPAGE1_RP_QAS |
				    MPI_SCSIDEVPAGE1_RP_IU;
			}
			if (factor < 0xa) {
				/* at least Ultra160 */
				np |= MPI_SCSIDEVPAGE1_RP_DT;
			}
			np |= (factor << 8) | (offset << 16);
			tmp.RequestedParameters |= np;
		}

		host2mpt_config_page_scsi_device_1(&tmp);
		if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to write Device Page 1");
			return;
		}

		if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to read back Device Page 1");
			return;
		}

		mpt2host_config_page_scsi_device_1(&tmp);
		mpt->mpt_dev_page1[xm->xm_target] = tmp;
		if (mpt->verbose > 1) {
			mpt_prt(mpt,
			    "SPI Target %d Page 1: RequestedParameters %x "
			    "Config %x", xm->xm_target,
			    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
			    mpt->mpt_dev_page1[xm->xm_target].Configuration);
		}
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

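/*
 * mpt_get_xfer_mode:
 *
 *	Read the negotiated transfer parameters for a target from SPI
 *	Device Page 0 and deliver them to scsipi as an
 *	ASYNC_EVENT_XFER_MODE event.
 */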
static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	host2mpt_config_page_scsi_device_0(&tmp);
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}
	mpt2host_config_page_scsi_device_0(&tmp);

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

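/*
 * mpt_ctlop:
 *
 *	Handle a control-path reply from the IOC (event notifications,
 *	event acks, port enable, and configuration replies), returning
 *	reply buffers to the IOC as appropriate.
 */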
static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

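/*
 * mpt_event_notify_reply:
 *
 *	Decode and log an asynchronous event notification from the IOC,
 *	and send the EventAck the IOC asks for, if any.
 */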
static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (le32toh(msg->Event)) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", device_xname(mpt->sc_dev));
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    msg->Data[0] & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    msg->Data[0] & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    msg->Data[0] & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    msg->Data[0] & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%02x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	case MPI_EVENT_QUEUE_FULL:
		/* This can get a little chatty */
		if (mpt->verbose > 0)
			mpt_prt(mpt, "Queue Full Event");
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = htole32(req->index | 0x80000000);
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

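/*
 * mpt_bus_reset:
 *
 *	Issue a SCSI bus reset (or a LIP on Fibre Channel) through the
 *	task management interface.  Only one management request may be
 *	outstanding at a time.
 */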
static void
mpt_bus_reset(mpt_softc_t *mpt)
{
	request_t *req;
	MSG_SCSI_TASK_MGMT *mngt_req;
	int s;

	s = splbio();
	if (mpt->mngt_req) {
		/* request already queued; can't do more */
		splx(s);
		return;
	}
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		mpt_prt(mpt, "no mngt request");
		splx(s);
		return;
	}
	mpt->mngt_req = req;
	splx(s);
	mngt_req = req->req_vbuf;
	memset(mngt_req, 0, sizeof(*mngt_req));
	mngt_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
	mngt_req->Bus = mpt->bus;
	mngt_req->TargetID = 0;
	mngt_req->ChainOffset = 0;
	mngt_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
	mngt_req->Reserved1 = 0;
	mngt_req->MsgFlags =
	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0;
	mngt_req->MsgContext = req->index;
	mngt_req->TaskMsgContext = 0;
	s = splbio();
	mpt_send_handshake_cmd(mpt, sizeof(*mngt_req), mngt_req);
	/* mpt_enable_ints(mpt); */
	splx(s);
}

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

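/*
 * mpt_scsipi_request:
 *
 *	The scsipi adapter entry point: dispatch transfer, resource-grow,
 *	and xfer-mode requests from the SCSIPI layer.
 */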
static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = DEV_TO_MPT(adapt->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

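/*
 * mpt_minphys:
 *
 *	Clamp the transfer size to what the SG list can describe.  With
 *	MPT_SGL_MAX segments of PAGE_SIZE each, a buffer that doesn't
 *	start on a page boundary spans one extra page, hence the
 *	(MPT_SGL_MAX - 1) * PAGE_SIZE limit.
 */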
static void
mpt_minphys(struct buf *bp)
{

	/*
	 * Subtract one from the SGL limit, since we need an extra one
	 * to handle a non-page-aligned transfer.
	 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}

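/*
 * mpt_ioctl:
 *
 *	Adapter ioctl entry point; currently only SCBUSIORESET (force
 *	a bus reset) is supported.
 */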
static int
mpt_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
    int flag, struct proc *p)
{
	mpt_softc_t *mpt;
	int s;

	mpt = device_private(chan->chan_adapter->adapt_dev);
	switch (cmd) {
	case SCBUSIORESET:
		mpt_bus_reset(mpt);
		s = splbio();
		mpt_intr(mpt);
		splx(s);
		return (0);
	default:
		return (ENOTTY);
	}
}