/*	$NetBSD: mpt_netbsd.c,v 1.22 2014/04/15 05:27:54 buhrow Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.22 2014/04/15 05:27:54 buhrow Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */
#include <sys/scsiio.h>

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_restart(mpt_softc_t *, request_t *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static int	mpt_drain_queue(mpt_softc_t *);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);
static void	mpt_bus_reset(mpt_softc_t *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);
static int	mpt_ioctl(struct scsipi_channel *, u_long, void *, int,
		    struct proc *);

/*
 * XXX - this assumes the device_private() of the attachment starts with
 * a struct mpt_softc, so we can use the return value of device_private()
 * straight without any offset.
 */
#define	DEV_TO_MPT(DEV)	device_private(DEV)

void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

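	/*
	 * The queue depth is capped at the smaller of the IOC's
	 * advertised global credits and our request pool size.
	 */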
	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use */
	adapt->adapt_max_periph = maxq - 2;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;
	adapt->adapt_ioctl = mpt_ioctl;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	if (mpt->is_sas) {
		chan->chan_bustype = &scsi_sas_bustype;
	} else if (mpt->is_fc) {
		chan->chan_bustype = &scsi_fc_bustype;
	} else {
		chan->chan_bustype = &scsi_bustype;
	}
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	/*
	 * Save the output of the config so we can rescan the bus in case of
	 * errors.
	 */
	mpt->sc_scsibus_dv = config_found(mpt->sc_dev, &mpt->sc_channel,
	    scsiprint);
}

int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(mpt->sc_dev, "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to allocate reply area, error = %d\n",
		    error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to map reply area, error = %d\n",
		    error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to create reply DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to load reply DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to allocate request area, "
		    "error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to map request area, error = %d\n",
		    error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to create request DMA map, "
		    "error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to load request DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

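	/*
	 * Carve the request area into MPT_REQUEST_AREA-sized slots,
	 * one per request_t; the last MPT_SENSE_SIZE bytes of each
	 * slot serve as that request's autosense buffer.
	 */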
	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(mpt->sc_dev, "unable to create req %d DMA map, "
			    "error = %d\n", i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

	nrepl = mpt_drain_queue(mpt);
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

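/*
 * Request-timeout handler.  Since the IOC can apparently lose
 * interrupts under heavy load, first poll it (and drain the reply
 * queue) in case the command actually completed; only if the request
 * is still outstanding do we flag XS_TIMEOUT and restart the IOC.
 */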
static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	mpt_softc_t *mpt;
	uint32_t oseq;
	int s, nrepl = 0;

	if (req->xfer == NULL) {
		printf("mpt_timeout: NULL xfer for request index 0x%x, sequence 0x%x\n",
		    req->index, req->sequence);
		return;
	}
	xs = req->xfer;
	periph = xs->xs_periph;
	mpt = DEV_TO_MPT(periph->periph_channel->chan_adapter->adapt_dev);
	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt->success++;
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}

	/*
	 * Ensure the IOC is really done giving us data since it appears it can
	 * sometimes fail to give us interrupts under heavy load.
	 */
	nrepl = mpt_drain_queue(mpt);
	if (nrepl) {
		mpt_prt(mpt, "mpt_timeout: recovered %d commands", nrepl);
	}

	if (req->sequence != oseq) {
		mpt->success++;
		splx(s);
		return;
	}

	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	xs->error = XS_TIMEOUT;
	splx(s);
	mpt_restart(mpt, req);
}

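/*
 * Reset and re-initialize the IOC, returning all pending commands to
 * scsipi for requeueing.  Called when a request has timed out or when
 * the IOC reports a fatal error.
 */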
static void
mpt_restart(mpt_softc_t *mpt, request_t *req0)
{
	int i, s, nreq;
	request_t *req;
	struct scsipi_xfer *xs;

	/* first, reset the IOC, leaving stopped so all requests are idle */
	if (mpt_soft_reset(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed");
		/*
		 * Don't try a hard reset since this mangles the PCI
		 * configuration registers.
		 */
		return;
	}

	/* Freeze the channel so scsipi doesn't queue more commands. */
	scsipi_channel_freeze(&mpt->sc_channel, 1);

	/* Return all pending requests to scsipi and de-allocate them. */
	s = splbio();
	nreq = 0;
	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
		req = &mpt->request_pool[i];
		xs = req->xfer;
		if (xs != NULL) {
			if (xs->datalen != 0)
				bus_dmamap_unload(mpt->sc_dmat, req->dmap);
			req->xfer = NULL;
			callout_stop(&xs->xs_callout);
			if (req != req0) {
				nreq++;
				xs->error = XS_REQUEUE;
			}
			scsipi_done(xs);
			/*
			 * mpt_init() below will reinitialize the
			 * request pool anyway, but free the request
			 * explicitly now.
			 */
			mpt_free_request(mpt, req);
		}
	}
	splx(s);
	if (nreq > 0)
		mpt_prt(mpt, "re-queued %d requests", nreq);

	/* Re-initialize the IOC (which restarts it). */
	if (mpt_init(mpt, MPT_DB_INIT_HOST) == 0)
		mpt_prt(mpt, "restart succeeded");
	/* else error message already printed */

	/* Thaw the channel, causing scsipi to re-queue the commands. */
	scsipi_channel_thaw(&mpt->sc_channel, 1);
}

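/*
 * Pull replies off the reply FIFO until it reads back empty, passing
 * each one to mpt_done().  Returns the number of replies processed.
 */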
static int
mpt_drain_queue(mpt_softc_t *mpt)
{
	int nrepl = 0;
	uint32_t reply;

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl);
}

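/*
 * Complete a single reply.  A context reply carries the index of the
 * originating request in its low bits and implies success; an address
 * reply (MPT_CONTEXT_REPLY set) refers to a full reply frame, which
 * the IOC uses for errors and driver control operations.
 */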
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;
	int restart = 0;	/* nonzero if we need to restart the IOC */

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    le32toh(mpt_reply->MsgContext) & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = le32toh(mpt_reply->MsgContext);
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		KASSERT(req == mpt->mngt_req);
		mpt->mngt_req = NULL;
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "mpt_done: IOC overrun!");
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "mpt_done: IOC SCSI residual mismatch!");
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		mpt_prt(mpt, "mpt_done: IOC SCSI task terminated!");
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "mpt_done: IOC SCSI task failed!");
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "mpt_done: IOC task terminated!");
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "mpt_done: IOC SCSI bus reset!");
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		/*
		 * FreeBSD and Linux indicate this is a phase error between
		 * the IOC and the drive itself.  When this happens, the IOC
		 * becomes unhappy and stops processing all transactions.
		 * Restart the IOC, which knows how to get back on its feet.
		 */
		mpt_prt(mpt, "mpt_done: IOC indicates protocol error -- recovering...");
		xs->error = XS_TIMEOUT;
		restart = 1;

		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "mpt_done: IOC returned unknown code: 0x%x",
		    le16toh(mpt_reply->IOCStatus));
		restart = 1;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

 done:
	if (mpt_reply != NULL &&
	    (le16toh(mpt_reply->IOCStatus) &
	     MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) {
		mpt_prt(mpt, "mpt_done: IOC has error - logging...");
		mpt_ctlop(mpt, mpt_reply, reply);
	}

	/* If IOC done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);

	if (restart) {
		mpt_prt(mpt, "mpt_done: IOC fatal error: restarting...");
		mpt_restart(mpt, NULL);
	}
}

static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

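		/*
		 * If the transfer needs more scatter/gather elements
		 * than fit in the request frame, lay down the first
		 * MPT_NSGL_FIRST(mpt) - 1 simple elements in line and
		 * describe the remainder with chain elements pointing
		 * further into the request buffer.
		 */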
		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
					    * sizeof(SGE_SIMPLE32));
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = htole16(ntodo
					    * sizeof(SGE_SIMPLE32));
				}
				ce->Address = htole32(req->req_pbuf +
				    ((char *)se - (char *)mpt_req));
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address = htole32(
					    req->dmap->dm_segs[seg].ds_addr);
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
							    MPI_SGE_FLAGS_END_OF_LIST |
							    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					se->FlagsLength =
					    htole32(se->FlagsLength);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD
			    : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[i].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD
			    : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	if (xs->timeout == 0) {
		mpt_prt(mpt, "mpt_run_xfer: no timeout specified for request: 0x%x",
		    req->index);
		xs->timeout = 500;
	}

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	if (mpt->is_scsi) {
		/*
		 * SCSI transport settings only make any sense for
		 * SCSI.
		 */

		tmp = mpt->mpt_dev_page1[xm->xm_target];

		/*
		 * Set the wide/narrow parameter for the target.
		 */
		if (xm->xm_mode & PERIPH_CAP_WIDE16)
			tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
		else
			tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

		/*
		 * Set the synchronous parameters for the target.
		 *
		 * XXX If we request sync transfers, we just go ahead and
		 * XXX request the maximum available.  We need finer control
		 * XXX in order to implement Domain Validation.
		 */
		tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
		    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
		    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
		    MPI_SCSIDEVPAGE1_RP_IU);
		if (xm->xm_mode & PERIPH_CAP_SYNC) {
			int factor, offset, np;

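			/*
			 * Port Page 0 advertises the adapter's limits:
			 * the minimum sync factor is in bits 8..15 of
			 * Capabilities, the maximum offset in bits
			 * 16..23.
			 */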
			factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
			offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
			np = 0;
			if (factor < 0x9) {
				/* Ultra320 */
				np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
			}
			if (factor < 0xa) {
				/* at least Ultra160 */
				np |= MPI_SCSIDEVPAGE1_RP_DT;
			}
			np |= (factor << 8) | (offset << 16);
			tmp.RequestedParameters |= np;
		}

		host2mpt_config_page_scsi_device_1(&tmp);
		if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to write Device Page 1");
			return;
		}

		if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to read back Device Page 1");
			return;
		}

		mpt2host_config_page_scsi_device_1(&tmp);
		mpt->mpt_dev_page1[xm->xm_target] = tmp;
		if (mpt->verbose > 1) {
			mpt_prt(mpt,
			    "SPI Target %d Page 1: RequestedParameters %x Config %x",
			    xm->xm_target,
			    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
			    mpt->mpt_dev_page1[xm->xm_target].Configuration);
		}
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

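/*
 * Read back SPI Device Page 0 and report the negotiated transfer
 * parameters (width, sync period/offset, tagged queueing) to the
 * scsipi layer as an async event.
 */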
static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	host2mpt_config_page_scsi_device_0(&tmp);
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}
	mpt2host_config_page_scsi_device_0(&tmp);

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

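/*
 * Handle a control-operation reply: any reply whose MsgContext has
 * the high bit set belongs to a driver-internal request (event
 * notification/ack, port enable, or configuration) rather than to a
 * SCSI I/O.
 */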
static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

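/*
 * Decode and log an asynchronous event notification from the IOC
 * and, when the IOC requests it, send back an EVENT_ACK carrying the
 * same Event and EventContext values.
 */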
static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (le32toh(msg->Event)) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", device_xname(mpt->sc_dev));
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%0x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	case MPI_EVENT_QUEUE_FULL:
		/* This can get a little chatty */
		if (mpt->verbose > 0)
			mpt_prt(mpt, "Queue Full Event");
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = htole32(req->index | 0x80000000);
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

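/*
 * Issue a SCSI bus reset as a task-management request.  mpt->mngt_req
 * serializes these: only one may be outstanding at a time, and it is
 * cleared in mpt_done() when the reply arrives.
 */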
static void
mpt_bus_reset(mpt_softc_t *mpt)
{
	request_t *req;
	MSG_SCSI_TASK_MGMT *mngt_req;
	int s;

	s = splbio();
	if (mpt->mngt_req) {
		/* request already queued; can't do more */
		splx(s);
		return;
	}
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		mpt_prt(mpt, "no mngt request");
		splx(s);
		return;
	}
	mpt->mngt_req = req;
	splx(s);
	mngt_req = req->req_vbuf;
	memset(mngt_req, 0, sizeof(*mngt_req));
	mngt_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
	mngt_req->Bus = mpt->bus;
	mngt_req->TargetID = 0;
	mngt_req->ChainOffset = 0;
	mngt_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
	mngt_req->Reserved1 = 0;
	mngt_req->MsgFlags =
	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0;
	mngt_req->MsgContext = req->index;
	mngt_req->TaskMsgContext = 0;
	s = splbio();
	mpt_send_handshake_cmd(mpt, sizeof(*mngt_req), mngt_req);
	splx(s);
}

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = DEV_TO_MPT(adapt->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

/*
 * Subtract one from the SGL limit, since we need an extra one to handle
 * a non-page-aligned transfer.
 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}

static int
mpt_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
    int flag, struct proc *p)
{
	mpt_softc_t *mpt;
	int s;

	mpt = device_private(chan->chan_adapter->adapt_dev);
	switch (cmd) {
	case SCBUSIORESET:
		mpt_bus_reset(mpt);
		s = splbio();
		mpt_intr(mpt);
		splx(s);
		return (0);
	default:
		return (ENOTTY);
	}
}