/*	$NetBSD: mpt_netbsd.c,v 1.31 2014/09/28 11:27:00 jmcneill Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.31 2014/09/28 11:27:00 jmcneill Exp $");

#include "bio.h"

#include <dev/ic/mpt.h>			/* pulls in all headers */
#include <sys/scsiio.h>

#if NBIO > 0
#include <dev/biovar.h>
#endif

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_restart(mpt_softc_t *, request_t *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static int	mpt_drain_queue(mpt_softc_t *);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);
static void	mpt_bus_reset(mpt_softc_t *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);
static int	mpt_ioctl(struct scsipi_channel *, u_long, void *, int,
		    struct proc *);

#if NBIO > 0
static bool	mpt_is_raid(mpt_softc_t *);
static int	mpt_bio_ioctl(device_t, u_long, void *);
static int	mpt_bio_ioctl_inq(mpt_softc_t *, struct bioc_inq *);
static int	mpt_bio_ioctl_vol(mpt_softc_t *, struct bioc_vol *);
static int	mpt_bio_ioctl_disk(mpt_softc_t *, struct bioc_disk *);
static int	mpt_bio_ioctl_disk_novol(mpt_softc_t *, struct bioc_disk *);
static int	mpt_bio_ioctl_setstate(mpt_softc_t *, struct bioc_setstate *);
#endif

void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

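	/*
	 * Cap the queue depth at the lesser of what the IOC advertises
	 * (its global credits) and the size of the driver's request pool.
	 */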
	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use */
	adapt->adapt_max_periph = maxq - 2;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;
	adapt->adapt_ioctl = mpt_ioctl;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	if (mpt->is_sas) {
		chan->chan_bustype = &scsi_sas_bustype;
	} else if (mpt->is_fc) {
		chan->chan_bustype = &scsi_fc_bustype;
	} else {
		chan->chan_bustype = &scsi_bustype;
	}
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	/*
	 * Save the output of the config so we can rescan the bus in case of
	 * errors.
	 */
	mpt->sc_scsibus_dv = config_found(mpt->sc_dev, &mpt->sc_channel,
	    scsiprint);

#if NBIO > 0
	if (mpt_is_raid(mpt)) {
		if (bio_register(mpt->sc_dev, mpt_bio_ioctl) != 0)
			panic("%s: controller registration failed",
			    device_xname(mpt->sc_dev));
	}
#endif
}

int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate reply area, error = %d\n", error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map reply area, error = %d\n", error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create reply DMA map, error = %d\n", error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load reply DMA map, error = %d\n", error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request area, error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map request area, error = %d\n", error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create request DMA map, error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load request DMA map, error = %d\n", error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

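		/*
		 * Each request slot is MPT_REQUEST_AREA bytes; the last
		 * MPT_SENSE_SIZE bytes of the slot are used to hold the
		 * autosense data for that request.
		 */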
		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(mpt->sc_dev,
			    "unable to create req %d DMA map, error = %d\n",
			    i, error);
			goto fail_8;
		}
	}

	return (0);

fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

	nrepl = mpt_drain_queue(mpt);
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	mpt_softc_t *mpt;
	uint32_t oseq;
	int s, nrepl = 0;

	if (req->xfer == NULL) {
		printf("mpt_timeout: NULL xfer for request index 0x%x, "
		    "sequence 0x%x\n", req->index, req->sequence);
		return;
	}
	xs = req->xfer;
	periph = xs->xs_periph;
	mpt = device_private(periph->periph_channel->chan_adapter->adapt_dev);
	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt->success++;
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}

	/*
	 * Ensure the IOC is really done giving us data, since it appears
	 * it can sometimes fail to give us interrupts under heavy load.
	 */
	nrepl = mpt_drain_queue(mpt);
	if (nrepl) {
		mpt_prt(mpt, "mpt_timeout: recovered %d commands", nrepl);
	}

	if (req->sequence != oseq) {
		mpt->success++;
		splx(s);
		return;
	}

	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	xs->error = XS_TIMEOUT;
	splx(s);
	mpt_restart(mpt, req);
}

static void
mpt_restart(mpt_softc_t *mpt, request_t *req0)
{
	int i, s, nreq;
	request_t *req;
	struct scsipi_xfer *xs;

	/* First, reset the IOC, leaving it stopped so all requests are idle. */
	if (mpt_soft_reset(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed");
		/*
		 * Don't try a hard reset since this mangles the PCI
		 * configuration registers.
		 */
		return;
	}

	/* Freeze the channel so scsipi doesn't queue more commands. */
	scsipi_channel_freeze(&mpt->sc_channel, 1);

	/* Return all pending requests to scsipi and de-allocate them. */
	s = splbio();
	nreq = 0;
	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
		req = &mpt->request_pool[i];
		xs = req->xfer;
		if (xs != NULL) {
			if (xs->datalen != 0)
				bus_dmamap_unload(mpt->sc_dmat, req->dmap);
			req->xfer = NULL;
			callout_stop(&xs->xs_callout);
			if (req != req0) {
				nreq++;
				xs->error = XS_REQUEUE;
			}
			scsipi_done(xs);
			/*
			 * Don't need to mpt_free_request() since mpt_init()
			 * below will free all requests anyway.
			 */
			mpt_free_request(mpt, req);
		}
	}
	splx(s);
	if (nreq > 0)
		mpt_prt(mpt, "re-queued %d requests", nreq);

	/* Re-initialize the IOC (which restarts it). */
	if (mpt_init(mpt, MPT_DB_INIT_HOST) == 0)
		mpt_prt(mpt, "restart succeeded");
	/* else error message already printed */

	/* Thaw the channel, causing scsipi to re-queue the commands. */
	scsipi_channel_thaw(&mpt->sc_channel, 1);
}

static int
mpt_drain_queue(mpt_softc_t *mpt)
{
	int nrepl = 0;
	uint32_t reply;

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl);
}

static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;
	int restart = 0;	/* nonzero if we need to restart the IOC */

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt_reply != NULL) {
			if (mpt->verbose > 1) {
				uint32_t *pReply = (uint32_t *) mpt_reply;

				mpt_prt(mpt, "Address Reply (index %u):",
				    le32toh(mpt_reply->MsgContext) & 0xffff);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[0],
				    pReply[1], pReply[2], pReply[3]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[4],
				    pReply[5], pReply[6], pReply[7]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[8],
				    pReply[9], pReply[10], pReply[11]);
			}
			index = le32toh(mpt_reply->MsgContext);
		} else
			index = reply & MPT_CONTEXT_MASK;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "%s: index 0x%x, NULL reply", __func__,
			    index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "%s: invalid index (0x%x) in reply", __func__,
		    index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "%s: corrupted request_t (0x%x)", __func__,
		    index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: TASK MGMT", __func__);
		KASSERT(req == mpt->mngt_req);
		mpt->mngt_req = NULL;
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
	    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: unknown Function 0x%x (0x%x)",
			    __func__, mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "%s: no scsipi_xfer, index = 0x%x, seq = 0x%08x", __func__,
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC overrun!", __func__);
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI residual mismatch!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		mpt_prt(mpt, "%s: IOC SCSI task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI task failed!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI bus reset!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		/*
		 * FreeBSD and Linux indicate this is a phase error between
		 * the IOC and the drive itself.  When this happens, the IOC
		 * becomes unhappy and stops processing all transactions.
		 * Call mpt_timeout which knows how to get the IOC back
		 * on its feet.
		 */
		mpt_prt(mpt, "%s: IOC indicates protocol error -- "
		    "recovering...", __func__);
		xs->error = XS_TIMEOUT;
		restart = 1;

		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC returned unknown code: 0x%x", __func__,
		    le16toh(mpt_reply->IOCStatus));
		restart = 1;
		break;
	}

	if (mpt_reply != NULL) {
		if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
			memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
			    sizeof(xs->sense.scsi_sense));
		} else if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_AUTOSENSE_FAILED) {
			/*
			 * This will cause the scsipi layer to issue
			 * a REQUEST SENSE.
			 */
			if (xs->status == SCSI_CHECK)
				xs->error = XS_BUSY;
		}
	}

done:
	if (mpt_reply != NULL && (le16toh(mpt_reply->IOCStatus) &
	    MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) {
		mpt_prt(mpt, "%s: IOC has error - logging...", __func__);
		mpt_ctlop(mpt, mpt_reply, reply);
	}

	/* If IOC done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
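	/*
	 * (The queue entry appears to carry the frame's bus address shifted
	 * right one bit, with the high bit flagging an address reply;
	 * mpt_free_reply() wants the unshifted address, hence the << 1.)
	 */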
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);

	if (restart) {
		mpt_prt(mpt, "%s: IOC fatal error: restarting...", __func__);
		mpt_restart(mpt, NULL);
	}
}

static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
	    (mpt->mpt_tag_enable & (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
	    (mpt->mpt_disc_enable & (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
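			/* ChainOffset is counted in 32-bit words, hence >> 2. */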
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
					    * sizeof(SGE_SIMPLE32));
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = htole16(ntodo
					    * sizeof(SGE_SIMPLE32));
				}
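				/*
				 * The chain element's Address points back into
				 * this request frame, at the run of simple
				 * SGEs that immediately follows it.
				 */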
				ce->Address = htole32(req->req_pbuf +
				    ((char *)se - (char *)mpt_req));
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address = htole32(
					    req->dmap->dm_segs[seg].ds_addr);
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
							    MPI_SGE_FLAGS_END_OF_LIST |
							    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					se->FlagsLength =
					    htole32(se->FlagsLength);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[i].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	if (xs->timeout == 0) {
		mpt_prt(mpt,
		    "mpt_run_xfer: no timeout specified for request: 0x%x",
		    req->index);
		xs->timeout = 500;
	}

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	if (mpt->is_scsi) {
		/*
		 * SCSI transport settings only make sense for
		 * parallel SCSI.
		 */

		tmp = mpt->mpt_dev_page1[xm->xm_target];

		/*
		 * Set the wide/narrow parameter for the target.
		 */
		if (xm->xm_mode & PERIPH_CAP_WIDE16)
			tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
		else
			tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

		/*
		 * Set the synchronous parameters for the target.
		 *
		 * XXX If we request sync transfers, we just go ahead and
		 * XXX request the maximum available.  We need finer control
		 * XXX in order to implement Domain Validation.
		 */
		tmp.RequestedParameters &=
		    ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
		      MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
		      MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
		      MPI_SCSIDEVPAGE1_RP_IU);
		if (xm->xm_mode & PERIPH_CAP_SYNC) {
			int factor, offset, np;

			factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
			offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
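			/*
			 * factor and offset are the sync period and offset
			 * the port advertises in Port Page 0; they are packed
			 * into the MIN_SYNC_PERIOD (bits 8-15) and
			 * MAX_SYNC_OFFSET (bits 16-23) fields cleared above.
			 */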
			np = 0;
			if (factor < 0x9) {
				/* Ultra320 */
				np |= MPI_SCSIDEVPAGE1_RP_QAS |
				    MPI_SCSIDEVPAGE1_RP_IU;
			}
			if (factor < 0xa) {
				/* at least Ultra160 */
				np |= MPI_SCSIDEVPAGE1_RP_DT;
			}
			np |= (factor << 8) | (offset << 16);
			tmp.RequestedParameters |= np;
		}

		host2mpt_config_page_scsi_device_1(&tmp);
		if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to write Device Page 1");
			return;
		}

		if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to read back Device Page 1");
			return;
		}

		mpt2host_config_page_scsi_device_1(&tmp);
		mpt->mpt_dev_page1[xm->xm_target] = tmp;
		if (mpt->verbose > 1) {
			mpt_prt(mpt,
			    "SPI Target %d Page 1: RequestedParameters %x Config %x",
			    xm->xm_target,
			    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
			    mpt->mpt_dev_page1[xm->xm_target].Configuration);
		}
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	host2mpt_config_page_scsi_device_0(&tmp);
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}
	mpt2host_config_page_scsi_device_0(&tmp);

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (le32toh(msg->Event)) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", device_xname(mpt->sc_dev));
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (0x%02x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	case MPI_EVENT_QUEUE_FULL:
		/* This can get a little chatty */
		if (mpt->verbose > 0)
			mpt_prt(mpt, "Queue Full Event");
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
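		/*
		 * Set the high bit of MsgContext so the completion is
		 * routed through the control-op path in mpt_done() rather
		 * than the normal SCSI I/O path.
		 */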
		ackp->MsgContext = htole32(req->index | 0x80000000);
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

static void
mpt_bus_reset(mpt_softc_t *mpt)
{
	request_t *req;
	MSG_SCSI_TASK_MGMT *mngt_req;
	int s;

	s = splbio();
	if (mpt->mngt_req) {
		/* request already queued; can't do more */
		splx(s);
		return;
	}
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		mpt_prt(mpt, "no mngt request");
		splx(s);
		return;
	}
	mpt->mngt_req = req;
	splx(s);
	mngt_req = req->req_vbuf;
	memset(mngt_req, 0, sizeof(*mngt_req));
	mngt_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
	mngt_req->Bus = mpt->bus;
	mngt_req->TargetID = 0;
	mngt_req->ChainOffset = 0;
	mngt_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
	mngt_req->Reserved1 = 0;
	mngt_req->MsgFlags =
	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0;
	mngt_req->MsgContext = req->index;
	mngt_req->TaskMsgContext = 0;
	s = splbio();
	mpt_send_handshake_cmd(mpt, sizeof(*mngt_req), mngt_req);
	splx(s);
}

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = device_private(adapt->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

/*
 * Subtract one from the SGL limit, since we need an extra one to handle
 * a non-page-aligned transfer.
 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}

static int
mpt_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
    int flag, struct proc *p)
{
	mpt_softc_t *mpt;
	int s;

	mpt = device_private(chan->chan_adapter->adapt_dev);
	switch (cmd) {
	case SCBUSIORESET:
		mpt_bus_reset(mpt);
		s = splbio();
		mpt_intr(mpt);
		splx(s);
		return (0);
	default:
		return (ENOTTY);
	}
}

#if NBIO > 0
static fCONFIG_PAGE_IOC_2 *
mpt_get_cfg_page_ioc2(mpt_softc_t *mpt)
{
	fCONFIG_PAGE_HEADER hdr;
	fCONFIG_PAGE_IOC_2 *ioc2;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 2, 0, &hdr);
	if (rv)
		return NULL;

	ioc2 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
	if (ioc2 == NULL)
		return NULL;

	memcpy(ioc2, &hdr, sizeof(hdr));

	rv = mpt_read_cfg_page(mpt, 0, &ioc2->Header);
	if (rv)
		goto fail;
	mpt2host_config_page_ioc_2(ioc2);

	return ioc2;

fail:
	free(ioc2, M_DEVBUF);
	return NULL;
}

static fCONFIG_PAGE_IOC_3 *
mpt_get_cfg_page_ioc3(mpt_softc_t *mpt)
{
	fCONFIG_PAGE_HEADER hdr;
	fCONFIG_PAGE_IOC_3 *ioc3;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 3, 0, &hdr);
	if (rv)
		return NULL;

	ioc3 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
	if (ioc3 == NULL)
		return NULL;

	memcpy(ioc3, &hdr, sizeof(hdr));

	rv = mpt_read_cfg_page(mpt, 0, &ioc3->Header);
	if (rv)
		goto fail;

	return ioc3;

fail:
	free(ioc3, M_DEVBUF);
	return NULL;
}


static fCONFIG_PAGE_RAID_VOL_0 *
mpt_get_cfg_page_raid_vol0(mpt_softc_t *mpt, int address)
{
	fCONFIG_PAGE_HEADER hdr;
	fCONFIG_PAGE_RAID_VOL_0 *rvol0;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
	    address, &hdr);
	if (rv)
		return NULL;

	rvol0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
	if (rvol0 == NULL)
		return NULL;

	memcpy(rvol0, &hdr, sizeof(hdr));

	rv = mpt_read_cfg_page(mpt, address, &rvol0->Header);
	if (rv)
		goto fail;
	mpt2host_config_page_raid_vol_0(rvol0);

	return rvol0;

fail:
	free(rvol0, M_DEVBUF);
	return NULL;
}

static fCONFIG_PAGE_RAID_PHYS_DISK_0 *
mpt_get_cfg_page_raid_phys_disk0(mpt_softc_t *mpt, int address)
{
	fCONFIG_PAGE_HEADER hdr;
	fCONFIG_PAGE_RAID_PHYS_DISK_0 *physdisk0;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK, 0,
	    address, &hdr);
	if (rv)
		return NULL;

	physdisk0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
	if (physdisk0 == NULL)
		return NULL;

	memcpy(physdisk0, &hdr, sizeof(hdr));

	rv = mpt_read_cfg_page(mpt, address, &physdisk0->Header);
	if (rv)
		goto fail;
	mpt2host_config_page_raid_phys_disk_0(physdisk0);

	return physdisk0;

fail:
	free(physdisk0, M_DEVBUF);
	return NULL;
}

static bool
mpt_is_raid(mpt_softc_t *mpt)
{
	fCONFIG_PAGE_IOC_2 *ioc2;
	bool is_raid = false;

	ioc2 = mpt_get_cfg_page_ioc2(mpt);
	if (ioc2 == NULL)
		return false;

	if (ioc2->CapabilitiesFlags != 0xdeadbeef) {
		is_raid = !!(ioc2->CapabilitiesFlags &
		    (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT|
		     MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT|
		     MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT));
	}

	free(ioc2, M_DEVBUF);

	return is_raid;
}

static int
mpt_bio_ioctl(device_t dev, u_long cmd, void *addr)
{
	mpt_softc_t *mpt = device_private(dev);
	int error, s;

	KERNEL_LOCK(1, curlwp);
	s = splbio();

	switch (cmd) {
	case BIOCINQ:
		error = mpt_bio_ioctl_inq(mpt, addr);
		break;
	case BIOCVOL:
		error = mpt_bio_ioctl_vol(mpt, addr);
		break;
	case BIOCDISK_NOVOL:
		error = mpt_bio_ioctl_disk_novol(mpt, addr);
		break;
	case BIOCDISK:
		error = mpt_bio_ioctl_disk(mpt, addr);
		break;
	case BIOCSETSTATE:
		error = mpt_bio_ioctl_setstate(mpt, addr);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);
	KERNEL_UNLOCK_ONE(curlwp);

	return error;
}

static int
mpt_bio_ioctl_inq(mpt_softc_t *mpt, struct bioc_inq *bi)
{
	fCONFIG_PAGE_IOC_2 *ioc2;
	fCONFIG_PAGE_IOC_3 *ioc3;

	ioc2 = mpt_get_cfg_page_ioc2(mpt);
	if (ioc2 == NULL)
		return EIO;
	ioc3 = mpt_get_cfg_page_ioc3(mpt);
	if (ioc3 == NULL) {
		free(ioc2, M_DEVBUF);
		return EIO;
	}

	strlcpy(bi->bi_dev, device_xname(mpt->sc_dev), sizeof(bi->bi_dev));
	bi->bi_novol = ioc2->NumActiveVolumes;
	bi->bi_nodisk = ioc3->NumPhysDisks;

	free(ioc2, M_DEVBUF);
	free(ioc3, M_DEVBUF);

	return 0;
}

static int
mpt_bio_ioctl_vol(mpt_softc_t *mpt, struct bioc_vol *bv)
{
	fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
	fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
	fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data inqbuf;
	char vendor[9], product[17], revision[5];
	int address;

	ioc2 = mpt_get_cfg_page_ioc2(mpt);
	if (ioc2 == NULL)
		return EIO;

	if (bv->bv_volid < 0 || bv->bv_volid >= ioc2->NumActiveVolumes)
		goto fail;

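	/*
	 * RAID volume config pages are addressed by the volume's target ID
	 * (bits 0-7) and bus (bits 8-15).
	 */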
	ioc2rvol = &ioc2->RaidVolume[bv->bv_volid];
	address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);

	rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
	if (rvol0 == NULL)
		goto fail;

	bv->bv_dev[0] = '\0';
	bv->bv_vendor[0] = '\0';

	periph = scsipi_lookup_periph(&mpt->sc_channel, ioc2rvol->VolumeBus, 0);
	if (periph != NULL) {
		if (periph->periph_dev != NULL) {
			snprintf(bv->bv_dev, sizeof(bv->bv_dev), "%s",
			    device_xname(periph->periph_dev));
		}
		memset(&inqbuf, 0, sizeof(inqbuf));
		if (scsipi_inquire(periph, &inqbuf,
		    XS_CTL_DISCOVERY | XS_CTL_SILENT) == 0) {
			scsipi_strvis(vendor, sizeof(vendor),
			    inqbuf.vendor, sizeof(inqbuf.vendor));
			scsipi_strvis(product, sizeof(product),
			    inqbuf.product, sizeof(inqbuf.product));
			scsipi_strvis(revision, sizeof(revision),
			    inqbuf.revision, sizeof(inqbuf.revision));

			snprintf(bv->bv_vendor, sizeof(bv->bv_vendor),
			    "%s %s %s", vendor, product, revision);
		}
	}
	bv->bv_nodisk = rvol0->NumPhysDisks;
	bv->bv_size = (uint64_t)rvol0->MaxLBA * 512;
	bv->bv_stripe_size = rvol0->StripeSize;
	bv->bv_percent = -1;
	bv->bv_seconds = 0;

	switch (rvol0->VolumeStatus.State) {
	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	switch (ioc2rvol->VolumeType) {
	case MPI_RAID_VOL_TYPE_IS:
		bv->bv_level = 0;
		break;
	case MPI_RAID_VOL_TYPE_IME:
	case MPI_RAID_VOL_TYPE_IM:
		bv->bv_level = 1;
		break;
	default:
		bv->bv_level = -1;
		break;
	}

	free(ioc2, M_DEVBUF);
	free(rvol0, M_DEVBUF);

	return 0;

fail:
	if (ioc2) free(ioc2, M_DEVBUF);
	if (rvol0) free(rvol0, M_DEVBUF);
	return EINVAL;
}

static void
mpt_bio_ioctl_disk_common(mpt_softc_t *mpt, struct bioc_disk *bd,
    int address)
{
	fCONFIG_PAGE_RAID_PHYS_DISK_0 *phys = NULL;
	char vendor_id[9], product_id[17], product_rev_level[5];

	phys = mpt_get_cfg_page_raid_phys_disk0(mpt, address);
	if (phys == NULL)
		return;

	scsipi_strvis(vendor_id, sizeof(vendor_id),
	    phys->InquiryData.VendorID, sizeof(phys->InquiryData.VendorID));
	scsipi_strvis(product_id, sizeof(product_id),
	    phys->InquiryData.ProductID, sizeof(phys->InquiryData.ProductID));
	scsipi_strvis(product_rev_level, sizeof(product_rev_level),
	    phys->InquiryData.ProductRevLevel,
	    sizeof(phys->InquiryData.ProductRevLevel));

	snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s %s",
	    vendor_id, product_id, product_rev_level);
	strlcpy(bd->bd_serial, phys->InquiryData.Info, sizeof(bd->bd_serial));
	bd->bd_procdev[0] = '\0';
	bd->bd_channel = phys->PhysDiskBus;
	bd->bd_target = phys->PhysDiskID;
	bd->bd_lun = 0;
	bd->bd_size = (uint64_t)phys->MaxLBA * 512;

	switch (phys->PhysDiskStatus.State) {
	case MPI_PHYSDISK0_STATUS_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case MPI_PHYSDISK0_STATUS_MISSING:
	case MPI_PHYSDISK0_STATUS_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;
	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;
	case MPI_PHYSDISK0_STATUS_INITIALIZING:
		bd->bd_status = BIOC_SDSCRUB;
		break;
	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	free(phys, M_DEVBUF);
}

static int
mpt_bio_ioctl_disk_novol(mpt_softc_t *mpt, struct bioc_disk *bd)
{
	fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
	fCONFIG_PAGE_IOC_3 *ioc3 = NULL;
	fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
	fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
	int address, v, d;

	ioc2 = mpt_get_cfg_page_ioc2(mpt);
	if (ioc2 == NULL)
		return EIO;
	ioc3 = mpt_get_cfg_page_ioc3(mpt);
	if (ioc3 == NULL) {
		free(ioc2, M_DEVBUF);
		return EIO;
	}

	if (bd->bd_diskid < 0 || bd->bd_diskid >= ioc3->NumPhysDisks)
		goto fail;

	address = ioc3->PhysDisk[bd->bd_diskid].PhysDiskNum;

	mpt_bio_ioctl_disk_common(mpt, bd, address);

	bd->bd_disknovol = true;
	for (v = 0; bd->bd_disknovol && v < ioc2->NumActiveVolumes; v++) {
		ioc2rvol = &ioc2->RaidVolume[v];
		address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);

		rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
		if (rvol0 == NULL)
			continue;

		for (d = 0; d < rvol0->NumPhysDisks; d++) {
			if (rvol0->PhysDisk[d].PhysDiskNum ==
			    ioc3->PhysDisk[bd->bd_diskid].PhysDiskNum) {
				bd->bd_disknovol = false;
				bd->bd_volid = v;
				break;
			}
		}
		free(rvol0, M_DEVBUF);
	}

	free(ioc3, M_DEVBUF);
	free(ioc2, M_DEVBUF);

	return 0;

fail:
	if (ioc3) free(ioc3, M_DEVBUF);
	if (ioc2) free(ioc2, M_DEVBUF);
	return EINVAL;
}


static int
mpt_bio_ioctl_disk(mpt_softc_t *mpt, struct bioc_disk *bd)
{
	fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
	fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
	fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
	int address;

	ioc2 = mpt_get_cfg_page_ioc2(mpt);
	if (ioc2 == NULL)
		return EIO;

	if (bd->bd_volid < 0 || bd->bd_volid >= ioc2->NumActiveVolumes)
		goto fail;

	ioc2rvol = &ioc2->RaidVolume[bd->bd_volid];
	address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);

	rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
	if (rvol0 == NULL)
		goto fail;

	if (bd->bd_diskid < 0 || bd->bd_diskid >= rvol0->NumPhysDisks)
		goto fail;

	address = rvol0->PhysDisk[bd->bd_diskid].PhysDiskNum;

	mpt_bio_ioctl_disk_common(mpt, bd, address);

	free(rvol0, M_DEVBUF);
	free(ioc2, M_DEVBUF);

	return 0;

fail:
	if (rvol0) free(rvol0, M_DEVBUF);
	if (ioc2) free(ioc2, M_DEVBUF);
	return EINVAL;
}

static int
mpt_bio_ioctl_setstate(mpt_softc_t *mpt, struct bioc_setstate *bs)
{
	return ENOTTY;
}
#endif