/*	$NetBSD: mpt_netbsd.c,v 1.17.2.2 2014/11/03 22:05:37 msaitoh Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.17.2.2 2014/11/03 22:05:37 msaitoh Exp $");

#include "bio.h"

#include <dev/ic/mpt.h>			/* pulls in all headers */

#if NBIO > 0
#include <dev/biovar.h>
#include <sys/ioccom.h>
#endif

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);

#if NBIO > 0
static bool	mpt_is_raid(mpt_softc_t *);
static int	mpt_bio_ioctl(device_t, u_long, void *);
static int	mpt_bio_ioctl_inq(mpt_softc_t *, struct bioc_inq *);
static int	mpt_bio_ioctl_vol(mpt_softc_t *, struct bioc_vol *);
static int	mpt_bio_ioctl_disk(mpt_softc_t *, struct bioc_disk *);
static int	mpt_bio_ioctl_disk_novol(mpt_softc_t *, struct bioc_disk *);
static int	mpt_bio_ioctl_setstate(mpt_softc_t *, struct bioc_setstate *);
#endif

void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use */
	adapt->adapt_max_periph = maxq - 2;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	if (mpt->is_sas) {
		chan->chan_bustype = &scsi_sas_bustype;
	} else if (mpt->is_fc) {
		chan->chan_bustype = &scsi_fc_bustype;
	} else {
		chan->chan_bustype = &scsi_bustype;
	}
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	(void) config_found(&mpt->sc_dev, &mpt->sc_channel, scsiprint);

#if NBIO > 0
	if (mpt_is_raid(mpt)) {
		if (bio_register(&mpt->sc_dev, mpt_bio_ioctl) != 0)
			panic("%s: controller registration failed",
			    device_xname(&mpt->sc_dev));
	}
#endif
}

int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to allocate reply area, error = %d\n", error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to map reply area, error = %d\n", error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to create reply DMA map, error = %d\n", error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to load reply DMA map, error = %d\n", error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to allocate request area, error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to map request area, error = %d\n", error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to create request DMA map, error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to load request DMA map, error = %d\n", error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

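	/*
	 * Layout sketch for the carve-up below (actual sizes come from
	 * the MPT_REQUEST_AREA and MPT_SENSE_SIZE definitions in the
	 * driver headers):
	 *
	 *	req_pbuf/req_vbuf ->	+----------------------------+
	 *				| request message + SGLs     |
	 *				+----------------------------+
	 *	sense_pbuf/sense_vbuf ->| sense data (the final       |
	 *				| MPT_SENSE_SIZE bytes)       |
	 *	next request ->		+----------------------------+
	 */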
	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(&mpt->sc_dev,
			    "unable to create req %d DMA map, error = %d\n",
			    i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

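	/*
	 * Note on the reply FIFO encoding (as used below and in
	 * mpt_done()): each word popped is either the MsgContext we
	 * posted with a request, or, when the MPT_CONTEXT_REPLY bit is
	 * set, a reply-frame bus address encoded so that
	 * MPT_REPLY_PTOV() can map it back into our reply area; the
	 * (reply << 1) passed to mpt_free_reply() hands that frame
	 * back to the IOC.
	 */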
	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(&mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt =
	    (void *) periph->periph_channel->chan_adapter->adapt_dev;
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}

static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    le32toh(mpt_reply->MsgContext) & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = le32toh(mpt_reply->MsgContext);
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
	    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
		    : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			    (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus)) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			    (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);
}

static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);
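	/*
	 * Example round trip: for request_pool[5] we post MsgContext 5;
	 * the IOC echoes it back in the completion, and mpt_done()
	 * recovers the request via (reply & MPT_CONTEXT_MASK).
	 * Contexts with the high bit set are reserved for
	 * driver-internal requests (see mpt_ctlop()).
	 */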

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
	    (mpt->mpt_tag_enable &
	    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
	    (mpt->mpt_disc_enable &
	    (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
		    : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
		    : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

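			/*
			 * SGL layout sketch for this chained case: the
			 * first MPT_NSGL_FIRST(mpt) - 1 simple elements
			 * live in the request frame itself, followed by
			 * a chain element whose Address points at the
			 * next run of simple elements further along in
			 * this same request buffer; each full run ends
			 * in another chain element until every dm_segs
			 * entry has been described.
			 */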
			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
					    * sizeof(SGE_SIMPLE32));
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = htole16(ntodo
					    * sizeof(SGE_SIMPLE32));
				}
				ce->Address = htole32(req->req_pbuf +
				    ((char *)se - (char *)mpt_req));
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address = htole32(
					    req->dmap->dm_segs[seg].ds_addr);
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
							    MPI_SGE_FLAGS_END_OF_LIST |
							    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					se->FlagsLength =
					    htole32(se->FlagsLength);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[i].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	if (mpt->is_scsi) {
		/*
		 * SCSI transport settings only make any sense for
		 * SCSI.
		 */

		tmp = mpt->mpt_dev_page1[xm->xm_target];

		/*
		 * Set the wide/narrow parameter for the target.
		 */
		if (xm->xm_mode & PERIPH_CAP_WIDE16)
			tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
		else
			tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

		/*
		 * Set the synchronous parameters for the target.
		 *
		 * XXX If we request sync transfers, we just go ahead and
		 * XXX request the maximum available.  We need finer control
		 * XXX in order to implement Domain Validation.
		 */
		tmp.RequestedParameters &=
		    ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
		    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
		    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
		    MPI_SCSIDEVPAGE1_RP_IU);
		if (xm->xm_mode & PERIPH_CAP_SYNC) {
			int factor, offset, np;

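			/*
			 * Port Page 0 Capabilities carries the minimum
			 * sync period factor in bits 8..15 and the
			 * maximum sync offset in bits 16..23; Device
			 * Page 1 RequestedParameters uses the same
			 * layout (cf. the MPI_SCSIDEVPAGE1_RP_*_MASK
			 * definitions), so factor and offset are
			 * re-packed with the same shifts below.
			 */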
			factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
			offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
			np = 0;
			if (factor < 0x9) {
				/* Ultra320 */
				np |= MPI_SCSIDEVPAGE1_RP_QAS |
				    MPI_SCSIDEVPAGE1_RP_IU;
			}
			if (factor < 0xa) {
				/* at least Ultra160 */
				np |= MPI_SCSIDEVPAGE1_RP_DT;
			}
			np |= (factor << 8) | (offset << 16);
			tmp.RequestedParameters |= np;
		}

		host2mpt_config_page_scsi_device_1(&tmp);
		if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to write Device Page 1");
			return;
		}

		if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
			mpt_prt(mpt, "unable to read back Device Page 1");
			return;
		}

		mpt2host_config_page_scsi_device_1(&tmp);
		mpt->mpt_dev_page1[xm->xm_target] = tmp;
		if (mpt->verbose > 1) {
			mpt_prt(mpt,
			    "SPI Target %d Page 1: RequestedParameters %x Config %x",
			    xm->xm_target,
			    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
			    mpt->mpt_dev_page1[xm->xm_target].Configuration);
		}
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	host2mpt_config_page_scsi_device_0(&tmp);
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}
	mpt2host_config_page_scsi_device_0(&tmp);

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = le32toh(msg->MsgContext) & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (le32toh(msg->Event)) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
1165 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
1166 mpt_prt(mpt, "EvtLogData: Event Data:");
1167 for (i = 0; i < msg->EventDataLength; i++) {
1168 if ((i % 4) == 0)
1169 printf("%s:\t", device_xname(&mpt->sc_dev));
1170 printf("0x%08x%c", msg->Data[i],
1171 ((i % 4) == 3) ? '\n' : ' ');
1172 }
1173 if ((i % 4) != 0)
1174 printf("\n");
1175 break;
1176 }
1177
1178 case MPI_EVENT_UNIT_ATTENTION:
1179 mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
1180 (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
1181 break;
1182
1183 case MPI_EVENT_IOC_BUS_RESET:
1184 /* We generated a bus reset. */
1185 mpt_prt(mpt, "IOC Bus Reset Port %d",
1186 (msg->Data[0] >> 8) & 0xff);
1187 break;
1188
1189 case MPI_EVENT_EXT_BUS_RESET:
1190 /* Someone else generated a bus reset. */
1191 mpt_prt(mpt, "External Bus Reset");
1192 /*
1193 * These replies don't return EventData like the MPI
1194 * spec says they do.
1195 */
1196 /* XXX Send an async event? */
1197 break;
1198
1199 case MPI_EVENT_RESCAN:
1200 /*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%0x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	case MPI_EVENT_QUEUE_FULL:
		/* This can get a little chatty. */
		if (mpt->verbose > 0)
			mpt_prt(mpt, "Queue Full Event");
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = htole32(req->index | 0x80000000);
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = (void *) adapt->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

	/*
	 * Subtract one from the SGL limit, since we need an extra one to handle
	 * a non-page-aligned transfer.
	 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)
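	/*
	 * Worked example (hypothetical values): if MPT_SGL_MAX were 64
	 * and PAGE_SIZE 4096, MPT_MAX_XFER would be 63 * 4096 = 258048
	 * bytes, leaving one SGL entry of headroom for a transfer that
	 * starts mid-page.
	 */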

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}

#if NBIO > 0
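/*
 * The helpers below share one two-step MPI config page read pattern:
 * fetch the page header first to learn PageLength (counted in 32-bit
 * words, hence the "* 4" in the allocations), then allocate a buffer,
 * seed it with the header, and read the full page into it.
 */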
static fCONFIG_PAGE_IOC_2 *
mpt_get_cfg_page_ioc2(mpt_softc_t *mpt)
{
	fCONFIG_PAGE_HEADER hdr;
	fCONFIG_PAGE_IOC_2 *ioc2;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 2, 0, &hdr);
	if (rv)
		return NULL;

	ioc2 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
	if (ioc2 == NULL)
		return NULL;

	memcpy(ioc2, &hdr, sizeof(hdr));

	rv = mpt_read_cfg_page(mpt, 0, &ioc2->Header);
	if (rv)
		goto fail;
	mpt2host_config_page_ioc_2(ioc2);

	return ioc2;

 fail:
	free(ioc2, M_DEVBUF);
	return NULL;
}

static fCONFIG_PAGE_IOC_3 *
mpt_get_cfg_page_ioc3(mpt_softc_t *mpt)
{
	fCONFIG_PAGE_HEADER hdr;
	fCONFIG_PAGE_IOC_3 *ioc3;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 3, 0, &hdr);
	if (rv)
		return NULL;

	ioc3 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
	if (ioc3 == NULL)
		return NULL;

	memcpy(ioc3, &hdr, sizeof(hdr));

	rv = mpt_read_cfg_page(mpt, 0, &ioc3->Header);
	if (rv)
		goto fail;

	return ioc3;

 fail:
	free(ioc3, M_DEVBUF);
	return NULL;
}


static fCONFIG_PAGE_RAID_VOL_0 *
mpt_get_cfg_page_raid_vol0(mpt_softc_t *mpt, int address)
{
	fCONFIG_PAGE_HEADER hdr;
	fCONFIG_PAGE_RAID_VOL_0 *rvol0;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
	    address, &hdr);
	if (rv)
		return NULL;

	rvol0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
	if (rvol0 == NULL)
		return NULL;

	memcpy(rvol0, &hdr, sizeof(hdr));

	rv = mpt_read_cfg_page(mpt, address, &rvol0->Header);
	if (rv)
		goto fail;
	mpt2host_config_page_raid_vol_0(rvol0);

	return rvol0;

 fail:
	free(rvol0, M_DEVBUF);
	return NULL;
}

static fCONFIG_PAGE_RAID_PHYS_DISK_0 *
mpt_get_cfg_page_raid_phys_disk0(mpt_softc_t *mpt, int address)
{
	fCONFIG_PAGE_HEADER hdr;
	fCONFIG_PAGE_RAID_PHYS_DISK_0 *physdisk0;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK, 0,
	    address, &hdr);
	if (rv)
		return NULL;

	physdisk0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
	if (physdisk0 == NULL)
		return NULL;

	memcpy(physdisk0, &hdr, sizeof(hdr));

	rv = mpt_read_cfg_page(mpt, address, &physdisk0->Header);
	if (rv)
		goto fail;
	mpt2host_config_page_raid_phys_disk_0(physdisk0);

	return physdisk0;

 fail:
	free(physdisk0, M_DEVBUF);
	return NULL;
}

static bool
mpt_is_raid(mpt_softc_t *mpt)
{
	fCONFIG_PAGE_IOC_2 *ioc2;
	bool is_raid = false;

	ioc2 = mpt_get_cfg_page_ioc2(mpt);
	if (ioc2 == NULL)
		return false;

	if (ioc2->CapabilitiesFlags != 0xdeadbeef) {
		is_raid = !!(ioc2->CapabilitiesFlags &
		    (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT|
		     MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT|
		     MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT));
	}

	free(ioc2, M_DEVBUF);

	return is_raid;
}

static int
mpt_bio_ioctl(device_t dev, u_long cmd, void *addr)
{
	mpt_softc_t *mpt = device_private(dev);
	int error, s;

	KERNEL_LOCK(1, curlwp);
	s = splbio();

	switch (cmd) {
	case BIOCINQ:
		error = mpt_bio_ioctl_inq(mpt, addr);
		break;
	case BIOCVOL:
		error = mpt_bio_ioctl_vol(mpt, addr);
		break;
	case BIOCDISK_NOVOL:
		error = mpt_bio_ioctl_disk_novol(mpt, addr);
		break;
	case BIOCDISK:
		error = mpt_bio_ioctl_disk(mpt, addr);
		break;
	case BIOCSETSTATE:
		error = mpt_bio_ioctl_setstate(mpt, addr);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);
	KERNEL_UNLOCK_ONE(curlwp);

	return error;
}

static int
mpt_bio_ioctl_inq(mpt_softc_t *mpt, struct bioc_inq *bi)
{
	fCONFIG_PAGE_IOC_2 *ioc2;
	fCONFIG_PAGE_IOC_3 *ioc3;

	ioc2 = mpt_get_cfg_page_ioc2(mpt);
	if (ioc2 == NULL)
		return EIO;
	ioc3 = mpt_get_cfg_page_ioc3(mpt);
	if (ioc3 == NULL) {
		free(ioc2, M_DEVBUF);
		return EIO;
	}

	strlcpy(bi->bi_dev, device_xname(&mpt->sc_dev), sizeof(bi->bi_dev));
	bi->bi_novol = ioc2->NumActiveVolumes;
	bi->bi_nodisk = ioc3->NumPhysDisks;

	free(ioc2, M_DEVBUF);
	free(ioc3, M_DEVBUF);

	return 0;
}

static int
mpt_bio_ioctl_vol(mpt_softc_t *mpt, struct bioc_vol *bv)
{
	fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
	fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
	fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data inqbuf;
	char vendor[9], product[17], revision[5];
	int address;

	ioc2 = mpt_get_cfg_page_ioc2(mpt);
	if (ioc2 == NULL)
		return EIO;

	if (bv->bv_volid < 0 || bv->bv_volid >= ioc2->NumActiveVolumes)
		goto fail;

	ioc2rvol = &ioc2->RaidVolume[bv->bv_volid];
	address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);

	rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
	if (rvol0 == NULL)
		goto fail;

	bv->bv_dev[0] = '\0';
	bv->bv_vendor[0] = '\0';

	periph = scsipi_lookup_periph(&mpt->sc_channel, ioc2rvol->VolumeBus, 0);
	if (periph != NULL) {
		if (periph->periph_dev != NULL) {
			snprintf(bv->bv_dev, sizeof(bv->bv_dev), "%s",
			    device_xname(periph->periph_dev));
		}
		memset(&inqbuf, 0, sizeof(inqbuf));
		if (scsipi_inquire(periph, &inqbuf,
		    XS_CTL_DISCOVERY | XS_CTL_SILENT) == 0) {
			scsipi_strvis(vendor, sizeof(vendor),
			    inqbuf.vendor, sizeof(inqbuf.vendor));
			scsipi_strvis(product, sizeof(product),
			    inqbuf.product, sizeof(inqbuf.product));
			scsipi_strvis(revision, sizeof(revision),
			    inqbuf.revision, sizeof(inqbuf.revision));

			snprintf(bv->bv_vendor, sizeof(bv->bv_vendor),
			    "%s %s %s", vendor, product, revision);
		}
	}
	bv->bv_nodisk = rvol0->NumPhysDisks;
	bv->bv_size = (uint64_t)rvol0->MaxLBA * 512;
	bv->bv_stripe_size = rvol0->StripeSize;
	bv->bv_percent = -1;
	bv->bv_seconds = 0;

	switch (rvol0->VolumeStatus.State) {
	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	switch (ioc2rvol->VolumeType) {
	case MPI_RAID_VOL_TYPE_IS:
		bv->bv_level = 0;
		break;
	case MPI_RAID_VOL_TYPE_IME:
	case MPI_RAID_VOL_TYPE_IM:
		bv->bv_level = 1;
		break;
	default:
		bv->bv_level = -1;
		break;
	}

	free(ioc2, M_DEVBUF);
	free(rvol0, M_DEVBUF);

	return 0;

 fail:
	if (ioc2) free(ioc2, M_DEVBUF);
	if (rvol0) free(rvol0, M_DEVBUF);
	return EINVAL;
}

static void
mpt_bio_ioctl_disk_common(mpt_softc_t *mpt, struct bioc_disk *bd,
    int address)
{
	fCONFIG_PAGE_RAID_PHYS_DISK_0 *phys = NULL;
	char vendor[9], product[17], revision[5];

	phys = mpt_get_cfg_page_raid_phys_disk0(mpt, address);
	if (phys == NULL)
		return;

	scsipi_strvis(vendor, sizeof(vendor),
	    phys->InquiryData.VendorID, sizeof(phys->InquiryData.VendorID));
	scsipi_strvis(product, sizeof(product),
	    phys->InquiryData.ProductID, sizeof(phys->InquiryData.ProductID));
	scsipi_strvis(revision, sizeof(revision),
	    phys->InquiryData.ProductRevLevel,
	    sizeof(phys->InquiryData.ProductRevLevel));

	snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s %s",
	    vendor, product, revision);
	strlcpy(bd->bd_serial, phys->InquiryData.Info, sizeof(bd->bd_serial));
	bd->bd_procdev[0] = '\0';
	bd->bd_channel = phys->PhysDiskBus;
	bd->bd_target = phys->PhysDiskID;
	bd->bd_lun = 0;
	bd->bd_size = (uint64_t)phys->MaxLBA * 512;

	switch (phys->PhysDiskStatus.State) {
	case MPI_PHYSDISK0_STATUS_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case MPI_PHYSDISK0_STATUS_MISSING:
	case MPI_PHYSDISK0_STATUS_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;
	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;
	case MPI_PHYSDISK0_STATUS_INITIALIZING:
		bd->bd_status = BIOC_SDSCRUB;
		break;
	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	free(phys, M_DEVBUF);
}

static int
mpt_bio_ioctl_disk_novol(mpt_softc_t *mpt, struct bioc_disk *bd)
{
	fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
	fCONFIG_PAGE_IOC_3 *ioc3 = NULL;
	fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
	fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
	int address, v, d;

	ioc2 = mpt_get_cfg_page_ioc2(mpt);
	if (ioc2 == NULL)
		return EIO;
	ioc3 = mpt_get_cfg_page_ioc3(mpt);
	if (ioc3 == NULL) {
		free(ioc2, M_DEVBUF);
		return EIO;
	}

	if (bd->bd_diskid < 0 || bd->bd_diskid >= ioc3->NumPhysDisks)
		goto fail;

	address = ioc3->PhysDisk[bd->bd_diskid].PhysDiskNum;

	mpt_bio_ioctl_disk_common(mpt, bd, address);

	bd->bd_disknovol = true;
	for (v = 0; bd->bd_disknovol && v < ioc2->NumActiveVolumes; v++) {
		ioc2rvol = &ioc2->RaidVolume[v];
		address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);

		rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
		if (rvol0 == NULL)
			continue;

		for (d = 0; d < rvol0->NumPhysDisks; d++) {
			if (rvol0->PhysDisk[d].PhysDiskNum ==
			    ioc3->PhysDisk[bd->bd_diskid].PhysDiskNum) {
				bd->bd_disknovol = false;
				bd->bd_volid = v;
				break;
			}
		}
		free(rvol0, M_DEVBUF);
	}

	free(ioc3, M_DEVBUF);
	free(ioc2, M_DEVBUF);

	return 0;

 fail:
	if (ioc3) free(ioc3, M_DEVBUF);
	if (ioc2) free(ioc2, M_DEVBUF);
	return EINVAL;
}


static int
mpt_bio_ioctl_disk(mpt_softc_t *mpt, struct bioc_disk *bd)
{
	fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
	fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
	fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
	int address;

	ioc2 = mpt_get_cfg_page_ioc2(mpt);
	if (ioc2 == NULL)
		return EIO;

	if (bd->bd_volid < 0 || bd->bd_volid >= ioc2->NumActiveVolumes)
		goto fail;

	ioc2rvol = &ioc2->RaidVolume[bd->bd_volid];
	address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);

	rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
	if (rvol0 == NULL)
		goto fail;

	if (bd->bd_diskid < 0 || bd->bd_diskid >= rvol0->NumPhysDisks)
		goto fail;

	address = rvol0->PhysDisk[bd->bd_diskid].PhysDiskNum;

	mpt_bio_ioctl_disk_common(mpt, bd, address);

	free(rvol0, M_DEVBUF);
	free(ioc2, M_DEVBUF);

	return 0;

 fail:
	if (rvol0) free(rvol0, M_DEVBUF);
	if (ioc2) free(ioc2, M_DEVBUF);
	return EINVAL;
}

static int
mpt_bio_ioctl_setstate(mpt_softc_t *mpt, struct bioc_setstate *bs)
{
	return ENOTTY;
}
#endif