1 /* $NetBSD: mpt_netbsd.c,v 1.18.2.4 2017/12/03 11:37:03 jdolecek Exp $ */
2
3 /*
4 * Copyright (c) 2003 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 2000, 2001 by Greg Ansley
40 * Partially derived from Matt Jacob's ISP driver.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice immediately at the beginning of the file, without modification,
47 * this list of conditions, and the following disclaimer.
48 * 2. The name of the author may not be used to endorse or promote products
49 * derived from this software without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
55 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE.
62 */
63 /*
64 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
65 */
66
67 /*
68 * mpt_netbsd.c:
69 *
70 * NetBSD-specific routines for LSI Fusion adapters. Includes some
71 * bus_dma glue, and SCSIPI glue.
72 *
73 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
74 * Wasabi Systems, Inc.
75 *
76 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
77 */
78
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.18.2.4 2017/12/03 11:37:03 jdolecek Exp $");
81
82 #include "bio.h"
83
84 #include <dev/ic/mpt.h> /* pulls in all headers */
85 #include <sys/scsiio.h>
86
87 #if NBIO > 0
88 #include <dev/biovar.h>
89 #endif
90
91 static int mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
92 static void mpt_timeout(void *);
93 static void mpt_restart(mpt_softc_t *, request_t *);
94 static void mpt_done(mpt_softc_t *, uint32_t);
95 static int mpt_drain_queue(mpt_softc_t *);
96 static void mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
97 static void mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
98 static void mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
99 static void mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
100 static void mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);
101 static void mpt_bus_reset(mpt_softc_t *);
102
103 static void mpt_scsipi_request(struct scsipi_channel *,
104 scsipi_adapter_req_t, void *);
105 static void mpt_minphys(struct buf *);
106 static int mpt_ioctl(struct scsipi_channel *, u_long, void *, int,
107 struct proc *);
108
109 #if NBIO > 0
110 static bool mpt_is_raid(mpt_softc_t *);
111 static int mpt_bio_ioctl(device_t, u_long, void *);
112 static int mpt_bio_ioctl_inq(mpt_softc_t *, struct bioc_inq *);
113 static int mpt_bio_ioctl_vol(mpt_softc_t *, struct bioc_vol *);
114 static int mpt_bio_ioctl_disk(mpt_softc_t *, struct bioc_disk *);
115 static int mpt_bio_ioctl_disk_novol(mpt_softc_t *, struct bioc_disk *);
116 static int mpt_bio_ioctl_setstate(mpt_softc_t *, struct bioc_setstate *);
117 #endif
118
119 void
120 mpt_scsipi_attach(mpt_softc_t *mpt)
121 {
122 struct scsipi_adapter *adapt = &mpt->sc_adapter;
123 struct scsipi_channel *chan = &mpt->sc_channel;
124 int maxq;
125
126 mpt->bus = 0; /* XXX ?? */
127
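/* Never advertise more openings than the IOC has granted us request credits for. */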
128 maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
129 mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
130
131 /* Fill in the scsipi_adapter. */
132 memset(adapt, 0, sizeof(*adapt));
133 adapt->adapt_dev = mpt->sc_dev;
134 adapt->adapt_nchannels = 1;
135 	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use */
136 adapt->adapt_max_periph = maxq - 2;
137 adapt->adapt_request = mpt_scsipi_request;
138 adapt->adapt_minphys = mpt_minphys;
139 adapt->adapt_ioctl = mpt_ioctl;
140
141 /* Fill in the scsipi_channel. */
142 memset(chan, 0, sizeof(*chan));
143 chan->chan_adapter = adapt;
144 if (mpt->is_sas) {
145 chan->chan_bustype = &scsi_sas_bustype;
146 } else if (mpt->is_fc) {
147 chan->chan_bustype = &scsi_fc_bustype;
148 } else {
149 chan->chan_bustype = &scsi_bustype;
150 }
151 chan->chan_channel = 0;
152 chan->chan_flags = 0;
153 chan->chan_nluns = 8;
154 chan->chan_ntargets = mpt->mpt_max_devices;
155 chan->chan_id = mpt->mpt_ini_id;
156
157 	/*
158 	 * Save the child device returned by config_found() so we can
159 	 * rescan the bus after an error.
160 	 */
161 mpt->sc_scsibus_dv = config_found(mpt->sc_dev, &mpt->sc_channel,
162 scsiprint);
163
164 #if NBIO > 0
165 if (mpt_is_raid(mpt)) {
166 if (bio_register(mpt->sc_dev, mpt_bio_ioctl) != 0)
167 panic("%s: controller registration failed",
168 device_xname(mpt->sc_dev));
169 }
170 #endif
171 }
172
173 int
174 mpt_dma_mem_alloc(mpt_softc_t *mpt)
175 {
176 bus_dma_segment_t reply_seg, request_seg;
177 int reply_rseg, request_rseg;
178 bus_addr_t pptr, end;
179 char *vptr;
180 size_t len;
181 int error, i;
182
183 /* Check if we have already allocated the reply memory. */
184 if (mpt->reply != NULL)
185 return (0);
186
187 /*
188 * Allocate the request pool. This isn't really DMA'd memory,
189 * but it's a convenient place to do it.
190 */
191 len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
192 mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
193 if (mpt->request_pool == NULL) {
194 aprint_error_dev(mpt->sc_dev, "unable to allocate request pool\n");
195 return (ENOMEM);
196 }
197
198 /*
199 * Allocate DMA resources for reply buffers.
200 */
201 error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
202 &reply_seg, 1, &reply_rseg, 0);
203 if (error) {
204 aprint_error_dev(mpt->sc_dev, "unable to allocate reply area, error = %d\n",
205 error);
206 goto fail_0;
207 }
208
209 error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
210 (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
211 if (error) {
212 aprint_error_dev(mpt->sc_dev, "unable to map reply area, error = %d\n",
213 error);
214 goto fail_1;
215 }
216
217 error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
218 0, 0, &mpt->reply_dmap);
219 if (error) {
220 aprint_error_dev(mpt->sc_dev, "unable to create reply DMA map, error = %d\n",
221 error);
222 goto fail_2;
223 }
224
225 error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
226 PAGE_SIZE, NULL, 0);
227 if (error) {
228 aprint_error_dev(mpt->sc_dev, "unable to load reply DMA map, error = %d\n",
229 error);
230 goto fail_3;
231 }
232 mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;
233
234 /*
235 * Allocate DMA resources for request buffers.
236 */
237 error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
238 PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
239 if (error) {
240 aprint_error_dev(mpt->sc_dev, "unable to allocate request area, "
241 "error = %d\n", error);
242 goto fail_4;
243 }
244
245 error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
246 MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
247 if (error) {
248 aprint_error_dev(mpt->sc_dev, "unable to map request area, error = %d\n",
249 error);
250 goto fail_5;
251 }
252
253 error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
254 MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
255 if (error) {
256 aprint_error_dev(mpt->sc_dev, "unable to create request DMA map, "
257 "error = %d\n", error);
258 goto fail_6;
259 }
260
261 error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
262 MPT_REQ_MEM_SIZE(mpt), NULL, 0);
263 if (error) {
264 aprint_error_dev(mpt->sc_dev, "unable to load request DMA map, error = %d\n",
265 error);
266 goto fail_7;
267 }
268 mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;
269
270 pptr = mpt->request_phys;
271 vptr = (void *) mpt->request;
272 end = pptr + MPT_REQ_MEM_SIZE(mpt);
273
274 for (i = 0; pptr < end; i++) {
275 request_t *req = &mpt->request_pool[i];
276 req->index = i;
277
278 /* Store location of Request Data */
279 req->req_pbuf = pptr;
280 req->req_vbuf = vptr;
281
282 pptr += MPT_REQUEST_AREA;
283 vptr += MPT_REQUEST_AREA;
284
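/* The sense buffer occupies the last MPT_SENSE_SIZE bytes of each request slot. */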
285 req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
286 req->sense_vbuf = (vptr - MPT_SENSE_SIZE);
287
288 error = bus_dmamap_create(mpt->sc_dmat,
289 MPT_SGL_MAX * PAGE_SIZE ,
290 MPT_SGL_MAX,
291 MPT_SGL_MAX * PAGE_SIZE,
292 0, 0, &req->dmap);
293 if (error) {
294 aprint_error_dev(mpt->sc_dev, "unable to create req %d DMA map, "
295 "error = %d\n", i, error);
296 goto fail_8;
297 }
298 }
299
300 return (0);
301
302 fail_8:
303 for (--i; i >= 0; i--) {
304 request_t *req = &mpt->request_pool[i];
305 if (req->dmap != NULL)
306 bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
307 }
308 bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
309 fail_7:
310 bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
311 fail_6:
312 	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request, MPT_REQ_MEM_SIZE(mpt));
313 fail_5:
314 bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
315 fail_4:
316 bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
317 fail_3:
318 bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
319 fail_2:
320 bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
321 fail_1:
322 bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
323 fail_0:
324 free(mpt->request_pool, M_DEVBUF);
325
326 mpt->reply = NULL;
327 mpt->request = NULL;
328 mpt->request_pool = NULL;
329
330 return (error);
331 }
332
333 int
334 mpt_intr(void *arg)
335 {
336 mpt_softc_t *mpt = arg;
337 int nrepl = 0;
338
339 if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
340 return (0);
341
342 nrepl = mpt_drain_queue(mpt);
343 return (nrepl != 0);
344 }
345
346 void
347 mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
348 {
349 va_list ap;
350
351 printf("%s: ", device_xname(mpt->sc_dev));
352 va_start(ap, fmt);
353 vprintf(fmt, ap);
354 va_end(ap);
355 printf("\n");
356 }
357
358 static int
359 mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
360 {
361
362 /* Timeouts are in msec, so we loop in 1000usec cycles */
363 while (count) {
364 mpt_intr(mpt);
365 if (xs->xs_status & XS_STS_DONE)
366 return (0);
367 delay(1000); /* only happens in boot, so ok */
368 count--;
369 }
370 return (1);
371 }
372
373 static void
374 mpt_timeout(void *arg)
375 {
376 request_t *req = arg;
377 struct scsipi_xfer *xs;
378 struct scsipi_periph *periph;
379 mpt_softc_t *mpt;
380 uint32_t oseq;
381 int s, nrepl = 0;
382
383 if (req->xfer == NULL) {
384 		printf("mpt_timeout: NULL xfer for request index 0x%x, sequence 0x%x\n",
385 req->index, req->sequence);
386 return;
387 }
388 xs = req->xfer;
389 periph = xs->xs_periph;
390 mpt = device_private(periph->periph_channel->chan_adapter->adapt_dev);
391 scsipi_printaddr(periph);
392 printf("command timeout\n");
393
394 s = splbio();
395
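/* Remember the sequence number so we can tell whether this request completes while we process pending replies below. */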
396 oseq = req->sequence;
397 mpt->timeouts++;
398 if (mpt_intr(mpt)) {
399 if (req->sequence != oseq) {
400 mpt->success++;
401 mpt_prt(mpt, "recovered from command timeout");
402 splx(s);
403 return;
404 }
405 }
406
407 /*
408 * Ensure the IOC is really done giving us data since it appears it can
409 * sometimes fail to give us interrupts under heavy load.
410 */
411 nrepl = mpt_drain_queue(mpt);
412 	if (nrepl) {
413 		mpt_prt(mpt, "mpt_timeout: recovered %d commands", nrepl);
414 }
415
416 if (req->sequence != oseq) {
417 mpt->success++;
418 splx(s);
419 return;
420 }
421
422 mpt_prt(mpt,
423 "timeout on request index = 0x%x, seq = 0x%08x",
424 req->index, req->sequence);
425 mpt_check_doorbell(mpt);
426 mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
427 mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
428 mpt_read(mpt, MPT_OFFSET_INTR_MASK),
429 mpt_read(mpt, MPT_OFFSET_DOORBELL));
430 mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
431 if (mpt->verbose > 1)
432 mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);
433
434 xs->error = XS_TIMEOUT;
435 splx(s);
436 mpt_restart(mpt, req);
437 }
438
439 static void
440 mpt_restart(mpt_softc_t *mpt, request_t *req0)
441 {
442 int i, s, nreq;
443 request_t *req;
444 struct scsipi_xfer *xs;
445
446 	/* First, reset the IOC, leaving it stopped so all requests are idle. */
447 if (mpt_soft_reset(mpt) != MPT_OK) {
448 mpt_prt(mpt, "soft reset failed");
449 /*
450 * Don't try a hard reset since this mangles the PCI
451 * configuration registers.
452 */
453 return;
454 }
455
456 /* Freeze the channel so scsipi doesn't queue more commands. */
457 scsipi_channel_freeze(&mpt->sc_channel, 1);
458
459 /* Return all pending requests to scsipi and de-allocate them. */
460 s = splbio();
461 nreq = 0;
462 for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
463 req = &mpt->request_pool[i];
464 xs = req->xfer;
465 if (xs != NULL) {
466 if (xs->datalen != 0)
467 bus_dmamap_unload(mpt->sc_dmat, req->dmap);
468 req->xfer = NULL;
469 callout_stop(&xs->xs_callout);
470 if (req != req0) {
471 nreq++;
472 xs->error = XS_REQUEUE;
473 }
474 scsipi_done(xs);
475 			/*
476 			 * mpt_init() below will reinitialize the request
477 			 * pool, but release this request now as well.
478 			 */
479 			mpt_free_request(mpt, req);
480 }
481 }
482 splx(s);
483 if (nreq > 0)
484 mpt_prt(mpt, "re-queued %d requests", nreq);
485
486 /* Re-initialize the IOC (which restarts it). */
487 if (mpt_init(mpt, MPT_DB_INIT_HOST) == 0)
488 mpt_prt(mpt, "restart succeeded");
489 /* else error message already printed */
490
491 /* Thaw the channel, causing scsipi to re-queue the commands. */
492 scsipi_channel_thaw(&mpt->sc_channel, 1);
493 }
494
495 static int
496 mpt_drain_queue(mpt_softc_t *mpt)
497 {
498 int nrepl = 0;
499 uint32_t reply;
500
501 reply = mpt_pop_reply_queue(mpt);
502 while (reply != MPT_REPLY_EMPTY) {
503 nrepl++;
504 if (mpt->verbose > 1) {
505 if ((reply & MPT_CONTEXT_REPLY) != 0) {
506 /* Address reply; IOC has something to say */
507 mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
508 } else {
509 /* Context reply; all went well */
510 mpt_prt(mpt, "context %u reply OK", reply);
511 }
512 }
513 mpt_done(mpt, reply);
514 reply = mpt_pop_reply_queue(mpt);
515 }
516 return (nrepl);
517 }
518
519 static void
520 mpt_done(mpt_softc_t *mpt, uint32_t reply)
521 {
522 struct scsipi_xfer *xs = NULL;
523 struct scsipi_periph *periph;
524 int index;
525 request_t *req;
526 MSG_REQUEST_HEADER *mpt_req;
527 MSG_SCSI_IO_REPLY *mpt_reply;
528 	int restart = 0; /* nonzero if we need to restart the IOC */
529
530 if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
531 /* context reply (ok) */
532 mpt_reply = NULL;
533 index = reply & MPT_CONTEXT_MASK;
534 } else {
535 /* address reply (error) */
536
537 /* XXX BUS_DMASYNC_POSTREAD XXX */
538 mpt_reply = MPT_REPLY_PTOV(mpt, reply);
539 if (mpt_reply != NULL) {
540 if (mpt->verbose > 1) {
541 uint32_t *pReply = (uint32_t *) mpt_reply;
542
543 mpt_prt(mpt, "Address Reply (index %u):",
544 le32toh(mpt_reply->MsgContext) & 0xffff);
545 mpt_prt(mpt, "%08x %08x %08x %08x", pReply[0],
546 pReply[1], pReply[2], pReply[3]);
547 mpt_prt(mpt, "%08x %08x %08x %08x", pReply[4],
548 pReply[5], pReply[6], pReply[7]);
549 mpt_prt(mpt, "%08x %08x %08x %08x", pReply[8],
550 pReply[9], pReply[10], pReply[11]);
551 }
552 index = le32toh(mpt_reply->MsgContext);
553 } else
554 index = reply & MPT_CONTEXT_MASK;
555 }
556
557 /*
558 * Address reply with MessageContext high bit set.
559 * This is most likely a notify message, so we try
560 * to process it, then free it.
561 */
562 if (__predict_false((index & 0x80000000) != 0)) {
563 if (mpt_reply != NULL)
564 mpt_ctlop(mpt, mpt_reply, reply);
565 else
566 mpt_prt(mpt, "%s: index 0x%x, NULL reply", __func__,
567 index);
568 return;
569 }
570
571 /* Did we end up with a valid index into the table? */
572 if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
573 mpt_prt(mpt, "%s: invalid index (0x%x) in reply", __func__,
574 index);
575 return;
576 }
577
578 req = &mpt->request_pool[index];
579
580 /* Make sure memory hasn't been trashed. */
581 if (__predict_false(req->index != index)) {
582 mpt_prt(mpt, "%s: corrupted request_t (0x%x)", __func__,
583 index);
584 return;
585 }
586
587 MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
588 mpt_req = req->req_vbuf;
589
590 /* Short cut for task management replies; nothing more for us to do. */
591 if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
592 if (mpt->verbose > 1)
593 mpt_prt(mpt, "%s: TASK MGMT", __func__);
594 KASSERT(req == mpt->mngt_req);
595 mpt->mngt_req = NULL;
596 goto done;
597 }
598
599 if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
600 goto done;
601
602 /*
603 * At this point, it had better be a SCSI I/O command, but don't
604 * crash if it isn't.
605 */
606 if (__predict_false(mpt_req->Function !=
607 MPI_FUNCTION_SCSI_IO_REQUEST)) {
608 if (mpt->verbose > 1)
609 mpt_prt(mpt, "%s: unknown Function 0x%x (0x%x)",
610 __func__, mpt_req->Function, index);
611 goto done;
612 }
613
614 /* Recover scsipi_xfer from the request structure. */
615 xs = req->xfer;
616
617 /* Can't have a SCSI command without a scsipi_xfer. */
618 if (__predict_false(xs == NULL)) {
619 mpt_prt(mpt,
620 "%s: no scsipi_xfer, index = 0x%x, seq = 0x%08x", __func__,
621 req->index, req->sequence);
622 mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
623 mpt_prt(mpt, "mpt_request:");
624 mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);
625
626 if (mpt_reply != NULL) {
627 mpt_prt(mpt, "mpt_reply:");
628 mpt_print_reply(mpt_reply);
629 } else {
630 mpt_prt(mpt, "context reply: 0x%08x", reply);
631 }
632 goto done;
633 }
634
635 callout_stop(&xs->xs_callout);
636
637 periph = xs->xs_periph;
638
639 /*
640 * If we were a data transfer, unload the map that described
641 * the data buffer.
642 */
643 if (__predict_true(xs->datalen != 0)) {
644 bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
645 req->dmap->dm_mapsize,
646 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
647 : BUS_DMASYNC_POSTWRITE);
648 bus_dmamap_unload(mpt->sc_dmat, req->dmap);
649 }
650
651 if (__predict_true(mpt_reply == NULL)) {
652 /*
653 * Context reply; report that the command was
654 * successful!
655 *
656 * Also report the xfer mode, if necessary.
657 */
658 if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
659 if ((mpt->mpt_report_xfer_mode &
660 (1 << periph->periph_target)) != 0)
661 mpt_get_xfer_mode(mpt, periph);
662 }
663 xs->error = XS_NOERROR;
664 xs->status = SCSI_OK;
665 xs->resid = 0;
666 mpt_free_request(mpt, req);
667 scsipi_done(xs);
668 return;
669 }
670
671 xs->status = mpt_reply->SCSIStatus;
672 switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) {
673 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
674 xs->error = XS_DRIVER_STUFFUP;
675 mpt_prt(mpt, "%s: IOC overrun!", __func__);
676 break;
677
678 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
679 /*
680 * Yikes! Tagged queue full comes through this path!
681 *
682 * So we'll change it to a status error and anything
683 * that returns status should probably be a status
684 * error as well.
685 */
686 xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
687 if (mpt_reply->SCSIState &
688 MPI_SCSI_STATE_NO_SCSI_STATUS) {
689 xs->error = XS_DRIVER_STUFFUP;
690 break;
691 }
692 /* FALLTHROUGH */
693 case MPI_IOCSTATUS_SUCCESS:
694 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
695 switch (xs->status) {
696 case SCSI_OK:
697 /* Report the xfer mode, if necessary. */
698 if ((mpt->mpt_report_xfer_mode &
699 (1 << periph->periph_target)) != 0)
700 mpt_get_xfer_mode(mpt, periph);
701 xs->resid = 0;
702 break;
703
704 case SCSI_CHECK:
705 xs->error = XS_SENSE;
706 break;
707
708 case SCSI_BUSY:
709 case SCSI_QUEUE_FULL:
710 xs->error = XS_BUSY;
711 break;
712
713 default:
714 scsipi_printaddr(periph);
715 printf("invalid status code %d\n", xs->status);
716 xs->error = XS_DRIVER_STUFFUP;
717 break;
718 }
719 break;
720
721 case MPI_IOCSTATUS_BUSY:
722 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
723 xs->error = XS_RESOURCE_SHORTAGE;
724 break;
725
726 case MPI_IOCSTATUS_SCSI_INVALID_BUS:
727 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
728 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
729 xs->error = XS_SELTIMEOUT;
730 break;
731
732 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
733 xs->error = XS_DRIVER_STUFFUP;
734 mpt_prt(mpt, "%s: IOC SCSI residual mismatch!", __func__);
735 restart = 1;
736 break;
737
738 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
739 /* XXX What should we do here? */
740 mpt_prt(mpt, "%s: IOC SCSI task terminated!", __func__);
741 restart = 1;
742 break;
743
744 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
745 /* XXX */
746 xs->error = XS_DRIVER_STUFFUP;
747 mpt_prt(mpt, "%s: IOC SCSI task failed!", __func__);
748 restart = 1;
749 break;
750
751 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
752 /* XXX */
753 xs->error = XS_DRIVER_STUFFUP;
754 mpt_prt(mpt, "%s: IOC task terminated!", __func__);
755 restart = 1;
756 break;
757
758 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
759 /* XXX This is a bus-reset */
760 xs->error = XS_DRIVER_STUFFUP;
761 mpt_prt(mpt, "%s: IOC SCSI bus reset!", __func__);
762 restart = 1;
763 break;
764
765 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
766 /*
767 * FreeBSD and Linux indicate this is a phase error between
768 * the IOC and the drive itself. When this happens, the IOC
769 * becomes unhappy and stops processing all transactions.
770 * Call mpt_timeout which knows how to get the IOC back
771 * on its feet.
772 */
773 mpt_prt(mpt, "%s: IOC indicates protocol error -- "
774 "recovering...", __func__);
775 xs->error = XS_TIMEOUT;
776 restart = 1;
777
778 break;
779
780 default:
781 /* XXX unrecognized HBA error */
782 xs->error = XS_DRIVER_STUFFUP;
783 mpt_prt(mpt, "%s: IOC returned unknown code: 0x%x", __func__,
784 le16toh(mpt_reply->IOCStatus));
785 restart = 1;
786 break;
787 }
788
789 if (mpt_reply != NULL) {
790 if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
791 memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
792 sizeof(xs->sense.scsi_sense));
793 } else if (mpt_reply->SCSIState &
794 MPI_SCSI_STATE_AUTOSENSE_FAILED) {
795 /*
796 * This will cause the scsipi layer to issue
797 * a REQUEST SENSE.
798 */
799 if (xs->status == SCSI_CHECK)
800 xs->error = XS_BUSY;
801 }
802 }
803
804 done:
805 if (mpt_reply != NULL && le16toh(mpt_reply->IOCStatus) &
806 MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
807 		mpt_prt(mpt, "%s: IOC has error - logging...", __func__);
808 mpt_ctlop(mpt, mpt_reply, reply);
809 }
810
811 /* If IOC done with this request, free it up. */
812 if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
813 mpt_free_request(mpt, req);
814
815 /* If address reply, give the buffer back to the IOC. */
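/* The reply FIFO hands back frame addresses shifted right one bit; shift back before returning the frame to the IOC. */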
816 if (mpt_reply != NULL)
817 mpt_free_reply(mpt, (reply << 1));
818
819 if (xs != NULL)
820 scsipi_done(xs);
821
822 if (restart) {
823 mpt_prt(mpt, "%s: IOC fatal error: restarting...", __func__);
824 mpt_restart(mpt, NULL);
825 }
826 }
827
828 static void
829 mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
830 {
831 struct scsipi_periph *periph = xs->xs_periph;
832 request_t *req;
833 MSG_SCSI_IO_REQUEST *mpt_req;
834 int error, s;
835
836 s = splbio();
837 req = mpt_get_request(mpt);
838 if (__predict_false(req == NULL)) {
839 /* This should happen very infrequently. */
840 xs->error = XS_RESOURCE_SHORTAGE;
841 scsipi_done(xs);
842 splx(s);
843 return;
844 }
845 splx(s);
846
847 /* Link the req and the scsipi_xfer. */
848 req->xfer = xs;
849
850 /* Now we build the command for the IOC */
851 mpt_req = req->req_vbuf;
852 memset(mpt_req, 0, sizeof(*mpt_req));
853
854 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
855 mpt_req->Bus = mpt->bus;
856
857 mpt_req->SenseBufferLength =
858 (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
859 sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;
860
861 /*
862 * We use the message context to find the request structure when
863 * we get the command completion interrupt from the IOC.
864 */
865 mpt_req->MsgContext = htole32(req->index);
866
867 /* Which physical device to do the I/O on. */
868 mpt_req->TargetID = periph->periph_target;
869 mpt_req->LUN[1] = periph->periph_lun;
870
871 /* Set the direction of the transfer. */
872 if (xs->xs_control & XS_CTL_DATA_IN)
873 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
874 else if (xs->xs_control & XS_CTL_DATA_OUT)
875 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
876 else
877 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
878
879 /* Set the queue behavior. */
880 if (__predict_true((!mpt->is_scsi) ||
881 (mpt->mpt_tag_enable &
882 (1 << periph->periph_target)))) {
883 switch (XS_CTL_TAGTYPE(xs)) {
884 case XS_CTL_HEAD_TAG:
885 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
886 break;
887
888 #if 0 /* XXX */
889 case XS_CTL_ACA_TAG:
890 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
891 break;
892 #endif
893
894 case XS_CTL_ORDERED_TAG:
895 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
896 break;
897
898 case XS_CTL_SIMPLE_TAG:
899 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
900 break;
901
902 default:
903 if (mpt->is_scsi)
904 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
905 else
906 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
907 break;
908 }
909 } else
910 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
911
912 if (__predict_false(mpt->is_scsi &&
913 (mpt->mpt_disc_enable &
914 (1 << periph->periph_target)) == 0))
915 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
916
917 mpt_req->Control = htole32(mpt_req->Control);
918
919 /* Copy the SCSI command block into place. */
920 memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);
921
922 mpt_req->CDBLength = xs->cmdlen;
923 mpt_req->DataLength = htole32(xs->datalen);
924 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
925
926 /*
927 * Map the DMA transfer.
928 */
929 if (xs->datalen) {
930 SGE_SIMPLE32 *se;
931
932 error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
933 xs->datalen, NULL,
934 ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
935 : BUS_DMA_WAITOK) |
936 BUS_DMA_STREAMING |
937 ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
938 : BUS_DMA_WRITE));
939 switch (error) {
940 case 0:
941 break;
942
943 case ENOMEM:
944 case EAGAIN:
945 xs->error = XS_RESOURCE_SHORTAGE;
946 goto out_bad;
947
948 default:
949 xs->error = XS_DRIVER_STUFFUP;
950 mpt_prt(mpt, "error %d loading DMA map", error);
951 out_bad:
952 s = splbio();
953 mpt_free_request(mpt, req);
954 scsipi_done(xs);
955 splx(s);
956 return;
957 }
958
959 if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
960 int seg, i, nleft = req->dmap->dm_nsegs;
961 uint32_t flags;
962 SGE_CHAIN32 *ce;
963
964 seg = 0;
965 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
966 if (xs->xs_control & XS_CTL_DATA_OUT)
967 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
968
969 se = (SGE_SIMPLE32 *) &mpt_req->SGL;
970 for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
971 i++, se++, seg++) {
972 uint32_t tf;
973
974 memset(se, 0, sizeof(*se));
975 se->Address =
976 htole32(req->dmap->dm_segs[seg].ds_addr);
977 MPI_pSGE_SET_LENGTH(se,
978 req->dmap->dm_segs[seg].ds_len);
979 tf = flags;
980 if (i == MPT_NSGL_FIRST(mpt) - 2)
981 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
982 MPI_pSGE_SET_FLAGS(se, tf);
983 se->FlagsLength = htole32(se->FlagsLength);
984 nleft--;
985 }
986
987 /*
988 * Tell the IOC where to find the first chain element.
989 */
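/* The chain offset is expressed in 32-bit words, hence the shift by 2. */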
990 mpt_req->ChainOffset =
991 ((char *)se - (char *)mpt_req) >> 2;
992
993 /*
994 * Until we're finished with all segments...
995 */
996 while (nleft) {
997 int ntodo;
998
999 /*
1000 * Construct the chain element that points to
1001 * the next segment.
1002 */
1003 ce = (SGE_CHAIN32 *) se++;
1004 if (nleft > MPT_NSGL(mpt)) {
1005 ntodo = MPT_NSGL(mpt) - 1;
1006 ce->NextChainOffset = (MPT_RQSL(mpt) -
1007 sizeof(SGE_SIMPLE32)) >> 2;
1008 ce->Length = htole16(MPT_NSGL(mpt)
1009 * sizeof(SGE_SIMPLE32));
1010 } else {
1011 ntodo = nleft;
1012 ce->NextChainOffset = 0;
1013 ce->Length = htole16(ntodo
1014 * sizeof(SGE_SIMPLE32));
1015 }
1016 ce->Address = htole32(req->req_pbuf +
1017 ((char *)se - (char *)mpt_req));
1018 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1019 for (i = 0; i < ntodo; i++, se++, seg++) {
1020 uint32_t tf;
1021
1022 memset(se, 0, sizeof(*se));
1023 se->Address = htole32(
1024 req->dmap->dm_segs[seg].ds_addr);
1025 MPI_pSGE_SET_LENGTH(se,
1026 req->dmap->dm_segs[seg].ds_len);
1027 tf = flags;
1028 if (i == ntodo - 1) {
1029 tf |=
1030 MPI_SGE_FLAGS_LAST_ELEMENT;
1031 if (ce->NextChainOffset == 0) {
1032 tf |=
1033 MPI_SGE_FLAGS_END_OF_LIST |
1034 MPI_SGE_FLAGS_END_OF_BUFFER;
1035 }
1036 }
1037 MPI_pSGE_SET_FLAGS(se, tf);
1038 se->FlagsLength =
1039 htole32(se->FlagsLength);
1040 nleft--;
1041 }
1042 }
1043 bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
1044 req->dmap->dm_mapsize,
1045 (xs->xs_control & XS_CTL_DATA_IN) ?
1046 BUS_DMASYNC_PREREAD
1047 : BUS_DMASYNC_PREWRITE);
1048 } else {
1049 int i;
1050 uint32_t flags;
1051
1052 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1053 if (xs->xs_control & XS_CTL_DATA_OUT)
1054 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1055
1056 /* Copy the segments into our SG list. */
1057 se = (SGE_SIMPLE32 *) &mpt_req->SGL;
1058 for (i = 0; i < req->dmap->dm_nsegs;
1059 i++, se++) {
1060 uint32_t tf;
1061
1062 memset(se, 0, sizeof(*se));
1063 se->Address =
1064 htole32(req->dmap->dm_segs[i].ds_addr);
1065 MPI_pSGE_SET_LENGTH(se,
1066 req->dmap->dm_segs[i].ds_len);
1067 tf = flags;
1068 if (i == req->dmap->dm_nsegs - 1) {
1069 tf |=
1070 MPI_SGE_FLAGS_LAST_ELEMENT |
1071 MPI_SGE_FLAGS_END_OF_BUFFER |
1072 MPI_SGE_FLAGS_END_OF_LIST;
1073 }
1074 MPI_pSGE_SET_FLAGS(se, tf);
1075 se->FlagsLength = htole32(se->FlagsLength);
1076 }
1077 bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
1078 req->dmap->dm_mapsize,
1079 (xs->xs_control & XS_CTL_DATA_IN) ?
1080 BUS_DMASYNC_PREREAD
1081 : BUS_DMASYNC_PREWRITE);
1082 }
1083 } else {
1084 /*
1085 * No data to transfer; just make a single simple SGL
1086 * with zero length.
1087 */
1088 SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
1089 memset(se, 0, sizeof(*se));
1090 MPI_pSGE_SET_FLAGS(se,
1091 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1092 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1093 se->FlagsLength = htole32(se->FlagsLength);
1094 }
1095
1096 if (mpt->verbose > 1)
1097 mpt_print_scsi_io_request(mpt_req);
1098
1099 if (xs->timeout == 0) {
1100 		mpt_prt(mpt, "mpt_run_xfer: no timeout specified for request: 0x%x",
1101 req->index);
1102 xs->timeout = 500;
1103 }
1104
1105 s = splbio();
1106 if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
1107 callout_reset(&xs->xs_callout,
1108 mstohz(xs->timeout), mpt_timeout, req);
1109 mpt_send_cmd(mpt, req);
1110 splx(s);
1111
1112 if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
1113 return;
1114
1115 /*
1116 * If we can't use interrupts, poll on completion.
1117 */
1118 if (mpt_poll(mpt, xs, xs->timeout))
1119 mpt_timeout(req);
1120 }
1121
1122 static void
1123 mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
1124 {
1125 fCONFIG_PAGE_SCSI_DEVICE_1 tmp;
1126
1127 /*
1128 * Always allow disconnect; we don't have a way to disable
1129 * it right now, in any case.
1130 */
1131 mpt->mpt_disc_enable |= (1 << xm->xm_target);
1132
1133 if (xm->xm_mode & PERIPH_CAP_TQING)
1134 mpt->mpt_tag_enable |= (1 << xm->xm_target);
1135 else
1136 mpt->mpt_tag_enable &= ~(1 << xm->xm_target);
1137
1138 if (mpt->is_scsi) {
1139 /*
1140 * SCSI transport settings only make any sense for
1141 * SCSI
1142 */
1143
1144 tmp = mpt->mpt_dev_page1[xm->xm_target];
1145
1146 /*
1147 * Set the wide/narrow parameter for the target.
1148 */
1149 if (xm->xm_mode & PERIPH_CAP_WIDE16)
1150 tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
1151 else
1152 tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
1153
1154 /*
1155 * Set the synchronous parameters for the target.
1156 *
1157 * XXX If we request sync transfers, we just go ahead and
1158 * XXX request the maximum available. We need finer control
1159 * XXX in order to implement Domain Validation.
1160 */
1161 tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
1162 MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
1163 MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
1164 MPI_SCSIDEVPAGE1_RP_IU);
1165 if (xm->xm_mode & PERIPH_CAP_SYNC) {
1166 int factor, offset, np;
1167
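/* SCSI Port Page 0 Capabilities: bits 8-15 hold the minimum sync period factor, bits 16-23 the maximum offset. */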
1168 factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
1169 offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
1170 np = 0;
1171 if (factor < 0x9) {
1172 /* Ultra320 */
1173 np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
1174 }
1175 if (factor < 0xa) {
1176 /* at least Ultra160 */
1177 np |= MPI_SCSIDEVPAGE1_RP_DT;
1178 }
1179 np |= (factor << 8) | (offset << 16);
1180 tmp.RequestedParameters |= np;
1181 }
1182
1183 host2mpt_config_page_scsi_device_1(&tmp);
1184 if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
1185 mpt_prt(mpt, "unable to write Device Page 1");
1186 return;
1187 }
1188
1189 if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
1190 mpt_prt(mpt, "unable to read back Device Page 1");
1191 return;
1192 }
1193
1194 mpt2host_config_page_scsi_device_1(&tmp);
1195 mpt->mpt_dev_page1[xm->xm_target] = tmp;
1196 if (mpt->verbose > 1) {
1197 mpt_prt(mpt,
1198 "SPI Target %d Page 1: RequestedParameters %x Config %x",
1199 xm->xm_target,
1200 mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
1201 mpt->mpt_dev_page1[xm->xm_target].Configuration);
1202 }
1203 }
1204
1205 /*
1206 * Make a note that we should perform an async callback at the
1207 * end of the next successful command completion to report the
1208 * negotiated transfer mode.
1209 */
1210 mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
1211 }
1212
1213 static void
1214 mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
1215 {
1216 fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
1217 struct scsipi_xfer_mode xm;
1218 int period, offset;
1219
1220 tmp = mpt->mpt_dev_page0[periph->periph_target];
1221 host2mpt_config_page_scsi_device_0(&tmp);
1222 if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
1223 mpt_prt(mpt, "unable to read Device Page 0");
1224 return;
1225 }
1226 mpt2host_config_page_scsi_device_0(&tmp);
1227
1228 if (mpt->verbose > 1) {
1229 mpt_prt(mpt,
1230 "SPI Tgt %d Page 0: NParms %x Information %x",
1231 periph->periph_target,
1232 tmp.NegotiatedParameters, tmp.Information);
1233 }
1234
1235 xm.xm_target = periph->periph_target;
1236 xm.xm_mode = 0;
1237
1238 if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
1239 xm.xm_mode |= PERIPH_CAP_WIDE16;
1240
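/* NegotiatedParameters: bits 8-15 hold the sync period factor, bits 16-23 the offset. */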
1241 period = (tmp.NegotiatedParameters >> 8) & 0xff;
1242 offset = (tmp.NegotiatedParameters >> 16) & 0xff;
1243 if (offset) {
1244 xm.xm_period = period;
1245 xm.xm_offset = offset;
1246 xm.xm_mode |= PERIPH_CAP_SYNC;
1247 }
1248
1249 /*
1250 * Tagged queueing is all controlled by us; there is no
1251 * other setting to query.
1252 */
1253 if (mpt->mpt_tag_enable & (1 << periph->periph_target))
1254 xm.xm_mode |= PERIPH_CAP_TQING;
1255
1256 /*
1257 * We're going to deliver the async event, so clear the marker.
1258 */
1259 mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);
1260
1261 scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
1262 }
1263
1264 static void
1265 mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
1266 {
1267 MSG_DEFAULT_REPLY *dmsg = vmsg;
1268
1269 switch (dmsg->Function) {
1270 case MPI_FUNCTION_EVENT_NOTIFICATION:
1271 mpt_event_notify_reply(mpt, vmsg);
1272 mpt_free_reply(mpt, (reply << 1));
1273 break;
1274
1275 case MPI_FUNCTION_EVENT_ACK:
1276 {
1277 MSG_EVENT_ACK_REPLY *msg = vmsg;
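/* Clear the high bit we set in MsgContext to mark driver-internal requests. */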
1278 int index = le32toh(msg->MsgContext) & ~0x80000000;
1279 mpt_free_reply(mpt, (reply << 1));
1280 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
1281 request_t *req = &mpt->request_pool[index];
1282 mpt_free_request(mpt, req);
1283 }
1284 break;
1285 }
1286
1287 case MPI_FUNCTION_PORT_ENABLE:
1288 {
1289 MSG_PORT_ENABLE_REPLY *msg = vmsg;
1290 int index = le32toh(msg->MsgContext) & ~0x80000000;
1291 if (mpt->verbose > 1)
1292 mpt_prt(mpt, "enable port reply index %d", index);
1293 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
1294 request_t *req = &mpt->request_pool[index];
1295 req->debug = REQ_DONE;
1296 }
1297 mpt_free_reply(mpt, (reply << 1));
1298 break;
1299 }
1300
1301 case MPI_FUNCTION_CONFIG:
1302 {
1303 MSG_CONFIG_REPLY *msg = vmsg;
1304 int index = le32toh(msg->MsgContext) & ~0x80000000;
1305 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
1306 request_t *req = &mpt->request_pool[index];
1307 req->debug = REQ_DONE;
1308 req->sequence = reply;
1309 } else
1310 mpt_free_reply(mpt, (reply << 1));
1311 break;
1312 }
1313
1314 default:
1315 mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
1316 }
1317 }
1318
1319 static void
1320 mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
1321 {
1322
1323 switch (le32toh(msg->Event)) {
1324 case MPI_EVENT_LOG_DATA:
1325 {
1326 int i;
1327
1328 		/* Some error occurred that the Fusion wants logged. */
1329 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
1330 mpt_prt(mpt, "EvtLogData: Event Data:");
1331 for (i = 0; i < msg->EventDataLength; i++) {
1332 if ((i % 4) == 0)
1333 printf("%s:\t", device_xname(mpt->sc_dev));
1334 printf("0x%08x%c", msg->Data[i],
1335 ((i % 4) == 3) ? '\n' : ' ');
1336 }
1337 if ((i % 4) != 0)
1338 printf("\n");
1339 break;
1340 }
1341
1342 case MPI_EVENT_UNIT_ATTENTION:
1343 mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
1344 (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
1345 break;
1346
1347 case MPI_EVENT_IOC_BUS_RESET:
1348 /* We generated a bus reset. */
1349 mpt_prt(mpt, "IOC Bus Reset Port %d",
1350 (msg->Data[0] >> 8) & 0xff);
1351 break;
1352
1353 case MPI_EVENT_EXT_BUS_RESET:
1354 /* Someone else generated a bus reset. */
1355 mpt_prt(mpt, "External Bus Reset");
1356 /*
1357 * These replies don't return EventData like the MPI
1358 * spec says they do.
1359 */
1360 /* XXX Send an async event? */
1361 break;
1362
1363 case MPI_EVENT_RESCAN:
1364 /*
1365 		 * In general, this means a device has been added
1366 * to the loop.
1367 */
1368 mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
1369 /* XXX Send an async event? */
1370 break;
1371
1372 case MPI_EVENT_LINK_STATUS_CHANGE:
1373 mpt_prt(mpt, "Port %d: Link state %s",
1374 (msg->Data[1] >> 8) & 0xff,
1375 (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
1376 break;
1377
1378 case MPI_EVENT_LOOP_STATE_CHANGE:
1379 switch ((msg->Data[0] >> 16) & 0xff) {
1380 case 0x01:
1381 mpt_prt(mpt,
1382 "Port %d: FC Link Event: LIP(%02x,%02x) "
1383 "(Loop Initialization)",
1384 (msg->Data[1] >> 8) & 0xff,
1385 (msg->Data[0] >> 8) & 0xff,
1386 (msg->Data[0] ) & 0xff);
1387 switch ((msg->Data[0] >> 8) & 0xff) {
1388 case 0xf7:
1389 if ((msg->Data[0] & 0xff) == 0xf7)
1390 mpt_prt(mpt, "\tDevice needs AL_PA");
1391 else
1392 mpt_prt(mpt, "\tDevice %02x doesn't "
1393 "like FC performance",
1394 msg->Data[0] & 0xff);
1395 break;
1396
1397 case 0xf8:
1398 if ((msg->Data[0] & 0xff) == 0xf7)
1399 mpt_prt(mpt, "\tDevice detected loop "
1400 "failure before acquiring AL_PA");
1401 else
1402 mpt_prt(mpt, "\tDevice %02x detected "
1403 "loop failure",
1404 msg->Data[0] & 0xff);
1405 break;
1406
1407 default:
1408 mpt_prt(mpt, "\tDevice %02x requests that "
1409 "device %02x reset itself",
1410 msg->Data[0] & 0xff,
1411 (msg->Data[0] >> 8) & 0xff);
1412 break;
1413 }
1414 break;
1415
1416 case 0x02:
1417 mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
1418 "(Loop Port Enable)",
1419 (msg->Data[1] >> 8) & 0xff,
1420 (msg->Data[0] >> 8) & 0xff,
1421 (msg->Data[0] ) & 0xff);
1422 break;
1423
1424 case 0x03:
1425 mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
1426 "(Loop Port Bypass)",
1427 (msg->Data[1] >> 8) & 0xff,
1428 (msg->Data[0] >> 8) & 0xff,
1429 (msg->Data[0] ) & 0xff);
1430 break;
1431
1432 default:
1433 mpt_prt(mpt, "Port %d: FC Link Event: "
1434 "Unknown event (%02x %02x %02x)",
1435 (msg->Data[1] >> 8) & 0xff,
1436 (msg->Data[0] >> 16) & 0xff,
1437 (msg->Data[0] >> 8) & 0xff,
1438 (msg->Data[0] ) & 0xff);
1439 break;
1440 }
1441 break;
1442
1443 case MPI_EVENT_LOGOUT:
1444 mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
1445 (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
1446 break;
1447
1448 case MPI_EVENT_EVENT_CHANGE:
1449 /*
1450 * This is just an acknowledgement of our
1451 * mpt_send_event_request().
1452 */
1453 break;
1454
1455 case MPI_EVENT_SAS_PHY_LINK_STATUS:
1456 switch ((msg->Data[0] >> 12) & 0x0f) {
1457 case 0x00:
1458 mpt_prt(mpt, "Phy %d: Link Status Unknown",
1459 msg->Data[0] & 0xff);
1460 break;
1461 case 0x01:
1462 mpt_prt(mpt, "Phy %d: Link Disabled",
1463 msg->Data[0] & 0xff);
1464 break;
1465 case 0x02:
1466 mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
1467 msg->Data[0] & 0xff);
1468 break;
1469 case 0x03:
1470 mpt_prt(mpt, "Phy %d: SATA OOB Complete",
1471 msg->Data[0] & 0xff);
1472 break;
1473 case 0x08:
1474 mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
1475 msg->Data[0] & 0xff);
1476 break;
1477 case 0x09:
1478 mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
1479 msg->Data[0] & 0xff);
1480 break;
1481 default:
1482 mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
1483 "Unknown event (%0x)",
1484 msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
1485 }
1486 break;
1487
1488 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
1489 case MPI_EVENT_SAS_DISCOVERY:
1490 /* ignore these events for now */
1491 break;
1492
1493 case MPI_EVENT_QUEUE_FULL:
1494 /* This can get a little chatty */
1495 if (mpt->verbose > 0)
1496 mpt_prt(mpt, "Queue Full Event");
1497 break;
1498
1499 default:
1500 mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
1501 break;
1502 }
1503
1504 if (msg->AckRequired) {
1505 MSG_EVENT_ACK *ackp;
1506 request_t *req;
1507
1508 if ((req = mpt_get_request(mpt)) == NULL) {
1509 /* XXX XXX XXX XXXJRT */
1510 panic("mpt_event_notify_reply: unable to allocate "
1511 "request structure");
1512 }
1513
1514 ackp = (MSG_EVENT_ACK *) req->req_vbuf;
1515 memset(ackp, 0, sizeof(*ackp));
1516 ackp->Function = MPI_FUNCTION_EVENT_ACK;
1517 ackp->Event = msg->Event;
1518 ackp->EventContext = msg->EventContext;
1519 ackp->MsgContext = htole32(req->index | 0x80000000);
1520 mpt_check_doorbell(mpt);
1521 mpt_send_cmd(mpt, req);
1522 }
1523 }
1524
1525 static void
1526 mpt_bus_reset(mpt_softc_t *mpt)
1527 {
1528 request_t *req;
1529 MSG_SCSI_TASK_MGMT *mngt_req;
1530 int s;
1531
1532 s = splbio();
1533 if (mpt->mngt_req) {
1534 /* request already queued; can't do more */
1535 splx(s);
1536 return;
1537 }
1538 req = mpt_get_request(mpt);
1539 if (__predict_false(req == NULL)) {
1540 		mpt_prt(mpt, "no mngt request");
1541 splx(s);
1542 return;
1543 }
1544 mpt->mngt_req = req;
1545 splx(s);
1546 mngt_req = req->req_vbuf;
1547 memset(mngt_req, 0, sizeof(*mngt_req));
1548 mngt_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
1549 mngt_req->Bus = mpt->bus;
1550 mngt_req->TargetID = 0;
1551 mngt_req->ChainOffset = 0;
1552 mngt_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
1553 mngt_req->Reserved1 = 0;
1554 mngt_req->MsgFlags =
1555 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0;
1556 mngt_req->MsgContext = req->index;
1557 mngt_req->TaskMsgContext = 0;
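/* Task management requests go through the doorbell handshake rather than the normal request queue. */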
1558 s = splbio();
1559 mpt_send_handshake_cmd(mpt, sizeof(*mngt_req), mngt_req);
1560 splx(s);
1561 }
1562
1563 /*****************************************************************************
1564 * SCSI interface routines
1565 *****************************************************************************/
1566
1567 static void
1568 mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1569 void *arg)
1570 {
1571 struct scsipi_adapter *adapt = chan->chan_adapter;
1572 mpt_softc_t *mpt = device_private(adapt->adapt_dev);
1573
1574 switch (req) {
1575 case ADAPTER_REQ_RUN_XFER:
1576 mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
1577 return;
1578
1579 case ADAPTER_REQ_GROW_RESOURCES:
1580 /* Not supported. */
1581 return;
1582
1583 case ADAPTER_REQ_SET_XFER_MODE:
1584 mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
1585 return;
1586 }
1587 }
1588
1589 static void
1590 mpt_minphys(struct buf *bp)
1591 {
1592 if (bp->b_bcount > MPT_MAX_XFER)
1593 bp->b_bcount = MPT_MAX_XFER;
1594 minphys(bp);
1595 }
1596
1597 static int
1598 mpt_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
1599 int flag, struct proc *p)
1600 {
1601 mpt_softc_t *mpt;
1602 int s;
1603
1604 mpt = device_private(chan->chan_adapter->adapt_dev);
1605 switch (cmd) {
1606 case SCBUSIORESET:
1607 mpt_bus_reset(mpt);
1608 s = splbio();
1609 mpt_intr(mpt);
1610 splx(s);
1611 return(0);
1612 default:
1613 return (ENOTTY);
1614 }
1615 }
1616
1617 #if NBIO > 0
1618 static fCONFIG_PAGE_IOC_2 *
1619 mpt_get_cfg_page_ioc2(mpt_softc_t *mpt)
1620 {
1621 fCONFIG_PAGE_HEADER hdr;
1622 fCONFIG_PAGE_IOC_2 *ioc2;
1623 int rv;
1624
1625 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 2, 0, &hdr);
1626 if (rv)
1627 return NULL;
1628
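/* PageLength is in units of 32-bit words. */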
1629 ioc2 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
1630 if (ioc2 == NULL)
1631 return NULL;
1632
1633 memcpy(ioc2, &hdr, sizeof(hdr));
1634
1635 rv = mpt_read_cfg_page(mpt, 0, &ioc2->Header);
1636 if (rv)
1637 goto fail;
1638 mpt2host_config_page_ioc_2(ioc2);
1639
1640 return ioc2;
1641
1642 fail:
1643 free(ioc2, M_DEVBUF);
1644 return NULL;
1645 }
1646
1647 static fCONFIG_PAGE_IOC_3 *
1648 mpt_get_cfg_page_ioc3(mpt_softc_t *mpt)
1649 {
1650 fCONFIG_PAGE_HEADER hdr;
1651 fCONFIG_PAGE_IOC_3 *ioc3;
1652 int rv;
1653
1654 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 3, 0, &hdr);
1655 if (rv)
1656 return NULL;
1657
1658 ioc3 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
1659 if (ioc3 == NULL)
1660 return NULL;
1661
1662 memcpy(ioc3, &hdr, sizeof(hdr));
1663
1664 rv = mpt_read_cfg_page(mpt, 0, &ioc3->Header);
1665 if (rv)
1666 goto fail;
1667
1668 return ioc3;
1669
1670 fail:
1671 free(ioc3, M_DEVBUF);
1672 return NULL;
1673 }
1674
1675
1676 static fCONFIG_PAGE_RAID_VOL_0 *
1677 mpt_get_cfg_page_raid_vol0(mpt_softc_t *mpt, int address)
1678 {
1679 fCONFIG_PAGE_HEADER hdr;
1680 fCONFIG_PAGE_RAID_VOL_0 *rvol0;
1681 int rv;
1682
1683 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1684 address, &hdr);
1685 if (rv)
1686 return NULL;
1687
1688 rvol0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
1689 if (rvol0 == NULL)
1690 return NULL;
1691
1692 memcpy(rvol0, &hdr, sizeof(hdr));
1693
1694 rv = mpt_read_cfg_page(mpt, address, &rvol0->Header);
1695 if (rv)
1696 goto fail;
1697 mpt2host_config_page_raid_vol_0(rvol0);
1698
1699 return rvol0;
1700
1701 fail:
1702 free(rvol0, M_DEVBUF);
1703 return NULL;
1704 }
1705
1706 static fCONFIG_PAGE_RAID_PHYS_DISK_0 *
1707 mpt_get_cfg_page_raid_phys_disk0(mpt_softc_t *mpt, int address)
1708 {
1709 fCONFIG_PAGE_HEADER hdr;
1710 fCONFIG_PAGE_RAID_PHYS_DISK_0 *physdisk0;
1711 int rv;
1712
1713 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK, 0,
1714 address, &hdr);
1715 if (rv)
1716 return NULL;
1717
1718 physdisk0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
1719 if (physdisk0 == NULL)
1720 return NULL;
1721
1722 memcpy(physdisk0, &hdr, sizeof(hdr));
1723
1724 rv = mpt_read_cfg_page(mpt, address, &physdisk0->Header);
1725 if (rv)
1726 goto fail;
1727 mpt2host_config_page_raid_phys_disk_0(physdisk0);
1728
1729 return physdisk0;
1730
1731 fail:
1732 free(physdisk0, M_DEVBUF);
1733 return NULL;
1734 }
1735
1736 static bool
1737 mpt_is_raid(mpt_softc_t *mpt)
1738 {
1739 fCONFIG_PAGE_IOC_2 *ioc2;
1740 bool is_raid = false;
1741
1742 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1743 if (ioc2 == NULL)
1744 return false;
1745
1746 if (ioc2->CapabilitiesFlags != 0xdeadbeef) {
1747 is_raid = !!(ioc2->CapabilitiesFlags &
1748 (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT|
1749 MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT|
1750 MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT));
1751 }
1752
1753 free(ioc2, M_DEVBUF);
1754
1755 return is_raid;
1756 }
1757
1758 static int
1759 mpt_bio_ioctl(device_t dev, u_long cmd, void *addr)
1760 {
1761 mpt_softc_t *mpt = device_private(dev);
1762 int error, s;
1763
1764 KERNEL_LOCK(1, curlwp);
1765 s = splbio();
1766
1767 switch (cmd) {
1768 case BIOCINQ:
1769 error = mpt_bio_ioctl_inq(mpt, addr);
1770 break;
1771 case BIOCVOL:
1772 error = mpt_bio_ioctl_vol(mpt, addr);
1773 break;
1774 case BIOCDISK_NOVOL:
1775 error = mpt_bio_ioctl_disk_novol(mpt, addr);
1776 break;
1777 case BIOCDISK:
1778 error = mpt_bio_ioctl_disk(mpt, addr);
1779 break;
1780 case BIOCSETSTATE:
1781 error = mpt_bio_ioctl_setstate(mpt, addr);
1782 break;
1783 default:
1784 error = EINVAL;
1785 break;
1786 }
1787
1788 splx(s);
1789 KERNEL_UNLOCK_ONE(curlwp);
1790
1791 return error;
1792 }
1793
1794 static int
1795 mpt_bio_ioctl_inq(mpt_softc_t *mpt, struct bioc_inq *bi)
1796 {
1797 fCONFIG_PAGE_IOC_2 *ioc2;
1798 fCONFIG_PAGE_IOC_3 *ioc3;
1799
1800 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1801 if (ioc2 == NULL)
1802 return EIO;
1803 ioc3 = mpt_get_cfg_page_ioc3(mpt);
1804 if (ioc3 == NULL) {
1805 free(ioc2, M_DEVBUF);
1806 return EIO;
1807 }
1808
1809 strlcpy(bi->bi_dev, device_xname(mpt->sc_dev), sizeof(bi->bi_dev));
1810 bi->bi_novol = ioc2->NumActiveVolumes;
1811 bi->bi_nodisk = ioc3->NumPhysDisks;
1812
1813 free(ioc2, M_DEVBUF);
1814 free(ioc3, M_DEVBUF);
1815
1816 return 0;
1817 }
1818
1819 static int
1820 mpt_bio_ioctl_vol(mpt_softc_t *mpt, struct bioc_vol *bv)
1821 {
1822 fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
1823 fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
1824 fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
1825 struct scsipi_periph *periph;
1826 struct scsipi_inquiry_data inqbuf;
1827 char vendor[9], product[17], revision[5];
1828 int address;
1829
1830 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1831 if (ioc2 == NULL)
1832 return EIO;
1833
1834 if (bv->bv_volid < 0 || bv->bv_volid >= ioc2->NumActiveVolumes)
1835 goto fail;
1836
1837 ioc2rvol = &ioc2->RaidVolume[bv->bv_volid];
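/* RAID volume config pages are addressed by (VolumeBus << 8) | VolumeID. */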
1838 address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);
1839
1840 rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
1841 if (rvol0 == NULL)
1842 goto fail;
1843
1844 bv->bv_dev[0] = '\0';
1845 bv->bv_vendor[0] = '\0';
1846
1847 periph = scsipi_lookup_periph(&mpt->sc_channel, ioc2rvol->VolumeBus, 0);
1848 if (periph != NULL) {
1849 if (periph->periph_dev != NULL) {
1850 snprintf(bv->bv_dev, sizeof(bv->bv_dev), "%s",
1851 device_xname(periph->periph_dev));
1852 }
1853 memset(&inqbuf, 0, sizeof(inqbuf));
1854 if (scsipi_inquire(periph, &inqbuf,
1855 XS_CTL_DISCOVERY | XS_CTL_SILENT) == 0) {
1856 strnvisx(vendor, sizeof(vendor),
1857 inqbuf.vendor, sizeof(inqbuf.vendor),
1858 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1859 strnvisx(product, sizeof(product),
1860 inqbuf.product, sizeof(inqbuf.product),
1861 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1862 strnvisx(revision, sizeof(revision),
1863 inqbuf.revision, sizeof(inqbuf.revision),
1864 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1865
1866 snprintf(bv->bv_vendor, sizeof(bv->bv_vendor),
1867 "%s %s %s", vendor, product, revision);
1868 }
1869
1870 snprintf(bv->bv_dev, sizeof(bv->bv_dev), "%s",
1871 device_xname(periph->periph_dev));
1872 }
1873 bv->bv_nodisk = rvol0->NumPhysDisks;
1874 bv->bv_size = (uint64_t)rvol0->MaxLBA * 512;
1875 bv->bv_stripe_size = rvol0->StripeSize;
1876 bv->bv_percent = -1;
1877 bv->bv_seconds = 0;
1878
1879 switch (rvol0->VolumeStatus.State) {
1880 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
1881 bv->bv_status = BIOC_SVONLINE;
1882 break;
1883 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
1884 bv->bv_status = BIOC_SVDEGRADED;
1885 break;
1886 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
1887 bv->bv_status = BIOC_SVOFFLINE;
1888 break;
1889 default:
1890 bv->bv_status = BIOC_SVINVALID;
1891 break;
1892 }
1893
1894 switch (ioc2rvol->VolumeType) {
1895 case MPI_RAID_VOL_TYPE_IS:
1896 bv->bv_level = 0;
1897 break;
1898 case MPI_RAID_VOL_TYPE_IME:
1899 case MPI_RAID_VOL_TYPE_IM:
1900 bv->bv_level = 1;
1901 break;
1902 default:
1903 bv->bv_level = -1;
1904 break;
1905 }
1906
1907 free(ioc2, M_DEVBUF);
1908 free(rvol0, M_DEVBUF);
1909
1910 return 0;
1911
1912 fail:
1913 if (ioc2) free(ioc2, M_DEVBUF);
1914 if (rvol0) free(rvol0, M_DEVBUF);
1915 return EINVAL;
1916 }
1917
1918 static void
1919 mpt_bio_ioctl_disk_common(mpt_softc_t *mpt, struct bioc_disk *bd,
1920 int address)
1921 {
1922 fCONFIG_PAGE_RAID_PHYS_DISK_0 *phys = NULL;
1923 char vendor_id[9], product_id[17], product_rev_level[5];
1924
1925 phys = mpt_get_cfg_page_raid_phys_disk0(mpt, address);
1926 if (phys == NULL)
1927 return;
1928
1929 strnvisx(vendor_id, sizeof(vendor_id),
1930 phys->InquiryData.VendorID, sizeof(phys->InquiryData.VendorID),
1931 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1932 strnvisx(product_id, sizeof(product_id),
1933 phys->InquiryData.ProductID, sizeof(phys->InquiryData.ProductID),
1934 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1935 strnvisx(product_rev_level, sizeof(product_rev_level),
1936 phys->InquiryData.ProductRevLevel,
1937 sizeof(phys->InquiryData.ProductRevLevel),
1938 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1939
1940 snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s %s",
1941 vendor_id, product_id, product_rev_level);
1942 strlcpy(bd->bd_serial, phys->InquiryData.Info, sizeof(bd->bd_serial));
1943 bd->bd_procdev[0] = '\0';
1944 bd->bd_channel = phys->PhysDiskBus;
1945 bd->bd_target = phys->PhysDiskID;
1946 bd->bd_lun = 0;
1947 bd->bd_size = (uint64_t)phys->MaxLBA * 512;
1948
1949 switch (phys->PhysDiskStatus.State) {
1950 case MPI_PHYSDISK0_STATUS_ONLINE:
1951 bd->bd_status = BIOC_SDONLINE;
1952 break;
1953 case MPI_PHYSDISK0_STATUS_MISSING:
1954 case MPI_PHYSDISK0_STATUS_FAILED:
1955 bd->bd_status = BIOC_SDFAILED;
1956 break;
1957 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1958 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1959 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1960 bd->bd_status = BIOC_SDOFFLINE;
1961 break;
1962 case MPI_PHYSDISK0_STATUS_INITIALIZING:
1963 bd->bd_status = BIOC_SDSCRUB;
1964 break;
1965 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1966 default:
1967 bd->bd_status = BIOC_SDINVALID;
1968 break;
1969 }
1970
1971 free(phys, M_DEVBUF);
1972 }
1973
1974 static int
1975 mpt_bio_ioctl_disk_novol(mpt_softc_t *mpt, struct bioc_disk *bd)
1976 {
1977 fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
1978 fCONFIG_PAGE_IOC_3 *ioc3 = NULL;
1979 fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
1980 fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
1981 int address, v, d;
1982
1983 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1984 if (ioc2 == NULL)
1985 return EIO;
1986 ioc3 = mpt_get_cfg_page_ioc3(mpt);
1987 if (ioc3 == NULL) {
1988 free(ioc2, M_DEVBUF);
1989 return EIO;
1990 }
1991
1992 if (bd->bd_diskid < 0 || bd->bd_diskid >= ioc3->NumPhysDisks)
1993 goto fail;
1994
1995 address = ioc3->PhysDisk[bd->bd_diskid].PhysDiskNum;
1996
1997 mpt_bio_ioctl_disk_common(mpt, bd, address);
1998
1999 bd->bd_disknovol = true;
2000 for (v = 0; bd->bd_disknovol && v < ioc2->NumActiveVolumes; v++) {
2001 ioc2rvol = &ioc2->RaidVolume[v];
2002 address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);
2003
2004 rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
2005 if (rvol0 == NULL)
2006 continue;
2007
2008 for (d = 0; d < rvol0->NumPhysDisks; d++) {
2009 if (rvol0->PhysDisk[d].PhysDiskNum ==
2010 ioc3->PhysDisk[bd->bd_diskid].PhysDiskNum) {
2011 bd->bd_disknovol = false;
2012 bd->bd_volid = v;
2013 break;
2014 }
2015 }
2016 free(rvol0, M_DEVBUF);
2017 }
2018
2019 free(ioc3, M_DEVBUF);
2020 free(ioc2, M_DEVBUF);
2021
2022 return 0;
2023
2024 fail:
2025 if (ioc3) free(ioc3, M_DEVBUF);
2026 if (ioc2) free(ioc2, M_DEVBUF);
2027 return EINVAL;
2028 }
2029
2030
2031 static int
2032 mpt_bio_ioctl_disk(mpt_softc_t *mpt, struct bioc_disk *bd)
2033 {
2034 fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
2035 fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
2036 fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
2037 int address;
2038
2039 ioc2 = mpt_get_cfg_page_ioc2(mpt);
2040 if (ioc2 == NULL)
2041 return EIO;
2042
2043 if (bd->bd_volid < 0 || bd->bd_volid >= ioc2->NumActiveVolumes)
2044 goto fail;
2045
2046 ioc2rvol = &ioc2->RaidVolume[bd->bd_volid];
2047 address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);
2048
2049 rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
2050 if (rvol0 == NULL)
2051 goto fail;
2052
2053 if (bd->bd_diskid < 0 || bd->bd_diskid >= rvol0->NumPhysDisks)
2054 goto fail;
2055
2056 address = rvol0->PhysDisk[bd->bd_diskid].PhysDiskNum;
2057
2058 mpt_bio_ioctl_disk_common(mpt, bd, address);
2059
2060 free(ioc2, M_DEVBUF);
2061
2062 return 0;
2063
2064 fail:
2065 if (ioc2) free(ioc2, M_DEVBUF);
2066 return EINVAL;
2067 }
2068
2069 static int
2070 mpt_bio_ioctl_setstate(mpt_softc_t *mpt, struct bioc_setstate *bs)
2071 {
2072 return ENOTTY;
2073 }
2074 #endif
2075
2076