/*	$NetBSD: mpt_netbsd.c,v 1.26 2014/09/27 16:14:16 jmcneill Exp $	*/
2
3 /*
4 * Copyright (c) 2003 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 2000, 2001 by Greg Ansley
40 * Partially derived from Matt Jacob's ISP driver.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice immediately at the beginning of the file, without modification,
47 * this list of conditions, and the following disclaimer.
48 * 2. The name of the author may not be used to endorse or promote products
49 * derived from this software without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
55 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE.
62 */
63 /*
64 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
65 */
66
67 /*
68 * mpt_netbsd.c:
69 *
70 * NetBSD-specific routines for LSI Fusion adapters. Includes some
71 * bus_dma glue, and SCSIPI glue.
72 *
73 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
74 * Wasabi Systems, Inc.
75 *
76 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
77 */
78
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.26 2014/09/27 16:14:16 jmcneill Exp $");
81
82 #include "bio.h"
83
84 #include <dev/ic/mpt.h> /* pulls in all headers */
85 #include <sys/scsiio.h>
86
87 #if NBIO > 0
88 #include <dev/biovar.h>
89 #endif
90
91 static int mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
92 static void mpt_timeout(void *);
93 static void mpt_restart(mpt_softc_t *, request_t *);
94 static void mpt_done(mpt_softc_t *, uint32_t);
95 static int mpt_drain_queue(mpt_softc_t *);
96 static void mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
97 static void mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
98 static void mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
99 static void mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
100 static void mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);
101 static void mpt_bus_reset(mpt_softc_t *);
102
103 static void mpt_scsipi_request(struct scsipi_channel *,
104 scsipi_adapter_req_t, void *);
105 static void mpt_minphys(struct buf *);
106 static int mpt_ioctl(struct scsipi_channel *, u_long, void *, int,
107 struct proc *);
108
109 #if NBIO > 0
110 static bool mpt_is_raid(mpt_softc_t *);
111 static int mpt_bio_ioctl(device_t, u_long, void *);
112 static int mpt_bio_ioctl_inq(mpt_softc_t *, struct bioc_inq *);
113 static int mpt_bio_ioctl_vol(mpt_softc_t *, struct bioc_vol *);
114 static int mpt_bio_ioctl_disk(mpt_softc_t *, struct bioc_disk *);
115 static int mpt_bio_ioctl_setstate(mpt_softc_t *, struct bioc_setstate *);
116 #endif
117
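/*
 * Attach the SCSIPI adapter and channel for this controller.  The number
 * of openings is limited by the IOC's global credits (two are reserved for
 * driver use), and the bus type is selected based on whether the controller
 * is SAS, FC, or parallel SCSI.  If the IOC reports RAID capability, the
 * controller is also registered with bio(4).
 */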
118 void
119 mpt_scsipi_attach(mpt_softc_t *mpt)
120 {
121 struct scsipi_adapter *adapt = &mpt->sc_adapter;
122 struct scsipi_channel *chan = &mpt->sc_channel;
123 int maxq;
124
125 mpt->bus = 0; /* XXX ?? */
126
127 maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
128 mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
129
130 /* Fill in the scsipi_adapter. */
131 memset(adapt, 0, sizeof(*adapt));
132 adapt->adapt_dev = mpt->sc_dev;
133 adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use */
135 adapt->adapt_max_periph = maxq - 2;
136 adapt->adapt_request = mpt_scsipi_request;
137 adapt->adapt_minphys = mpt_minphys;
138 adapt->adapt_ioctl = mpt_ioctl;
139
140 /* Fill in the scsipi_channel. */
141 memset(chan, 0, sizeof(*chan));
142 chan->chan_adapter = adapt;
143 if (mpt->is_sas) {
144 chan->chan_bustype = &scsi_sas_bustype;
145 } else if (mpt->is_fc) {
146 chan->chan_bustype = &scsi_fc_bustype;
147 } else {
148 chan->chan_bustype = &scsi_bustype;
149 }
150 chan->chan_channel = 0;
151 chan->chan_flags = 0;
152 chan->chan_nluns = 8;
153 chan->chan_ntargets = mpt->mpt_max_devices;
154 chan->chan_id = mpt->mpt_ini_id;
155
	/*
	 * Save the device returned by config_found() so we can rescan
	 * the bus in case of errors.
	 */
160 mpt->sc_scsibus_dv = config_found(mpt->sc_dev, &mpt->sc_channel,
161 scsiprint);
162
163 #if NBIO > 0
164 if (mpt_is_raid(mpt)) {
165 if (bio_register(mpt->sc_dev, mpt_bio_ioctl) != 0)
166 panic("%s: controller registration failed",
167 device_xname(mpt->sc_dev));
168 }
169 #endif
170 }
171
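/*
 * Allocate the request pool and the DMA-able reply and request areas.
 * Each request is assigned an MPT_REQUEST_AREA-sized slice of the request
 * area; the last MPT_SENSE_SIZE bytes of each slice serve as that request's
 * sense buffer, and a per-request DMA map is created for data transfers.
 */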
172 int
173 mpt_dma_mem_alloc(mpt_softc_t *mpt)
174 {
175 bus_dma_segment_t reply_seg, request_seg;
176 int reply_rseg, request_rseg;
177 bus_addr_t pptr, end;
178 char *vptr;
179 size_t len;
180 int error, i;
181
182 /* Check if we have already allocated the reply memory. */
183 if (mpt->reply != NULL)
184 return (0);
185
186 /*
187 * Allocate the request pool. This isn't really DMA'd memory,
188 * but it's a convenient place to do it.
189 */
190 len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
191 mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
192 if (mpt->request_pool == NULL) {
193 aprint_error_dev(mpt->sc_dev, "unable to allocate request pool\n");
194 return (ENOMEM);
195 }
196
197 /*
198 * Allocate DMA resources for reply buffers.
199 */
200 error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
201 &reply_seg, 1, &reply_rseg, 0);
202 if (error) {
203 aprint_error_dev(mpt->sc_dev, "unable to allocate reply area, error = %d\n",
204 error);
205 goto fail_0;
206 }
207
208 error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
209 (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
210 if (error) {
211 aprint_error_dev(mpt->sc_dev, "unable to map reply area, error = %d\n",
212 error);
213 goto fail_1;
214 }
215
216 error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
217 0, 0, &mpt->reply_dmap);
218 if (error) {
219 aprint_error_dev(mpt->sc_dev, "unable to create reply DMA map, error = %d\n",
220 error);
221 goto fail_2;
222 }
223
224 error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
225 PAGE_SIZE, NULL, 0);
226 if (error) {
227 aprint_error_dev(mpt->sc_dev, "unable to load reply DMA map, error = %d\n",
228 error);
229 goto fail_3;
230 }
231 mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;
232
233 /*
234 * Allocate DMA resources for request buffers.
235 */
236 error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
237 PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
238 if (error) {
239 aprint_error_dev(mpt->sc_dev, "unable to allocate request area, "
240 "error = %d\n", error);
241 goto fail_4;
242 }
243
244 error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
245 MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
246 if (error) {
247 aprint_error_dev(mpt->sc_dev, "unable to map request area, error = %d\n",
248 error);
249 goto fail_5;
250 }
251
252 error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
253 MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
254 if (error) {
255 aprint_error_dev(mpt->sc_dev, "unable to create request DMA map, "
256 "error = %d\n", error);
257 goto fail_6;
258 }
259
260 error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
261 MPT_REQ_MEM_SIZE(mpt), NULL, 0);
262 if (error) {
263 aprint_error_dev(mpt->sc_dev, "unable to load request DMA map, error = %d\n",
264 error);
265 goto fail_7;
266 }
267 mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;
268
269 pptr = mpt->request_phys;
270 vptr = (void *) mpt->request;
271 end = pptr + MPT_REQ_MEM_SIZE(mpt);
272
273 for (i = 0; pptr < end; i++) {
274 request_t *req = &mpt->request_pool[i];
275 req->index = i;
276
277 /* Store location of Request Data */
278 req->req_pbuf = pptr;
279 req->req_vbuf = vptr;
280
281 pptr += MPT_REQUEST_AREA;
282 vptr += MPT_REQUEST_AREA;
283
284 req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
285 req->sense_vbuf = (vptr - MPT_SENSE_SIZE);
286
287 error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
288 MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
289 if (error) {
290 aprint_error_dev(mpt->sc_dev, "unable to create req %d DMA map, "
291 "error = %d\n", i, error);
292 goto fail_8;
293 }
294 }
295
296 return (0);
297
298 fail_8:
299 for (--i; i >= 0; i--) {
300 request_t *req = &mpt->request_pool[i];
301 if (req->dmap != NULL)
302 bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
303 }
304 bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
305 fail_7:
306 bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
307 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
309 fail_5:
310 bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
311 fail_4:
312 bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
313 fail_3:
314 bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
315 fail_2:
316 bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
317 fail_1:
318 bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
319 fail_0:
320 free(mpt->request_pool, M_DEVBUF);
321
322 mpt->reply = NULL;
323 mpt->request = NULL;
324 mpt->request_pool = NULL;
325
326 return (error);
327 }
328
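/*
 * Interrupt handler: if the IOC indicates a reply is ready, drain the
 * reply queue.  Returns non-zero if any replies were processed.
 */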
329 int
330 mpt_intr(void *arg)
331 {
332 mpt_softc_t *mpt = arg;
333 int nrepl = 0;
334
335 if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
336 return (0);
337
338 nrepl = mpt_drain_queue(mpt);
339 return (nrepl != 0);
340 }
341
342 void
343 mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
344 {
345 va_list ap;
346
347 printf("%s: ", device_xname(mpt->sc_dev));
348 va_start(ap, fmt);
349 vprintf(fmt, ap);
350 va_end(ap);
351 printf("\n");
352 }
353
354 static int
355 mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
356 {
357
358 /* Timeouts are in msec, so we loop in 1000usec cycles */
359 while (count) {
360 mpt_intr(mpt);
361 if (xs->xs_status & XS_STS_DONE)
362 return (0);
363 delay(1000); /* only happens in boot, so ok */
364 count--;
365 }
366 return (1);
367 }
368
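/*
 * Command timeout handler.  First try to recover by draining the reply
 * queue, in case the IOC completed the command but failed to interrupt us;
 * if the request still has not completed, report the timeout and restart
 * the IOC.
 */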
369 static void
370 mpt_timeout(void *arg)
371 {
372 request_t *req = arg;
373 struct scsipi_xfer *xs;
374 struct scsipi_periph *periph;
375 mpt_softc_t *mpt;
376 uint32_t oseq;
377 int s, nrepl = 0;
378
379 if (req->xfer == NULL) {
		printf("mpt_timeout: NULL xfer for request index 0x%x, "
		    "sequence 0x%x\n", req->index, req->sequence);
382 return;
383 }
384 xs = req->xfer;
385 periph = xs->xs_periph;
386 mpt = device_private(periph->periph_channel->chan_adapter->adapt_dev);
387 scsipi_printaddr(periph);
388 printf("command timeout\n");
389
390 s = splbio();
391
392 oseq = req->sequence;
393 mpt->timeouts++;
394 if (mpt_intr(mpt)) {
395 if (req->sequence != oseq) {
396 mpt->success++;
397 mpt_prt(mpt, "recovered from command timeout");
398 splx(s);
399 return;
400 }
401 }
402
403 /*
404 * Ensure the IOC is really done giving us data since it appears it can
405 * sometimes fail to give us interrupts under heavy load.
406 */
407 nrepl = mpt_drain_queue(mpt);
	if (nrepl) {
		mpt_prt(mpt, "mpt_timeout: recovered %d commands", nrepl);
410 }
411
412 if (req->sequence != oseq) {
413 mpt->success++;
414 splx(s);
415 return;
416 }
417
418 mpt_prt(mpt,
419 "timeout on request index = 0x%x, seq = 0x%08x",
420 req->index, req->sequence);
421 mpt_check_doorbell(mpt);
422 mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
423 mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
424 mpt_read(mpt, MPT_OFFSET_INTR_MASK),
425 mpt_read(mpt, MPT_OFFSET_DOORBELL));
426 mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
427 if (mpt->verbose > 1)
428 mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);
429
430 xs->error = XS_TIMEOUT;
431 splx(s);
432 mpt_restart(mpt, req);
433 }
434
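/*
 * Recover from a fatal IOC error: soft-reset the IOC, return all pending
 * requests to scsipi for requeueing, re-initialize the IOC, and then thaw
 * the channel so queued commands are reissued.
 */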
435 static void
436 mpt_restart(mpt_softc_t *mpt, request_t *req0)
437 {
438 int i, s, nreq;
439 request_t *req;
440 struct scsipi_xfer *xs;
441
	/* First, reset the IOC, leaving it stopped so all requests are idle. */
443 if (mpt_soft_reset(mpt) != MPT_OK) {
444 mpt_prt(mpt, "soft reset failed");
445 /*
446 * Don't try a hard reset since this mangles the PCI
447 * configuration registers.
448 */
449 return;
450 }
451
452 /* Freeze the channel so scsipi doesn't queue more commands. */
453 scsipi_channel_freeze(&mpt->sc_channel, 1);
454
455 /* Return all pending requests to scsipi and de-allocate them. */
456 s = splbio();
457 nreq = 0;
458 for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
459 req = &mpt->request_pool[i];
460 xs = req->xfer;
461 if (xs != NULL) {
462 if (xs->datalen != 0)
463 bus_dmamap_unload(mpt->sc_dmat, req->dmap);
464 req->xfer = NULL;
465 callout_stop(&xs->xs_callout);
466 if (req != req0) {
467 nreq++;
468 xs->error = XS_REQUEUE;
469 }
470 scsipi_done(xs);
			/*
			 * mpt_init() below will free all requests anyway,
			 * but free this one now to keep the pool consistent.
			 */
475 mpt_free_request(mpt, req);
476 }
477 }
478 splx(s);
479 if (nreq > 0)
480 mpt_prt(mpt, "re-queued %d requests", nreq);
481
482 /* Re-initialize the IOC (which restarts it). */
483 if (mpt_init(mpt, MPT_DB_INIT_HOST) == 0)
484 mpt_prt(mpt, "restart succeeded");
485 /* else error message already printed */
486
487 /* Thaw the channel, causing scsipi to re-queue the commands. */
488 scsipi_channel_thaw(&mpt->sc_channel, 1);
489 }
490
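/*
 * Pop and process all pending replies from the IOC's reply queue.
 * Returns the number of replies handled.
 */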
491 static int
492 mpt_drain_queue(mpt_softc_t *mpt)
493 {
494 int nrepl = 0;
495 uint32_t reply;
496
497 reply = mpt_pop_reply_queue(mpt);
498 while (reply != MPT_REPLY_EMPTY) {
499 nrepl++;
500 if (mpt->verbose > 1) {
501 if ((reply & MPT_CONTEXT_REPLY) != 0) {
502 /* Address reply; IOC has something to say */
503 mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
504 } else {
505 /* Context reply; all went well */
506 mpt_prt(mpt, "context %u reply OK", reply);
507 }
508 }
509 mpt_done(mpt, reply);
510 reply = mpt_pop_reply_queue(mpt);
511 }
512 return (nrepl);
513 }
514
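/*
 * Process a single reply from the IOC: locate the originating request,
 * translate the IOC and SCSI status into scsipi error codes, copy autosense
 * data if present, and complete the scsipi_xfer.  Context replies indicate
 * success; address replies carry error or control information.
 */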
515 static void
516 mpt_done(mpt_softc_t *mpt, uint32_t reply)
517 {
518 struct scsipi_xfer *xs = NULL;
519 struct scsipi_periph *periph;
520 int index;
521 request_t *req;
522 MSG_REQUEST_HEADER *mpt_req;
523 MSG_SCSI_IO_REPLY *mpt_reply;
	int restart = 0;	/* nonzero if we need to restart the IOC */
525
526 if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
527 /* context reply (ok) */
528 mpt_reply = NULL;
529 index = reply & MPT_CONTEXT_MASK;
530 } else {
531 /* address reply (error) */
532
533 /* XXX BUS_DMASYNC_POSTREAD XXX */
534 mpt_reply = MPT_REPLY_PTOV(mpt, reply);
535 if (mpt_reply != NULL) {
536 if (mpt->verbose > 1) {
537 uint32_t *pReply = (uint32_t *) mpt_reply;
538
539 mpt_prt(mpt, "Address Reply (index %u):",
540 le32toh(mpt_reply->MsgContext) & 0xffff);
541 mpt_prt(mpt, "%08x %08x %08x %08x", pReply[0],
542 pReply[1], pReply[2], pReply[3]);
543 mpt_prt(mpt, "%08x %08x %08x %08x", pReply[4],
544 pReply[5], pReply[6], pReply[7]);
545 mpt_prt(mpt, "%08x %08x %08x %08x", pReply[8],
546 pReply[9], pReply[10], pReply[11]);
547 }
548 index = le32toh(mpt_reply->MsgContext);
549 } else
550 index = reply & MPT_CONTEXT_MASK;
551 }
552
553 /*
554 * Address reply with MessageContext high bit set.
555 * This is most likely a notify message, so we try
556 * to process it, then free it.
557 */
558 if (__predict_false((index & 0x80000000) != 0)) {
559 if (mpt_reply != NULL)
560 mpt_ctlop(mpt, mpt_reply, reply);
561 else
562 mpt_prt(mpt, "%s: index 0x%x, NULL reply", __func__,
563 index);
564 return;
565 }
566
567 /* Did we end up with a valid index into the table? */
568 if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
569 mpt_prt(mpt, "%s: invalid index (0x%x) in reply", __func__,
570 index);
571 return;
572 }
573
574 req = &mpt->request_pool[index];
575
576 /* Make sure memory hasn't been trashed. */
577 if (__predict_false(req->index != index)) {
578 mpt_prt(mpt, "%s: corrupted request_t (0x%x)", __func__,
579 index);
580 return;
581 }
582
583 MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
584 mpt_req = req->req_vbuf;
585
586 /* Short cut for task management replies; nothing more for us to do. */
587 if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
588 if (mpt->verbose > 1)
589 mpt_prt(mpt, "%s: TASK MGMT", __func__);
590 KASSERT(req == mpt->mngt_req);
591 mpt->mngt_req = NULL;
592 goto done;
593 }
594
595 if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
596 goto done;
597
598 /*
599 * At this point, it had better be a SCSI I/O command, but don't
600 * crash if it isn't.
601 */
602 if (__predict_false(mpt_req->Function !=
603 MPI_FUNCTION_SCSI_IO_REQUEST)) {
604 if (mpt->verbose > 1)
605 mpt_prt(mpt, "%s: unknown Function 0x%x (0x%x)",
606 __func__, mpt_req->Function, index);
607 goto done;
608 }
609
610 /* Recover scsipi_xfer from the request structure. */
611 xs = req->xfer;
612
613 /* Can't have a SCSI command without a scsipi_xfer. */
614 if (__predict_false(xs == NULL)) {
615 mpt_prt(mpt,
616 "%s: no scsipi_xfer, index = 0x%x, seq = 0x%08x", __func__,
617 req->index, req->sequence);
618 mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
619 mpt_prt(mpt, "mpt_request:");
620 mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);
621
622 if (mpt_reply != NULL) {
623 mpt_prt(mpt, "mpt_reply:");
624 mpt_print_reply(mpt_reply);
625 } else {
626 mpt_prt(mpt, "context reply: 0x%08x", reply);
627 }
628 goto done;
629 }
630
631 callout_stop(&xs->xs_callout);
632
633 periph = xs->xs_periph;
634
635 /*
636 * If we were a data transfer, unload the map that described
637 * the data buffer.
638 */
639 if (__predict_true(xs->datalen != 0)) {
640 bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
641 req->dmap->dm_mapsize,
642 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
643 : BUS_DMASYNC_POSTWRITE);
644 bus_dmamap_unload(mpt->sc_dmat, req->dmap);
645 }
646
647 if (__predict_true(mpt_reply == NULL)) {
648 /*
649 * Context reply; report that the command was
650 * successful!
651 *
652 * Also report the xfer mode, if necessary.
653 */
654 if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
655 if ((mpt->mpt_report_xfer_mode &
656 (1 << periph->periph_target)) != 0)
657 mpt_get_xfer_mode(mpt, periph);
658 }
659 xs->error = XS_NOERROR;
660 xs->status = SCSI_OK;
661 xs->resid = 0;
662 mpt_free_request(mpt, req);
663 scsipi_done(xs);
664 return;
665 }
666
667 xs->status = mpt_reply->SCSIStatus;
668 switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) {
669 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
670 xs->error = XS_DRIVER_STUFFUP;
671 mpt_prt(mpt, "%s: IOC overrun!", __func__);
672 break;
673
674 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
675 /*
676 * Yikes! Tagged queue full comes through this path!
677 *
678 * So we'll change it to a status error and anything
679 * that returns status should probably be a status
680 * error as well.
681 */
682 xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
683 if (mpt_reply->SCSIState &
684 MPI_SCSI_STATE_NO_SCSI_STATUS) {
685 xs->error = XS_DRIVER_STUFFUP;
686 break;
687 }
688 /* FALLTHROUGH */
689 case MPI_IOCSTATUS_SUCCESS:
690 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
691 switch (xs->status) {
692 case SCSI_OK:
693 /* Report the xfer mode, if necessary. */
694 if ((mpt->mpt_report_xfer_mode &
695 (1 << periph->periph_target)) != 0)
696 mpt_get_xfer_mode(mpt, periph);
697 xs->resid = 0;
698 break;
699
700 case SCSI_CHECK:
701 xs->error = XS_SENSE;
702 break;
703
704 case SCSI_BUSY:
705 case SCSI_QUEUE_FULL:
706 xs->error = XS_BUSY;
707 break;
708
709 default:
710 scsipi_printaddr(periph);
711 printf("invalid status code %d\n", xs->status);
712 xs->error = XS_DRIVER_STUFFUP;
713 break;
714 }
715 break;
716
717 case MPI_IOCSTATUS_BUSY:
718 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
719 xs->error = XS_RESOURCE_SHORTAGE;
720 break;
721
722 case MPI_IOCSTATUS_SCSI_INVALID_BUS:
723 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
724 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
725 xs->error = XS_SELTIMEOUT;
726 break;
727
728 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
729 xs->error = XS_DRIVER_STUFFUP;
730 mpt_prt(mpt, "%s: IOC SCSI residual mismatch!", __func__);
731 restart = 1;
732 break;
733
734 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
735 /* XXX What should we do here? */
736 mpt_prt(mpt, "%s: IOC SCSI task terminated!", __func__);
737 restart = 1;
738 break;
739
740 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
741 /* XXX */
742 xs->error = XS_DRIVER_STUFFUP;
743 mpt_prt(mpt, "%s: IOC SCSI task failed!", __func__);
744 restart = 1;
745 break;
746
747 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
748 /* XXX */
749 xs->error = XS_DRIVER_STUFFUP;
750 mpt_prt(mpt, "%s: IOC task terminated!", __func__);
751 restart = 1;
752 break;
753
754 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
755 /* XXX This is a bus-reset */
756 xs->error = XS_DRIVER_STUFFUP;
757 mpt_prt(mpt, "%s: IOC SCSI bus reset!", __func__);
758 restart = 1;
759 break;
760
761 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
762 /*
763 * FreeBSD and Linux indicate this is a phase error between
764 * the IOC and the drive itself. When this happens, the IOC
765 * becomes unhappy and stops processing all transactions.
766 * Call mpt_timeout which knows how to get the IOC back
767 * on its feet.
768 */
769 mpt_prt(mpt, "%s: IOC indicates protocol error -- "
770 "recovering...", __func__);
771 xs->error = XS_TIMEOUT;
772 restart = 1;
773
774 break;
775
776 default:
777 /* XXX unrecognized HBA error */
778 xs->error = XS_DRIVER_STUFFUP;
779 mpt_prt(mpt, "%s: IOC returned unknown code: 0x%x", __func__,
780 le16toh(mpt_reply->IOCStatus));
781 restart = 1;
782 break;
783 }
784
785 if (mpt_reply != NULL) {
786 if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
787 memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
788 sizeof(xs->sense.scsi_sense));
789 } else if (mpt_reply->SCSIState &
790 MPI_SCSI_STATE_AUTOSENSE_FAILED) {
791 /*
792 * This will cause the scsipi layer to issue
793 * a REQUEST SENSE.
794 */
795 if (xs->status == SCSI_CHECK)
796 xs->error = XS_BUSY;
797 }
798 }
799
800 done:
801 if (mpt_reply != NULL && le16toh(mpt_reply->IOCStatus) &
802 MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		mpt_prt(mpt, "%s: IOC has error - logging...", __func__);
804 mpt_ctlop(mpt, mpt_reply, reply);
805 }
806
807 /* If IOC done with this request, free it up. */
808 if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
809 mpt_free_request(mpt, req);
810
811 /* If address reply, give the buffer back to the IOC. */
812 if (mpt_reply != NULL)
813 mpt_free_reply(mpt, (reply << 1));
814
815 if (xs != NULL)
816 scsipi_done(xs);
817
818 if (restart) {
819 mpt_prt(mpt, "%s: IOC fatal error: restarting...", __func__);
820 mpt_restart(mpt, NULL);
821 }
822 }
823
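/*
 * Build a SCSI I/O request for the IOC from a scsipi_xfer: fill in the
 * request header, load and sync the data DMA map, construct the SG list
 * (chaining when more than MPT_NSGL_FIRST segments are needed), and hand
 * the request to the IOC, polling for completion if interrupts cannot be
 * used.
 */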
824 static void
825 mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
826 {
827 struct scsipi_periph *periph = xs->xs_periph;
828 request_t *req;
829 MSG_SCSI_IO_REQUEST *mpt_req;
830 int error, s;
831
832 s = splbio();
833 req = mpt_get_request(mpt);
834 if (__predict_false(req == NULL)) {
835 /* This should happen very infrequently. */
836 xs->error = XS_RESOURCE_SHORTAGE;
837 scsipi_done(xs);
838 splx(s);
839 return;
840 }
841 splx(s);
842
843 /* Link the req and the scsipi_xfer. */
844 req->xfer = xs;
845
846 /* Now we build the command for the IOC */
847 mpt_req = req->req_vbuf;
848 memset(mpt_req, 0, sizeof(*mpt_req));
849
850 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
851 mpt_req->Bus = mpt->bus;
852
853 mpt_req->SenseBufferLength =
854 (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
855 sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;
856
857 /*
858 * We use the message context to find the request structure when
859 * we get the command completion interrupt from the IOC.
860 */
861 mpt_req->MsgContext = htole32(req->index);
862
863 /* Which physical device to do the I/O on. */
864 mpt_req->TargetID = periph->periph_target;
865 mpt_req->LUN[1] = periph->periph_lun;
866
867 /* Set the direction of the transfer. */
868 if (xs->xs_control & XS_CTL_DATA_IN)
869 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
870 else if (xs->xs_control & XS_CTL_DATA_OUT)
871 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
872 else
873 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
874
875 /* Set the queue behavior. */
876 if (__predict_true((!mpt->is_scsi) ||
877 (mpt->mpt_tag_enable &
878 (1 << periph->periph_target)))) {
879 switch (XS_CTL_TAGTYPE(xs)) {
880 case XS_CTL_HEAD_TAG:
881 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
882 break;
883
884 #if 0 /* XXX */
885 case XS_CTL_ACA_TAG:
886 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
887 break;
888 #endif
889
890 case XS_CTL_ORDERED_TAG:
891 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
892 break;
893
894 case XS_CTL_SIMPLE_TAG:
895 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
896 break;
897
898 default:
899 if (mpt->is_scsi)
900 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
901 else
902 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
903 break;
904 }
905 } else
906 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
907
908 if (__predict_false(mpt->is_scsi &&
909 (mpt->mpt_disc_enable &
910 (1 << periph->periph_target)) == 0))
911 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
912
913 mpt_req->Control = htole32(mpt_req->Control);
914
915 /* Copy the SCSI command block into place. */
916 memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);
917
918 mpt_req->CDBLength = xs->cmdlen;
919 mpt_req->DataLength = htole32(xs->datalen);
920 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
921
922 /*
923 * Map the DMA transfer.
924 */
925 if (xs->datalen) {
926 SGE_SIMPLE32 *se;
927
928 error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
929 xs->datalen, NULL,
930 ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
931 : BUS_DMA_WAITOK) |
932 BUS_DMA_STREAMING |
933 ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
934 : BUS_DMA_WRITE));
935 switch (error) {
936 case 0:
937 break;
938
939 case ENOMEM:
940 case EAGAIN:
941 xs->error = XS_RESOURCE_SHORTAGE;
942 goto out_bad;
943
944 default:
945 xs->error = XS_DRIVER_STUFFUP;
946 mpt_prt(mpt, "error %d loading DMA map", error);
947 out_bad:
948 s = splbio();
949 mpt_free_request(mpt, req);
950 scsipi_done(xs);
951 splx(s);
952 return;
953 }
954
955 if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
956 int seg, i, nleft = req->dmap->dm_nsegs;
957 uint32_t flags;
958 SGE_CHAIN32 *ce;
959
960 seg = 0;
961 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
962 if (xs->xs_control & XS_CTL_DATA_OUT)
963 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
964
965 se = (SGE_SIMPLE32 *) &mpt_req->SGL;
966 for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
967 i++, se++, seg++) {
968 uint32_t tf;
969
970 memset(se, 0, sizeof(*se));
971 se->Address =
972 htole32(req->dmap->dm_segs[seg].ds_addr);
973 MPI_pSGE_SET_LENGTH(se,
974 req->dmap->dm_segs[seg].ds_len);
975 tf = flags;
976 if (i == MPT_NSGL_FIRST(mpt) - 2)
977 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
978 MPI_pSGE_SET_FLAGS(se, tf);
979 se->FlagsLength = htole32(se->FlagsLength);
980 nleft--;
981 }
982
983 /*
984 * Tell the IOC where to find the first chain element.
985 */
986 mpt_req->ChainOffset =
987 ((char *)se - (char *)mpt_req) >> 2;
988
989 /*
990 * Until we're finished with all segments...
991 */
992 while (nleft) {
993 int ntodo;
994
995 /*
996 * Construct the chain element that points to
997 * the next segment.
998 */
999 ce = (SGE_CHAIN32 *) se++;
1000 if (nleft > MPT_NSGL(mpt)) {
1001 ntodo = MPT_NSGL(mpt) - 1;
1002 ce->NextChainOffset = (MPT_RQSL(mpt) -
1003 sizeof(SGE_SIMPLE32)) >> 2;
1004 ce->Length = htole16(MPT_NSGL(mpt)
1005 * sizeof(SGE_SIMPLE32));
1006 } else {
1007 ntodo = nleft;
1008 ce->NextChainOffset = 0;
1009 ce->Length = htole16(ntodo
1010 * sizeof(SGE_SIMPLE32));
1011 }
1012 ce->Address = htole32(req->req_pbuf +
1013 ((char *)se - (char *)mpt_req));
1014 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1015 for (i = 0; i < ntodo; i++, se++, seg++) {
1016 uint32_t tf;
1017
1018 memset(se, 0, sizeof(*se));
1019 se->Address = htole32(
1020 req->dmap->dm_segs[seg].ds_addr);
1021 MPI_pSGE_SET_LENGTH(se,
1022 req->dmap->dm_segs[seg].ds_len);
1023 tf = flags;
1024 if (i == ntodo - 1) {
1025 tf |=
1026 MPI_SGE_FLAGS_LAST_ELEMENT;
1027 if (ce->NextChainOffset == 0) {
1028 tf |=
1029 MPI_SGE_FLAGS_END_OF_LIST |
1030 MPI_SGE_FLAGS_END_OF_BUFFER;
1031 }
1032 }
1033 MPI_pSGE_SET_FLAGS(se, tf);
1034 se->FlagsLength =
1035 htole32(se->FlagsLength);
1036 nleft--;
1037 }
1038 }
1039 bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
1040 req->dmap->dm_mapsize,
1041 (xs->xs_control & XS_CTL_DATA_IN) ?
1042 BUS_DMASYNC_PREREAD
1043 : BUS_DMASYNC_PREWRITE);
1044 } else {
1045 int i;
1046 uint32_t flags;
1047
1048 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1049 if (xs->xs_control & XS_CTL_DATA_OUT)
1050 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1051
1052 /* Copy the segments into our SG list. */
1053 se = (SGE_SIMPLE32 *) &mpt_req->SGL;
1054 for (i = 0; i < req->dmap->dm_nsegs;
1055 i++, se++) {
1056 uint32_t tf;
1057
1058 memset(se, 0, sizeof(*se));
1059 se->Address =
1060 htole32(req->dmap->dm_segs[i].ds_addr);
1061 MPI_pSGE_SET_LENGTH(se,
1062 req->dmap->dm_segs[i].ds_len);
1063 tf = flags;
1064 if (i == req->dmap->dm_nsegs - 1) {
1065 tf |=
1066 MPI_SGE_FLAGS_LAST_ELEMENT |
1067 MPI_SGE_FLAGS_END_OF_BUFFER |
1068 MPI_SGE_FLAGS_END_OF_LIST;
1069 }
1070 MPI_pSGE_SET_FLAGS(se, tf);
1071 se->FlagsLength = htole32(se->FlagsLength);
1072 }
1073 bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
1074 req->dmap->dm_mapsize,
1075 (xs->xs_control & XS_CTL_DATA_IN) ?
1076 BUS_DMASYNC_PREREAD
1077 : BUS_DMASYNC_PREWRITE);
1078 }
1079 } else {
1080 /*
1081 * No data to transfer; just make a single simple SGL
1082 * with zero length.
1083 */
1084 SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
1085 memset(se, 0, sizeof(*se));
1086 MPI_pSGE_SET_FLAGS(se,
1087 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1088 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1089 se->FlagsLength = htole32(se->FlagsLength);
1090 }
1091
1092 if (mpt->verbose > 1)
1093 mpt_print_scsi_io_request(mpt_req);
1094
1095 if (xs->timeout == 0) {
		mpt_prt(mpt,
		    "mpt_run_xfer: no timeout specified for request: 0x%x",
		    req->index);
1098 xs->timeout = 500;
1099 }
1100
1101 s = splbio();
1102 if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
1103 callout_reset(&xs->xs_callout,
1104 mstohz(xs->timeout), mpt_timeout, req);
1105 mpt_send_cmd(mpt, req);
1106 splx(s);
1107
1108 if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
1109 return;
1110
1111 /*
1112 * If we can't use interrupts, poll on completion.
1113 */
1114 if (mpt_poll(mpt, xs, xs->timeout))
1115 mpt_timeout(req);
1116 }
1117
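/*
 * Handle ADAPTER_REQ_SET_XFER_MODE: record tagged-queueing settings and,
 * for parallel SCSI, update SCSI Device Page 1 with the requested wide and
 * sync parameters.  A flag is set so the negotiated mode is reported after
 * the next successful command completion.
 */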
1118 static void
1119 mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
1120 {
1121 fCONFIG_PAGE_SCSI_DEVICE_1 tmp;
1122
1123 /*
1124 * Always allow disconnect; we don't have a way to disable
1125 * it right now, in any case.
1126 */
1127 mpt->mpt_disc_enable |= (1 << xm->xm_target);
1128
1129 if (xm->xm_mode & PERIPH_CAP_TQING)
1130 mpt->mpt_tag_enable |= (1 << xm->xm_target);
1131 else
1132 mpt->mpt_tag_enable &= ~(1 << xm->xm_target);
1133
1134 if (mpt->is_scsi) {
1135 /*
1136 * SCSI transport settings only make any sense for
1137 * SCSI
1138 */
1139
1140 tmp = mpt->mpt_dev_page1[xm->xm_target];
1141
1142 /*
1143 * Set the wide/narrow parameter for the target.
1144 */
1145 if (xm->xm_mode & PERIPH_CAP_WIDE16)
1146 tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
1147 else
1148 tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
1149
1150 /*
1151 * Set the synchronous parameters for the target.
1152 *
1153 * XXX If we request sync transfers, we just go ahead and
1154 * XXX request the maximum available. We need finer control
1155 * XXX in order to implement Domain Validation.
1156 */
1157 tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
1158 MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
1159 MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
1160 MPI_SCSIDEVPAGE1_RP_IU);
1161 if (xm->xm_mode & PERIPH_CAP_SYNC) {
1162 int factor, offset, np;
1163
1164 factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
1165 offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
1166 np = 0;
1167 if (factor < 0x9) {
1168 /* Ultra320 */
1169 np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
1170 }
1171 if (factor < 0xa) {
1172 /* at least Ultra160 */
1173 np |= MPI_SCSIDEVPAGE1_RP_DT;
1174 }
1175 np |= (factor << 8) | (offset << 16);
1176 tmp.RequestedParameters |= np;
1177 }
1178
1179 host2mpt_config_page_scsi_device_1(&tmp);
1180 if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
1181 mpt_prt(mpt, "unable to write Device Page 1");
1182 return;
1183 }
1184
1185 if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
1186 mpt_prt(mpt, "unable to read back Device Page 1");
1187 return;
1188 }
1189
1190 mpt2host_config_page_scsi_device_1(&tmp);
1191 mpt->mpt_dev_page1[xm->xm_target] = tmp;
1192 if (mpt->verbose > 1) {
1193 mpt_prt(mpt,
1194 "SPI Target %d Page 1: RequestedParameters %x Config %x",
1195 xm->xm_target,
1196 mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
1197 mpt->mpt_dev_page1[xm->xm_target].Configuration);
1198 }
1199 }
1200
1201 /*
1202 * Make a note that we should perform an async callback at the
1203 * end of the next successful command completion to report the
1204 * negotiated transfer mode.
1205 */
1206 mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
1207 }
1208
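/*
 * Read SCSI Device Page 0 for the target and report the negotiated
 * transfer parameters to scsipi via an async event.
 */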
1209 static void
1210 mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
1211 {
1212 fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
1213 struct scsipi_xfer_mode xm;
1214 int period, offset;
1215
1216 tmp = mpt->mpt_dev_page0[periph->periph_target];
1217 host2mpt_config_page_scsi_device_0(&tmp);
1218 if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
1219 mpt_prt(mpt, "unable to read Device Page 0");
1220 return;
1221 }
1222 mpt2host_config_page_scsi_device_0(&tmp);
1223
1224 if (mpt->verbose > 1) {
1225 mpt_prt(mpt,
1226 "SPI Tgt %d Page 0: NParms %x Information %x",
1227 periph->periph_target,
1228 tmp.NegotiatedParameters, tmp.Information);
1229 }
1230
1231 xm.xm_target = periph->periph_target;
1232 xm.xm_mode = 0;
1233
1234 if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
1235 xm.xm_mode |= PERIPH_CAP_WIDE16;
1236
1237 period = (tmp.NegotiatedParameters >> 8) & 0xff;
1238 offset = (tmp.NegotiatedParameters >> 16) & 0xff;
1239 if (offset) {
1240 xm.xm_period = period;
1241 xm.xm_offset = offset;
1242 xm.xm_mode |= PERIPH_CAP_SYNC;
1243 }
1244
1245 /*
1246 * Tagged queueing is all controlled by us; there is no
1247 * other setting to query.
1248 */
1249 if (mpt->mpt_tag_enable & (1 << periph->periph_target))
1250 xm.xm_mode |= PERIPH_CAP_TQING;
1251
1252 /*
1253 * We're going to deliver the async event, so clear the marker.
1254 */
1255 mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);
1256
1257 scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
1258 }
1259
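/*
 * Handle control-path replies (event notifications, event ACKs, port-enable
 * and config replies) that are not associated with a normal SCSI I/O
 * request.
 */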
1260 static void
1261 mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
1262 {
1263 MSG_DEFAULT_REPLY *dmsg = vmsg;
1264
1265 switch (dmsg->Function) {
1266 case MPI_FUNCTION_EVENT_NOTIFICATION:
1267 mpt_event_notify_reply(mpt, vmsg);
1268 mpt_free_reply(mpt, (reply << 1));
1269 break;
1270
1271 case MPI_FUNCTION_EVENT_ACK:
1272 mpt_free_reply(mpt, (reply << 1));
1273 break;
1274
1275 case MPI_FUNCTION_PORT_ENABLE:
1276 {
1277 MSG_PORT_ENABLE_REPLY *msg = vmsg;
1278 int index = le32toh(msg->MsgContext) & ~0x80000000;
1279 if (mpt->verbose > 1)
1280 mpt_prt(mpt, "enable port reply index %d", index);
1281 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
1282 request_t *req = &mpt->request_pool[index];
1283 req->debug = REQ_DONE;
1284 }
1285 mpt_free_reply(mpt, (reply << 1));
1286 break;
1287 }
1288
1289 case MPI_FUNCTION_CONFIG:
1290 {
1291 MSG_CONFIG_REPLY *msg = vmsg;
1292 int index = le32toh(msg->MsgContext) & ~0x80000000;
1293 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
1294 request_t *req = &mpt->request_pool[index];
1295 req->debug = REQ_DONE;
1296 req->sequence = reply;
1297 } else
1298 mpt_free_reply(mpt, (reply << 1));
1299 break;
1300 }
1301
1302 default:
1303 mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
1304 }
1305 }
1306
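/*
 * Decode and log asynchronous event notifications from the IOC, and send
 * an event acknowledgement when the IOC requests one.
 */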
1307 static void
1308 mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
1309 {
1310
1311 switch (le32toh(msg->Event)) {
1312 case MPI_EVENT_LOG_DATA:
1313 {
1314 int i;
1315
		/* Some error occurred that the Fusion wants logged. */
1317 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
1318 mpt_prt(mpt, "EvtLogData: Event Data:");
1319 for (i = 0; i < msg->EventDataLength; i++) {
1320 if ((i % 4) == 0)
1321 printf("%s:\t", device_xname(mpt->sc_dev));
1322 printf("0x%08x%c", msg->Data[i],
1323 ((i % 4) == 3) ? '\n' : ' ');
1324 }
1325 if ((i % 4) != 0)
1326 printf("\n");
1327 break;
1328 }
1329
1330 case MPI_EVENT_UNIT_ATTENTION:
1331 mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
1332 (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
1333 break;
1334
1335 case MPI_EVENT_IOC_BUS_RESET:
1336 /* We generated a bus reset. */
1337 mpt_prt(mpt, "IOC Bus Reset Port %d",
1338 (msg->Data[0] >> 8) & 0xff);
1339 break;
1340
1341 case MPI_EVENT_EXT_BUS_RESET:
1342 /* Someone else generated a bus reset. */
1343 mpt_prt(mpt, "External Bus Reset");
1344 /*
1345 * These replies don't return EventData like the MPI
1346 * spec says they do.
1347 */
1348 /* XXX Send an async event? */
1349 break;
1350
1351 case MPI_EVENT_RESCAN:
1352 /*
		 * In general, this means a device has been added
1354 * to the loop.
1355 */
1356 mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
1357 /* XXX Send an async event? */
1358 break;
1359
1360 case MPI_EVENT_LINK_STATUS_CHANGE:
1361 mpt_prt(mpt, "Port %d: Link state %s",
1362 (msg->Data[1] >> 8) & 0xff,
1363 (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
1364 break;
1365
1366 case MPI_EVENT_LOOP_STATE_CHANGE:
1367 switch ((msg->Data[0] >> 16) & 0xff) {
1368 case 0x01:
1369 mpt_prt(mpt,
1370 "Port %d: FC Link Event: LIP(%02x,%02x) "
1371 "(Loop Initialization)",
1372 (msg->Data[1] >> 8) & 0xff,
1373 (msg->Data[0] >> 8) & 0xff,
1374 (msg->Data[0] ) & 0xff);
1375 switch ((msg->Data[0] >> 8) & 0xff) {
1376 case 0xf7:
1377 if ((msg->Data[0] & 0xff) == 0xf7)
1378 mpt_prt(mpt, "\tDevice needs AL_PA");
1379 else
1380 mpt_prt(mpt, "\tDevice %02x doesn't "
1381 "like FC performance",
1382 msg->Data[0] & 0xff);
1383 break;
1384
1385 case 0xf8:
1386 if ((msg->Data[0] & 0xff) == 0xf7)
1387 mpt_prt(mpt, "\tDevice detected loop "
1388 "failure before acquiring AL_PA");
1389 else
1390 mpt_prt(mpt, "\tDevice %02x detected "
1391 "loop failure",
1392 msg->Data[0] & 0xff);
1393 break;
1394
1395 default:
1396 mpt_prt(mpt, "\tDevice %02x requests that "
1397 "device %02x reset itself",
1398 msg->Data[0] & 0xff,
1399 (msg->Data[0] >> 8) & 0xff);
1400 break;
1401 }
1402 break;
1403
1404 case 0x02:
1405 mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
1406 "(Loop Port Enable)",
1407 (msg->Data[1] >> 8) & 0xff,
1408 (msg->Data[0] >> 8) & 0xff,
1409 (msg->Data[0] ) & 0xff);
1410 break;
1411
1412 case 0x03:
1413 mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
1414 "(Loop Port Bypass)",
1415 (msg->Data[1] >> 8) & 0xff,
1416 (msg->Data[0] >> 8) & 0xff,
1417 (msg->Data[0] ) & 0xff);
1418 break;
1419
1420 default:
1421 mpt_prt(mpt, "Port %d: FC Link Event: "
1422 "Unknown event (%02x %02x %02x)",
1423 (msg->Data[1] >> 8) & 0xff,
1424 (msg->Data[0] >> 16) & 0xff,
1425 (msg->Data[0] >> 8) & 0xff,
1426 (msg->Data[0] ) & 0xff);
1427 break;
1428 }
1429 break;
1430
1431 case MPI_EVENT_LOGOUT:
1432 mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
1433 (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
1434 break;
1435
1436 case MPI_EVENT_EVENT_CHANGE:
1437 /*
1438 * This is just an acknowledgement of our
1439 * mpt_send_event_request().
1440 */
1441 break;
1442
1443 case MPI_EVENT_SAS_PHY_LINK_STATUS:
1444 switch ((msg->Data[0] >> 12) & 0x0f) {
1445 case 0x00:
1446 mpt_prt(mpt, "Phy %d: Link Status Unknown",
1447 msg->Data[0] & 0xff);
1448 break;
1449 case 0x01:
1450 mpt_prt(mpt, "Phy %d: Link Disabled",
1451 msg->Data[0] & 0xff);
1452 break;
1453 case 0x02:
1454 mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
1455 msg->Data[0] & 0xff);
1456 break;
1457 case 0x03:
1458 mpt_prt(mpt, "Phy %d: SATA OOB Complete",
1459 msg->Data[0] & 0xff);
1460 break;
1461 case 0x08:
1462 mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
1463 msg->Data[0] & 0xff);
1464 break;
1465 case 0x09:
1466 mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
1467 msg->Data[0] & 0xff);
1468 break;
1469 default:
1470 mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
1471 "Unknown event (%0x)",
1472 msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
1473 }
1474 break;
1475
1476 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
1477 case MPI_EVENT_SAS_DISCOVERY:
1478 /* ignore these events for now */
1479 break;
1480
1481 case MPI_EVENT_QUEUE_FULL:
1482 /* This can get a little chatty */
1483 if (mpt->verbose > 0)
1484 mpt_prt(mpt, "Queue Full Event");
1485 break;
1486
1487 default:
1488 mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
1489 break;
1490 }
1491
1492 if (msg->AckRequired) {
1493 MSG_EVENT_ACK *ackp;
1494 request_t *req;
1495
1496 if ((req = mpt_get_request(mpt)) == NULL) {
1497 /* XXX XXX XXX XXXJRT */
1498 panic("mpt_event_notify_reply: unable to allocate "
1499 "request structure");
1500 }
1501
1502 ackp = (MSG_EVENT_ACK *) req->req_vbuf;
1503 memset(ackp, 0, sizeof(*ackp));
1504 ackp->Function = MPI_FUNCTION_EVENT_ACK;
1505 ackp->Event = msg->Event;
1506 ackp->EventContext = msg->EventContext;
1507 ackp->MsgContext = htole32(req->index | 0x80000000);
1508 mpt_check_doorbell(mpt);
1509 mpt_send_cmd(mpt, req);
1510 }
1511 }
1512
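/*
 * Issue a SCSI bus reset via a task-management request.  Only one
 * management request may be outstanding at a time; it is sent through the
 * handshake interface.
 */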
1513 static void
1514 mpt_bus_reset(mpt_softc_t *mpt)
1515 {
1516 request_t *req;
1517 MSG_SCSI_TASK_MGMT *mngt_req;
1518 int s;
1519
1520 s = splbio();
1521 if (mpt->mngt_req) {
1522 /* request already queued; can't do more */
1523 splx(s);
1524 return;
1525 }
1526 req = mpt_get_request(mpt);
1527 if (__predict_false(req == NULL)) {
1528 mpt_prt(mpt, "no mngt request\n");
1529 splx(s);
1530 return;
1531 }
1532 mpt->mngt_req = req;
1533 splx(s);
1534 mngt_req = req->req_vbuf;
1535 memset(mngt_req, 0, sizeof(*mngt_req));
1536 mngt_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
1537 mngt_req->Bus = mpt->bus;
1538 mngt_req->TargetID = 0;
1539 mngt_req->ChainOffset = 0;
1540 mngt_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
1541 mngt_req->Reserved1 = 0;
1542 mngt_req->MsgFlags =
1543 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0;
1544 mngt_req->MsgContext = req->index;
1545 mngt_req->TaskMsgContext = 0;
1546 s = splbio();
1547 mpt_send_handshake_cmd(mpt, sizeof(*mngt_req), mngt_req);
1548 splx(s);
1549 }
1550
1551 /*****************************************************************************
1552 * SCSI interface routines
1553 *****************************************************************************/
1554
1555 static void
1556 mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1557 void *arg)
1558 {
1559 struct scsipi_adapter *adapt = chan->chan_adapter;
1560 mpt_softc_t *mpt = device_private(adapt->adapt_dev);
1561
1562 switch (req) {
1563 case ADAPTER_REQ_RUN_XFER:
1564 mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
1565 return;
1566
1567 case ADAPTER_REQ_GROW_RESOURCES:
1568 /* Not supported. */
1569 return;
1570
1571 case ADAPTER_REQ_SET_XFER_MODE:
1572 mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
1573 return;
1574 }
1575 }
1576
1577 static void
1578 mpt_minphys(struct buf *bp)
1579 {
1580
1581 /*
1582 * Subtract one from the SGL limit, since we need an extra one to handle
	 * a non-page-aligned transfer.
1584 */
1585 #define MPT_MAX_XFER ((MPT_SGL_MAX - 1) * PAGE_SIZE)
1586
1587 if (bp->b_bcount > MPT_MAX_XFER)
1588 bp->b_bcount = MPT_MAX_XFER;
1589 minphys(bp);
1590 }
1591
1592 static int
1593 mpt_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
1594 int flag, struct proc *p)
1595 {
1596 mpt_softc_t *mpt;
1597 int s;
1598
1599 mpt = device_private(chan->chan_adapter->adapt_dev);
1600 switch (cmd) {
1601 case SCBUSIORESET:
1602 mpt_bus_reset(mpt);
1603 s = splbio();
1604 mpt_intr(mpt);
1605 splx(s);
1606 return(0);
1607 default:
1608 return (ENOTTY);
1609 }
1610 }
1611
1612 #if NBIO > 0
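/*
 * Helpers for the bio(4) ioctls below: read IOC Page 2, RAID Volume Page 0,
 * and RAID Physical Disk Page 0 into malloc'd buffers, which the caller
 * must free.
 */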
1613 static fCONFIG_PAGE_IOC_2 *
1614 mpt_get_cfg_page_ioc2(mpt_softc_t *mpt)
1615 {
1616 fCONFIG_PAGE_HEADER hdr;
1617 fCONFIG_PAGE_IOC_2 *ioc2;
1618 int rv;
1619
1620 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 2, 0, &hdr);
1621 if (rv)
1622 return NULL;
1623
1624 ioc2 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
1625 if (ioc2 == NULL)
1626 return NULL;
1627
1628 memcpy(ioc2, &hdr, sizeof(hdr));
1629
1630 rv = mpt_read_cfg_page(mpt, 0, &ioc2->Header);
1631 if (rv)
1632 goto fail;
1633 mpt2host_config_page_ioc_2(ioc2);
1634
1635 return ioc2;
1636
1637 fail:
1638 free(ioc2, M_DEVBUF);
1639 return NULL;
1640 }
1641
1642 static fCONFIG_PAGE_RAID_VOL_0 *
1643 mpt_get_cfg_page_raid_vol0(mpt_softc_t *mpt, int address)
1644 {
1645 fCONFIG_PAGE_HEADER hdr;
1646 fCONFIG_PAGE_RAID_VOL_0 *rvol0;
1647 int rv;
1648
1649 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1650 address, &hdr);
1651 if (rv)
1652 return NULL;
1653
1654 rvol0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
1655 if (rvol0 == NULL)
1656 return NULL;
1657
1658 memcpy(rvol0, &hdr, sizeof(hdr));
1659
1660 rv = mpt_read_cfg_page(mpt, address, &rvol0->Header);
1661 if (rv)
1662 goto fail;
1663 mpt2host_config_page_raid_vol_0(rvol0);
1664
1665 return rvol0;
1666
1667 fail:
1668 free(rvol0, M_DEVBUF);
1669 return NULL;
1670 }
1671
1672 static fCONFIG_PAGE_RAID_PHYS_DISK_0 *
1673 mpt_get_cfg_page_raid_phys_disk0(mpt_softc_t *mpt, int address)
1674 {
1675 fCONFIG_PAGE_HEADER hdr;
1676 fCONFIG_PAGE_RAID_PHYS_DISK_0 *physdisk0;
1677 int rv;
1678
1679 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK, 0,
1680 address, &hdr);
1681 if (rv)
1682 return NULL;
1683
1684 physdisk0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
1685 if (physdisk0 == NULL)
1686 return NULL;
1687
1688 memcpy(physdisk0, &hdr, sizeof(hdr));
1689
1690 rv = mpt_read_cfg_page(mpt, address, &physdisk0->Header);
1691 if (rv)
1692 goto fail;
1693 mpt2host_config_page_raid_phys_disk_0(physdisk0);
1694
1695 return physdisk0;
1696
1697 fail:
1698 free(physdisk0, M_DEVBUF);
1699 return NULL;
1700 }
1701
1702 static bool
1703 mpt_is_raid(mpt_softc_t *mpt)
1704 {
1705 fCONFIG_PAGE_IOC_2 *ioc2;
1706 bool is_raid = false;
1707
1708 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1709 if (ioc2 == NULL)
1710 return false;
1711
1712 if (ioc2->CapabilitiesFlags != 0xdeadbeef) {
1713 is_raid = !!(ioc2->CapabilitiesFlags &
1714 (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT|
1715 MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT|
1716 MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT));
1717 }
1718
1719 free(ioc2, M_DEVBUF);
1720
1721 return is_raid;
1722 }
1723
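/*
 * bio(4) ioctl entry point: dispatch BIOCINQ, BIOCVOL, BIOCDISK and
 * BIOCSETSTATE requests at splbio with the kernel lock held.
 */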
1724 static int
1725 mpt_bio_ioctl(device_t dev, u_long cmd, void *addr)
1726 {
1727 mpt_softc_t *mpt = device_private(dev);
1728 int error, s;
1729
1730 KERNEL_LOCK(1, curlwp);
1731 s = splbio();
1732
1733 switch (cmd) {
1734 case BIOCINQ:
1735 error = mpt_bio_ioctl_inq(mpt, addr);
1736 break;
1737 case BIOCVOL:
1738 error = mpt_bio_ioctl_vol(mpt, addr);
1739 break;
1740 case BIOCDISK:
1741 error = mpt_bio_ioctl_disk(mpt, addr);
1742 break;
1743 case BIOCSETSTATE:
1744 error = mpt_bio_ioctl_setstate(mpt, addr);
1745 break;
1746 default:
1747 error = EINVAL;
1748 break;
1749 }
1750
1751 splx(s);
1752 KERNEL_UNLOCK_ONE(curlwp);
1753
1754 return error;
1755 }
1756
1757 static int
1758 mpt_bio_ioctl_inq(mpt_softc_t *mpt, struct bioc_inq *bi)
1759 {
1760 fCONFIG_PAGE_IOC_2 *ioc2;
1761
1762 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1763 if (ioc2 == NULL)
1764 return EIO;
1765
1766 strlcpy(bi->bi_dev, device_xname(mpt->sc_dev), sizeof(bi->bi_dev));
1767 bi->bi_novol = ioc2->NumActiveVolumes;
1768 bi->bi_nodisk = ioc2->NumActivePhysDisks;
1769
1770 free(ioc2, M_DEVBUF);
1771
1772 return 0;
1773 }
1774
1775 static int
1776 mpt_bio_ioctl_vol(mpt_softc_t *mpt, struct bioc_vol *bv)
1777 {
1778 fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
1779 fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
1780 fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
1781 int address;
1782
1783 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1784 if (ioc2 == NULL)
1785 return EIO;
1786
1787 if (bv->bv_volid < 0 || bv->bv_volid >= ioc2->NumActiveVolumes)
1788 goto fail;
1789
1790 ioc2rvol = &ioc2->RaidVolume[bv->bv_volid];
1791 address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);
1792
1793 rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
1794 if (rvol0 == NULL)
1795 goto fail;
1796
1797 strlcpy(bv->bv_dev, device_xname(mpt->sc_dev), sizeof(bv->bv_dev));
1798 /* TODO: bv->bv_vendor */
1799 bv->bv_nodisk = rvol0->NumPhysDisks;
1800 bv->bv_size = (uint64_t)rvol0->MaxLBA * 512;
1801 bv->bv_stripe_size = rvol0->StripeSize;
1802 bv->bv_percent = -1;
1803 bv->bv_seconds = 0;
1804
1805 switch (rvol0->VolumeStatus.State) {
1806 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
1807 bv->bv_status = BIOC_SVONLINE;
1808 break;
1809 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
1810 bv->bv_status = BIOC_SVDEGRADED;
1811 break;
1812 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
1813 bv->bv_status = BIOC_SVOFFLINE;
1814 break;
1815 default:
1816 bv->bv_status = BIOC_SVINVALID;
1817 break;
1818 }
1819
1820 switch (ioc2rvol->VolumeType) {
1821 case MPI_RAID_VOL_TYPE_IS:
1822 bv->bv_level = 0;
1823 break;
1824 case MPI_RAID_VOL_TYPE_IME:
1825 case MPI_RAID_VOL_TYPE_IM:
1826 bv->bv_level = 1;
1827 break;
1828 default:
1829 bv->bv_level = -1;
1830 break;
1831 }
1832
1833 free(ioc2, M_DEVBUF);
1834 free(rvol0, M_DEVBUF);
1835
1836 return 0;
1837
1838 fail:
1839 if (ioc2) free(ioc2, M_DEVBUF);
1840 if (rvol0) free(rvol0, M_DEVBUF);
1841 return EINVAL;
1842 }
1843
1844 static int
1845 mpt_bio_ioctl_disk(mpt_softc_t *mpt, struct bioc_disk *bd)
1846 {
1847 fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
1848 fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
1849 fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
1850 fCONFIG_PAGE_RAID_PHYS_DISK_0 *phys = NULL;
1851 int address;
1852
1853 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1854 if (ioc2 == NULL)
1855 return EIO;
1856
1857 if (bd->bd_volid < 0 || bd->bd_volid >= ioc2->NumActiveVolumes)
1858 goto fail;
1859
1860 ioc2rvol = &ioc2->RaidVolume[bd->bd_volid];
1861 address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);
1862
1863 rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
1864 if (rvol0 == NULL)
1865 goto fail;
1866
1867 if (bd->bd_diskid < 0 || bd->bd_diskid >= rvol0->NumPhysDisks)
1868 goto fail;
1869
1870 address = rvol0->PhysDisk[bd->bd_diskid].PhysDiskNum;
1871
1872 phys = mpt_get_cfg_page_raid_phys_disk0(mpt, address);
1873 if (phys == NULL)
1874 goto fail;
1875
1876 bd->bd_channel = phys->PhysDiskBus;
1877 bd->bd_target = phys->PhysDiskID;
1878 bd->bd_lun = 0;
1879 bd->bd_size = (uint64_t)phys->MaxLBA * 512;
1880 strlcpy(bd->bd_vendor, phys->InquiryData.VendorID,
1881 min(sizeof(bd->bd_vendor), sizeof(phys->InquiryData.VendorID)));
1882 strlcpy(bd->bd_serial, phys->InquiryData.Info, sizeof(bd->bd_serial));
1883 bd->bd_procdev[0] = '\0';
1884
1885 switch (phys->PhysDiskStatus.State) {
1886 case MPI_PHYSDISK0_STATUS_ONLINE:
1887 bd->bd_status = BIOC_SDONLINE;
1888 break;
1889 case MPI_PHYSDISK0_STATUS_MISSING:
1890 case MPI_PHYSDISK0_STATUS_FAILED:
1891 bd->bd_status = BIOC_SDFAILED;
1892 break;
1893 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1894 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1895 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1896 bd->bd_status = BIOC_SDOFFLINE;
1897 break;
1898 case MPI_PHYSDISK0_STATUS_INITIALIZING:
1899 bd->bd_status = BIOC_SDSCRUB;
1900 break;
1901 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1902 default:
1903 bd->bd_status = BIOC_SDINVALID;
1904 break;
1905 }
1906
1907 free(ioc2, M_DEVBUF);
1908 free(phys, M_DEVBUF);
1909
1910 return 0;
1911
1912 fail:
1913 if (ioc2) free(ioc2, M_DEVBUF);
1914 if (phys) free(phys, M_DEVBUF);
1915 return EINVAL;
1916 }
1917
1918 static int
1919 mpt_bio_ioctl_setstate(mpt_softc_t *mpt, struct bioc_setstate *bs)
1920 {
1921 return ENOTTY;
1922 }
1923 #endif