/*	$NetBSD: mfi.c,v 1.71 2022/05/07 14:14:34 msaitoh Exp $ */
/* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
3
4 /*
5 * Copyright (c) 2012 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 /*
29 * Copyright (c) 2006 Marco Peereboom <marco (at) peereboom.us>
30 *
31 * Permission to use, copy, modify, and distribute this software for any
32 * purpose with or without fee is hereby granted, provided that the above
33 * copyright notice and this permission notice appear in all copies.
34 *
35 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
36 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
37 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
38 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
39 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
40 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
41 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 */
43
44 /*-
45 * Redistribution and use in source and binary forms, with or without
46 * modification, are permitted provided that the following conditions
47 * are met:
48 *
49 * Copyright 1994-2009 The FreeBSD Project.
50 * All rights reserved.
51 *
52 * 1. Redistributions of source code must retain the above copyright
53 * notice, this list of conditions and the following disclaimer.
54 * 2. Redistributions in binary form must reproduce the above copyright
55 * notice, this list of conditions and the following disclaimer in the
56 * documentation and/or other materials provided with the distribution.
57 *
58 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT``AS IS'' AND
59 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
60 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
61 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
62 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
63 * EXEMPLARY,OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
64 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
65 * PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY THEORY
66 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
67 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
68 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
69 *
70 * The views and conclusions contained in the software and documentation
71 * are those of the authors and should not be interpreted as representing
72 * official policies,either expressed or implied, of the FreeBSD Project.
73 */
74
75 #include <sys/cdefs.h>
76 __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.71 2022/05/07 14:14:34 msaitoh Exp $");
77
78 #include "bio.h"
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/buf.h>
83 #include <sys/ioctl.h>
84 #include <sys/device.h>
85 #include <sys/kernel.h>
86 #include <sys/malloc.h>
87 #include <sys/proc.h>
88 #include <sys/cpu.h>
89 #include <sys/conf.h>
90 #include <sys/kauth.h>
91
92 #include <uvm/uvm_param.h>
93
94 #include <sys/bus.h>
95
96 #include <dev/scsipi/scsipi_all.h>
97 #include <dev/scsipi/scsi_all.h>
98 #include <dev/scsipi/scsi_spc.h>
99 #include <dev/scsipi/scsipi_disk.h>
100 #include <dev/scsipi/scsi_disk.h>
101 #include <dev/scsipi/scsiconf.h>
102
103 #include <dev/ic/mfireg.h>
104 #include <dev/ic/mfivar.h>
105 #include <dev/ic/mfiio.h>
106
107 #if NBIO > 0
108 #include <dev/biovar.h>
109 #endif /* NBIO > 0 */
110
111 #include "ioconf.h"
112
#ifdef MFI_DEBUG
/*
 * Runtime debug mask: uncomment the MFI_D_* bits below to enable the
 * corresponding DNPRINTF() categories.
 */
uint32_t	mfi_debug = 0
/*		    | MFI_D_CMD */
/*		    | MFI_D_INTR */
/*		    | MFI_D_MISC */
/*		    | MFI_D_DMA */
/*		    | MFI_D_IOCTL */
/*		    | MFI_D_RW */
/*		    | MFI_D_MEM */
/*		    | MFI_D_CCB */
/*		    | MFI_D_SYNC */
		;
#endif
126
127 static void mfi_scsipi_request(struct scsipi_channel *,
128 scsipi_adapter_req_t, void *);
129 static void mfiminphys(struct buf *bp);
130
131 static struct mfi_ccb *mfi_get_ccb(struct mfi_softc *);
132 static void mfi_put_ccb(struct mfi_ccb *);
133 static int mfi_init_ccb(struct mfi_softc *);
134
135 static struct mfi_mem *mfi_allocmem(struct mfi_softc *, size_t);
136 static void mfi_freemem(struct mfi_softc *, struct mfi_mem **);
137
138 static int mfi_transition_firmware(struct mfi_softc *);
139 static int mfi_initialize_firmware(struct mfi_softc *);
140 static int mfi_get_info(struct mfi_softc *);
141 static int mfi_get_bbu(struct mfi_softc *,
142 struct mfi_bbu_status *);
143 /* return codes for mfi_get_bbu */
144 #define MFI_BBU_GOOD 0
145 #define MFI_BBU_BAD 1
146 #define MFI_BBU_UNKNOWN 2
147 static uint32_t mfi_read(struct mfi_softc *, bus_size_t);
148 static void mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
149 static int mfi_poll(struct mfi_ccb *);
150 static int mfi_create_sgl(struct mfi_ccb *, int);
151
152 /* commands */
153 static int mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
154 static int mfi_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *,
155 uint64_t, uint32_t);
156 static void mfi_scsi_ld_done(struct mfi_ccb *);
157 static void mfi_scsi_xs_done(struct mfi_ccb *, int, int);
158 static int mfi_mgmt_internal(struct mfi_softc *, uint32_t,
159 uint32_t, uint32_t, void *, uint8_t *, bool);
160 static int mfi_mgmt(struct mfi_ccb *,struct scsipi_xfer *,
161 uint32_t, uint32_t, uint32_t, void *, uint8_t *);
162 static void mfi_mgmt_done(struct mfi_ccb *);
163
164 #if NBIO > 0
165 static int mfi_ioctl(device_t, u_long, void *);
166 static int mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
167 static int mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
168 static int mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
169 static int mfi_ioctl_alarm(struct mfi_softc *,
170 struct bioc_alarm *);
171 static int mfi_ioctl_blink(struct mfi_softc *sc,
172 struct bioc_blink *);
173 static int mfi_ioctl_setstate(struct mfi_softc *,
174 struct bioc_setstate *);
175 static int mfi_bio_hs(struct mfi_softc *, int, int, void *);
176 static int mfi_create_sensors(struct mfi_softc *);
177 static int mfi_destroy_sensors(struct mfi_softc *);
178 static void mfi_sensor_refresh(struct sysmon_envsys *,
179 envsys_data_t *);
180 #endif /* NBIO > 0 */
181 static bool mfi_shutdown(device_t, int);
182 static bool mfi_suspend(device_t, const pmf_qual_t *);
183 static bool mfi_resume(device_t, const pmf_qual_t *);
184
185 static dev_type_open(mfifopen);
186 static dev_type_close(mfifclose);
187 static dev_type_ioctl(mfifioctl);
188 const struct cdevsw mfi_cdevsw = {
189 .d_open = mfifopen,
190 .d_close = mfifclose,
191 .d_read = noread,
192 .d_write = nowrite,
193 .d_ioctl = mfifioctl,
194 .d_stop = nostop,
195 .d_tty = notty,
196 .d_poll = nopoll,
197 .d_mmap = nommap,
198 .d_kqfilter = nokqfilter,
199 .d_discard = nodiscard,
200 .d_flag = D_OTHER
201 };
202
203 static uint32_t mfi_xscale_fw_state(struct mfi_softc *sc);
204 static void mfi_xscale_intr_ena(struct mfi_softc *sc);
205 static void mfi_xscale_intr_dis(struct mfi_softc *sc);
206 static int mfi_xscale_intr(struct mfi_softc *sc);
207 static void mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
208
209 static const struct mfi_iop_ops mfi_iop_xscale = {
210 mfi_xscale_fw_state,
211 mfi_xscale_intr_dis,
212 mfi_xscale_intr_ena,
213 mfi_xscale_intr,
214 mfi_xscale_post,
215 mfi_scsi_ld_io,
216 };
217
218 static uint32_t mfi_ppc_fw_state(struct mfi_softc *sc);
219 static void mfi_ppc_intr_ena(struct mfi_softc *sc);
220 static void mfi_ppc_intr_dis(struct mfi_softc *sc);
221 static int mfi_ppc_intr(struct mfi_softc *sc);
222 static void mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
223
224 static const struct mfi_iop_ops mfi_iop_ppc = {
225 mfi_ppc_fw_state,
226 mfi_ppc_intr_dis,
227 mfi_ppc_intr_ena,
228 mfi_ppc_intr,
229 mfi_ppc_post,
230 mfi_scsi_ld_io,
231 };
232
233 uint32_t mfi_gen2_fw_state(struct mfi_softc *sc);
234 void mfi_gen2_intr_ena(struct mfi_softc *sc);
235 void mfi_gen2_intr_dis(struct mfi_softc *sc);
236 int mfi_gen2_intr(struct mfi_softc *sc);
237 void mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
238
239 static const struct mfi_iop_ops mfi_iop_gen2 = {
240 mfi_gen2_fw_state,
241 mfi_gen2_intr_dis,
242 mfi_gen2_intr_ena,
243 mfi_gen2_intr,
244 mfi_gen2_post,
245 mfi_scsi_ld_io,
246 };
247
248 u_int32_t mfi_skinny_fw_state(struct mfi_softc *);
249 void mfi_skinny_intr_dis(struct mfi_softc *);
250 void mfi_skinny_intr_ena(struct mfi_softc *);
251 int mfi_skinny_intr(struct mfi_softc *);
252 void mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);
253
254 static const struct mfi_iop_ops mfi_iop_skinny = {
255 mfi_skinny_fw_state,
256 mfi_skinny_intr_dis,
257 mfi_skinny_intr_ena,
258 mfi_skinny_intr,
259 mfi_skinny_post,
260 mfi_scsi_ld_io,
261 };
262
263 static int mfi_tbolt_init_desc_pool(struct mfi_softc *);
264 static int mfi_tbolt_init_MFI_queue(struct mfi_softc *);
265 static void mfi_tbolt_build_mpt_ccb(struct mfi_ccb *);
266 int mfi_tbolt_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *,
267 uint64_t, uint32_t);
268 static void mfi_tbolt_scsi_ld_done(struct mfi_ccb *);
269 static int mfi_tbolt_create_sgl(struct mfi_ccb *, int);
270 void mfi_tbolt_sync_map_info(struct work *, void *);
271 static void mfi_sync_map_complete(struct mfi_ccb *);
272
273 u_int32_t mfi_tbolt_fw_state(struct mfi_softc *);
274 void mfi_tbolt_intr_dis(struct mfi_softc *);
275 void mfi_tbolt_intr_ena(struct mfi_softc *);
276 int mfi_tbolt_intr(struct mfi_softc *sc);
277 void mfi_tbolt_post(struct mfi_softc *, struct mfi_ccb *);
278
279 static const struct mfi_iop_ops mfi_iop_tbolt = {
280 mfi_tbolt_fw_state,
281 mfi_tbolt_intr_dis,
282 mfi_tbolt_intr_ena,
283 mfi_tbolt_intr,
284 mfi_tbolt_post,
285 mfi_tbolt_scsi_ld_io,
286 };
287
288 #define mfi_fw_state(_s) ((_s)->sc_iop->mio_fw_state(_s))
289 #define mfi_intr_enable(_s) ((_s)->sc_iop->mio_intr_ena(_s))
290 #define mfi_intr_disable(_s) ((_s)->sc_iop->mio_intr_dis(_s))
291 #define mfi_my_intr(_s) ((_s)->sc_iop->mio_intr(_s))
292 #define mfi_post(_s, _c) ((_s)->sc_iop->mio_post((_s), (_c)))
293
294 static struct mfi_ccb *
295 mfi_get_ccb(struct mfi_softc *sc)
296 {
297 struct mfi_ccb *ccb;
298 int s;
299
300 s = splbio();
301 ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
302 if (ccb) {
303 TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
304 ccb->ccb_state = MFI_CCB_READY;
305 }
306 splx(s);
307
308 DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
309 if (__predict_false(ccb == NULL && sc->sc_running))
310 aprint_error_dev(sc->sc_dev, "out of ccb\n");
311
312 return ccb;
313 }
314
315 static void
316 mfi_put_ccb(struct mfi_ccb *ccb)
317 {
318 struct mfi_softc *sc = ccb->ccb_sc;
319 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
320 int s;
321
322 DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
323
324 hdr->mfh_cmd_status = 0x0;
325 hdr->mfh_flags = 0x0;
326 ccb->ccb_state = MFI_CCB_FREE;
327 ccb->ccb_xs = NULL;
328 ccb->ccb_flags = 0;
329 ccb->ccb_done = NULL;
330 ccb->ccb_direction = 0;
331 ccb->ccb_frame_size = 0;
332 ccb->ccb_extra_frames = 0;
333 ccb->ccb_sgl = NULL;
334 ccb->ccb_data = NULL;
335 ccb->ccb_len = 0;
336 if (sc->sc_ioptype == MFI_IOP_TBOLT) {
337 /* erase tb_request_desc but preserve SMID */
338 int index = ccb->ccb_tb_request_desc.header.SMID;
339 ccb->ccb_tb_request_desc.words = 0;
340 ccb->ccb_tb_request_desc.header.SMID = index;
341 }
342 s = splbio();
343 TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
344 splx(s);
345 }
346
347 static int
348 mfi_destroy_ccb(struct mfi_softc *sc)
349 {
350 struct mfi_ccb *ccb;
351 uint32_t i;
352
353 DNPRINTF(MFI_D_CCB, "%s: mfi_destroy_ccb\n", DEVNAME(sc));
354
355
356 for (i = 0; (ccb = mfi_get_ccb(sc)) != NULL; i++) {
357 /* create a dma map for transfer */
358 bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap);
359 }
360
361 if (i < sc->sc_max_cmds)
362 return EBUSY;
363
364 free(sc->sc_ccb, M_DEVBUF);
365
366 return 0;
367 }
368
369 static int
370 mfi_init_ccb(struct mfi_softc *sc)
371 {
372 struct mfi_ccb *ccb;
373 uint32_t i;
374 int error;
375 bus_addr_t io_req_base_phys;
376 uint8_t *io_req_base;
377 int offset;
378
379 DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
380
381 sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
382 M_DEVBUF, M_WAITOK|M_ZERO);
383 if (sc->sc_ioptype == MFI_IOP_TBOLT) {
384 /*
385 * The first 256 bytes (SMID 0) is not used.
386 * Don't add to the cmd list.
387 */
388 io_req_base = (uint8_t *)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool) +
389 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
390 io_req_base_phys = MFIMEM_DVA(sc->sc_tbolt_reqmsgpool) +
391 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
392 } else {
393 io_req_base = NULL; /* XXX: gcc */
394 io_req_base_phys = 0; /* XXX: gcc */
395 }
396
397 for (i = 0; i < sc->sc_max_cmds; i++) {
398 ccb = &sc->sc_ccb[i];
399
400 ccb->ccb_sc = sc;
401
402 /* select i'th frame */
403 ccb->ccb_frame = (union mfi_frame *)
404 ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
405 ccb->ccb_pframe =
406 MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
407 ccb->ccb_frame->mfr_header.mfh_context = i;
408
409 /* select i'th sense */
410 ccb->ccb_sense = (struct mfi_sense *)
411 ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
412 ccb->ccb_psense =
413 (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
414
415 /* create a dma map for transfer */
416 error = bus_dmamap_create(sc->sc_datadmat,
417 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
418 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
419 if (error) {
420 aprint_error_dev(sc->sc_dev,
421 "cannot create ccb dmamap (%d)\n", error);
422 goto destroy;
423 }
424 if (sc->sc_ioptype == MFI_IOP_TBOLT) {
425 offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
426 ccb->ccb_tb_io_request =
427 (struct mfi_mpi2_request_raid_scsi_io *)
428 (io_req_base + offset);
429 ccb->ccb_tb_pio_request =
430 io_req_base_phys + offset;
431 offset = MEGASAS_MAX_SZ_CHAIN_FRAME * i;
432 ccb->ccb_tb_sg_frame =
433 (mpi2_sge_io_union *)(sc->sc_reply_pool_limit +
434 offset);
435 ccb->ccb_tb_psg_frame = sc->sc_sg_frame_busaddr +
436 offset;
437 /* SMID 0 is reserved. Set SMID/index from 1 */
438 ccb->ccb_tb_request_desc.header.SMID = i + 1;
439 }
440
441 DNPRINTF(MFI_D_CCB,
442 "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
443 ccb->ccb_frame->mfr_header.mfh_context, ccb,
444 (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
445 (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
446 (u_long)ccb->ccb_dmamap);
447
448 /* add ccb to queue */
449 mfi_put_ccb(ccb);
450 }
451
452 return 0;
453 destroy:
454 /* free dma maps and ccb memory */
455 while (i) {
456 i--;
457 ccb = &sc->sc_ccb[i];
458 bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap);
459 }
460
461 free(sc->sc_ccb, M_DEVBUF);
462
463 return 1;
464 }
465
466 static uint32_t
467 mfi_read(struct mfi_softc *sc, bus_size_t r)
468 {
469 uint32_t rv;
470
471 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
472 BUS_SPACE_BARRIER_READ);
473 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
474
475 DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x08%x ", DEVNAME(sc), (u_long)r, rv);
476 return rv;
477 }
478
479 static void
480 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
481 {
482 DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);
483
484 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
485 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
486 BUS_SPACE_BARRIER_WRITE);
487 }
488
489 static struct mfi_mem *
490 mfi_allocmem(struct mfi_softc *sc, size_t size)
491 {
492 struct mfi_mem *mm;
493 int nsegs;
494
495 DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
496 (long)size);
497
498 mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_WAITOK|M_ZERO);
499 mm->am_size = size;
500
501 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
502 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
503 goto amfree;
504
505 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
506 &nsegs, BUS_DMA_NOWAIT) != 0)
507 goto destroy;
508
509 if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
510 BUS_DMA_NOWAIT) != 0)
511 goto free;
512
513 if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
514 BUS_DMA_NOWAIT) != 0)
515 goto unmap;
516
517 DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n",
518 mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);
519
520 memset(mm->am_kva, 0, size);
521 return mm;
522
523 unmap:
524 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
525 free:
526 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
527 destroy:
528 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
529 amfree:
530 free(mm, M_DEVBUF);
531
532 return NULL;
533 }
534
535 static void
536 mfi_freemem(struct mfi_softc *sc, struct mfi_mem **mmp)
537 {
538 struct mfi_mem *mm = *mmp;
539
540 if (mm == NULL)
541 return;
542
543 *mmp = NULL;
544
545 DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
546
547 bus_dmamap_unload(sc->sc_dmat, mm->am_map);
548 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
549 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
550 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
551 free(mm, M_DEVBUF);
552 }
553
554 static int
555 mfi_transition_firmware(struct mfi_softc *sc)
556 {
557 uint32_t fw_state, cur_state;
558 int max_wait, i;
559
560 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
561
562 DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
563 fw_state);
564
565 while (fw_state != MFI_STATE_READY) {
566 DNPRINTF(MFI_D_MISC,
567 "%s: waiting for firmware to become ready\n",
568 DEVNAME(sc));
569 cur_state = fw_state;
570 switch (fw_state) {
571 case MFI_STATE_FAULT:
572 aprint_error_dev(sc->sc_dev, "firmware fault\n");
573 return 1;
574 case MFI_STATE_WAIT_HANDSHAKE:
575 if (sc->sc_ioptype == MFI_IOP_SKINNY ||
576 sc->sc_ioptype == MFI_IOP_TBOLT)
577 mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_CLEAR_HANDSHAKE);
578 else
579 mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
580 max_wait = 2;
581 break;
582 case MFI_STATE_OPERATIONAL:
583 if (sc->sc_ioptype == MFI_IOP_SKINNY ||
584 sc->sc_ioptype == MFI_IOP_TBOLT)
585 mfi_write(sc, MFI_SKINNY_IDB, MFI_RESET_FLAGS);
586 else
587 mfi_write(sc, MFI_IDB, MFI_INIT_READY);
588 max_wait = 10;
589 break;
590 case MFI_STATE_UNDEFINED:
591 case MFI_STATE_BB_INIT:
592 max_wait = 2;
593 break;
594 case MFI_STATE_FW_INIT:
595 case MFI_STATE_DEVICE_SCAN:
596 case MFI_STATE_FLUSH_CACHE:
597 max_wait = 20;
598 break;
599 case MFI_STATE_BOOT_MESSAGE_PENDING:
600 if (sc->sc_ioptype == MFI_IOP_SKINNY ||
601 sc->sc_ioptype == MFI_IOP_TBOLT) {
602 mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_HOTPLUG);
603 } else {
604 mfi_write(sc, MFI_IDB, MFI_INIT_HOTPLUG);
605 }
606 max_wait = 180;
607 break;
608 default:
609 aprint_error_dev(sc->sc_dev,
610 "unknown firmware state %d\n", fw_state);
611 return 1;
612 }
613 for (i = 0; i < (max_wait * 10); i++) {
614 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
615 if (fw_state == cur_state)
616 DELAY(100000);
617 else
618 break;
619 }
620 if (fw_state == cur_state) {
621 aprint_error_dev(sc->sc_dev,
622 "firmware stuck in state %#x\n", fw_state);
623 return 1;
624 }
625 }
626
627 return 0;
628 }
629
630 static int
631 mfi_initialize_firmware(struct mfi_softc *sc)
632 {
633 struct mfi_ccb *ccb;
634 struct mfi_init_frame *init;
635 struct mfi_init_qinfo *qinfo;
636
637 DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
638
639 if ((ccb = mfi_get_ccb(sc)) == NULL)
640 return 1;
641
642 init = &ccb->ccb_frame->mfr_init;
643 qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
644
645 memset(qinfo, 0, sizeof *qinfo);
646 qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
647 qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
648 offsetof(struct mfi_prod_cons, mpc_reply_q));
649 qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
650 offsetof(struct mfi_prod_cons, mpc_producer));
651 qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
652 offsetof(struct mfi_prod_cons, mpc_consumer));
653
654 init->mif_header.mfh_cmd = MFI_CMD_INIT;
655 init->mif_header.mfh_data_len = sizeof *qinfo;
656 init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);
657
658 DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
659 DEVNAME(sc),
660 qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
661 qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);
662
663 if (mfi_poll(ccb)) {
664 aprint_error_dev(sc->sc_dev,
665 "mfi_initialize_firmware failed\n");
666 return 1;
667 }
668
669 mfi_put_ccb(ccb);
670
671 return 0;
672 }
673
674 static int
675 mfi_get_info(struct mfi_softc *sc)
676 {
677 #ifdef MFI_DEBUG
678 int i;
679 #endif
680 DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
681
682 if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
683 sizeof(sc->sc_info), &sc->sc_info, NULL, cold ? true : false))
684 return 1;
685
686 #ifdef MFI_DEBUG
687
688 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
689 printf("%s: active FW %s Version %s date %s time %s\n",
690 DEVNAME(sc),
691 sc->sc_info.mci_image_component[i].mic_name,
692 sc->sc_info.mci_image_component[i].mic_version,
693 sc->sc_info.mci_image_component[i].mic_build_date,
694 sc->sc_info.mci_image_component[i].mic_build_time);
695 }
696
697 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
698 printf("%s: pending FW %s Version %s date %s time %s\n",
699 DEVNAME(sc),
700 sc->sc_info.mci_pending_image_component[i].mic_name,
701 sc->sc_info.mci_pending_image_component[i].mic_version,
702 sc->sc_info.mci_pending_image_component[i].mic_build_date,
703 sc->sc_info.mci_pending_image_component[i].mic_build_time);
704 }
705
706 printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
707 DEVNAME(sc),
708 sc->sc_info.mci_max_arms,
709 sc->sc_info.mci_max_spans,
710 sc->sc_info.mci_max_arrays,
711 sc->sc_info.mci_max_lds,
712 sc->sc_info.mci_product_name);
713
714 printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
715 DEVNAME(sc),
716 sc->sc_info.mci_serial_number,
717 sc->sc_info.mci_hw_present,
718 sc->sc_info.mci_current_fw_time,
719 sc->sc_info.mci_max_cmds,
720 sc->sc_info.mci_max_sg_elements);
721
722 printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
723 DEVNAME(sc),
724 sc->sc_info.mci_max_request_size,
725 sc->sc_info.mci_lds_present,
726 sc->sc_info.mci_lds_degraded,
727 sc->sc_info.mci_lds_offline,
728 sc->sc_info.mci_pd_present);
729
730 printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
731 DEVNAME(sc),
732 sc->sc_info.mci_pd_disks_present,
733 sc->sc_info.mci_pd_disks_pred_failure,
734 sc->sc_info.mci_pd_disks_failed);
735
736 printf("%s: nvram %d mem %d flash %d\n",
737 DEVNAME(sc),
738 sc->sc_info.mci_nvram_size,
739 sc->sc_info.mci_memory_size,
740 sc->sc_info.mci_flash_size);
741
742 printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
743 DEVNAME(sc),
744 sc->sc_info.mci_ram_correctable_errors,
745 sc->sc_info.mci_ram_uncorrectable_errors,
746 sc->sc_info.mci_cluster_allowed,
747 sc->sc_info.mci_cluster_active);
748
749 printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
750 DEVNAME(sc),
751 sc->sc_info.mci_max_strips_per_io,
752 sc->sc_info.mci_raid_levels,
753 sc->sc_info.mci_adapter_ops,
754 sc->sc_info.mci_ld_ops);
755
756 printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
757 DEVNAME(sc),
758 sc->sc_info.mci_stripe_sz_ops.min,
759 sc->sc_info.mci_stripe_sz_ops.max,
760 sc->sc_info.mci_pd_ops,
761 sc->sc_info.mci_pd_mix_support);
762
763 printf("%s: ecc_bucket %d pckg_prop %s\n",
764 DEVNAME(sc),
765 sc->sc_info.mci_ecc_bucket_count,
766 sc->sc_info.mci_package_version);
767
768 printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
769 DEVNAME(sc),
770 sc->sc_info.mci_properties.mcp_seq_num,
771 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
772 sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
773 sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
774
775 printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
776 DEVNAME(sc),
777 sc->sc_info.mci_properties.mcp_rebuild_rate,
778 sc->sc_info.mci_properties.mcp_patrol_read_rate,
779 sc->sc_info.mci_properties.mcp_bgi_rate,
780 sc->sc_info.mci_properties.mcp_cc_rate);
781
782 printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
783 DEVNAME(sc),
784 sc->sc_info.mci_properties.mcp_recon_rate,
785 sc->sc_info.mci_properties.mcp_cache_flush_interval,
786 sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
787 sc->sc_info.mci_properties.mcp_spinup_delay,
788 sc->sc_info.mci_properties.mcp_cluster_enable);
789
790 printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
791 DEVNAME(sc),
792 sc->sc_info.mci_properties.mcp_coercion_mode,
793 sc->sc_info.mci_properties.mcp_alarm_enable,
794 sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
795 sc->sc_info.mci_properties.mcp_disable_battery_warn,
796 sc->sc_info.mci_properties.mcp_ecc_bucket_size);
797
798 printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
799 DEVNAME(sc),
800 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
801 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
802 sc->sc_info.mci_properties.mcp_expose_encl_devices);
803
804 printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
805 DEVNAME(sc),
806 sc->sc_info.mci_pci.mip_vendor,
807 sc->sc_info.mci_pci.mip_device,
808 sc->sc_info.mci_pci.mip_subvendor,
809 sc->sc_info.mci_pci.mip_subdevice);
810
811 printf("%s: type %#x port_count %d port_addr ",
812 DEVNAME(sc),
813 sc->sc_info.mci_host.mih_type,
814 sc->sc_info.mci_host.mih_port_count);
815
816 for (i = 0; i < 8; i++)
817 printf("%.0" PRIx64 " ", sc->sc_info.mci_host.mih_port_addr[i]);
818 printf("\n");
819
820 printf("%s: type %.x port_count %d port_addr ",
821 DEVNAME(sc),
822 sc->sc_info.mci_device.mid_type,
823 sc->sc_info.mci_device.mid_port_count);
824
825 for (i = 0; i < 8; i++) {
826 printf("%.0" PRIx64 " ",
827 sc->sc_info.mci_device.mid_port_addr[i]);
828 }
829 printf("\n");
830 #endif /* MFI_DEBUG */
831
832 return 0;
833 }
834
835 static int
836 mfi_get_bbu(struct mfi_softc *sc, struct mfi_bbu_status *stat)
837 {
838 DNPRINTF(MFI_D_MISC, "%s: mfi_get_bbu\n", DEVNAME(sc));
839
840 if (mfi_mgmt_internal(sc, MR_DCMD_BBU_GET_STATUS, MFI_DATA_IN,
841 sizeof(*stat), stat, NULL, cold ? true : false))
842 return MFI_BBU_UNKNOWN;
843 #ifdef MFI_DEBUG
844 printf("bbu type %d, voltage %d, current %d, temperature %d, "
845 "status 0x%x\n", stat->battery_type, stat->voltage, stat->current,
846 stat->temperature, stat->fw_status);
847 printf("details: ");
848 switch(stat->battery_type) {
849 case MFI_BBU_TYPE_IBBU:
850 printf("guage %d relative charge %d charger state %d "
851 "charger ctrl %d\n", stat->detail.ibbu.gas_guage_status,
852 stat->detail.ibbu.relative_charge ,
853 stat->detail.ibbu.charger_system_state ,
854 stat->detail.ibbu.charger_system_ctrl);
855 printf("\tcurrent %d abs charge %d max error %d\n",
856 stat->detail.ibbu.charging_current ,
857 stat->detail.ibbu.absolute_charge ,
858 stat->detail.ibbu.max_error);
859 break;
860 case MFI_BBU_TYPE_BBU:
861 printf("guage %d relative charge %d charger state %d\n",
862 stat->detail.ibbu.gas_guage_status,
863 stat->detail.bbu.relative_charge ,
864 stat->detail.bbu.charger_status );
865 printf("\trem capacity %d fyll capacity %d SOH %d\n",
866 stat->detail.bbu.remaining_capacity ,
867 stat->detail.bbu.full_charge_capacity ,
868 stat->detail.bbu.is_SOH_good);
869 break;
870 default:
871 printf("\n");
872 }
873 #endif
874 switch(stat->battery_type) {
875 case MFI_BBU_TYPE_BBU:
876 return (stat->detail.bbu.is_SOH_good ?
877 MFI_BBU_GOOD : MFI_BBU_BAD);
878 case MFI_BBU_TYPE_NONE:
879 return MFI_BBU_UNKNOWN;
880 default:
881 if (stat->fw_status &
882 (MFI_BBU_STATE_PACK_MISSING |
883 MFI_BBU_STATE_VOLTAGE_LOW |
884 MFI_BBU_STATE_TEMPERATURE_HIGH |
885 MFI_BBU_STATE_LEARN_CYC_FAIL |
886 MFI_BBU_STATE_LEARN_CYC_TIMEOUT |
887 MFI_BBU_STATE_I2C_ERR_DETECT))
888 return MFI_BBU_BAD;
889 return MFI_BBU_GOOD;
890 }
891 }
892
893 static void
894 mfiminphys(struct buf *bp)
895 {
896 DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
897
898 /* XXX currently using MFI_MAXFER = MAXPHYS */
899 if (bp->b_bcount > MFI_MAXFER)
900 bp->b_bcount = MFI_MAXFER;
901 minphys(bp);
902 }
903
904 int
905 mfi_rescan(device_t self, const char *ifattr, const int *locators)
906 {
907 struct mfi_softc *sc = device_private(self);
908
909 if (sc->sc_child != NULL)
910 return 0;
911
912 sc->sc_child = config_found(self, &sc->sc_chan, scsiprint, CFARGS_NONE);
913
914 return 0;
915 }
916
917 void
918 mfi_childdetached(device_t self, device_t child)
919 {
920 struct mfi_softc *sc = device_private(self);
921
922 KASSERT(self == sc->sc_dev);
923 KASSERT(child == sc->sc_child);
924
925 if (child == sc->sc_child)
926 sc->sc_child = NULL;
927 }
928
929 int
930 mfi_detach(struct mfi_softc *sc, int flags)
931 {
932 int error;
933
934 DNPRINTF(MFI_D_MISC, "%s: mfi_detach\n", DEVNAME(sc));
935
936 if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
937 return error;
938
939 #if NBIO > 0
940 mfi_destroy_sensors(sc);
941 bio_unregister(sc->sc_dev);
942 #endif /* NBIO > 0 */
943
944 mfi_intr_disable(sc);
945 mfi_shutdown(sc->sc_dev, 0);
946
947 if (sc->sc_ioptype == MFI_IOP_TBOLT) {
948 workqueue_destroy(sc->sc_ldsync_wq);
949 mfi_put_ccb(sc->sc_ldsync_ccb);
950 mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool);
951 mfi_freemem(sc, &sc->sc_tbolt_ioc_init);
952 mfi_freemem(sc, &sc->sc_tbolt_verbuf);
953 }
954
955 if ((error = mfi_destroy_ccb(sc)) != 0)
956 return error;
957
958 mfi_freemem(sc, &sc->sc_sense);
959
960 mfi_freemem(sc, &sc->sc_frames);
961
962 mfi_freemem(sc, &sc->sc_pcq);
963
964 return 0;
965 }
966
967 static bool
968 mfi_shutdown(device_t dev, int how)
969 {
970 struct mfi_softc *sc = device_private(dev);
971 uint8_t mbox[MFI_MBOX_SIZE];
972 int s = splbio();
973 DNPRINTF(MFI_D_MISC, "%s: mfi_shutdown\n", DEVNAME(sc));
974 if (sc->sc_running) {
975 mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
976 if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_CACHE_FLUSH,
977 MFI_DATA_NONE, 0, NULL, mbox, true)) {
978 aprint_error_dev(dev, "shutdown: cache flush failed\n");
979 goto fail;
980 }
981
982 mbox[0] = 0;
983 if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_SHUTDOWN,
984 MFI_DATA_NONE, 0, NULL, mbox, true)) {
985 aprint_error_dev(dev, "shutdown: "
986 "firmware shutdown failed\n");
987 goto fail;
988 }
989 sc->sc_running = false;
990 }
991 splx(s);
992 return true;
993 fail:
994 splx(s);
995 return false;
996 }
997
998 static bool
999 mfi_suspend(device_t dev, const pmf_qual_t *q)
1000 {
1001 /* XXX to be implemented */
1002 return false;
1003 }
1004
1005 static bool
1006 mfi_resume(device_t dev, const pmf_qual_t *q)
1007 {
1008 /* XXX to be implemented */
1009 return false;
1010 }
1011
1012 int
1013 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
1014 {
1015 struct scsipi_adapter *adapt = &sc->sc_adapt;
1016 struct scsipi_channel *chan = &sc->sc_chan;
1017 uint32_t status, frames, max_sgl;
1018 int i;
1019
1020 DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
1021
1022 sc->sc_ioptype = iop;
1023
1024 switch (iop) {
1025 case MFI_IOP_XSCALE:
1026 sc->sc_iop = &mfi_iop_xscale;
1027 break;
1028 case MFI_IOP_PPC:
1029 sc->sc_iop = &mfi_iop_ppc;
1030 break;
1031 case MFI_IOP_GEN2:
1032 sc->sc_iop = &mfi_iop_gen2;
1033 break;
1034 case MFI_IOP_SKINNY:
1035 sc->sc_iop = &mfi_iop_skinny;
1036 break;
1037 case MFI_IOP_TBOLT:
1038 sc->sc_iop = &mfi_iop_tbolt;
1039 break;
1040 default:
1041 panic("%s: unknown iop %d", DEVNAME(sc), iop);
1042 }
1043
1044 if (mfi_transition_firmware(sc))
1045 return 1;
1046
1047 TAILQ_INIT(&sc->sc_ccb_freeq);
1048
1049 status = mfi_fw_state(sc);
1050 sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
1051 max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
1052 if (sc->sc_ioptype == MFI_IOP_TBOLT) {
1053 sc->sc_max_sgl = uimin(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
1054 sc->sc_sgl_size = sizeof(struct mfi_sg_ieee);
1055 } else if (sc->sc_64bit_dma) {
1056 sc->sc_max_sgl = uimin(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
1057 sc->sc_sgl_size = sizeof(struct mfi_sg64);
1058 } else {
1059 sc->sc_max_sgl = max_sgl;
1060 sc->sc_sgl_size = sizeof(struct mfi_sg32);
1061 }
1062 if (sc->sc_ioptype == MFI_IOP_SKINNY)
1063 sc->sc_sgl_size = sizeof(struct mfi_sg_ieee);
1064 DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
1065 DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);
1066
1067 if (sc->sc_ioptype == MFI_IOP_TBOLT) {
1068 uint32_t tb_mem_size;
1069 /* for Alignment */
1070 tb_mem_size = MEGASAS_THUNDERBOLT_MSG_ALIGNMENT;
1071
1072 tb_mem_size +=
1073 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1);
1074 sc->sc_reply_pool_size =
1075 ((sc->sc_max_cmds + 1 + 15) / 16) * 16;
1076 tb_mem_size +=
1077 MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;
1078
1079 /* this is for SGL's */
1080 tb_mem_size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->sc_max_cmds;
1081 sc->sc_tbolt_reqmsgpool = mfi_allocmem(sc, tb_mem_size);
1082 if (sc->sc_tbolt_reqmsgpool == NULL) {
1083 aprint_error_dev(sc->sc_dev,
1084 "unable to allocate thunderbolt "
1085 "request message pool\n");
1086 goto nopcq;
1087 }
1088 if (mfi_tbolt_init_desc_pool(sc)) {
1089 aprint_error_dev(sc->sc_dev,
1090 "Thunderbolt pool preparation error\n");
1091 goto nopcq;
1092 }
1093
1094 /*
1095 * Allocate DMA memory mapping for MPI2 IOC Init descriptor,
1096 * we are taking it diffrent from what we have allocated for
1097 * Request and reply descriptors to avoid confusion later
1098 */
1099 sc->sc_tbolt_ioc_init = mfi_allocmem(sc,
1100 sizeof(struct mpi2_ioc_init_request));
1101 if (sc->sc_tbolt_ioc_init == NULL) {
1102 aprint_error_dev(sc->sc_dev,
1103 "unable to allocate thunderbolt IOC init memory");
1104 goto nopcq;
1105 }
1106
1107 sc->sc_tbolt_verbuf = mfi_allocmem(sc,
1108 MEGASAS_MAX_NAME*sizeof(bus_addr_t));
1109 if (sc->sc_tbolt_verbuf == NULL) {
1110 aprint_error_dev(sc->sc_dev,
1111 "unable to allocate thunderbolt version buffer\n");
1112 goto nopcq;
1113 }
1114
1115 }
1116 /* consumer/producer and reply queue memory */
1117 sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
1118 sizeof(struct mfi_prod_cons));
1119 if (sc->sc_pcq == NULL) {
1120 aprint_error_dev(sc->sc_dev,
1121 "unable to allocate reply queue memory\n");
1122 goto nopcq;
1123 }
1124 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
1125 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
1126 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1127
1128 /* frame memory */
1129 frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) /
1130 MFI_FRAME_SIZE + 1;
1131 sc->sc_frames_size = frames * MFI_FRAME_SIZE;
1132 sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
1133 if (sc->sc_frames == NULL) {
1134 aprint_error_dev(sc->sc_dev,
1135 "unable to allocate frame memory\n");
1136 goto noframe;
1137 }
1138 /* XXX hack, fix this */
1139 if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
1140 aprint_error_dev(sc->sc_dev,
1141 "improper frame alignment (%#llx) FIXME\n",
1142 (long long int)MFIMEM_DVA(sc->sc_frames));
1143 goto noframe;
1144 }
1145
1146 /* sense memory */
1147 sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
1148 if (sc->sc_sense == NULL) {
1149 aprint_error_dev(sc->sc_dev,
1150 "unable to allocate sense memory\n");
1151 goto nosense;
1152 }
1153
1154 /* now that we have all memory bits go initialize ccbs */
1155 if (mfi_init_ccb(sc)) {
1156 aprint_error_dev(sc->sc_dev, "could not init ccb list\n");
1157 goto noinit;
1158 }
1159
1160 /* kickstart firmware with all addresses and pointers */
1161 if (sc->sc_ioptype == MFI_IOP_TBOLT) {
1162 if (mfi_tbolt_init_MFI_queue(sc)) {
1163 aprint_error_dev(sc->sc_dev,
1164 "could not initialize firmware\n");
1165 goto noinit;
1166 }
1167 } else {
1168 if (mfi_initialize_firmware(sc)) {
1169 aprint_error_dev(sc->sc_dev,
1170 "could not initialize firmware\n");
1171 goto noinit;
1172 }
1173 }
1174 sc->sc_running = true;
1175
1176 if (mfi_get_info(sc)) {
1177 aprint_error_dev(sc->sc_dev,
1178 "could not retrieve controller information\n");
1179 goto noinit;
1180 }
1181 aprint_normal_dev(sc->sc_dev,
1182 "%s version %s\n",
1183 sc->sc_info.mci_product_name,
1184 sc->sc_info.mci_package_version);
1185
1186
1187 aprint_normal_dev(sc->sc_dev, "logical drives %d, %dMB RAM, ",
1188 sc->sc_info.mci_lds_present,
1189 sc->sc_info.mci_memory_size);
1190 sc->sc_bbuok = false;
1191 if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) {
1192 struct mfi_bbu_status bbu_stat;
1193 int mfi_bbu_status = mfi_get_bbu(sc, &bbu_stat);
1194 aprint_normal("BBU type ");
1195 switch (bbu_stat.battery_type) {
1196 case MFI_BBU_TYPE_BBU:
1197 aprint_normal("BBU");
1198 break;
1199 case MFI_BBU_TYPE_IBBU:
1200 aprint_normal("IBBU");
1201 break;
1202 default:
1203 aprint_normal("unknown type %d", bbu_stat.battery_type);
1204 }
1205 aprint_normal(", status ");
1206 switch(mfi_bbu_status) {
1207 case MFI_BBU_GOOD:
1208 aprint_normal("good\n");
1209 sc->sc_bbuok = true;
1210 break;
1211 case MFI_BBU_BAD:
1212 aprint_normal("bad\n");
1213 break;
1214 case MFI_BBU_UNKNOWN:
1215 aprint_normal("unknown\n");
1216 break;
1217 default:
1218 panic("mfi_bbu_status");
1219 }
1220 } else {
1221 aprint_normal("BBU not present\n");
1222 }
1223
1224 sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
1225 sc->sc_max_ld = sc->sc_ld_cnt;
1226 for (i = 0; i < sc->sc_ld_cnt; i++)
1227 sc->sc_ld[i].ld_present = 1;
1228
1229 memset(adapt, 0, sizeof(*adapt));
1230 adapt->adapt_dev = sc->sc_dev;
1231 adapt->adapt_nchannels = 1;
1232 /* keep a few commands for management */
1233 if (sc->sc_max_cmds > 4)
1234 adapt->adapt_openings = sc->sc_max_cmds - 4;
1235 else
1236 adapt->adapt_openings = sc->sc_max_cmds;
1237 adapt->adapt_max_periph = adapt->adapt_openings;
1238 adapt->adapt_request = mfi_scsipi_request;
1239 adapt->adapt_minphys = mfiminphys;
1240
1241 memset(chan, 0, sizeof(*chan));
1242 chan->chan_adapter = adapt;
1243 chan->chan_bustype = &scsi_sas_bustype;
1244 chan->chan_channel = 0;
1245 chan->chan_flags = 0;
1246 chan->chan_nluns = 8;
1247 chan->chan_ntargets = MFI_MAX_LD;
1248 chan->chan_id = MFI_MAX_LD;
1249
1250 mfi_rescan(sc->sc_dev, NULL, NULL);
1251
1252 /* enable interrupts */
1253 mfi_intr_enable(sc);
1254
1255 #if NBIO > 0
1256 if (bio_register(sc->sc_dev, mfi_ioctl) != 0)
1257 panic("%s: controller registration failed", DEVNAME(sc));
1258 if (mfi_create_sensors(sc) != 0)
1259 aprint_error_dev(sc->sc_dev, "unable to create sensors\n");
1260 #endif /* NBIO > 0 */
1261 if (!pmf_device_register1(sc->sc_dev, mfi_suspend, mfi_resume,
1262 mfi_shutdown)) {
1263 aprint_error_dev(sc->sc_dev,
1264 "couldn't establish power handler\n");
1265 }
1266
1267 return 0;
1268 noinit:
1269 mfi_freemem(sc, &sc->sc_sense);
1270 nosense:
1271 mfi_freemem(sc, &sc->sc_frames);
1272 noframe:
1273 mfi_freemem(sc, &sc->sc_pcq);
1274 nopcq:
1275 if (sc->sc_ioptype == MFI_IOP_TBOLT) {
1276 if (sc->sc_tbolt_reqmsgpool)
1277 mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool);
1278 if (sc->sc_tbolt_verbuf)
1279 mfi_freemem(sc, &sc->sc_tbolt_verbuf);
1280 }
1281 return 1;
1282 }
1283
/*
 * Run a command to completion by busy-waiting (no interrupts, no
 * sleeping) -- used during autoconfiguration and shutdown.  The caller
 * keeps ownership of the ccb and is responsible for cleanup.
 * Returns 0 on completion, 1 on timeout (ccb flagged MFI_CCB_F_ERR).
 */
static int
mfi_poll(struct mfi_ccb *ccb)
{
	struct mfi_softc *sc = ccb->ccb_sc;
	struct mfi_frame_header *hdr;
	int to = 0;
	int rv = 0;

	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));

	hdr = &ccb->ccb_frame->mfr_header;
	/* 0xff marks "not yet completed"; firmware overwrites it. */
	hdr->mfh_cmd_status = 0xff;
	if (!sc->sc_MFA_enabled)
		hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* no callback, caller is supposed to do the cleanup */
	ccb->ccb_done = NULL;

	mfi_post(sc, ccb);
	if (sc->sc_MFA_enabled) {
		/*
		 * depending on the command type, result may be posted
		 * to *hdr, or not. In addition it seems there's
		 * no way to avoid posting the SMID to the reply queue.
		 * So poll using the interrupt routine.
		 */
		while (ccb->ccb_state != MFI_CCB_DONE) {
			delay(1000);
			if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
				rv = 1;
				break;
			}
			/* drain the Thunderbolt reply queue by hand */
			mfi_tbolt_intrh(sc);
		}
	} else {
		/* re-sync before each status check so we see DMA updates */
		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
		    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
		    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);

		while (hdr->mfh_cmd_status == 0xff) {
			delay(1000);
			if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
				rv = 1;
				break;
			}
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
		}
	}
	/* final sync of the whole frame (status + any returned data) */
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_data != NULL) {
		/* tear down the data DMA mapping set up by mfi_create_sgl() */
		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "timeout on ccb %d\n",
		    hdr->mfh_context);
		ccb->ccb_flags |= MFI_CCB_F_ERR;
		return 1;
	}

	return 0;
}
1358
/*
 * Interrupt handler for the legacy (non-Thunderbolt) reply path.
 * Drains the shared producer/consumer reply ring: each entry holds the
 * context (ccb index) of a completed command.  Returns nonzero when
 * the interrupt was ours.
 */
int
mfi_intr(void *arg)
{
	struct mfi_softc *sc = arg;
	struct mfi_prod_cons *pcq;
	struct mfi_ccb *ccb;
	uint32_t producer, consumer, ctx;
	int claimed = 0;

	/* check/ack via the IOP-specific register access first */
	if (!mfi_my_intr(sc))
		return 0;

	pcq = MFIMEM_KVA(sc->sc_pcq);

	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
	    (u_long)sc, (u_long)pcq);

	/* pull the ring indices the firmware wrote via DMA */
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	producer = pcq->mpc_producer;
	consumer = pcq->mpc_consumer;

	while (consumer != producer) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
		    DEVNAME(sc), producer, consumer);

		/* consume the slot and poison it for the next pass */
		ctx = pcq->mpc_reply_q[consumer];
		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
		if (ctx == MFI_INVALID_CTX)
			aprint_error_dev(sc->sc_dev,
			    "invalid context, p: %d c: %d\n",
			    producer, consumer);
		else {
			/* XXX remove from queue and call scsi_done */
			ccb = &sc->sc_ccb[ctx];
			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
			    DEVNAME(sc), ctx);
			/* make the completed frame visible to the CPU */
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			ccb->ccb_done(ccb);

			claimed = 1;
		}
		/* ring has sc_max_cmds + 1 slots; wrap around */
		consumer++;
		if (consumer == (sc->sc_max_cmds + 1))
			consumer = 0;
	}

	/* publish the new consumer index back to the firmware */
	pcq->mpc_consumer = consumer;
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return claimed;
}
1418
1419 static int
1420 mfi_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint64_t blockno,
1421 uint32_t blockcnt)
1422 {
1423 struct scsipi_periph *periph = xs->xs_periph;
1424 struct mfi_io_frame *io;
1425
1426 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld_io: %d\n",
1427 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
1428 periph->periph_target);
1429
1430 if (!xs->data)
1431 return 1;
1432
1433 io = &ccb->ccb_frame->mfr_io;
1434 if (xs->xs_control & XS_CTL_DATA_IN) {
1435 io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
1436 ccb->ccb_direction = MFI_DATA_IN;
1437 } else {
1438 io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
1439 ccb->ccb_direction = MFI_DATA_OUT;
1440 }
1441 io->mif_header.mfh_target_id = periph->periph_target;
1442 io->mif_header.mfh_timeout = 0;
1443 io->mif_header.mfh_flags = 0;
1444 io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
1445 io->mif_header.mfh_data_len= blockcnt;
1446 io->mif_lba_hi = (blockno >> 32);
1447 io->mif_lba_lo = (blockno & 0xffffffff);
1448 io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
1449 io->mif_sense_addr_hi = 0;
1450
1451 ccb->ccb_done = mfi_scsi_ld_done;
1452 ccb->ccb_xs = xs;
1453 ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
1454 ccb->ccb_sgl = &io->mif_sgl;
1455 ccb->ccb_data = xs->data;
1456 ccb->ccb_len = xs->datalen;
1457
1458 if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
1459 BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1460 return 1;
1461
1462 return 0;
1463 }
1464
1465 static void
1466 mfi_scsi_ld_done(struct mfi_ccb *ccb)
1467 {
1468 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1469 mfi_scsi_xs_done(ccb, hdr->mfh_cmd_status, hdr->mfh_scsi_status);
1470 }
1471
1472 static void
1473 mfi_scsi_xs_done(struct mfi_ccb *ccb, int status, int scsi_status)
1474 {
1475 struct scsipi_xfer *xs = ccb->ccb_xs;
1476 struct mfi_softc *sc = ccb->ccb_sc;
1477
1478 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
1479 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1480
1481 if (xs->data != NULL) {
1482 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
1483 DEVNAME(sc));
1484 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
1485 ccb->ccb_dmamap->dm_mapsize,
1486 (xs->xs_control & XS_CTL_DATA_IN) ?
1487 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1488
1489 bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
1490 }
1491
1492 if (status != MFI_STAT_OK) {
1493 xs->error = XS_DRIVER_STUFFUP;
1494 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
1495 DEVNAME(sc), status);
1496
1497 if (scsi_status != 0) {
1498 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
1499 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
1500 MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
1501 DNPRINTF(MFI_D_INTR,
1502 "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
1503 DEVNAME(sc), scsi_status,
1504 (u_long)&xs->sense, (u_long)ccb->ccb_sense);
1505 memset(&xs->sense, 0, sizeof(xs->sense));
1506 memcpy(&xs->sense, ccb->ccb_sense,
1507 sizeof(struct scsi_sense_data));
1508 xs->error = XS_SENSE;
1509 }
1510 } else {
1511 xs->error = XS_NOERROR;
1512 xs->status = SCSI_OK;
1513 xs->resid = 0;
1514 }
1515
1516 mfi_put_ccb(ccb);
1517 scsipi_done(xs);
1518 }
1519
1520 static int
1521 mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
1522 {
1523 struct mfi_pass_frame *pf;
1524 struct scsipi_periph *periph = xs->xs_periph;
1525
1526 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
1527 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
1528 periph->periph_target);
1529
1530 pf = &ccb->ccb_frame->mfr_pass;
1531 pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
1532 pf->mpf_header.mfh_target_id = periph->periph_target;
1533 pf->mpf_header.mfh_lun_id = 0;
1534 pf->mpf_header.mfh_cdb_len = xs->cmdlen;
1535 pf->mpf_header.mfh_timeout = 0;
1536 pf->mpf_header.mfh_data_len= xs->datalen; /* XXX */
1537 pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
1538
1539 pf->mpf_sense_addr_hi = 0;
1540 pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);
1541
1542 memset(pf->mpf_cdb, 0, 16);
1543 memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);
1544
1545 ccb->ccb_done = mfi_scsi_ld_done;
1546 ccb->ccb_xs = xs;
1547 ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
1548 ccb->ccb_sgl = &pf->mpf_sgl;
1549
1550 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
1551 ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
1552 MFI_DATA_IN : MFI_DATA_OUT;
1553 else
1554 ccb->ccb_direction = MFI_DATA_NONE;
1555
1556 if (xs->data) {
1557 ccb->ccb_data = xs->data;
1558 ccb->ccb_len = xs->datalen;
1559
1560 if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
1561 BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1562 return 1;
1563 }
1564
1565 return 0;
1566 }
1567
/*
 * scsipi adapter entry point: validate the target, translate the
 * SCSI command into an MFI frame (block I/O, cache flush, or generic
 * pass-through) and submit it, either polled (XS_CTL_POLL) or
 * asynchronously.  All command processing runs at splbio.
 */
static void
mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct mfi_softc *sc = device_private(adapt->adapt_dev);
	struct mfi_ccb *ccb;
	struct scsi_rw_6 *rw;
	struct scsipi_rw_10 *rwb;
	struct scsipi_rw_12 *rw12;
	struct scsipi_rw_16 *rw16;
	uint64_t blockno;
	uint32_t blockcnt;
	uint8_t target;
	uint8_t mbox[MFI_MBOX_SIZE];
	int s;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		/* no transfer-mode negotiation; report tagged queueing */
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;

	periph = xs->xs_periph;
	target = periph->periph_target;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x "
	    "target %d lun %d\n", DEVNAME(sc), req, xs->cmd->opcode,
	    periph->periph_target, periph->periph_lun);

	s = splbio();
	/* logical drives appear as LUN 0 of targets [0, MFI_MAX_LD) */
	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
	    periph->periph_lun != 0) {
		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
		    DEVNAME(sc), target);
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		splx(s);
		return;
	}
	if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
	    xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
		/* the cache is stable storage, don't flush */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		scsipi_done(xs);
		splx(s);
		return;
	}

	if ((ccb = mfi_get_ccb(sc)) == NULL) {
		DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}

	switch (xs->cmd->opcode) {
	/* IO path */
	case READ_16:
	case WRITE_16:
		rw16 = (struct scsipi_rw_16 *)xs->cmd;
		blockno = _8btol(rw16->addr);
		blockcnt = _4btol(rw16->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case READ_12:
	case WRITE_12:
		rw12 = (struct scsipi_rw_12 *)xs->cmd;
		blockno = _4btol(rw12->addr);
		blockcnt = _4btol(rw12->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case READ_10:
	case WRITE_10:
		rwb = (struct scsipi_rw_10 *)xs->cmd;
		blockno = _4btol(rwb->addr);
		blockcnt = _2btol(rwb->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case SCSI_READ_6_COMMAND:
	case SCSI_WRITE_6_COMMAND:
		rw = (struct scsi_rw_6 *)xs->cmd;
		/* 6-byte CDB: 21-bit LBA; length 0 means 256 blocks */
		blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		blockcnt = rw->length ? rw->length : 0x100;
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case SCSI_SYNCHRONIZE_CACHE_10:
	case SCSI_SYNCHRONIZE_CACHE_16:
		/* translate to a firmware DCMD cache flush */
		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
		if (mfi_mgmt(ccb, xs,
		    MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, mbox)) {
			goto stuffup;
		}
		break;

	/* hand it of to the firmware and let it deal with it */
	case SCSI_TEST_UNIT_READY:
		/* save off sd? after autoconf */
		if (!cold)	/* XXX bogus */
			strlcpy(sc->sc_ld[target].ld_dev, device_xname(sc->sc_dev),
			    sizeof(sc->sc_ld[target].ld_dev));
		/* FALLTHROUGH */

	default:
		if (mfi_scsi_ld(ccb, xs)) {
			goto stuffup;
		}
		break;
	}

	DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);

	if (xs->xs_control & XS_CTL_POLL) {
		if (mfi_poll(ccb)) {
			/* XXX check for sense in ccb->ccb_sense? */
			aprint_error_dev(sc->sc_dev,
			    "mfi_scsipi_request poll failed\n");
			/* fabricate ILLEGAL REQUEST sense for the timeout */
			memset(&xs->sense, 0, sizeof(xs->sense));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | SSD_RCODE_CURRENT;
			xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
			xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
			xs->error = XS_SENSE;
			xs->status = SCSI_CHECK;
		} else {
			DNPRINTF(MFI_D_DMA,
			    "%s: mfi_scsipi_request poll complete %d\n",
			    DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
			xs->error = XS_NOERROR;
			xs->status = SCSI_OK;
			xs->resid = 0;
		}
		mfi_put_ccb(ccb);
		scsipi_done(xs);
		splx(s);
		return;
	}

	/* asynchronous path: completion comes via interrupt */
	mfi_post(sc, ccb);

	DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
	    ccb->ccb_dmamap->dm_nsegs);

	splx(s);
	return;

stuffup:
	mfi_put_ccb(ccb);
	xs->error = XS_DRIVER_STUFFUP;
	scsipi_done(xs);
	splx(s);
}
1750
1751 static int
1752 mfi_create_sgl(struct mfi_ccb *ccb, int flags)
1753 {
1754 struct mfi_softc *sc = ccb->ccb_sc;
1755 struct mfi_frame_header *hdr;
1756 bus_dma_segment_t *sgd;
1757 union mfi_sgl *sgl;
1758 int error, i;
1759
1760 DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
1761 (u_long)ccb->ccb_data);
1762
1763 if (!ccb->ccb_data)
1764 return 1;
1765
1766 KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p());
1767 error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap,
1768 ccb->ccb_data, ccb->ccb_len, NULL, flags);
1769 if (error) {
1770 if (error == EFBIG) {
1771 aprint_error_dev(sc->sc_dev, "more than %d dma segs\n",
1772 sc->sc_max_sgl);
1773 } else {
1774 aprint_error_dev(sc->sc_dev,
1775 "error %d loading dma map\n", error);
1776 }
1777 return 1;
1778 }
1779
1780 hdr = &ccb->ccb_frame->mfr_header;
1781 sgl = ccb->ccb_sgl;
1782 sgd = ccb->ccb_dmamap->dm_segs;
1783 for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1784 if (((sc->sc_ioptype == MFI_IOP_SKINNY) ||
1785 (sc->sc_ioptype == MFI_IOP_TBOLT)) &&
1786 (hdr->mfh_cmd == MFI_CMD_PD_SCSI_IO ||
1787 hdr->mfh_cmd == MFI_CMD_LD_READ ||
1788 hdr->mfh_cmd == MFI_CMD_LD_WRITE)) {
1789 sgl->sg_ieee[i].addr = htole64(sgd[i].ds_addr);
1790 sgl->sg_ieee[i].len = htole32(sgd[i].ds_len);
1791 sgl->sg_ieee[i].flags = 0;
1792 DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#"
1793 PRIx32 "\n",
1794 DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
1795 hdr->mfh_flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
1796 } else if (sc->sc_64bit_dma) {
1797 sgl->sg64[i].addr = htole64(sgd[i].ds_addr);
1798 sgl->sg64[i].len = htole32(sgd[i].ds_len);
1799 DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#"
1800 PRIx32 "\n",
1801 DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
1802 hdr->mfh_flags |= MFI_FRAME_SGL64;
1803 } else {
1804 sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1805 sgl->sg32[i].len = htole32(sgd[i].ds_len);
1806 DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
1807 DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1808 hdr->mfh_flags |= MFI_FRAME_SGL32;
1809 }
1810 }
1811
1812 if (ccb->ccb_direction == MFI_DATA_IN) {
1813 hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1814 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
1815 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1816 } else {
1817 hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1818 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
1819 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1820 }
1821
1822 hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1823 ccb->ccb_frame_size += sc->sc_sgl_size * ccb->ccb_dmamap->dm_nsegs;
1824 ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1825
1826 DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
1827 " dm_nsegs: %d extra_frames: %d\n",
1828 DEVNAME(sc),
1829 hdr->mfh_sg_count,
1830 ccb->ccb_frame_size,
1831 sc->sc_frames_size,
1832 ccb->ccb_dmamap->dm_nsegs,
1833 ccb->ccb_extra_frames);
1834
1835 return 0;
1836 }
1837
1838 static int
1839 mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t dir,
1840 uint32_t len, void *buf, uint8_t *mbox, bool poll)
1841 {
1842 struct mfi_ccb *ccb;
1843 int rv = 1;
1844
1845 if ((ccb = mfi_get_ccb(sc)) == NULL)
1846 return rv;
1847 rv = mfi_mgmt(ccb, NULL, opc, dir, len, buf, mbox);
1848 if (rv)
1849 return rv;
1850
1851 if (poll) {
1852 rv = 1;
1853 if (mfi_poll(ccb))
1854 goto done;
1855 } else {
1856 mfi_post(sc, ccb);
1857
1858 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt_internal sleeping\n",
1859 DEVNAME(sc));
1860 while (ccb->ccb_state != MFI_CCB_DONE)
1861 tsleep(ccb, PRIBIO, "mfi_mgmt", 0);
1862
1863 if (ccb->ccb_flags & MFI_CCB_F_ERR)
1864 goto done;
1865 }
1866 rv = 0;
1867
1868 done:
1869 mfi_put_ccb(ccb);
1870 return rv;
1871 }
1872
1873 static int
1874 mfi_mgmt(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
1875 uint32_t opc, uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
1876 {
1877 struct mfi_dcmd_frame *dcmd;
1878
1879 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(ccb->ccb_sc), opc);
1880
1881 dcmd = &ccb->ccb_frame->mfr_dcmd;
1882 memset(dcmd->mdf_mbox.b, 0, MFI_MBOX_SIZE);
1883 dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1884 dcmd->mdf_header.mfh_timeout = 0;
1885
1886 dcmd->mdf_opcode = opc;
1887 dcmd->mdf_header.mfh_data_len = 0;
1888 ccb->ccb_direction = dir;
1889 ccb->ccb_xs = xs;
1890 ccb->ccb_done = mfi_mgmt_done;
1891
1892 ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1893
1894 /* handle special opcodes */
1895 if (mbox)
1896 memcpy(dcmd->mdf_mbox.b, mbox, MFI_MBOX_SIZE);
1897
1898 if (dir != MFI_DATA_NONE) {
1899 dcmd->mdf_header.mfh_data_len = len;
1900 ccb->ccb_data = buf;
1901 ccb->ccb_len = len;
1902 ccb->ccb_sgl = &dcmd->mdf_sgl;
1903
1904 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
1905 return 1;
1906 }
1907 return 0;
1908 }
1909
/*
 * Completion callback for DCMD frames: tear down the data DMA
 * mapping, record errors on the ccb, then either finish the scsipi
 * transfer (cache-flush path) or wake the thread sleeping in
 * mfi_mgmt_internal().
 */
static void
mfi_mgmt_done(struct mfi_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct mfi_softc *sc = ccb->ccb_sc;
	struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;

	DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
	    DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);

	if (ccb->ccb_data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (hdr->mfh_cmd_status != MFI_STAT_OK)
		ccb->ccb_flags |= MFI_CCB_F_ERR;

	/* must be set before waking the sleeper, which checks it */
	ccb->ccb_state = MFI_CCB_DONE;
	if (xs) {
		if (hdr->mfh_cmd_status != MFI_STAT_OK) {
			xs->error = XS_DRIVER_STUFFUP;
		} else {
			xs->error = XS_NOERROR;
			xs->status = SCSI_OK;
			xs->resid = 0;
		}
		mfi_put_ccb(ccb);
		scsipi_done(xs);
	} else
		wakeup(ccb);
}
1948
1949 #if NBIO > 0
1950 int
1951 mfi_ioctl(device_t dev, u_long cmd, void *addr)
1952 {
1953 struct mfi_softc *sc = device_private(dev);
1954 int error = 0;
1955 int s;
1956
1957 KERNEL_LOCK(1, curlwp);
1958 s = splbio();
1959
1960 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1961
1962 switch (cmd) {
1963 case BIOCINQ:
1964 DNPRINTF(MFI_D_IOCTL, "inq\n");
1965 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1966 break;
1967
1968 case BIOCVOL:
1969 DNPRINTF(MFI_D_IOCTL, "vol\n");
1970 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1971 break;
1972
1973 case BIOCDISK:
1974 DNPRINTF(MFI_D_IOCTL, "disk\n");
1975 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1976 break;
1977
1978 case BIOCALARM:
1979 DNPRINTF(MFI_D_IOCTL, "alarm\n");
1980 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1981 break;
1982
1983 case BIOCBLINK:
1984 DNPRINTF(MFI_D_IOCTL, "blink\n");
1985 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1986 break;
1987
1988 case BIOCSETSTATE:
1989 DNPRINTF(MFI_D_IOCTL, "setstate\n");
1990 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1991 break;
1992
1993 default:
1994 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1995 error = EINVAL;
1996 }
1997 splx(s);
1998 KERNEL_UNLOCK_ONE(curlwp);
1999
2000 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
2001 return error;
2002 }
2003
2004 static int
2005 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
2006 {
2007 struct mfi_conf *cfg;
2008 int rv = EINVAL;
2009
2010 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
2011
2012 if (mfi_get_info(sc)) {
2013 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
2014 DEVNAME(sc));
2015 return EIO;
2016 }
2017
2018 /* get figures */
2019 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2020 if (mfi_mgmt_internal(sc, MR_DCMD_CONF_GET, MFI_DATA_IN,
2021 sizeof *cfg, cfg, NULL, false))
2022 goto freeme;
2023
2024 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
2025 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
2026 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
2027
2028 rv = 0;
2029 freeme:
2030 free(cfg, M_DEVBUF);
2031 return rv;
2032 }
2033
2034 static int
2035 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
2036 {
2037 int i, per, rv = EINVAL;
2038 uint8_t mbox[MFI_MBOX_SIZE];
2039
2040 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
2041 DEVNAME(sc), bv->bv_volid);
2042
2043 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
2044 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false))
2045 goto done;
2046
2047 i = bv->bv_volid;
2048 mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
2049 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
2050 DEVNAME(sc), mbox[0]);
2051
2052 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
2053 sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox, false))
2054 goto done;
2055
2056 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
2057 /* go do hotspares */
2058 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
2059 goto done;
2060 }
2061
2062 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
2063
2064 switch(sc->sc_ld_list.mll_list[i].mll_state) {
2065 case MFI_LD_OFFLINE:
2066 bv->bv_status = BIOC_SVOFFLINE;
2067 break;
2068
2069 case MFI_LD_PART_DEGRADED:
2070 case MFI_LD_DEGRADED:
2071 bv->bv_status = BIOC_SVDEGRADED;
2072 break;
2073
2074 case MFI_LD_ONLINE:
2075 bv->bv_status = BIOC_SVONLINE;
2076 break;
2077
2078 default:
2079 bv->bv_status = BIOC_SVINVALID;
2080 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
2081 DEVNAME(sc),
2082 sc->sc_ld_list.mll_list[i].mll_state);
2083 }
2084
2085 /* additional status can modify MFI status */
2086 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
2087 case MFI_LD_PROG_CC:
2088 bv->bv_status = BIOC_SVSCRUB;
2089 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
2090 bv->bv_percent = (per * 100) / 0xffff;
2091 bv->bv_seconds =
2092 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
2093 break;
2094
2095 case MFI_LD_PROG_BGI:
2096 bv->bv_status = BIOC_SVSCRUB;
2097 per = (int)sc->sc_ld_details.mld_progress.mlp_bgi.mp_progress;
2098 bv->bv_percent = (per * 100) / 0xffff;
2099 bv->bv_seconds =
2100 sc->sc_ld_details.mld_progress.mlp_bgi.mp_elapsed_seconds;
2101 break;
2102
2103 case MFI_LD_PROG_FGI:
2104 case MFI_LD_PROG_RECONSTRUCT:
2105 /* nothing yet */
2106 break;
2107 }
2108
2109 /*
2110 * The RAID levels are determined per the SNIA DDF spec, this is only
2111 * a subset that is valid for the MFI controller.
2112 */
2113 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
2114 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
2115 MFI_DDF_SRL_SPANNED)
2116 bv->bv_level *= 10;
2117
2118 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
2119 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
2120
2121 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
2122
2123 rv = 0;
2124 done:
2125 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
2126 DEVNAME(sc), rv);
2127 return rv;
2128 }
2129
2130 static int
2131 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
2132 {
2133 struct mfi_conf *cfg;
2134 struct mfi_array *ar;
2135 struct mfi_ld_cfg *ld;
2136 struct mfi_pd_details *pd;
2137 struct scsipi_inquiry_data *inqbuf;
2138 char vend[8+16+4+1];
2139 int i, rv = EINVAL;
2140 int arr, vol, disk;
2141 uint32_t size;
2142 uint8_t mbox[MFI_MBOX_SIZE];
2143
2144 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
2145 DEVNAME(sc), bd->bd_diskid);
2146
2147 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
2148
2149 /* send single element command to retrieve size for full structure */
2150 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2151 if (mfi_mgmt_internal(sc, MR_DCMD_CONF_GET, MFI_DATA_IN,
2152 sizeof *cfg, cfg, NULL, false))
2153 goto freeme;
2154
2155 size = cfg->mfc_size;
2156 free(cfg, M_DEVBUF);
2157
2158 /* memory for read config */
2159 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
2160 if (mfi_mgmt_internal(sc, MR_DCMD_CONF_GET, MFI_DATA_IN,
2161 size, cfg, NULL, false))
2162 goto freeme;
2163
2164 ar = cfg->mfc_array;
2165
2166 /* calculate offset to ld structure */
2167 ld = (struct mfi_ld_cfg *)(
2168 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
2169 cfg->mfc_array_size * cfg->mfc_no_array);
2170
2171 vol = bd->bd_volid;
2172
2173 if (vol >= cfg->mfc_no_ld) {
2174 /* do hotspares */
2175 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
2176 goto freeme;
2177 }
2178
2179 /* find corresponding array for ld */
2180 for (i = 0, arr = 0; i < vol; i++)
2181 arr += ld[i].mlc_parm.mpa_span_depth;
2182
2183 /* offset disk into pd list */
2184 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
2185
2186 /* offset array index into the next spans */
2187 arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
2188
2189 bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
2190 switch (ar[arr].pd[disk].mar_pd_state){
2191 case MFI_PD_UNCONFIG_GOOD:
2192 bd->bd_status = BIOC_SDUNUSED;
2193 break;
2194
2195 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
2196 bd->bd_status = BIOC_SDHOTSPARE;
2197 break;
2198
2199 case MFI_PD_OFFLINE:
2200 bd->bd_status = BIOC_SDOFFLINE;
2201 break;
2202
2203 case MFI_PD_FAILED:
2204 bd->bd_status = BIOC_SDFAILED;
2205 break;
2206
2207 case MFI_PD_REBUILD:
2208 bd->bd_status = BIOC_SDREBUILD;
2209 break;
2210
2211 case MFI_PD_ONLINE:
2212 bd->bd_status = BIOC_SDONLINE;
2213 break;
2214
2215 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
2216 default:
2217 bd->bd_status = BIOC_SDINVALID;
2218 break;
2219
2220 }
2221
2222 /* get the remaining fields */
2223 *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
2224 memset(pd, 0, sizeof(*pd));
2225 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2226 sizeof *pd, pd, mbox, false))
2227 goto freeme;
2228
2229 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
2230
2231 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
2232 bd->bd_channel = pd->mpd_enc_idx;
2233
2234 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
2235 memcpy(vend, inqbuf->vendor, sizeof vend - 1);
2236 vend[sizeof vend - 1] = '\0';
2237 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
2238
2239 /* XXX find a way to retrieve serial nr from drive */
2240 /* XXX find a way to get bd_procdev */
2241
2242 rv = 0;
2243 freeme:
2244 free(pd, M_DEVBUF);
2245 free(cfg, M_DEVBUF);
2246
2247 return rv;
2248 }
2249
2250 static int
2251 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
2252 {
2253 uint32_t opc, dir = MFI_DATA_NONE;
2254 int rv = 0;
2255 int8_t ret;
2256
2257 switch(ba->ba_opcode) {
2258 case BIOC_SADISABLE:
2259 opc = MR_DCMD_SPEAKER_DISABLE;
2260 break;
2261
2262 case BIOC_SAENABLE:
2263 opc = MR_DCMD_SPEAKER_ENABLE;
2264 break;
2265
2266 case BIOC_SASILENCE:
2267 opc = MR_DCMD_SPEAKER_SILENCE;
2268 break;
2269
2270 case BIOC_GASTATUS:
2271 opc = MR_DCMD_SPEAKER_GET;
2272 dir = MFI_DATA_IN;
2273 break;
2274
2275 case BIOC_SATEST:
2276 opc = MR_DCMD_SPEAKER_TEST;
2277 break;
2278
2279 default:
2280 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
2281 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
2282 return EINVAL;
2283 }
2284
2285 if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL, false))
2286 rv = EINVAL;
2287 else
2288 if (ba->ba_opcode == BIOC_GASTATUS)
2289 ba->ba_status = ret;
2290 else
2291 ba->ba_status = 0;
2292
2293 return rv;
2294 }
2295
2296 static int
2297 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
2298 {
2299 int i, found, rv = EINVAL;
2300 uint8_t mbox[MFI_MBOX_SIZE];
2301 uint32_t cmd;
2302 struct mfi_pd_list *pd;
2303
2304 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
2305 bb->bb_status);
2306
2307 /* channel 0 means not in an enclosure so can't be blinked */
2308 if (bb->bb_channel == 0)
2309 return EINVAL;
2310
2311 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
2312
2313 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
2314 MFI_PD_LIST_SIZE, pd, NULL, false))
2315 goto done;
2316
2317 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
2318 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
2319 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
2320 found = 1;
2321 break;
2322 }
2323
2324 if (!found)
2325 goto done;
2326
2327 memset(mbox, 0, sizeof mbox);
2328
2329 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
2330
2331 switch (bb->bb_status) {
2332 case BIOC_SBUNBLINK:
2333 cmd = MR_DCMD_PD_UNBLINK;
2334 break;
2335
2336 case BIOC_SBBLINK:
2337 cmd = MR_DCMD_PD_BLINK;
2338 break;
2339
2340 case BIOC_SBALARM:
2341 default:
2342 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
2343 "opcode %x\n", DEVNAME(sc), bb->bb_status);
2344 goto done;
2345 }
2346
2347
2348 if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox, false))
2349 goto done;
2350
2351 rv = 0;
2352 done:
2353 free(pd, M_DEVBUF);
2354 return rv;
2355 }
2356
2357 static int
2358 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
2359 {
2360 struct mfi_pd_list *pd;
2361 int i, found, rv = EINVAL;
2362 uint8_t mbox[MFI_MBOX_SIZE];
2363
2364 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
2365 bs->bs_status);
2366
2367 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
2368
2369 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
2370 MFI_PD_LIST_SIZE, pd, NULL, false))
2371 goto done;
2372
2373 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
2374 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
2375 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
2376 found = 1;
2377 break;
2378 }
2379
2380 if (!found)
2381 goto done;
2382
2383 memset(mbox, 0, sizeof mbox);
2384
2385 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
2386
2387 switch (bs->bs_status) {
2388 case BIOC_SSONLINE:
2389 mbox[2] = MFI_PD_ONLINE;
2390 break;
2391
2392 case BIOC_SSOFFLINE:
2393 mbox[2] = MFI_PD_OFFLINE;
2394 break;
2395
2396 case BIOC_SSHOTSPARE:
2397 mbox[2] = MFI_PD_HOTSPARE;
2398 break;
2399 /*
2400 case BIOC_SSREBUILD:
2401 break;
2402 */
2403 default:
2404 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
2405 "opcode %x\n", DEVNAME(sc), bs->bs_status);
2406 goto done;
2407 }
2408
2409
2410 if (mfi_mgmt_internal(sc, MR_DCMD_PD_SET_STATE, MFI_DATA_NONE,
2411 0, NULL, mbox, false))
2412 goto done;
2413
2414 rv = 0;
2415 done:
2416 free(pd, M_DEVBUF);
2417 return rv;
2418 }
2419
2420 static int
2421 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
2422 {
2423 struct mfi_conf *cfg;
2424 struct mfi_hotspare *hs;
2425 struct mfi_pd_details *pd;
2426 struct bioc_disk *sdhs;
2427 struct bioc_vol *vdhs;
2428 struct scsipi_inquiry_data *inqbuf;
2429 char vend[8+16+4+1];
2430 int i, rv = EINVAL;
2431 uint32_t size;
2432 uint8_t mbox[MFI_MBOX_SIZE];
2433
2434 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
2435
2436 if (!bio_hs)
2437 return EINVAL;
2438
2439 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
2440
2441 /* send single element command to retrieve size for full structure */
2442 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2443 if (mfi_mgmt_internal(sc, MR_DCMD_CONF_GET, MFI_DATA_IN,
2444 sizeof *cfg, cfg, NULL, false))
2445 goto freeme;
2446
2447 size = cfg->mfc_size;
2448 free(cfg, M_DEVBUF);
2449
2450 /* memory for read config */
2451 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
2452 if (mfi_mgmt_internal(sc, MR_DCMD_CONF_GET, MFI_DATA_IN,
2453 size, cfg, NULL, false))
2454 goto freeme;
2455
2456 /* calculate offset to hs structure */
2457 hs = (struct mfi_hotspare *)(
2458 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
2459 cfg->mfc_array_size * cfg->mfc_no_array +
2460 cfg->mfc_ld_size * cfg->mfc_no_ld);
2461
2462 if (volid < cfg->mfc_no_ld)
2463 goto freeme; /* not a hotspare */
2464
2465 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
2466 goto freeme; /* not a hotspare */
2467
2468 /* offset into hotspare structure */
2469 i = volid - cfg->mfc_no_ld;
2470
2471 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
2472 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
2473 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
2474
2475 /* get pd fields */
2476 memset(mbox, 0, sizeof mbox);
2477 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
2478 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2479 sizeof *pd, pd, mbox, false)) {
2480 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
2481 DEVNAME(sc));
2482 goto freeme;
2483 }
2484
2485 switch (type) {
2486 case MFI_MGMT_VD:
2487 vdhs = bio_hs;
2488 vdhs->bv_status = BIOC_SVONLINE;
2489 vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */
2490 vdhs->bv_level = -1; /* hotspare */
2491 vdhs->bv_nodisk = 1;
2492 break;
2493
2494 case MFI_MGMT_SD:
2495 sdhs = bio_hs;
2496 sdhs->bd_status = BIOC_SDHOTSPARE;
2497 sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */
2498 sdhs->bd_channel = pd->mpd_enc_idx;
2499 sdhs->bd_target = pd->mpd_enc_slot;
2500 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
2501 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
2502 vend[sizeof vend - 1] = '\0';
2503 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
2504 break;
2505
2506 default:
2507 goto freeme;
2508 }
2509
2510 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
2511 rv = 0;
2512 freeme:
2513 free(pd, M_DEVBUF);
2514 free(cfg, M_DEVBUF);
2515
2516 return rv;
2517 }
2518
2519 static int
2520 mfi_destroy_sensors(struct mfi_softc *sc)
2521 {
2522 if (sc->sc_sme == NULL)
2523 return 0;
2524 sysmon_envsys_unregister(sc->sc_sme);
2525 sc->sc_sme = NULL;
2526 free(sc->sc_sensor, M_DEVBUF);
2527 return 0;
2528 }
2529
2530 static int
2531 mfi_create_sensors(struct mfi_softc *sc)
2532 {
2533 int i;
2534 int nsensors = sc->sc_ld_cnt + 1;
2535 int rv;
2536
2537 sc->sc_sme = sysmon_envsys_create();
2538 sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
2539 M_DEVBUF, M_WAITOK | M_ZERO);
2540
2541 /* BBU */
2542 sc->sc_sensor[0].units = ENVSYS_INDICATOR;
2543 sc->sc_sensor[0].state = ENVSYS_SINVALID;
2544 sc->sc_sensor[0].value_cur = 0;
2545 /* Enable monitoring for BBU state changes, if present */
2546 if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU)
2547 sc->sc_sensor[0].flags |= ENVSYS_FMONCRITICAL;
2548 snprintf(sc->sc_sensor[0].desc,
2549 sizeof(sc->sc_sensor[0].desc), "%s BBU", DEVNAME(sc));
2550 if (sysmon_envsys_sensor_attach(sc->sc_sme, &sc->sc_sensor[0]))
2551 goto out;
2552
2553 for (i = 1; i < nsensors; i++) {
2554 sc->sc_sensor[i].units = ENVSYS_DRIVE;
2555 sc->sc_sensor[i].state = ENVSYS_SINVALID;
2556 sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY;
2557 /* Enable monitoring for drive state changes */
2558 sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
2559 /* logical drives */
2560 snprintf(sc->sc_sensor[i].desc,
2561 sizeof(sc->sc_sensor[i].desc), "%s:%d",
2562 DEVNAME(sc), i - 1);
2563 if (sysmon_envsys_sensor_attach(sc->sc_sme,
2564 &sc->sc_sensor[i]))
2565 goto out;
2566 }
2567
2568 sc->sc_sme->sme_name = DEVNAME(sc);
2569 sc->sc_sme->sme_cookie = sc;
2570 sc->sc_sme->sme_refresh = mfi_sensor_refresh;
2571 rv = sysmon_envsys_register(sc->sc_sme);
2572 if (rv != 0) {
2573 aprint_error_dev(sc->sc_dev,
2574 "unable to register with sysmon (rv = %d)\n", rv);
2575 goto out;
2576 }
2577 return 0;
2578
2579 out:
2580 free(sc->sc_sensor, M_DEVBUF);
2581 sysmon_envsys_destroy(sc->sc_sme);
2582 sc->sc_sme = NULL;
2583 return EINVAL;
2584 }
2585
/*
 * envsys refresh callback: update one sensor's value/state.
 * Sensor 0 is the BBU indicator; sensors 1..sc_ld_cnt map to logical
 * drive (sensor - 1), whose status is fetched via mfi_ioctl_vol().
 * Firmware queries are made at splbio() under the kernel lock because
 * sysmon calls this from its own thread.
 */
static void
mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
{
	struct mfi_softc *sc = sme->sme_cookie;
	struct bioc_vol bv;
	int s;
	int error;

	/* out-of-range sensor number: nothing to update */
	if (edata->sensor >= sc->sc_ld_cnt + 1)
		return;

	if (edata->sensor == 0) {
		/* BBU */
		struct mfi_bbu_status bbu_stat;
		int bbu_status;
		/* no BBU fitted: leave the sensor untouched */
		if ((sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) == 0)
			return;

		KERNEL_LOCK(1, curlwp);
		s = splbio();
		bbu_status = mfi_get_bbu(sc, &bbu_stat);
		splx(s);
		KERNEL_UNLOCK_ONE(curlwp);
		/* log only on good<->bad transitions, tracked in sc_bbuok */
		switch(bbu_status) {
		case MFI_BBU_GOOD:
			edata->value_cur = 1;
			edata->state = ENVSYS_SVALID;
			if (!sc->sc_bbuok)
				aprint_normal_dev(sc->sc_dev,
				    "BBU state changed to good\n");
			sc->sc_bbuok = true;
			break;
		case MFI_BBU_BAD:
			edata->value_cur = 0;
			edata->state = ENVSYS_SCRITICAL;
			if (sc->sc_bbuok)
				aprint_normal_dev(sc->sc_dev,
				    "BBU state changed to bad\n");
			sc->sc_bbuok = false;
			break;
		case MFI_BBU_UNKNOWN:
		default:
			edata->value_cur = 0;
			edata->state = ENVSYS_SINVALID;
			sc->sc_bbuok = false;
			break;
		}
		return;
	}

	/* logical drive sensor: reuse the bio(4) volume query path */
	memset(&bv, 0, sizeof(bv));
	bv.bv_volid = edata->sensor - 1;
	KERNEL_LOCK(1, curlwp);
	s = splbio();
	error = mfi_ioctl_vol(sc, &bv);
	splx(s);
	KERNEL_UNLOCK_ONE(curlwp);
	if (error)
		bv.bv_status = BIOC_SVINVALID;

	bio_vol_to_envsys(edata, &bv);
}
2648
2649 #endif /* NBIO > 0 */
2650
/* xscale: read the firmware state from outbound message register 0. */
static uint32_t
mfi_xscale_fw_state(struct mfi_softc *sc)
{
	return mfi_read(sc, MFI_OMSG0);
}
2656
/* xscale: disable interrupts by writing 0 to the outbound mask register. */
static void
mfi_xscale_intr_dis(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, 0);
}
2662
/* xscale: enable interrupts via the outbound mask register. */
static void
mfi_xscale_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
}
2668
/*
 * xscale: check whether this device raised an interrupt.
 * Returns 1 (and acknowledges it) if so, 0 if the interrupt
 * belongs to someone else.
 */
static int
mfi_xscale_intr(struct mfi_softc *sc)
{
	uint32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_INTR_VALID))
		return 0;

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_OSTS, status);
	return 1;
}
2682
/*
 * xscale: hand a command frame to the firmware.  The frame and sense
 * DMA regions are synced first, then the frame's bus address (shifted,
 * with the extra-frame count in the low bits) is written to the inbound
 * queue port.
 */
static void
mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
	    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
	    MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);

	mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
	    ccb->ccb_extra_frames);
	ccb->ccb_state = MFI_CCB_RUNNING;
}
2697
/* ppc: read the firmware state from the outbound scratch pad register. */
static uint32_t
mfi_ppc_fw_state(struct mfi_softc *sc)
{
	return mfi_read(sc, MFI_OSP);
}
2703
/* ppc: mask all interrupts and clear any pending doorbell bits. */
static void
mfi_ppc_intr_dis(struct mfi_softc *sc)
{
	/* Taking a wild guess --dyoung */
	mfi_write(sc, MFI_OMSK, ~(uint32_t)0x0);
	mfi_write(sc, MFI_ODC, 0xffffffff);
}
2711
/* ppc: clear pending doorbell bits, then unmask the interrupt sources. */
static void
mfi_ppc_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_ODC, 0xffffffff);
	mfi_write(sc, MFI_OMSK, ~0x80000004);
}
2718
/*
 * ppc: check whether this device raised an interrupt; acknowledge it
 * through the doorbell clear register.  Returns 1 if handled, 0 if not
 * ours.
 */
static int
mfi_ppc_intr(struct mfi_softc *sc)
{
	uint32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
		return 0;

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_ODC, status);
	return 1;
}
2732
/*
 * ppc: post a command frame: bus address with bit 0 set and the
 * extra-frame count in the next bits, written to the inbound queue port.
 */
static void
mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
	ccb->ccb_state = MFI_CCB_RUNNING;
}
2740
/* gen2: read the firmware state from the outbound scratch pad register. */
u_int32_t
mfi_gen2_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}
2746
/* gen2: mask all interrupts and clear pending doorbell bits. */
void
mfi_gen2_intr_dis(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, 0xffffffff);
	mfi_write(sc, MFI_ODC, 0xffffffff);
}
2753
/* gen2: clear pending doorbell bits, then unmask the valid-interrupt bit. */
void
mfi_gen2_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_ODC, 0xffffffff);
	mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
}
2760
/*
 * gen2: check whether this device raised an interrupt; acknowledge it
 * through the doorbell clear register.  Returns 1 if handled, 0 if not
 * ours.
 */
int
mfi_gen2_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_ODC, status);

	return (1);
}
2775
/*
 * gen2: post a command frame: bus address with bit 0 set and the
 * extra-frame count in the next bits, written to the inbound queue port.
 */
void
mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
	ccb->ccb_state = MFI_CCB_RUNNING;
}
2783
/* skinny: read the firmware state from the outbound scratch pad register. */
u_int32_t
mfi_skinny_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}
2789
/* skinny: disable interrupts by writing 0 to the outbound mask register. */
void
mfi_skinny_intr_dis(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, 0);
}
2795
/* skinny: unmask bit 0 of the outbound mask register to enable interrupts. */
void
mfi_skinny_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, ~0x00000001);
}
2801
/*
 * skinny: check whether this device raised an interrupt; acknowledge it
 * by writing the status back.  Returns 1 if handled, 0 if not ours.
 */
int
mfi_skinny_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_OSTS, status);

	return (1);
}
2816
/*
 * skinny: post a command frame through the 64-bit inbound queue port
 * pair (low word carries the frame address and flags, high word is 0).
 */
void
mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
	mfi_write(sc, MFI_IQPH, 0x00000000);
	ccb->ccb_state = MFI_CCB_RUNNING;
}
2825
2826 #define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000008)
2827
/*
 * tbolt: unmask the fusion interrupt bit; the trailing read flushes the
 * write to the device before returning.
 */
void
mfi_tbolt_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
	mfi_read(sc, MFI_OMSK);
}
2834
/*
 * tbolt: mask all interrupts; the trailing read flushes the write to
 * the device before returning.
 */
void
mfi_tbolt_intr_dis(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, 0xFFFFFFFF);
	mfi_read(sc, MFI_OMSK);
}
2841
/*
 * tbolt: check whether this device raised an interrupt.
 * Bit 0 set means a doorbell-style interrupt that must be written back;
 * state-change interrupts are acknowledged but reported as not handled.
 * Otherwise the fusion interrupt bit decides.  Returns 1 if ours, 0 if
 * not.
 */
int
mfi_tbolt_intr(struct mfi_softc *sc)
{
	int32_t status;

	status = mfi_read(sc, MFI_OSTS);

	if (ISSET(status, 0x1)) {
		/* acknowledge; the read flushes the write */
		mfi_write(sc, MFI_OSTS, status);
		mfi_read(sc, MFI_OSTS);
		if (ISSET(status, MFI_STATE_CHANGE_INTERRUPT))
			return 0;
		return 1;
	}
	if (!ISSET(status, MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return 0;
	mfi_read(sc, MFI_OSTS);
	return 1;
}
2861
/* tbolt: read the firmware state from the outbound scratch pad register. */
u_int32_t
mfi_tbolt_fw_state(struct mfi_softc *sc)
{
	return mfi_read(sc, MFI_OSP);
}
2867
/*
 * tbolt: hand a command to the firmware.
 * Once MFA mode is enabled, legacy MFI frames are first wrapped into an
 * MPT pass-through request and the 64-bit request descriptor is written
 * to the inbound queue port pair.  Before MFA is up (during init) the
 * raw frame address is posted with the MFA descriptor type flag.
 */
void
mfi_tbolt_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	if (sc->sc_MFA_enabled) {
		if ((ccb->ccb_flags & MFI_CCB_F_TBOLT) == 0)
			mfi_tbolt_build_mpt_ccb(ccb);
		mfi_write(sc, MFI_IQPL,
		    ccb->ccb_tb_request_desc.words & 0xFFFFFFFF);
		mfi_write(sc, MFI_IQPH,
		    ccb->ccb_tb_request_desc.words >> 32);
		ccb->ccb_state = MFI_CCB_RUNNING;
		return;
	}
	uint64_t bus_add = ccb->ccb_pframe;
	bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
	    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	mfi_write(sc, MFI_IQPL, bus_add);
	mfi_write(sc, MFI_IQPH, bus_add >> 32);
	ccb->ccb_state = MFI_CCB_RUNNING;
}
2888
/*
 * Wrap a legacy MFI frame into an MPT2 pass-through request so it can
 * be posted to a Thunderbolt controller: point an IEEE chain element at
 * the original frame, mark the ccb MFI_CCB_F_TBOLT and sync the new
 * message to the device.
 */
static void
mfi_tbolt_build_mpt_ccb(struct mfi_ccb *ccb)
{
	union mfi_mpi2_request_descriptor *req_desc = &ccb->ccb_tb_request_desc;
	struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request;
	struct mpi25_ieee_sge_chain64 *mpi25_ieee_chain;

	io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	/* SGL offsets are expressed in 4- resp. 16-byte units */
	io_req->SGLOffset0 =
	    offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;
	io_req->ChainOffset =
	    offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 16;

	/* chain element points at the original legacy MFI frame */
	mpi25_ieee_chain =
	    (struct mpi25_ieee_sge_chain64 *)&io_req->SGL.IeeeChain;
	mpi25_ieee_chain->Address = ccb->ccb_pframe;

	/*
	  In MFI pass thru, nextChainOffset will always be zero to
	  indicate the end of the chain.
	*/
	mpi25_ieee_chain->Flags= MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
	    | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	/* setting the length to the maximum length */
	mpi25_ieee_chain->Length = 1024;

	req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	ccb->ccb_flags |= MFI_CCB_F_TBOLT;
	bus_dmamap_sync(ccb->ccb_sc->sc_dmat,
	    MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool),
	    ccb->ccb_tb_pio_request -
	    MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool),
	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
2926
2927 /*
2928 * Description:
2929 * This function will prepare message pools for the Thunderbolt controller
2930 */
/*
 * Description:
 * This function will prepare message pools for the Thunderbolt controller
 *
 * Layout inside sc_tbolt_reqmsgpool: request messages first
 * ((sc_max_cmds + 1) slots), then the reply frame pool (filled with
 * 0xFF, i.e. the "unused" pattern the interrupt handler looks for),
 * then the SG chain frames.  Bus addresses of the reply and SG regions
 * are recorded in the softc, the reply index is reset, and the whole
 * map is synced to the device.  Always returns 0.
 */
static int
mfi_tbolt_init_desc_pool(struct mfi_softc *sc)
{
	uint32_t offset = 0;
	uint8_t *addr = MFIMEM_KVA(sc->sc_tbolt_reqmsgpool);

	/* Request Descriptors alignment restrictions */
	KASSERT(((uintptr_t)addr & 0xFF) == 0);

	/* Skip request message pool */
	addr = &addr[MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1)];

	/* Reply Frame Pool is initialized */
	sc->sc_reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
	KASSERT(((uintptr_t)addr & 0xFF) == 0);

	offset = (uintptr_t)sc->sc_reply_frame_pool
	    - (uintptr_t)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool);
	sc->sc_reply_frame_busaddr =
	    MFIMEM_DVA(sc->sc_tbolt_reqmsgpool) + offset;

	/* initializing reply address to 0xFFFFFFFF */
	memset((uint8_t *)sc->sc_reply_frame_pool, 0xFF,
	    (MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size));

	/* Skip Reply Frame Pool */
	addr += MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;
	sc->sc_reply_pool_limit = (void *)addr;

	offset = MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;
	sc->sc_sg_frame_busaddr = sc->sc_reply_frame_busaddr + offset;

	/* initialize the last_reply_idx to 0 */
	sc->sc_last_reply_idx = 0;
	offset = (sc->sc_sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
	    sc->sc_max_cmds)) - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool);
	KASSERT(offset <= sc->sc_tbolt_reqmsgpool->am_size);
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), 0,
	    MFIMEM_MAP(sc->sc_tbolt_reqmsgpool)->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return 0;
}
2973
2974 /*
2975 * This routine prepare and issue INIT2 frame to the Firmware
2976 */
2977
/*
 * Build and send the MPI2 IOC_INIT request that switches the
 * Thunderbolt firmware into MFA mode: fill in the ioc-init message
 * (queue depths, reply/request pool bus addresses, driver version
 * buffer), wrap it in a legacy MFI INIT frame and poll for completion.
 * On success sets sc_MFA_enabled and creates/kicks the LD-sync map
 * workqueue.  Returns 0 on success, non-zero on failure (1, or EBUSY
 * when no ccb is available).
 *
 * NOTE(review): on the mfi_poll() failure path the ccb acquired via
 * mfi_get_ccb() is not returned with mfi_put_ccb() — it stays leaked.
 * Whether returning it is safe while the command may still be in flight
 * needs confirmation, so the leak is only flagged here, not fixed.
 */
static int
mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
{
	struct mpi2_ioc_init_request *mpi2IocInit;
	struct mfi_init_frame *mfi_init;
	struct mfi_ccb *ccb;
	bus_addr_t phyAddress;
	mfi_address *mfiAddressTemp;
	int s;
	char *verbuf;
	char wqbuf[10];

	/* Check if initialization is already completed */
	if (sc->sc_MFA_enabled) {
		return 1;
	}

	mpi2IocInit =
	    (struct mpi2_ioc_init_request *)MFIMEM_KVA(sc->sc_tbolt_ioc_init);

	s = splbio();
	if ((ccb = mfi_get_ccb(sc)) == NULL) {
		splx(s);
		return (EBUSY);
	}


	mfi_init = &ccb->ccb_frame->mfr_init;

	memset(mpi2IocInit, 0, sizeof(struct mpi2_ioc_init_request));
	mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT;
	mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER;

	/* set MsgVersion and HeaderVersion host driver was built with */
	mpi2IocInit->MsgVersion = MPI2_VERSION;
	mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
	mpi2IocInit->SystemRequestFrameSize = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE/4;
	mpi2IocInit->ReplyDescriptorPostQueueDepth =
	    (uint16_t)sc->sc_reply_pool_size;
	mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */

	/* Get physical address of reply frame pool */
	phyAddress = sc->sc_reply_frame_busaddr;
	mfiAddressTemp =
	    (mfi_address *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);

	/* Get physical address of request message pool */
	phyAddress = MFIMEM_DVA(sc->sc_tbolt_reqmsgpool);
	mfiAddressTemp = (mfi_address *)&mpi2IocInit->SystemRequestFrameBaseAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);

	mpi2IocInit->ReplyFreeQueueAddress = 0; /* Not supported by MR. */
	mpi2IocInit->TimeStamp = time_uptime;

	/* advertise the driver version string to the firmware */
	verbuf = MFIMEM_KVA(sc->sc_tbolt_verbuf);
	snprintf(verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
	    MEGASAS_VERSION);
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0,
	    MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_PREWRITE);
	mfi_init->driver_ver_lo = htole32(MFIMEM_DVA(sc->sc_tbolt_verbuf));
	mfi_init->driver_ver_hi =
	    htole32((uint64_t)MFIMEM_DVA(sc->sc_tbolt_verbuf) >> 32);

	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0,
	    MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	/* Get the physical address of the mpi2 ioc init command */
	phyAddress = MFIMEM_DVA(sc->sc_tbolt_ioc_init);
	mfi_init->mif_qinfo_new_addr_lo = htole32(phyAddress);
	mfi_init->mif_qinfo_new_addr_hi = htole32((uint64_t)phyAddress >> 32);

	mfi_init->mif_header.mfh_cmd = MFI_CMD_INIT;
	mfi_init->mif_header.mfh_data_len = sizeof(struct mpi2_ioc_init_request);
	if (mfi_poll(ccb) != 0) {
		aprint_error_dev(sc->sc_dev, "failed to send IOC init2 "
		    "command at 0x%" PRIx64 "\n",
		    (uint64_t)ccb->ccb_pframe);
		splx(s);
		return 1;
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0,
	    MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0,
	    MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	mfi_put_ccb(ccb);
	splx(s);

	if (mfi_init->mif_header.mfh_cmd_status == 0) {
		sc->sc_MFA_enabled = 1;
	}
	else {
		aprint_error_dev(sc->sc_dev, "Init command Failed %x\n",
		    mfi_init->mif_header.mfh_cmd_status);
		return 1;
	}

	snprintf(wqbuf, sizeof(wqbuf), "%swq", DEVNAME(sc));
	if (workqueue_create(&sc->sc_ldsync_wq, wqbuf, mfi_tbolt_sync_map_info,
	    sc, PRIBIO, IPL_BIO, 0) != 0) {
		aprint_error_dev(sc->sc_dev, "workqueue_create failed\n");
		return 1;
	}
	workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL);
	return 0;
}
3087
/*
 * Thunderbolt interrupt handler: walk the reply descriptor ring from
 * sc_last_reply_idx, completing each finished ccb (identified by the
 * descriptor's SMID), until an unused descriptor (all-ones pattern) is
 * reached.  Consumed descriptors are reset to ~0 and the new ring index
 * is written to the reply post index register.  Returns 1 if any work
 * was done, 0 otherwise.
 */
int
mfi_tbolt_intrh(void *arg)
{
	struct mfi_softc *sc = arg;
	struct mfi_ccb *ccb;
	union mfi_mpi2_reply_descriptor *desc;
	int smid, num_completed;

	if (!mfi_tbolt_intr(sc))
		return 0;

	DNPRINTF(MFI_D_INTR, "%s: mfi_tbolt_intrh %#lx %#lx\n", DEVNAME(sc),
	    (u_long)sc, (u_long)sc->sc_last_reply_idx);

	KASSERT(sc->sc_last_reply_idx < sc->sc_reply_pool_size);

	desc = (union mfi_mpi2_reply_descriptor *)
	    ((uintptr_t)sc->sc_reply_frame_pool +
	     sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE);

	/* pull the reply region from the device before reading it */
	bus_dmamap_sync(sc->sc_dmat,
	    MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1),
	    MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	num_completed = 0;
	while ((desc->header.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK) !=
	    MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		/* SMIDs are 1-based; ccb array is 0-based */
		smid = desc->header.SMID;
		KASSERT(smid > 0 && smid <= sc->sc_max_cmds);
		ccb = &sc->sc_ccb[smid - 1];
		DNPRINTF(MFI_D_INTR,
		    "%s: mfi_tbolt_intr SMID %#x reply_idx %#x "
		    "desc %#" PRIx64 " ccb %p\n", DEVNAME(sc), smid,
		    sc->sc_last_reply_idx, desc->words, ccb);
		KASSERT(ccb->ccb_state == MFI_CCB_RUNNING);
		/* sync the SG chain frame if this I/O used one */
		if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO &&
		    ccb->ccb_tb_io_request->ChainOffset != 0) {
			bus_dmamap_sync(sc->sc_dmat,
			    MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
			    ccb->ccb_tb_psg_frame -
				MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
			    MEGASAS_MAX_SZ_CHAIN_FRAME, BUS_DMASYNC_POSTREAD);
		}
		if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO) {
			bus_dmamap_sync(sc->sc_dmat,
			    MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
			    ccb->ccb_tb_pio_request -
				MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
			    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		}
		if (ccb->ccb_done)
			ccb->ccb_done(ccb);
		else
			ccb->ccb_state = MFI_CCB_DONE;
		/* advance (and wrap) the ring index */
		sc->sc_last_reply_idx++;
		if (sc->sc_last_reply_idx >= sc->sc_reply_pool_size) {
			sc->sc_last_reply_idx = 0;
		}
		/* mark the descriptor unused for the next pass */
		desc->words = ~0x0;
		/* Get the next reply descriptor */
		desc = (union mfi_mpi2_reply_descriptor *)
		    ((uintptr_t)sc->sc_reply_frame_pool +
		     sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE);
		num_completed++;
	}
	if (num_completed == 0)
		return 0;

	/* push the cleared descriptors back and tell the firmware */
	bus_dmamap_sync(sc->sc_dmat,
	    MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1),
	    MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	mfi_write(sc, MFI_RPI, sc->sc_last_reply_idx);
	return 1;
}
3166
3167
/*
 * Build a Thunderbolt LD I/O request for the given scsipi transfer:
 * copy the CDB, set target/timeout/data length, build the SG list and
 * fill in the sense buffer address, then sync the message to the
 * device.  blockno is unused here (addressing comes from the CDB);
 * blockcnt sizes the data transfer.  Returns 0 on success, 1 if there
 * is no data pointer or SG list construction fails.
 */
int
mfi_tbolt_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
    uint64_t blockno, uint32_t blockcnt)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mfi_mpi2_request_raid_scsi_io *io_req;
	int sge_count;

	DNPRINTF(MFI_D_CMD, "%s: mfi_tbolt_scsi_ld_io: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	if (!xs->data)
		return 1;

	ccb->ccb_done = mfi_tbolt_scsi_ld_done;
	ccb->ccb_xs = xs;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	io_req = ccb->ccb_tb_io_request;

	/* Just the CDB length,rest of the Flags are zero */
	io_req->IoFlags = xs->cmdlen;
	memset(io_req->CDB.CDB32, 0, 32);
	memcpy(io_req->CDB.CDB32, &xs->cmdstore, xs->cmdlen);

	io_req->RaidContext.TargetID = periph->periph_target;
	io_req->RaidContext.Status = 0;
	io_req->RaidContext.exStatus = 0;
	io_req->RaidContext.timeoutValue = MFI_FUSION_FP_DEFAULT_TIMEOUT;
	io_req->Function = MPI2_FUNCTION_LD_IO_REQUEST;
	io_req->DevHandle = periph->periph_target;

	ccb->ccb_tb_request_desc.header.RequestFlags =
	    (MFI_REQ_DESCRIPT_FLAGS_LD_IO << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	io_req->DataLength = blockcnt * MFI_SECTOR_LEN;

	/* transfer direction from the scsipi control flags */
	if (xs->xs_control & XS_CTL_DATA_IN) {
		io_req->Control = MPI2_SCSIIO_CONTROL_READ;
		ccb->ccb_direction = MFI_DATA_IN;
	} else {
		io_req->Control = MPI2_SCSIIO_CONTROL_WRITE;
		ccb->ccb_direction = MFI_DATA_OUT;
	}

	sge_count = mfi_tbolt_create_sgl(ccb,
	    (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK
	    );
	if (sge_count < 0)
		return 1;
	KASSERT(sge_count <= ccb->ccb_sc->sc_max_sgl);
	io_req->RaidContext.numSGE = sge_count;
	io_req->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
	io_req->SGLOffset0 =
	    offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;

	io_req->SenseBufferLowAddress = htole32(ccb->ccb_psense);
	io_req->SenseBufferLength = MFI_SENSE_SIZE;

	/* push the finished message to the device before posting */
	ccb->ccb_flags |= MFI_CCB_F_TBOLT | MFI_CCB_F_TBOLT_IO;
	bus_dmamap_sync(ccb->ccb_sc->sc_dmat,
	    MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool),
	    ccb->ccb_tb_pio_request -
		MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool),
	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
3238
3239
3240 static void
3241 mfi_tbolt_scsi_ld_done(struct mfi_ccb *ccb)
3242 {
3243 struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request;
3244 mfi_scsi_xs_done(ccb, io_req->RaidContext.Status,
3245 io_req->RaidContext.exStatus);
3246 }
3247
3248 static int
3249 mfi_tbolt_create_sgl(struct mfi_ccb *ccb, int flags)
3250 {
3251 struct mfi_softc *sc = ccb->ccb_sc;
3252 bus_dma_segment_t *sgd;
3253 int error, i, sge_idx, sge_count;
3254 struct mfi_mpi2_request_raid_scsi_io *io_req;
3255 struct mpi25_ieee_sge_chain64 *sgl_ptr;
3256
3257 DNPRINTF(MFI_D_DMA, "%s: mfi_tbolt_create_sgl %#lx\n", DEVNAME(sc),
3258 (u_long)ccb->ccb_data);
3259
3260 if (!ccb->ccb_data)
3261 return -1;
3262
3263 KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p());
3264 error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap,
3265 ccb->ccb_data, ccb->ccb_len, NULL, flags);
3266 if (error) {
3267 if (error == EFBIG)
3268 aprint_error_dev(sc->sc_dev, "more than %d dma segs\n",
3269 sc->sc_max_sgl);
3270 else
3271 aprint_error_dev(sc->sc_dev,
3272 "error %d loading dma map\n", error);
3273 return -1;
3274 }
3275
3276 io_req = ccb->ccb_tb_io_request;
3277 sgl_ptr = &io_req->SGL.IeeeChain.Chain64;
3278 sge_count = ccb->ccb_dmamap->dm_nsegs;
3279 sgd = ccb->ccb_dmamap->dm_segs;
3280 KASSERT(sge_count <= sc->sc_max_sgl);
3281 KASSERT(sge_count <=
3282 (MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1 +
3283 MEGASAS_THUNDERBOLT_MAX_SGE_IN_CHAINMSG));
3284
3285 if (sge_count > MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG) {
3286 /* One element to store the chain info */
3287 sge_idx = MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1;
3288 DNPRINTF(MFI_D_DMA,
3289 "mfi sge_idx %d sge_count %d io_req paddr 0x%" PRIx64 "\n",
3290 sge_idx, sge_count, ccb->ccb_tb_pio_request);
3291 } else {
3292 sge_idx = sge_count;
3293 }
3294
3295 for (i = 0; i < sge_idx; i++) {
3296 sgl_ptr->Address = htole64(sgd[i].ds_addr);
3297 sgl_ptr->Length = htole32(sgd[i].ds_len);
3298 sgl_ptr->Flags = 0;
3299 if (sge_idx < sge_count) {
3300 DNPRINTF(MFI_D_DMA,
3301 "sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32
3302 " flags 0x%x\n", sgl_ptr, i,
3303 sgl_ptr->Address, sgl_ptr->Length,
3304 sgl_ptr->Flags);
3305 }
3306 sgl_ptr++;
3307 }
3308 io_req->ChainOffset = 0;
3309 if (sge_idx < sge_count) {
3310 struct mpi25_ieee_sge_chain64 *sg_chain;
3311 io_req->ChainOffset = MEGASAS_THUNDERBOLT_CHAIN_OFF_MAINMSG;
3312 sg_chain = sgl_ptr;
3313 /* Prepare chain element */
3314 sg_chain->NextChainOffset = 0;
3315 sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3316 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
3317 sg_chain->Length = (sizeof(mpi2_sge_io_union) *
3318 (sge_count - sge_idx));
3319 sg_chain->Address = ccb->ccb_tb_psg_frame;
3320 DNPRINTF(MFI_D_DMA,
3321 "sgl %p chain 0x%" PRIx64 " len 0x%" PRIx32
3322 " flags 0x%x\n", sg_chain, sg_chain->Address,
3323 sg_chain->Length, sg_chain->Flags);
3324 sgl_ptr = &ccb->ccb_tb_sg_frame->IeeeChain.Chain64;
3325 for (; i < sge_count; i++) {
3326 sgl_ptr->Address = htole64(sgd[i].ds_addr);
3327 sgl_ptr->Length = htole32(sgd[i].ds_len);
3328 sgl_ptr->Flags = 0;
3329 DNPRINTF(MFI_D_DMA,
3330 "sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32
3331 " flags 0x%x\n", sgl_ptr, i, sgl_ptr->Address,
3332 sgl_ptr->Length, sgl_ptr->Flags);
3333 sgl_ptr++;
3334 }
3335 bus_dmamap_sync(sc->sc_dmat,
3336 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3337 ccb->ccb_tb_psg_frame - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
3338 MEGASAS_MAX_SZ_CHAIN_FRAME, BUS_DMASYNC_PREREAD);
3339 }
3340
3341 if (ccb->ccb_direction == MFI_DATA_IN) {
3342 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
3343 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3344 } else {
3345 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
3346 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
3347 }
3348 return sge_count;
3349 }
3350
3351 /*
3352 * The ThunderBolt HW has an option for the driver to directly
3353 * access the underlying disks and operate on the RAID. To
3354 * do this there needs to be a capability to keep the RAID controller
3355 * and driver in sync. The FreeBSD driver does not take advantage
3356 * of this feature since it adds a lot of complexity and slows down
3357 * performance. Performance is gained by using the controller's
3358 * cache etc.
3359 *
3360 * Even though this driver doesn't access the disks directly, an
3361 * AEN like command is used to inform the RAID firmware to "sync"
3362 * with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command. This
3363 * command in write mode will return when the RAID firmware has
3364 * detected a change to the RAID state. Examples of this type
3365 * of change are removing a disk. Once the command returns then
3366 * the driver needs to acknowledge this and "sync" all LD's again.
3367 * This repeats until we shutdown. Then we need to cancel this
3368 * pending command.
3369 *
 * If this is not done right the RAID firmware will not remove a
 * pulled drive and the RAID won't go degraded etc. Effectively,
 * this stops any RAID management functions from working.
3373 *
3374 * Doing another LD sync, requires the use of an event since the
3375 * driver needs to do a mfi_wait_command and can't do that in an
3376 * interrupt thread.
3377 *
3378 * The driver could get the RAID state via the MFI_DCMD_LD_MAP_GET_INFO
3379 * That requires a bunch of structure and it is simpler to just do
3380 * the MFI_DCMD_LD_GET_LIST versus walking the RAID map.
3381 */
3382
/*
 * Workqueue handler: keep the firmware's LD map in sync with the
 * driver.  Fetch the current LD list (MR_DCMD_LD_GET_LIST) and post a
 * pending MR_DCMD_LD_MAP_GET_INFO command; the firmware completes
 * that command only when the RAID state changes, at which point
 * mfi_sync_map_complete() re-queues this work.  On failure, back off
 * for a second and retry from the top.
 */
void
mfi_tbolt_sync_map_info(struct work *w, void *v)
{
	struct mfi_softc *sc = v;
	int i;
	struct mfi_ccb *ccb = NULL;
	uint8_t mbox[MFI_MBOX_SIZE];
	struct mfi_ld *ld_sync;
	size_t ld_size;
	int s;

	DNPRINTF(MFI_D_SYNC, "%s: mfi_tbolt_sync_map_info\n", DEVNAME(sc));
again:
	ld_sync = NULL;
	s = splbio();
	/* A sync command is already pending; nothing to do. */
	if (sc->sc_ldsync_ccb != NULL) {
		splx(s);
		return;
	}

	if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
	    sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false)) {
		aprint_error_dev(sc->sc_dev, "MR_DCMD_LD_GET_LIST failed\n");
		goto err;
	}

	ld_size = sizeof(*ld_sync) * sc->sc_ld_list.mll_no_ld;

	/* Ownership passes to the ccb; freed by mfi_sync_map_complete(). */
	ld_sync = malloc(ld_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (ld_sync == NULL) {
		/* M_WAITOK allocations don't fail; check kept for safety. */
		aprint_error_dev(sc->sc_dev, "Failed to allocate sync\n");
		goto err;
	}
	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		ld_sync[i] = sc->sc_ld_list.mll_list[i].mll_ld;
	}

	if ((ccb = mfi_get_ccb(sc)) == NULL) {
		aprint_error_dev(sc->sc_dev, "Failed to get sync command\n");
		goto err;
	}
	/* Marks the sync as pending; also checked at the top of this fn. */
	sc->sc_ldsync_ccb = ccb;

	memset(mbox, 0, MFI_MBOX_SIZE);
	mbox[0] = sc->sc_ld_list.mll_no_ld;
	/* PEND: firmware holds the command until the RAID state changes. */
	mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;
	if (mfi_mgmt(ccb, NULL, MR_DCMD_LD_MAP_GET_INFO, MFI_DATA_OUT,
	    ld_size, ld_sync, mbox)) {
		aprint_error_dev(sc->sc_dev, "Failed to create sync command\n");
		goto err;
	}
	/*
	 * we won't sleep on this command, so we have to override
	 * the callback set up by mfi_mgmt()
	 */
	ccb->ccb_done = mfi_sync_map_complete;

	mfi_post(sc, ccb);
	splx(s);
	return;

err:
	if (ld_sync)
		free(ld_sync, M_DEVBUF);
	if (ccb)
		mfi_put_ccb(ccb);
	sc->sc_ldsync_ccb = NULL;
	splx(s);
	/* Wait a second, then retry the whole sequence. */
	kpause("ldsyncp", 0, hz, NULL);
	goto again;
}
3454
/*
 * Completion handler for the pending LD map sync command posted by
 * mfi_tbolt_sync_map_info().  The firmware completes the command when
 * the RAID state changes (or on error/abort).  Release the command's
 * resources and, unless the driver is shutting down or the command
 * failed, queue another sync so further state changes are caught.
 */
static void
mfi_sync_map_complete(struct mfi_ccb *ccb)
{
	struct mfi_softc *sc = ccb->ccb_sc;
	bool aborted = !sc->sc_running;	/* shutdown in progress? */

	DNPRINTF(MFI_D_SYNC, "%s: mfi_sync_map_complete\n",
	    DEVNAME(ccb->ccb_sc));
	KASSERT(sc->sc_ldsync_ccb == ccb);
	mfi_mgmt_done(ccb);
	/* The ld_sync array allocated by mfi_tbolt_sync_map_info(). */
	free(ccb->ccb_data, M_DEVBUF);
	if (ccb->ccb_flags & MFI_CCB_F_ERR) {
		aprint_error_dev(sc->sc_dev, "sync command failed\n");
		aborted = true;
	}
	mfi_put_ccb(ccb);
	/* Allow a new sync command to be posted. */
	sc->sc_ldsync_ccb = NULL;

	/* set it up again so the driver can catch more events */
	if (!aborted) {
		workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL);
	}
}
3478
3479 static int
3480 mfifopen(dev_t dev, int flag, int mode, struct lwp *l)
3481 {
3482 struct mfi_softc *sc;
3483
3484 if ((sc = device_lookup_private(&mfi_cd, minor(dev))) == NULL)
3485 return (ENXIO);
3486 return (0);
3487 }
3488
3489 static int
3490 mfifclose(dev_t dev, int flag, int mode, struct lwp *l)
3491 {
3492 return (0);
3493 }
3494
3495 static int
3496 mfifioctl(dev_t dev, u_long cmd, void *data, int flag,
3497 struct lwp *l)
3498 {
3499 struct mfi_softc *sc;
3500 struct mfi_ioc_packet *ioc = data;
3501 uint8_t *udata;
3502 struct mfi_ccb *ccb = NULL;
3503 int ctx, i, s, error;
3504 union mfi_sense_ptr sense_ptr;
3505
3506 switch(cmd) {
3507 case MFI_CMD:
3508 sc = device_lookup_private(&mfi_cd, ioc->mfi_adapter_no);
3509 break;
3510 default:
3511 return ENOTTY;
3512 }
3513 if (sc == NULL)
3514 return (ENXIO);
3515 if (sc->sc_opened)
3516 return (EBUSY);
3517
3518 switch(cmd) {
3519 case MFI_CMD:
3520 error = kauth_authorize_device_passthru(l->l_cred, dev,
3521 KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
3522 if (error)
3523 return error;
3524 if (ioc->mfi_sge_count > MAX_IOCTL_SGE)
3525 return EINVAL;
3526 s = splbio();
3527 if ((ccb = mfi_get_ccb(sc)) == NULL)
3528 return ENOMEM;
3529 ccb->ccb_data = NULL;
3530 ctx = ccb->ccb_frame->mfr_header.mfh_context;
3531 memcpy(ccb->ccb_frame, ioc->mfi_frame.raw,
3532 sizeof(*ccb->ccb_frame));
3533 ccb->ccb_frame->mfr_header.mfh_context = ctx;
3534 ccb->ccb_frame->mfr_header.mfh_scsi_status = 0;
3535 ccb->ccb_frame->mfr_header.mfh_pad0 = 0;
3536 ccb->ccb_frame_size =
3537 (sizeof(union mfi_sgl) * ioc->mfi_sge_count) +
3538 ioc->mfi_sgl_off;
3539 if (ioc->mfi_sge_count > 0) {
3540 ccb->ccb_sgl = (union mfi_sgl *)
3541 &ccb->ccb_frame->mfr_bytes[ioc->mfi_sgl_off];
3542 }
3543 if (ccb->ccb_frame->mfr_header.mfh_flags & MFI_FRAME_DIR_READ)
3544 ccb->ccb_direction = MFI_DATA_IN;
3545 if (ccb->ccb_frame->mfr_header.mfh_flags & MFI_FRAME_DIR_WRITE)
3546 ccb->ccb_direction = MFI_DATA_OUT;
3547 ccb->ccb_len = ccb->ccb_frame->mfr_header.mfh_data_len;
3548 if (ccb->ccb_len > MAXPHYS) {
3549 error = ENOMEM;
3550 goto out;
3551 }
3552 if (ccb->ccb_len &&
3553 (ccb->ccb_direction & (MFI_DATA_IN | MFI_DATA_OUT)) != 0) {
3554 udata = malloc(ccb->ccb_len, M_DEVBUF, M_WAITOK|M_ZERO);
3555 if (udata == NULL) {
3556 error = ENOMEM;
3557 goto out;
3558 }
3559 ccb->ccb_data = udata;
3560 if (ccb->ccb_direction & MFI_DATA_OUT) {
3561 for (i = 0; i < ioc->mfi_sge_count; i++) {
3562 error = copyin(ioc->mfi_sgl[i].iov_base,
3563 udata, ioc->mfi_sgl[i].iov_len);
3564 if (error)
3565 goto out;
3566 udata = &udata[
3567 ioc->mfi_sgl[i].iov_len];
3568 }
3569 }
3570 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK)) {
3571 error = EIO;
3572 goto out;
3573 }
3574 }
3575 if (ccb->ccb_frame->mfr_header.mfh_cmd == MFI_CMD_PD_SCSI_IO) {
3576 ccb->ccb_frame->mfr_io.mif_sense_addr_lo =
3577 htole32(ccb->ccb_psense);
3578 ccb->ccb_frame->mfr_io.mif_sense_addr_hi = 0;
3579 }
3580 ccb->ccb_done = mfi_mgmt_done;
3581 mfi_post(sc, ccb);
3582 while (ccb->ccb_state != MFI_CCB_DONE)
3583 tsleep(ccb, PRIBIO, "mfi_fioc", 0);
3584
3585 if (ccb->ccb_direction & MFI_DATA_IN) {
3586 udata = ccb->ccb_data;
3587 for (i = 0; i < ioc->mfi_sge_count; i++) {
3588 error = copyout(udata,
3589 ioc->mfi_sgl[i].iov_base,
3590 ioc->mfi_sgl[i].iov_len);
3591 if (error)
3592 goto out;
3593 udata = &udata[
3594 ioc->mfi_sgl[i].iov_len];
3595 }
3596 }
3597 if (ioc->mfi_sense_len) {
3598 memcpy(&sense_ptr.sense_ptr_data[0],
3599 &ioc->mfi_frame.raw[ioc->mfi_sense_off],
3600 sizeof(sense_ptr.sense_ptr_data));
3601 error = copyout(ccb->ccb_sense,
3602 sense_ptr.user_space,
3603 sizeof(sense_ptr.sense_ptr_data));
3604 if (error)
3605 goto out;
3606 }
3607 memcpy(ioc->mfi_frame.raw, ccb->ccb_frame,
3608 sizeof(*ccb->ccb_frame));
3609 break;
3610 default:
3611 printf("mfifioctl unhandled cmd 0x%lx\n", cmd);
3612 return ENOTTY;
3613 }
3614
3615 out:
3616 if (ccb->ccb_data)
3617 free(ccb->ccb_data, M_DEVBUF);
3618 if (ccb)
3619 mfi_put_ccb(ccb);
3620 splx(s);
3621 return error;
3622 }
3623