/* $NetBSD: mfi.c,v 1.46 2012/08/26 16:22:32 bouyer Exp $ */
/* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */

/*
 * Copyright (c) 2012 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*-
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Copyright 1994-2009 The FreeBSD Project.
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.46 2012/08/26 16:22:32 bouyer Exp $");

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/cpu.h>

#include <uvm/uvm_param.h>

#include <sys/bus.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/mfireg.h>
#include <dev/ic/mfivar.h>

#if NBIO > 0
#include <dev/biovar.h>
#endif /* NBIO > 0 */

#ifdef MFI_DEBUG
uint32_t mfi_debug = 0
/*	    | MFI_D_CMD */
/*	    | MFI_D_INTR */
/*	    | MFI_D_MISC */
/*	    | MFI_D_DMA */
/*	    | MFI_D_IOCTL */
/*	    | MFI_D_RW */
/*	    | MFI_D_MEM */
/*	    | MFI_D_CCB */
/*	    | MFI_D_SYNC */
	;
#endif

static void	mfi_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mfiminphys(struct buf *bp);

static struct mfi_ccb	*mfi_get_ccb(struct mfi_softc *);
static void	mfi_put_ccb(struct mfi_ccb *);
static int	mfi_init_ccb(struct mfi_softc *);

static struct mfi_mem	*mfi_allocmem(struct mfi_softc *, size_t);
static void	mfi_freemem(struct mfi_softc *, struct mfi_mem **);

static int	mfi_transition_firmware(struct mfi_softc *);
static int	mfi_initialize_firmware(struct mfi_softc *);
static int	mfi_get_info(struct mfi_softc *);
static int	mfi_get_bbu(struct mfi_softc *,
		    struct mfi_bbu_status *);
/* return codes for mfi_get_bbu */
#define MFI_BBU_GOOD	0
#define MFI_BBU_BAD	1
#define MFI_BBU_UNKNOWN	2
static uint32_t	mfi_read(struct mfi_softc *, bus_size_t);
static void	mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
static int	mfi_poll(struct mfi_ccb *);
static int	mfi_create_sgl(struct mfi_ccb *, int);

/* commands */
static int	mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
static int	mfi_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *,
		    uint64_t, uint32_t);
static void	mfi_scsi_ld_done(struct mfi_ccb *);
static void	mfi_scsi_xs_done(struct mfi_ccb *, int, int);
static int	mfi_mgmt_internal(struct mfi_softc *, uint32_t,
		    uint32_t, uint32_t, void *, uint8_t *, bool);
static int	mfi_mgmt(struct mfi_ccb *, struct scsipi_xfer *,
		    uint32_t, uint32_t, uint32_t, void *, uint8_t *);
static void	mfi_mgmt_done(struct mfi_ccb *);

#if NBIO > 0
static int	mfi_ioctl(device_t, u_long, void *);
static int	mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
static int	mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
static int	mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
static int	mfi_ioctl_alarm(struct mfi_softc *,
		    struct bioc_alarm *);
static int	mfi_ioctl_blink(struct mfi_softc *sc,
		    struct bioc_blink *);
static int	mfi_ioctl_setstate(struct mfi_softc *,
		    struct bioc_setstate *);
static int	mfi_bio_hs(struct mfi_softc *, int, int, void *);
static int	mfi_create_sensors(struct mfi_softc *);
static int	mfi_destroy_sensors(struct mfi_softc *);
static void	mfi_sensor_refresh(struct sysmon_envsys *,
		    envsys_data_t *);
#endif /* NBIO > 0 */
static bool	mfi_shutdown(device_t, int);
static bool	mfi_suspend(device_t, const pmf_qual_t *);
static bool	mfi_resume(device_t, const pmf_qual_t *);

static uint32_t	mfi_xscale_fw_state(struct mfi_softc *sc);
static void	mfi_xscale_intr_ena(struct mfi_softc *sc);
static void	mfi_xscale_intr_dis(struct mfi_softc *sc);
static int	mfi_xscale_intr(struct mfi_softc *sc);
static void	mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);

static const struct mfi_iop_ops mfi_iop_xscale = {
	mfi_xscale_fw_state,
	mfi_xscale_intr_dis,
	mfi_xscale_intr_ena,
	mfi_xscale_intr,
	mfi_xscale_post,
	mfi_scsi_ld_io,
};

static uint32_t	mfi_ppc_fw_state(struct mfi_softc *sc);
static void	mfi_ppc_intr_ena(struct mfi_softc *sc);
static void	mfi_ppc_intr_dis(struct mfi_softc *sc);
static int	mfi_ppc_intr(struct mfi_softc *sc);
static void	mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);

static const struct mfi_iop_ops mfi_iop_ppc = {
	mfi_ppc_fw_state,
	mfi_ppc_intr_dis,
	mfi_ppc_intr_ena,
	mfi_ppc_intr,
	mfi_ppc_post,
	mfi_scsi_ld_io,
};

uint32_t	mfi_gen2_fw_state(struct mfi_softc *sc);
void		mfi_gen2_intr_ena(struct mfi_softc *sc);
void		mfi_gen2_intr_dis(struct mfi_softc *sc);
int		mfi_gen2_intr(struct mfi_softc *sc);
void		mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb);

static const struct mfi_iop_ops mfi_iop_gen2 = {
	mfi_gen2_fw_state,
	mfi_gen2_intr_dis,
	mfi_gen2_intr_ena,
	mfi_gen2_intr,
	mfi_gen2_post,
	mfi_scsi_ld_io,
};

uint32_t	mfi_skinny_fw_state(struct mfi_softc *);
void		mfi_skinny_intr_dis(struct mfi_softc *);
void		mfi_skinny_intr_ena(struct mfi_softc *);
int		mfi_skinny_intr(struct mfi_softc *);
void		mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);

static const struct mfi_iop_ops mfi_iop_skinny = {
	mfi_skinny_fw_state,
	mfi_skinny_intr_dis,
	mfi_skinny_intr_ena,
	mfi_skinny_intr,
	mfi_skinny_post,
	mfi_scsi_ld_io,
};

static int	mfi_tbolt_init_desc_pool(struct mfi_softc *);
static int	mfi_tbolt_init_MFI_queue(struct mfi_softc *);
static void	mfi_tbolt_build_mpt_ccb(struct mfi_ccb *);
int		mfi_tbolt_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *,
		    uint64_t, uint32_t);
static void	mfi_tbolt_scsi_ld_done(struct mfi_ccb *);
static int	mfi_tbolt_create_sgl(struct mfi_ccb *, int);
void		mfi_tbolt_sync_map_info(struct work *, void *);
static void	mfi_sync_map_complete(struct mfi_ccb *);

uint32_t	mfi_tbolt_fw_state(struct mfi_softc *);
void		mfi_tbolt_intr_dis(struct mfi_softc *);
void		mfi_tbolt_intr_ena(struct mfi_softc *);
int		mfi_tbolt_intr(struct mfi_softc *sc);
void		mfi_tbolt_post(struct mfi_softc *, struct mfi_ccb *);

static const struct mfi_iop_ops mfi_iop_tbolt = {
	mfi_tbolt_fw_state,
	mfi_tbolt_intr_dis,
	mfi_tbolt_intr_ena,
	mfi_tbolt_intr,
	mfi_tbolt_post,
	mfi_tbolt_scsi_ld_io,
};

#define mfi_fw_state(_s)	((_s)->sc_iop->mio_fw_state(_s))
#define mfi_intr_enable(_s)	((_s)->sc_iop->mio_intr_ena(_s))
#define mfi_intr_disable(_s)	((_s)->sc_iop->mio_intr_dis(_s))
#define mfi_my_intr(_s)		((_s)->sc_iop->mio_intr(_s))
#define mfi_post(_s, _c)	((_s)->sc_iop->mio_post((_s), (_c)))

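/*
 * All supported controller generations speak the same MFI command set but
 * differ in doorbell/status register layout.  Each generation supplies an
 * mfi_iop_ops vector (bound in mfi_attach()), and the mfi_fw_state(),
 * mfi_intr_enable()/mfi_intr_disable(), mfi_my_intr() and mfi_post()
 * macros above dispatch through it, so the common code below never has to
 * touch registers directly.
 */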
static struct mfi_ccb *
mfi_get_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb *ccb;
	int s;

	s = splbio();
	ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
	if (ccb) {
		TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
		ccb->ccb_state = MFI_CCB_READY;
	}
	splx(s);

	DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
	if (__predict_false(ccb == NULL && sc->sc_running))
		aprint_error_dev(sc->sc_dev, "out of ccb\n");

	return ccb;
}

static void
mfi_put_ccb(struct mfi_ccb *ccb)
{
	struct mfi_softc *sc = ccb->ccb_sc;
	struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
	int s;

	DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);

	hdr->mfh_cmd_status = 0x0;
	hdr->mfh_flags = 0x0;
	ccb->ccb_state = MFI_CCB_FREE;
	ccb->ccb_xs = NULL;
	ccb->ccb_flags = 0;
	ccb->ccb_done = NULL;
	ccb->ccb_direction = 0;
	ccb->ccb_frame_size = 0;
	ccb->ccb_extra_frames = 0;
	ccb->ccb_sgl = NULL;
	ccb->ccb_data = NULL;
	ccb->ccb_len = 0;
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		/* erase tb_request_desc but preserve SMID */
		int index = ccb->ccb_tb_request_desc.header.SMID;
		ccb->ccb_tb_request_desc.words = 0;
		ccb->ccb_tb_request_desc.header.SMID = index;
	}
	s = splbio();
	TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
	splx(s);
}

static int
mfi_destroy_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb *ccb;
	uint32_t i;

	DNPRINTF(MFI_D_CCB, "%s: mfi_destroy_ccb\n", DEVNAME(sc));

	for (i = 0; (ccb = mfi_get_ccb(sc)) != NULL; i++) {
		/* destroy the dma map created for transfers */
		bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (i < sc->sc_max_cmds)
		return EBUSY;

	free(sc->sc_ccb, M_DEVBUF);

	return 0;
}

static int
mfi_init_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb *ccb;
	uint32_t i;
	int error;
	bus_addr_t io_req_base_phys;
	uint8_t *io_req_base;
	int offset;

	DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));

	sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
	    M_DEVBUF, M_WAITOK|M_ZERO);
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		/*
		 * The first 256 bytes (SMID 0) are not used.
		 * Don't add them to the cmd list.
		 */
		io_req_base = (uint8_t *)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool) +
		    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
		io_req_base_phys = MFIMEM_DVA(sc->sc_tbolt_reqmsgpool) +
		    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
	}

	for (i = 0; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccb[i];

		ccb->ccb_sc = sc;

		/* select i'th frame */
		ccb->ccb_frame = (union mfi_frame *)
		    ((char *)MFIMEM_KVA(sc->sc_frames) +
		    sc->sc_frames_size * i);
		ccb->ccb_pframe =
		    MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
		ccb->ccb_frame->mfr_header.mfh_context = i;

		/* select i'th sense */
		ccb->ccb_sense = (struct mfi_sense *)
		    ((char *)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
		ccb->ccb_psense =
		    (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);

		/* create a dma map for transfer */
		error = bus_dmamap_create(sc->sc_datadmat,
		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "cannot create ccb dmamap (%d)\n", error);
			goto destroy;
		}
		if (sc->sc_ioptype == MFI_IOP_TBOLT) {
			offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
			ccb->ccb_tb_io_request =
			    (struct mfi_mpi2_request_raid_scsi_io *)
			    (io_req_base + offset);
			ccb->ccb_tb_pio_request =
			    io_req_base_phys + offset;
			offset = MEGASAS_MAX_SZ_CHAIN_FRAME * i;
			ccb->ccb_tb_sg_frame =
			    (mpi2_sge_io_union *)(sc->sc_reply_pool_limit +
			    offset);
			ccb->ccb_tb_psg_frame = sc->sc_sg_frame_busaddr +
			    offset;
			/* SMID 0 is reserved. Set SMID/index from 1 */
			ccb->ccb_tb_request_desc.header.SMID = i + 1;
		}

		DNPRINTF(MFI_D_CCB,
		    "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
		    ccb->ccb_frame->mfr_header.mfh_context, ccb,
		    (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
		    (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
		    (u_long)ccb->ccb_dmamap);

		/* add ccb to queue */
		mfi_put_ccb(ccb);
	}

	return 0;
destroy:
	/* free dma maps and ccb memory */
	while (i) {
		i--;
		ccb = &sc->sc_ccb[i];
		bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	free(sc->sc_ccb, M_DEVBUF);

	return 1;
}

static uint32_t
mfi_read(struct mfi_softc *sc, bus_size_t r)
{
	uint32_t rv;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x%08x ", DEVNAME(sc), (u_long)r, rv);
	return rv;
}

static void
mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

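/*
 * Allocate a physically contiguous, zeroed buffer visible to both the CPU
 * and the controller: create a single-segment DMA map, allocate and map
 * the memory, then load the map so that MFIMEM_KVA()/MFIMEM_DVA() yield
 * the kernel and device views of the same buffer.  Each step is unwound
 * in reverse order on failure.
 */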
static struct mfi_mem *
mfi_allocmem(struct mfi_softc *sc, size_t size)
{
	struct mfi_mem *mm;
	int nsegs;

	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
	    (long)size);

	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mm == NULL)
		return NULL;

	mm->am_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
		goto amfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n",
	    mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);

	memset(mm->am_kva, 0, size);
	return mm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
amfree:
	free(mm, M_DEVBUF);

	return NULL;
}

static void
mfi_freemem(struct mfi_softc *sc, struct mfi_mem **mmp)
{
	struct mfi_mem *mm = *mmp;

	if (mm == NULL)
		return;

	*mmp = NULL;

	DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);

	bus_dmamap_unload(sc->sc_dmat, mm->am_map);
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
	free(mm, M_DEVBUF);
}

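/*
 * Walk the firmware through its boot handshake until it reports
 * MFI_STATE_READY.  For states that need a nudge we ring the inbound
 * doorbell (SKINNY/TBOLT use a different register), then poll the state
 * register for up to max_wait seconds before declaring the firmware
 * stuck.
 */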
static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;

	fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;

	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
	    fw_state);

	while (fw_state != MFI_STATE_READY) {
		DNPRINTF(MFI_D_MISC,
		    "%s: waiting for firmware to become ready\n",
		    DEVNAME(sc));
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			aprint_error_dev(sc->sc_dev, "firmware fault\n");
			return 1;
		case MFI_STATE_WAIT_HANDSHAKE:
			if (sc->sc_ioptype == MFI_IOP_SKINNY ||
			    sc->sc_ioptype == MFI_IOP_TBOLT)
				mfi_write(sc, MFI_SKINNY_IDB,
				    MFI_INIT_CLEAR_HANDSHAKE);
			else
				mfi_write(sc, MFI_IDB,
				    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			if (sc->sc_ioptype == MFI_IOP_SKINNY ||
			    sc->sc_ioptype == MFI_IOP_TBOLT)
				mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			else
				mfi_write(sc, MFI_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			if (sc->sc_ioptype == MFI_IOP_TBOLT) {
				mfi_write(sc, MFI_SKINNY_IDB,
				    MFI_INIT_HOTPLUG);
				max_wait = 180;
				break;
			}
			/* FALLTHROUGH */
		default:
			aprint_error_dev(sc->sc_dev,
			    "unknown firmware state %d\n", fw_state);
			return 1;
		}
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			aprint_error_dev(sc->sc_dev,
			    "firmware stuck in state %#x\n", fw_state);
			return 1;
		}
	}

	return 0;
}

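/*
 * Post the INIT frame that hands the firmware the bus addresses of the
 * reply queue and of its producer/consumer indices (the mfi_prod_cons
 * structure in sc_pcq).  The queue is sized sc_max_cmds + 1 entries so it
 * can never appear full.  This must complete before any other command is
 * issued to a non-Thunderbolt controller.
 */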
static int
mfi_initialize_firmware(struct mfi_softc *sc)
{
	struct mfi_ccb *ccb;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;

	DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));

	if ((ccb = mfi_get_ccb(sc)) == NULL)
		return 1;

	init = &ccb->ccb_frame->mfr_init;
	qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);

	memset(qinfo, 0, sizeof *qinfo);
	qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
	qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_reply_q));
	qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_producer));
	qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_consumer));

	init->mif_header.mfh_cmd = MFI_CMD_INIT;
	init->mif_header.mfh_data_len = sizeof *qinfo;
	init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);

	DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
	    DEVNAME(sc),
	    qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
	    qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);

	if (mfi_poll(ccb)) {
		aprint_error_dev(sc->sc_dev,
		    "mfi_initialize_firmware failed\n");
		return 1;
	}

	mfi_put_ccb(ccb);

	return 0;
}

static int
mfi_get_info(struct mfi_softc *sc)
{
#ifdef MFI_DEBUG
	int i;
#endif
	DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));

	if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
	    sizeof(sc->sc_info), &sc->sc_info, NULL, cold ? true : false))
		return 1;

#ifdef MFI_DEBUG

	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		printf("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		printf("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	printf("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	printf("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	printf("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		printf("%.0" PRIx64 " ", sc->sc_info.mci_host.mih_port_addr[i]);
	printf("\n");

	printf("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++) {
		printf("%.0" PRIx64 " ",
		    sc->sc_info.mci_device.mid_port_addr[i]);
	}
	printf("\n");
#endif /* MFI_DEBUG */

	return 0;
}

static int
mfi_get_bbu(struct mfi_softc *sc, struct mfi_bbu_status *stat)
{
	DNPRINTF(MFI_D_MISC, "%s: mfi_get_bbu\n", DEVNAME(sc));

	if (mfi_mgmt_internal(sc, MR_DCMD_BBU_GET_STATUS, MFI_DATA_IN,
	    sizeof(*stat), stat, NULL, cold ? true : false))
		return MFI_BBU_UNKNOWN;
#ifdef MFI_DEBUG
	printf("bbu type %d, voltage %d, current %d, temperature %d, "
	    "status 0x%x\n", stat->battery_type, stat->voltage, stat->current,
	    stat->temperature, stat->fw_status);
	printf("details: ");
	switch (stat->battery_type) {
	case MFI_BBU_TYPE_IBBU:
		printf("gauge %d relative charge %d charger state %d "
		    "charger ctrl %d\n", stat->detail.ibbu.gas_guage_status,
		    stat->detail.ibbu.relative_charge,
		    stat->detail.ibbu.charger_system_state,
		    stat->detail.ibbu.charger_system_ctrl);
		printf("\tcurrent %d abs charge %d max error %d\n",
		    stat->detail.ibbu.charging_current,
		    stat->detail.ibbu.absolute_charge,
		    stat->detail.ibbu.max_error);
		break;
	case MFI_BBU_TYPE_BBU:
		printf("gauge %d relative charge %d charger state %d\n",
		    stat->detail.ibbu.gas_guage_status,
		    stat->detail.bbu.relative_charge,
		    stat->detail.bbu.charger_status);
		printf("\trem capacity %d full capacity %d SOH %d\n",
		    stat->detail.bbu.remaining_capacity,
		    stat->detail.bbu.full_charge_capacity,
		    stat->detail.bbu.is_SOH_good);
		break;
	default:
		printf("\n");
	}
#endif
	switch (stat->battery_type) {
	case MFI_BBU_TYPE_BBU:
		return (stat->detail.bbu.is_SOH_good ?
		    MFI_BBU_GOOD : MFI_BBU_BAD);
	case MFI_BBU_TYPE_NONE:
		return MFI_BBU_UNKNOWN;
	default:
		if (stat->fw_status &
		    (MFI_BBU_STATE_PACK_MISSING |
		     MFI_BBU_STATE_VOLTAGE_LOW |
		     MFI_BBU_STATE_TEMPERATURE_HIGH |
		     MFI_BBU_STATE_LEARN_CYC_FAIL |
		     MFI_BBU_STATE_LEARN_CYC_TIMEOUT |
		     MFI_BBU_STATE_I2C_ERR_DETECT))
			return MFI_BBU_BAD;
		return MFI_BBU_GOOD;
	}
}

static void
mfiminphys(struct buf *bp)
{
	DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);

	/* XXX currently using MFI_MAXFER = MAXPHYS */
	if (bp->b_bcount > MFI_MAXFER)
		bp->b_bcount = MFI_MAXFER;
	minphys(bp);
}

int
mfi_rescan(device_t self, const char *ifattr, const int *locators)
{
	struct mfi_softc *sc = device_private(self);

	if (sc->sc_child != NULL)
		return 0;

	sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
	    scsiprint, NULL);

	return 0;
}

void
mfi_childdetached(device_t self, device_t child)
{
	struct mfi_softc *sc = device_private(self);

	KASSERT(self == sc->sc_dev);
	KASSERT(child == sc->sc_child);

	if (child == sc->sc_child)
		sc->sc_child = NULL;
}

int
mfi_detach(struct mfi_softc *sc, int flags)
{
	int error;

	DNPRINTF(MFI_D_MISC, "%s: mfi_detach\n", DEVNAME(sc));

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

#if NBIO > 0
	mfi_destroy_sensors(sc);
	bio_unregister(sc->sc_dev);
#endif /* NBIO > 0 */

	mfi_intr_disable(sc);
	mfi_shutdown(sc->sc_dev, 0);

	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		workqueue_destroy(sc->sc_ldsync_wq);
		mfi_put_ccb(sc->sc_ldsync_ccb);
		mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool);
		mfi_freemem(sc, &sc->sc_tbolt_ioc_init);
		mfi_freemem(sc, &sc->sc_tbolt_verbuf);
	}

	if ((error = mfi_destroy_ccb(sc)) != 0)
		return error;

	mfi_freemem(sc, &sc->sc_sense);

	mfi_freemem(sc, &sc->sc_frames);

	mfi_freemem(sc, &sc->sc_pcq);

	return 0;
}

static bool
mfi_shutdown(device_t dev, int how)
{
	struct mfi_softc *sc = device_private(dev);
	uint8_t mbox[MFI_MBOX_SIZE];
	int s = splbio();

	DNPRINTF(MFI_D_MISC, "%s: mfi_shutdown\n", DEVNAME(sc));
	if (sc->sc_running) {
		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
		if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_CACHE_FLUSH,
		    MFI_DATA_NONE, 0, NULL, mbox, true)) {
			aprint_error_dev(dev, "shutdown: cache flush failed\n");
			goto fail;
		}

		mbox[0] = 0;
		if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_SHUTDOWN,
		    MFI_DATA_NONE, 0, NULL, mbox, true)) {
			aprint_error_dev(dev, "shutdown: "
			    "firmware shutdown failed\n");
			goto fail;
		}
		sc->sc_running = false;
	}
	splx(s);
	return true;
fail:
	splx(s);
	return false;
}

static bool
mfi_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}

static bool
mfi_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}

int
mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
{
	struct scsipi_adapter *adapt = &sc->sc_adapt;
	struct scsipi_channel *chan = &sc->sc_chan;
	uint32_t status, frames, max_sgl;
	int i;

	DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));

	sc->sc_ioptype = iop;

	switch (iop) {
	case MFI_IOP_XSCALE:
		sc->sc_iop = &mfi_iop_xscale;
		break;
	case MFI_IOP_PPC:
		sc->sc_iop = &mfi_iop_ppc;
		break;
	case MFI_IOP_GEN2:
		sc->sc_iop = &mfi_iop_gen2;
		break;
	case MFI_IOP_SKINNY:
		sc->sc_iop = &mfi_iop_skinny;
		break;
	case MFI_IOP_TBOLT:
		sc->sc_iop = &mfi_iop_tbolt;
		break;
	default:
		panic("%s: unknown iop %d", DEVNAME(sc), iop);
	}

	if (mfi_transition_firmware(sc))
		return 1;

	TAILQ_INIT(&sc->sc_ccb_freeq);

	status = mfi_fw_state(sc);
	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
	max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
		sc->sc_sgl_size = sizeof(struct mfi_sg_ieee);
	} else if (sc->sc_64bit_dma) {
		sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
		sc->sc_sgl_size = sizeof(struct mfi_sg64);
	} else {
		sc->sc_max_sgl = max_sgl;
		sc->sc_sgl_size = sizeof(struct mfi_sg32);
	}
	DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
	    DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);

	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		uint32_t tb_mem_size;
		/* for alignment */
		tb_mem_size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT;

		tb_mem_size +=
		    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1);
		sc->sc_reply_pool_size =
		    ((sc->sc_max_cmds + 1 + 15) / 16) * 16;
		tb_mem_size +=
		    MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;

		/* this is for the SGLs */
		tb_mem_size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->sc_max_cmds;
		sc->sc_tbolt_reqmsgpool = mfi_allocmem(sc, tb_mem_size);
		if (sc->sc_tbolt_reqmsgpool == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to allocate thunderbolt "
			    "request message pool\n");
			goto nopcq;
		}
		if (mfi_tbolt_init_desc_pool(sc)) {
			aprint_error_dev(sc->sc_dev,
			    "Thunderbolt pool preparation error\n");
			goto nopcq;
		}

		/*
		 * Allocate DMA memory for the MPI2 IOC Init descriptor.
		 * We keep it separate from the pool allocated for request
		 * and reply descriptors to avoid confusion later.
		 */
		sc->sc_tbolt_ioc_init = mfi_allocmem(sc,
		    sizeof(struct mpi2_ioc_init_request));
		if (sc->sc_tbolt_ioc_init == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to allocate thunderbolt IOC init memory\n");
			goto nopcq;
		}

		sc->sc_tbolt_verbuf = mfi_allocmem(sc,
		    MEGASAS_MAX_NAME * sizeof(bus_addr_t));
		if (sc->sc_tbolt_verbuf == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to allocate thunderbolt version buffer\n");
			goto nopcq;
		}
	}
	/* consumer/producer and reply queue memory */
	sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
	    sizeof(struct mfi_prod_cons));
	if (sc->sc_pcq == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate reply queue memory\n");
		goto nopcq;
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

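	/*
	 * Each command gets a fixed slot of sc_frames_size bytes: one
	 * MFI_FRAME_SIZE header frame plus enough extra frames to hold the
	 * largest scatter/gather list (sc_max_sgl entries of sc_sgl_size
	 * bytes each).  The firmware expects the pool to be 64-byte
	 * aligned, hence the alignment check below.
	 */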
	/* frame memory */
	frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) /
	    MFI_FRAME_SIZE + 1;
	sc->sc_frames_size = frames * MFI_FRAME_SIZE;
	sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
	if (sc->sc_frames == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate frame memory\n");
		goto noframe;
	}
	/* XXX hack, fix this */
	if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
		aprint_error_dev(sc->sc_dev,
		    "improper frame alignment (%#llx) FIXME\n",
		    (long long int)MFIMEM_DVA(sc->sc_frames));
		goto noframe;
	}

	/* sense memory */
	sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
	if (sc->sc_sense == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate sense memory\n");
		goto nosense;
	}

	/* now that we have all memory bits go initialize ccbs */
	if (mfi_init_ccb(sc)) {
		aprint_error_dev(sc->sc_dev, "could not init ccb list\n");
		goto noinit;
	}

	/* kickstart firmware with all addresses and pointers */
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		if (mfi_tbolt_init_MFI_queue(sc)) {
			aprint_error_dev(sc->sc_dev,
			    "could not initialize firmware\n");
			goto noinit;
		}
	} else {
		if (mfi_initialize_firmware(sc)) {
			aprint_error_dev(sc->sc_dev,
			    "could not initialize firmware\n");
			goto noinit;
		}
	}
	sc->sc_running = true;

	if (mfi_get_info(sc)) {
		aprint_error_dev(sc->sc_dev,
		    "could not retrieve controller information\n");
		goto noinit;
	}
	aprint_normal_dev(sc->sc_dev,
	    "%s version %s\n",
	    sc->sc_info.mci_product_name,
	    sc->sc_info.mci_package_version);

	aprint_normal_dev(sc->sc_dev, "logical drives %d, %dMB RAM, ",
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_memory_size);
	sc->sc_bbuok = false;
	if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) {
		struct mfi_bbu_status bbu_stat;
		int mfi_bbu_status = mfi_get_bbu(sc, &bbu_stat);
		aprint_normal("BBU type ");
		switch (bbu_stat.battery_type) {
		case MFI_BBU_TYPE_BBU:
			aprint_normal("BBU");
			break;
		case MFI_BBU_TYPE_IBBU:
			aprint_normal("IBBU");
			break;
		default:
			aprint_normal("unknown type %d", bbu_stat.battery_type);
		}
		aprint_normal(", status ");
		switch (mfi_bbu_status) {
		case MFI_BBU_GOOD:
			aprint_normal("good\n");
			sc->sc_bbuok = true;
			break;
		case MFI_BBU_BAD:
			aprint_normal("bad\n");
			break;
		case MFI_BBU_UNKNOWN:
			aprint_normal("unknown\n");
			break;
		default:
			panic("mfi_bbu_status");
		}
	} else {
		aprint_normal("BBU not present\n");
	}

	sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
	sc->sc_max_ld = sc->sc_ld_cnt;
	for (i = 0; i < sc->sc_ld_cnt; i++)
		sc->sc_ld[i].ld_present = 1;

	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* keep a few commands for management */
	if (sc->sc_max_cmds > 4)
		adapt->adapt_openings = sc->sc_max_cmds - 4;
	else
		adapt->adapt_openings = sc->sc_max_cmds;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = mfi_scsipi_request;
	adapt->adapt_minphys = mfiminphys;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_sas_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = MFI_MAX_LD;
	chan->chan_id = MFI_MAX_LD;

	mfi_rescan(sc->sc_dev, "scsi", NULL);

	/* enable interrupts */
	mfi_intr_enable(sc);

#if NBIO > 0
	if (bio_register(sc->sc_dev, mfi_ioctl) != 0)
		panic("%s: controller registration failed", DEVNAME(sc));
	if (mfi_create_sensors(sc) != 0)
		aprint_error_dev(sc->sc_dev, "unable to create sensors\n");
#endif /* NBIO > 0 */
	if (!pmf_device_register1(sc->sc_dev, mfi_suspend, mfi_resume,
	    mfi_shutdown)) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");
	}

	return 0;
noinit:
	mfi_freemem(sc, &sc->sc_sense);
nosense:
	mfi_freemem(sc, &sc->sc_frames);
noframe:
	mfi_freemem(sc, &sc->sc_pcq);
nopcq:
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		if (sc->sc_tbolt_reqmsgpool)
			mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool);
		if (sc->sc_tbolt_ioc_init)
			mfi_freemem(sc, &sc->sc_tbolt_ioc_init);
		if (sc->sc_tbolt_verbuf)
			mfi_freemem(sc, &sc->sc_tbolt_verbuf);
	}
	return 1;
}

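/*
 * Synchronously execute a command that has already been set up in the
 * ccb.  On Thunderbolt (sc_MFA_enabled) completions are only delivered
 * through the reply queue, so we drive the interrupt handler by hand; on
 * older controllers we can watch the status byte in the frame itself.
 * Busy-waits for up to 5 seconds.
 */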
static int
mfi_poll(struct mfi_ccb *ccb)
{
	struct mfi_softc *sc = ccb->ccb_sc;
	struct mfi_frame_header *hdr;
	int to = 0;
	int rv = 0;

	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));

	hdr = &ccb->ccb_frame->mfr_header;
	hdr->mfh_cmd_status = 0xff;
	if (!sc->sc_MFA_enabled)
		hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* no callback, caller is supposed to do the cleanup */
	ccb->ccb_done = NULL;

	mfi_post(sc, ccb);
	if (sc->sc_MFA_enabled) {
		/*
		 * Depending on the command type, the result may or may not
		 * be posted to *hdr.  In addition there seems to be no way
		 * to avoid posting the SMID to the reply queue, so poll
		 * using the interrupt routine.
		 */
		while (ccb->ccb_state != MFI_CCB_DONE) {
			delay(1000);
			if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
				rv = 1;
				break;
			}
			mfi_tbolt_intrh(sc);
		}
	} else {
		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
		    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
		    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);

		while (hdr->mfh_cmd_status == 0xff) {
			delay(1000);
			if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
				rv = 1;
				break;
			}
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
		}
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "timeout on ccb %d\n",
		    hdr->mfh_context);
		ccb->ccb_flags |= MFI_CCB_F_ERR;
		return 1;
	}

	return 0;
}

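/*
 * Interrupt handler for non-Thunderbolt controllers: drain the shared
 * reply ring.  The firmware advances mpc_producer as commands complete;
 * we consume entries up to it, map each context back to its ccb, run the
 * ccb's completion routine, then publish the new mpc_consumer back to
 * the firmware.
 */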
int
mfi_intr(void *arg)
{
	struct mfi_softc *sc = arg;
	struct mfi_prod_cons *pcq;
	struct mfi_ccb *ccb;
	uint32_t producer, consumer, ctx;
	int claimed = 0;

	if (!mfi_my_intr(sc))
		return 0;

	pcq = MFIMEM_KVA(sc->sc_pcq);

	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
	    (u_long)sc, (u_long)pcq);

	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	producer = pcq->mpc_producer;
	consumer = pcq->mpc_consumer;

	while (consumer != producer) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
		    DEVNAME(sc), producer, consumer);

		ctx = pcq->mpc_reply_q[consumer];
		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
		if (ctx == MFI_INVALID_CTX)
			aprint_error_dev(sc->sc_dev,
			    "invalid context, p: %d c: %d\n",
			    producer, consumer);
		else {
			/* XXX remove from queue and call scsi_done */
			ccb = &sc->sc_ccb[ctx];
			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
			    DEVNAME(sc), ctx);
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			ccb->ccb_done(ccb);

			claimed = 1;
		}
		consumer++;
		if (consumer == (sc->sc_max_cmds + 1))
			consumer = 0;
	}

	pcq->mpc_consumer = consumer;
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return claimed;
}

static int
mfi_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint64_t blockno,
    uint32_t blockcnt)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mfi_io_frame *io;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld_io: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	if (!xs->data)
		return 1;

	io = &ccb->ccb_frame->mfr_io;
	if (xs->xs_control & XS_CTL_DATA_IN) {
		io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
		ccb->ccb_direction = MFI_DATA_IN;
	} else {
		io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
		ccb->ccb_direction = MFI_DATA_OUT;
	}
	io->mif_header.mfh_target_id = periph->periph_target;
	io->mif_header.mfh_timeout = 0;
	io->mif_header.mfh_flags = 0;
	io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
	io->mif_header.mfh_data_len = blockcnt;
	io->mif_lba_hi = (blockno >> 32);
	io->mif_lba_lo = (blockno & 0xffffffff);
	io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
	io->mif_sense_addr_hi = 0;

	ccb->ccb_done = mfi_scsi_ld_done;
	ccb->ccb_xs = xs;
	ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
	ccb->ccb_sgl = &io->mif_sgl;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
		return 1;

	return 0;
}

static void
mfi_scsi_ld_done(struct mfi_ccb *ccb)
{
	struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
	mfi_scsi_xs_done(ccb, hdr->mfh_cmd_status, hdr->mfh_scsi_status);
}

static void
mfi_scsi_xs_done(struct mfi_ccb *ccb, int status, int scsi_status)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct mfi_softc *sc = ccb->ccb_sc;

	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
	    DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);

	if (xs->data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (status != MFI_STAT_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
		    DEVNAME(sc), status);

		if (scsi_status != 0) {
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
			    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
			    MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
			DNPRINTF(MFI_D_INTR,
			    "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
			    DEVNAME(sc), scsi_status,
			    (u_long)&xs->sense, (u_long)ccb->ccb_sense);
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, ccb->ccb_sense,
			    sizeof(struct scsi_sense_data));
			xs->error = XS_SENSE;
		}
	} else {
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
	}

	mfi_put_ccb(ccb);
	scsipi_done(xs);
}

static int
mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
{
	struct mfi_pass_frame *pf;
	struct scsipi_periph *periph = xs->xs_periph;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	pf = &ccb->ccb_frame->mfr_pass;
	pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
	pf->mpf_header.mfh_target_id = periph->periph_target;
	pf->mpf_header.mfh_lun_id = 0;
	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
	pf->mpf_header.mfh_timeout = 0;
	pf->mpf_header.mfh_data_len = xs->datalen; /* XXX */
	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;

	pf->mpf_sense_addr_hi = 0;
	pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);

	memset(pf->mpf_cdb, 0, 16);
	memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);

	ccb->ccb_done = mfi_scsi_ld_done;
	ccb->ccb_xs = xs;
	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
	ccb->ccb_sgl = &pf->mpf_sgl;

	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
		ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
		    MFI_DATA_IN : MFI_DATA_OUT;
	else
		ccb->ccb_direction = MFI_DATA_NONE;

	if (xs->data) {
		ccb->ccb_data = xs->data;
		ccb->ccb_len = xs->datalen;

		if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
			return 1;
	}

	return 0;
}

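/*
 * scsipi entry point.  Logical-drive targets only: READ/WRITE CDBs are
 * decoded here and sent down the fast mfi_scsi_ld_io() path, cache
 * flushes become DCMDs (or are short-circuited when a healthy BBU makes
 * the write cache stable storage), and everything else is wrapped in a
 * pass-through frame for the firmware to interpret.
 */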
static void
mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct mfi_softc *sc = device_private(adapt->adapt_dev);
	struct mfi_ccb *ccb;
	struct scsi_rw_6 *rw;
	struct scsipi_rw_10 *rwb;
	struct scsipi_rw_12 *rw12;
	struct scsipi_rw_16 *rw16;
	uint64_t blockno;
	uint32_t blockcnt;
	uint8_t target;
	uint8_t mbox[MFI_MBOX_SIZE];
	int s;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;

	periph = xs->xs_periph;
	target = periph->periph_target;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x "
	    "target %d lun %d\n", DEVNAME(sc), req, xs->cmd->opcode,
	    periph->periph_target, periph->periph_lun);

	s = splbio();
	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
	    periph->periph_lun != 0) {
		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
		    DEVNAME(sc), target);
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		splx(s);
		return;
	}
	if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
	    xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
		/* the cache is stable storage, don't flush */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		scsipi_done(xs);
		splx(s);
		return;
	}

	if ((ccb = mfi_get_ccb(sc)) == NULL) {
		DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n",
		    DEVNAME(sc));
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}

	switch (xs->cmd->opcode) {
	/* IO path */
	case READ_16:
	case WRITE_16:
		rw16 = (struct scsipi_rw_16 *)xs->cmd;
		blockno = _8btol(rw16->addr);
		blockcnt = _4btol(rw16->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case READ_12:
	case WRITE_12:
		rw12 = (struct scsipi_rw_12 *)xs->cmd;
		blockno = _4btol(rw12->addr);
		blockcnt = _4btol(rw12->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case READ_10:
	case WRITE_10:
		rwb = (struct scsipi_rw_10 *)xs->cmd;
		blockno = _4btol(rwb->addr);
		blockcnt = _2btol(rwb->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case SCSI_READ_6_COMMAND:
	case SCSI_WRITE_6_COMMAND:
		rw = (struct scsi_rw_6 *)xs->cmd;
		blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		blockcnt = rw->length ? rw->length : 0x100;
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case SCSI_SYNCHRONIZE_CACHE_10:
	case SCSI_SYNCHRONIZE_CACHE_16:
		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
		if (mfi_mgmt(ccb, xs,
		    MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, mbox)) {
			goto stuffup;
		}
		break;

	/* hand it off to the firmware and let it deal with it */
	case SCSI_TEST_UNIT_READY:
		/* save off sd? after autoconf */
		if (!cold)	/* XXX bogus */
			strlcpy(sc->sc_ld[target].ld_dev,
			    device_xname(sc->sc_dev),
			    sizeof(sc->sc_ld[target].ld_dev));
		/* FALLTHROUGH */

	default:
		if (mfi_scsi_ld(ccb, xs)) {
			goto stuffup;
		}
		break;
	}

	DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);

	if (xs->xs_control & XS_CTL_POLL) {
		if (mfi_poll(ccb)) {
			/* XXX check for sense in ccb->ccb_sense? */
			aprint_error_dev(sc->sc_dev,
			    "mfi_scsipi_request poll failed\n");
			memset(&xs->sense, 0, sizeof(xs->sense));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | SSD_RCODE_CURRENT;
			xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
			xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
			xs->error = XS_SENSE;
			xs->status = SCSI_CHECK;
		} else {
			DNPRINTF(MFI_D_DMA,
			    "%s: mfi_scsipi_request poll complete %d\n",
			    DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
			xs->error = XS_NOERROR;
			xs->status = SCSI_OK;
			xs->resid = 0;
		}
		mfi_put_ccb(ccb);
		scsipi_done(xs);
		splx(s);
		return;
	}

	mfi_post(sc, ccb);

	DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
	    ccb->ccb_dmamap->dm_nsegs);

	splx(s);
	return;

stuffup:
	mfi_put_ccb(ccb);
	xs->error = XS_DRIVER_STUFFUP;
	scsipi_done(xs);
	splx(s);
}

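/*
 * Load ccb_data into the ccb's DMA map and translate the resulting
 * segments into the frame's scatter/gather list.  Three formats exist:
 * 32-bit, 64-bit, and IEEE SGEs (used for Thunderbolt I/O commands); the
 * chosen format is flagged in the frame header along with the transfer
 * direction, and ccb_frame_size/ccb_extra_frames are updated to account
 * for the SGL.
 */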
static int
mfi_create_sgl(struct mfi_ccb *ccb, int flags)
{
	struct mfi_softc *sc = ccb->ccb_sc;
	struct mfi_frame_header *hdr;
	bus_dma_segment_t *sgd;
	union mfi_sgl *sgl;
	int error, i;

	DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
	    (u_long)ccb->ccb_data);

	if (!ccb->ccb_data)
		return 1;

	KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p());
	error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap,
	    ccb->ccb_data, ccb->ccb_len, NULL, flags);
	if (error) {
		if (error == EFBIG) {
			aprint_error_dev(sc->sc_dev, "more than %d dma segs\n",
			    sc->sc_max_sgl);
		} else {
			aprint_error_dev(sc->sc_dev,
			    "error %d loading dma map\n", error);
		}
		return 1;
	}

	hdr = &ccb->ccb_frame->mfr_header;
	sgl = ccb->ccb_sgl;
	sgd = ccb->ccb_dmamap->dm_segs;
	for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
		if (sc->sc_ioptype == MFI_IOP_TBOLT &&
		    (hdr->mfh_cmd == MFI_CMD_PD_SCSI_IO ||
		     hdr->mfh_cmd == MFI_CMD_LD_READ ||
		     hdr->mfh_cmd == MFI_CMD_LD_WRITE)) {
			sgl->sg_ieee[i].addr = htole64(sgd[i].ds_addr);
			sgl->sg_ieee[i].len = htole32(sgd[i].ds_len);
			sgl->sg_ieee[i].flags = 0;
			DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#"
			    PRIx32 "\n",
			    DEVNAME(sc), sgl->sg_ieee[i].addr,
			    sgl->sg_ieee[i].len);
			hdr->mfh_flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
		} else if (sc->sc_64bit_dma) {
			sgl->sg64[i].addr = htole64(sgd[i].ds_addr);
			sgl->sg64[i].len = htole32(sgd[i].ds_len);
			DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#"
			    PRIx32 "\n",
			    DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
			hdr->mfh_flags |= MFI_FRAME_SGL64;
		} else {
			sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
			sgl->sg32[i].len = htole32(sgd[i].ds_len);
			DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
			    DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
			hdr->mfh_flags |= MFI_FRAME_SGL32;
		}
	}

	if (ccb->ccb_direction == MFI_DATA_IN) {
		hdr->mfh_flags |= MFI_FRAME_DIR_READ;
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	} else {
		hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}

	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
	ccb->ccb_frame_size += sc->sc_sgl_size * ccb->ccb_dmamap->dm_nsegs;
	ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;

	DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
	    " dm_nsegs: %d extra_frames: %d\n",
	    DEVNAME(sc),
	    hdr->mfh_sg_count,
	    ccb->ccb_frame_size,
	    sc->sc_frames_size,
	    ccb->ccb_dmamap->dm_nsegs,
	    ccb->ccb_extra_frames);

	return 0;
}

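/*
 * Helper for driver-internal DCMDs: grab a ccb, build the management
 * frame, then either poll for completion (needed while cold or during
 * shutdown) or post the command and sleep until mfi_mgmt_done() wakes
 * us up.
 */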
static int
mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t dir,
    uint32_t len, void *buf, uint8_t *mbox, bool poll)
{
	struct mfi_ccb *ccb;
	int rv = 1;

	if ((ccb = mfi_get_ccb(sc)) == NULL)
		return rv;
	rv = mfi_mgmt(ccb, NULL, opc, dir, len, buf, mbox);
	if (rv)
		goto done;	/* frame was never posted; recycle the ccb */

	if (poll) {
		rv = 1;
		if (mfi_poll(ccb))
			goto done;
	} else {
		mfi_post(sc, ccb);

		DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt_internal sleeping\n",
		    DEVNAME(sc));
		while (ccb->ccb_state != MFI_CCB_DONE)
			tsleep(ccb, PRIBIO, "mfi_mgmt", 0);

		if (ccb->ccb_flags & MFI_CCB_F_ERR)
			goto done;
	}
	rv = 0;

done:
	mfi_put_ccb(ccb);
	return rv;
}
1844
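/*
 * Fill in a DCMD management frame for the given ccb: opcode,
 * optional mailbox contents, and a scatter/gather list when the
 * command transfers data.
 */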
1845 static int
1846 mfi_mgmt(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
1847 uint32_t opc, uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
1848 {
1849 struct mfi_dcmd_frame *dcmd;
1850
1851 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(ccb->ccb_sc), opc);
1852
1853 dcmd = &ccb->ccb_frame->mfr_dcmd;
1854 memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1855 dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1856 dcmd->mdf_header.mfh_timeout = 0;
1857
1858 dcmd->mdf_opcode = opc;
1859 dcmd->mdf_header.mfh_data_len = 0;
1860 ccb->ccb_direction = dir;
1861 ccb->ccb_xs = xs;
1862 ccb->ccb_done = mfi_mgmt_done;
1863
1864 ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1865
1866 /* handle special opcodes */
1867 if (mbox)
1868 memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1869
1870 if (dir != MFI_DATA_NONE) {
1871 dcmd->mdf_header.mfh_data_len = len;
1872 ccb->ccb_data = buf;
1873 ccb->ccb_len = len;
1874 ccb->ccb_sgl = &dcmd->mdf_sgl;
1875
1876 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
1877 return 1;
1878 }
1879 return 0;
1880 }
1881
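/*
 * Completion handler for management commands: sync and unload the
 * data DMA map, latch any error in the ccb, and either finish the
 * scsipi transfer or wake up the thread sleeping in
 * mfi_mgmt_internal().
 */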
1882 static void
1883 mfi_mgmt_done(struct mfi_ccb *ccb)
1884 {
1885 struct scsipi_xfer *xs = ccb->ccb_xs;
1886 struct mfi_softc *sc = ccb->ccb_sc;
1887 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1888
1889 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
1890 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1891
1892 if (ccb->ccb_data != NULL) {
1893 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
1894 DEVNAME(sc));
1895 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
1896 ccb->ccb_dmamap->dm_mapsize,
1897 (ccb->ccb_direction & MFI_DATA_IN) ?
1898 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1899
1900 bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
1901 }
1902
1903 if (hdr->mfh_cmd_status != MFI_STAT_OK)
1904 ccb->ccb_flags |= MFI_CCB_F_ERR;
1905
1906 ccb->ccb_state = MFI_CCB_DONE;
1907 if (xs) {
1908 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
1909 xs->error = XS_DRIVER_STUFFUP;
1910 } else {
1911 xs->error = XS_NOERROR;
1912 xs->status = SCSI_OK;
1913 xs->resid = 0;
1914 }
1915 mfi_put_ccb(ccb);
1916 scsipi_done(xs);
1917 } else
1918 wakeup(ccb);
1919 }
1920
1921 #if NBIO > 0
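/*
 * bio(4) ioctl entry point: dispatch the BIOC* commands to the
 * helper routines below, at splbio and holding the kernel lock.
 */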
1922 int
1923 mfi_ioctl(device_t dev, u_long cmd, void *addr)
1924 {
1925 struct mfi_softc *sc = device_private(dev);
1926 int error = 0;
1927 int s;
1928
1929 KERNEL_LOCK(1, curlwp);
1930 s = splbio();
1931
1932 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1933
1934 switch (cmd) {
1935 case BIOCINQ:
1936 DNPRINTF(MFI_D_IOCTL, "inq\n");
1937 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1938 break;
1939
1940 case BIOCVOL:
1941 DNPRINTF(MFI_D_IOCTL, "vol\n");
1942 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1943 break;
1944
1945 case BIOCDISK:
1946 DNPRINTF(MFI_D_IOCTL, "disk\n");
1947 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1948 break;
1949
1950 case BIOCALARM:
1951 DNPRINTF(MFI_D_IOCTL, "alarm\n");
1952 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1953 break;
1954
1955 case BIOCBLINK:
1956 DNPRINTF(MFI_D_IOCTL, "blink\n");
1957 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1958 break;
1959
1960 case BIOCSETSTATE:
1961 DNPRINTF(MFI_D_IOCTL, "setstate\n");
1962 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1963 break;
1964
1965 default:
1966 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1967 error = EINVAL;
1968 }
1969 splx(s);
1970 KERNEL_UNLOCK_ONE(curlwp);
1971
1972 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
1973 return error;
1974 }
1975
1976 static int
1977 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1978 {
1979 struct mfi_conf *cfg;
1980 int rv = EINVAL;
1981
1982 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1983
1984 if (mfi_get_info(sc)) {
1985 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
1986 DEVNAME(sc));
1987 return EIO;
1988 }
1989
1990 /* get figures */
1991 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1992 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1993 sizeof *cfg, cfg, NULL, false))
1994 goto freeme;
1995
1996 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1997 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1998 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1999
2000 rv = 0;
2001 freeme:
2002 free(cfg, M_DEVBUF);
2003 return rv;
2004 }
2005
2006 static int
2007 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
2008 {
2009 int i, per, rv = EINVAL;
2010 uint8_t mbox[MFI_MBOX_SIZE];
2011
2012 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
2013 DEVNAME(sc), bv->bv_volid);
2014
2015 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
2016 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false))
2017 goto done;
2018
2019 i = bv->bv_volid;
2020 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
2021 /* go do hotspares */
2022 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
2023 goto done;
2024 }
2025
2026 memset(mbox, 0, sizeof mbox);
2027 mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
2028 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
2029 DEVNAME(sc), mbox[0]);
2030 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
2031 sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox, false))
2032 goto done;
2033
2034 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
2035
2036 switch (sc->sc_ld_list.mll_list[i].mll_state) {
2037 case MFI_LD_OFFLINE:
2038 bv->bv_status = BIOC_SVOFFLINE;
2039 break;
2040
2041 case MFI_LD_PART_DEGRADED:
2042 case MFI_LD_DEGRADED:
2043 bv->bv_status = BIOC_SVDEGRADED;
2044 break;
2045
2046 case MFI_LD_ONLINE:
2047 bv->bv_status = BIOC_SVONLINE;
2048 break;
2049
2050 default:
2051 bv->bv_status = BIOC_SVINVALID;
2052 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
2053 DEVNAME(sc),
2054 sc->sc_ld_list.mll_list[i].mll_state);
2055 }
2056
2057 /* additional status can modify MFI status */
2058 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
2059 case MFI_LD_PROG_CC:
2060 case MFI_LD_PROG_BGI:
2061 bv->bv_status = BIOC_SVSCRUB;
2062 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
2063 bv->bv_percent = (per * 100) / 0xffff;
2064 bv->bv_seconds =
2065 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
2066 break;
2067
2068 case MFI_LD_PROG_FGI:
2069 case MFI_LD_PROG_RECONSTRUCT:
2070 /* nothing yet */
2071 break;
2072 }
2073
2074 /*
2075 * The RAID levels are determined per the SNIA DDF spec; this is only
2076 * the subset that is valid for the MFI controller.
2077 */
2078 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
2079 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
2080 MFI_DDF_SRL_SPANNED)
2081 bv->bv_level *= 10;
2082
2083 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
2084 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
2085
2086 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
2087
2088 rv = 0;
2089 done:
2090 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
2091 DEVNAME(sc), rv);
2092 return rv;
2093 }
2094
2095 static int
2096 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
2097 {
2098 struct mfi_conf *cfg;
2099 struct mfi_array *ar;
2100 struct mfi_ld_cfg *ld;
2101 struct mfi_pd_details *pd;
2102 struct scsipi_inquiry_data *inqbuf;
2103 char vend[8+16+4+1];
2104 int i, rv = EINVAL;
2105 int arr, vol, disk;
2106 uint32_t size;
2107 uint8_t mbox[MFI_MBOX_SIZE];
2108
2109 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
2110 DEVNAME(sc), bd->bd_diskid);
2111
2112 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
2113
2114 /* send single element command to retrieve size for full structure */
2115 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2116 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2117 sizeof *cfg, cfg, NULL, false))
2118 goto freeme;
2119
2120 size = cfg->mfc_size;
2121 free(cfg, M_DEVBUF);
2122
2123 /* memory for read config */
2124 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
2125 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2126 size, cfg, NULL, false))
2127 goto freeme;
2128
2129 ar = cfg->mfc_array;
2130
2131 /* calculate offset to ld structure */
2132 ld = (struct mfi_ld_cfg *)(
2133 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
2134 cfg->mfc_array_size * cfg->mfc_no_array);
2135
2136 vol = bd->bd_volid;
2137
2138 if (vol >= cfg->mfc_no_ld) {
2139 /* do hotspares */
2140 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
2141 goto freeme;
2142 }
2143
2144 /* find corresponding array for ld */
2145 for (i = 0, arr = 0; i < vol; i++)
2146 arr += ld[i].mlc_parm.mpa_span_depth;
2147
2148 /* offset disk into pd list */
2149 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
2150
2151 /* offset array index into the next spans */
2152 arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
2153
2154 bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
2155 switch (ar[arr].pd[disk].mar_pd_state) {
2156 case MFI_PD_UNCONFIG_GOOD:
2157 bd->bd_status = BIOC_SDUNUSED;
2158 break;
2159
2160 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
2161 bd->bd_status = BIOC_SDHOTSPARE;
2162 break;
2163
2164 case MFI_PD_OFFLINE:
2165 bd->bd_status = BIOC_SDOFFLINE;
2166 break;
2167
2168 case MFI_PD_FAILED:
2169 bd->bd_status = BIOC_SDFAILED;
2170 break;
2171
2172 case MFI_PD_REBUILD:
2173 bd->bd_status = BIOC_SDREBUILD;
2174 break;
2175
2176 case MFI_PD_ONLINE:
2177 bd->bd_status = BIOC_SDONLINE;
2178 break;
2179
2180 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
2181 default:
2182 bd->bd_status = BIOC_SDINVALID;
2183 break;
2184
2185 }
2186
2187 /* get the remaining fields */
2188 memset(mbox, 0, sizeof mbox);
2189 *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
2190 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2191 sizeof *pd, pd, mbox, false))
2192 goto freeme;
2193
2194 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
2195
2196 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
2197 bd->bd_channel = pd->mpd_enc_idx;
2198
2199 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
2200 memcpy(vend, inqbuf->vendor, sizeof vend - 1);
2201 vend[sizeof vend - 1] = '\0';
2202 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
2203
2204 /* XXX find a way to retrieve serial nr from drive */
2205 /* XXX find a way to get bd_procdev */
2206
2207 rv = 0;
2208 freeme:
2209 free(pd, M_DEVBUF);
2210 free(cfg, M_DEVBUF);
2211
2212 return rv;
2213 }
2214
2215 static int
2216 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
2217 {
2218 uint32_t opc, dir = MFI_DATA_NONE;
2219 int rv = 0;
2220 int8_t ret;
2221
2222 switch (ba->ba_opcode) {
2223 case BIOC_SADISABLE:
2224 opc = MR_DCMD_SPEAKER_DISABLE;
2225 break;
2226
2227 case BIOC_SAENABLE:
2228 opc = MR_DCMD_SPEAKER_ENABLE;
2229 break;
2230
2231 case BIOC_SASILENCE:
2232 opc = MR_DCMD_SPEAKER_SILENCE;
2233 break;
2234
2235 case BIOC_GASTATUS:
2236 opc = MR_DCMD_SPEAKER_GET;
2237 dir = MFI_DATA_IN;
2238 break;
2239
2240 case BIOC_SATEST:
2241 opc = MR_DCMD_SPEAKER_TEST;
2242 break;
2243
2244 default:
2245 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
2246 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
2247 return EINVAL;
2248 }
2249
2250 if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL, false))
2251 rv = EINVAL;
2252 else if (ba->ba_opcode == BIOC_GASTATUS)
2253 ba->ba_status = ret;
2254 else
2255 ba->ba_status = 0;
2257
2258 return rv;
2259 }
2260
2261 static int
2262 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
2263 {
2264 int i, found, rv = EINVAL;
2265 uint8_t mbox[MFI_MBOX_SIZE];
2266 uint32_t cmd;
2267 struct mfi_pd_list *pd;
2268
2269 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
2270 bb->bb_status);
2271
2272 /* channel 0 means not in an enclosure so can't be blinked */
2273 if (bb->bb_channel == 0)
2274 return EINVAL;
2275
2276 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
2277
2278 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
2279 MFI_PD_LIST_SIZE, pd, NULL, false))
2280 goto done;
2281
2282 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
2283 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
2284 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
2285 found = 1;
2286 break;
2287 }
2288
2289 if (!found)
2290 goto done;
2291
2292 memset(mbox, 0, sizeof mbox);
2293
2294 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
2295
2296 switch (bb->bb_status) {
2297 case BIOC_SBUNBLINK:
2298 cmd = MR_DCMD_PD_UNBLINK;
2299 break;
2300
2301 case BIOC_SBBLINK:
2302 cmd = MR_DCMD_PD_BLINK;
2303 break;
2304
2305 case BIOC_SBALARM:
2306 default:
2307 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
2308 "opcode %x\n", DEVNAME(sc), bb->bb_status);
2309 goto done;
2310 }
2311
2312 if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox, false))
2314 goto done;
2315
2316 rv = 0;
2317 done:
2318 free(pd, M_DEVBUF);
2319 return rv;
2320 }
2321
2322 static int
2323 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
2324 {
2325 struct mfi_pd_list *pd;
2326 int i, found, rv = EINVAL;
2327 uint8_t mbox[MFI_MBOX_SIZE];
2328 uint32_t cmd;
2329
2330 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
2331 bs->bs_status);
2332
2333 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
2334
2335 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
2336 MFI_PD_LIST_SIZE, pd, NULL, false))
2337 goto done;
2338
2339 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
2340 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
2341 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
2342 found = 1;
2343 break;
2344 }
2345
2346 if (!found)
2347 goto done;
2348
2349 memset(mbox, 0, sizeof mbox);
2350
2351 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
2352
2353 switch (bs->bs_status) {
2354 case BIOC_SSONLINE:
2355 mbox[2] = MFI_PD_ONLINE;
2356 cmd = MD_DCMD_PD_SET_STATE;
2357 break;
2358
2359 case BIOC_SSOFFLINE:
2360 mbox[2] = MFI_PD_OFFLINE;
2361 cmd = MD_DCMD_PD_SET_STATE;
2362 break;
2363
2364 case BIOC_SSHOTSPARE:
2365 mbox[2] = MFI_PD_HOTSPARE;
2366 cmd = MD_DCMD_PD_SET_STATE;
2367 break;
2368 /*
2369 case BIOC_SSREBUILD:
2370 cmd = MD_DCMD_PD_REBUILD;
2371 break;
2372 */
2373 default:
2374 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
2375 "opcode %x\n", DEVNAME(sc), bs->bs_status);
2376 goto done;
2377 }
2378
2379 if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE,
2380 0, NULL, mbox, false))
2382 goto done;
2383
2384 rv = 0;
2385 done:
2386 free(pd, M_DEVBUF);
2387 return rv;
2388 }
2389
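/*
 * Report a hotspare to bio(4).  Hotspares are addressed past the
 * logical disks in the volume numbering; depending on 'type' the
 * answer is filled into a bioc_vol (MFI_MGMT_VD) or a bioc_disk
 * (MFI_MGMT_SD).
 */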
2390 static int
2391 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
2392 {
2393 struct mfi_conf *cfg;
2394 struct mfi_hotspare *hs;
2395 struct mfi_pd_details *pd;
2396 struct bioc_disk *sdhs;
2397 struct bioc_vol *vdhs;
2398 struct scsipi_inquiry_data *inqbuf;
2399 char vend[8+16+4+1];
2400 int i, rv = EINVAL;
2401 uint32_t size;
2402 uint8_t mbox[MFI_MBOX_SIZE];
2403
2404 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
2405
2406 if (!bio_hs)
2407 return EINVAL;
2408
2409 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
2410
2411 /* send single element command to retrieve size for full structure */
2412 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2413 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2414 sizeof *cfg, cfg, NULL, false))
2415 goto freeme;
2416
2417 size = cfg->mfc_size;
2418 free(cfg, M_DEVBUF);
2419
2420 /* memory for read config */
2421 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
2422 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2423 size, cfg, NULL, false))
2424 goto freeme;
2425
2426 /* calculate offset to hs structure */
2427 hs = (struct mfi_hotspare *)(
2428 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
2429 cfg->mfc_array_size * cfg->mfc_no_array +
2430 cfg->mfc_ld_size * cfg->mfc_no_ld);
2431
2432 if (volid < cfg->mfc_no_ld)
2433 goto freeme; /* not a hotspare */
2434
2435 if (volid >= (cfg->mfc_no_ld + cfg->mfc_no_hs))
2436 goto freeme; /* not a hotspare */
2437
2438 /* offset into hotspare structure */
2439 i = volid - cfg->mfc_no_ld;
2440
2441 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
2442 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
2443 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
2444
2445 /* get pd fields */
2446 memset(mbox, 0, sizeof mbox);
2447 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
2448 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2449 sizeof *pd, pd, mbox, false)) {
2450 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
2451 DEVNAME(sc));
2452 goto freeme;
2453 }
2454
2455 switch (type) {
2456 case MFI_MGMT_VD:
2457 vdhs = bio_hs;
2458 vdhs->bv_status = BIOC_SVONLINE;
2459 vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */
2460 vdhs->bv_level = -1; /* hotspare */
2461 vdhs->bv_nodisk = 1;
2462 break;
2463
2464 case MFI_MGMT_SD:
2465 sdhs = bio_hs;
2466 sdhs->bd_status = BIOC_SDHOTSPARE;
2467 sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */
2468 sdhs->bd_channel = pd->mpd_enc_idx;
2469 sdhs->bd_target = pd->mpd_enc_slot;
2470 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
2471 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
2472 vend[sizeof vend - 1] = '\0';
2473 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
2474 break;
2475
2476 default:
2477 goto freeme;
2478 }
2479
2480 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
2481 rv = 0;
2482 freeme:
2483 free(pd, M_DEVBUF);
2484 free(cfg, M_DEVBUF);
2485
2486 return rv;
2487 }
2488
2489 static int
2490 mfi_destroy_sensors(struct mfi_softc *sc)
2491 {
2492 if (sc->sc_sme == NULL)
2493 return 0;
2494 sysmon_envsys_unregister(sc->sc_sme);
2495 sc->sc_sme = NULL;
2496 free(sc->sc_sensor, M_DEVBUF);
2497 return 0;
2498 }
2499
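/*
 * Attach envsys(4) sensors: sensor 0 is an indicator for the BBU,
 * sensors 1..n report the state of each logical drive.
 */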
2500 static int
2501 mfi_create_sensors(struct mfi_softc *sc)
2502 {
2503 int i;
2504 int nsensors = sc->sc_ld_cnt + 1;
2505 int rv;
2506
2507 sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
2508 M_DEVBUF, M_NOWAIT | M_ZERO);
2509 if (sc->sc_sensor == NULL) {
2510 aprint_error_dev(sc->sc_dev, "can't allocate envsys_data_t\n");
2511 return ENOMEM;
2512 }
2513 sc->sc_sme = sysmon_envsys_create();
2514
2515 /* BBU */
2516 sc->sc_sensor[0].units = ENVSYS_INDICATOR;
2517 sc->sc_sensor[0].state = ENVSYS_SINVALID;
2518 sc->sc_sensor[0].value_cur = 0;
2519 /* Enable monitoring for BBU state changes, if present */
2520 if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU)
2521 sc->sc_sensor[0].flags |= ENVSYS_FMONCRITICAL;
2522 snprintf(sc->sc_sensor[0].desc,
2523 sizeof(sc->sc_sensor[0].desc), "%s BBU", DEVNAME(sc));
2524 if (sysmon_envsys_sensor_attach(sc->sc_sme, &sc->sc_sensor[0]))
2525 goto out;
2526
2527 for (i = 1; i < nsensors; i++) {
2528 sc->sc_sensor[i].units = ENVSYS_DRIVE;
2529 sc->sc_sensor[i].state = ENVSYS_SINVALID;
2530 sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY;
2531 /* Enable monitoring for drive state changes */
2532 sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
2533 /* logical drives */
2534 snprintf(sc->sc_sensor[i].desc,
2535 sizeof(sc->sc_sensor[i].desc), "%s:%d",
2536 DEVNAME(sc), i - 1);
2537 if (sysmon_envsys_sensor_attach(sc->sc_sme,
2538 &sc->sc_sensor[i]))
2539 goto out;
2540 }
2541
2542 sc->sc_sme->sme_name = DEVNAME(sc);
2543 sc->sc_sme->sme_cookie = sc;
2544 sc->sc_sme->sme_refresh = mfi_sensor_refresh;
2545 rv = sysmon_envsys_register(sc->sc_sme);
2546 if (rv != 0) {
2547 aprint_error_dev(sc->sc_dev,
2548 "unable to register with sysmon (rv = %d)\n", rv);
2549 goto out;
2550 }
2551 return 0;
2552
2553 out:
2554 free(sc->sc_sensor, M_DEVBUF);
2555 sysmon_envsys_destroy(sc->sc_sme);
2556 sc->sc_sme = NULL;
2557 return EINVAL;
2558 }
2559
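/*
 * envsys(4) refresh callback: poll the BBU for sensor 0, otherwise
 * query the matching logical volume through mfi_ioctl_vol() and
 * translate its status into an envsys drive state.
 */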
2560 static void
2561 mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
2562 {
2563 struct mfi_softc *sc = sme->sme_cookie;
2564 struct bioc_vol bv;
2565 int s;
2566 int error;
2567
2568 if (edata->sensor >= sc->sc_ld_cnt + 1)
2569 return;
2570
2571 if (edata->sensor == 0) {
2572 /* BBU */
2573 struct mfi_bbu_status bbu_stat;
2574 int bbu_status;
2575 if ((sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) == 0)
2576 return;
2577
2578 KERNEL_LOCK(1, curlwp);
2579 s = splbio();
2580 bbu_status = mfi_get_bbu(sc, &bbu_stat);
2581 splx(s);
2582 KERNEL_UNLOCK_ONE(curlwp);
2583 switch (bbu_status) {
2584 case MFI_BBU_GOOD:
2585 edata->value_cur = 1;
2586 edata->state = ENVSYS_SVALID;
2587 if (!sc->sc_bbuok)
2588 aprint_normal_dev(sc->sc_dev,
2589 "BBU state changed to good\n");
2590 sc->sc_bbuok = true;
2591 break;
2592 case MFI_BBU_BAD:
2593 edata->value_cur = 0;
2594 edata->state = ENVSYS_SCRITICAL;
2595 if (sc->sc_bbuok)
2596 aprint_normal_dev(sc->sc_dev,
2597 "BBU state changed to bad\n");
2598 sc->sc_bbuok = false;
2599 break;
2600 case MFI_BBU_UNKNOWN:
2601 default:
2602 edata->value_cur = 0;
2603 edata->state = ENVSYS_SINVALID;
2604 sc->sc_bbuok = false;
2605 break;
2606 }
2607 return;
2608 }
2609
2610 memset(&bv, 0, sizeof(bv));
2611 bv.bv_volid = edata->sensor - 1;
2612 KERNEL_LOCK(1, curlwp);
2613 s = splbio();
2614 error = mfi_ioctl_vol(sc, &bv);
2615 splx(s);
2616 KERNEL_UNLOCK_ONE(curlwp);
2617 if (error)
2618 return;
2619
2620 switch (bv.bv_status) {
2621 case BIOC_SVOFFLINE:
2622 edata->value_cur = ENVSYS_DRIVE_FAIL;
2623 edata->state = ENVSYS_SCRITICAL;
2624 break;
2625
2626 case BIOC_SVDEGRADED:
2627 edata->value_cur = ENVSYS_DRIVE_PFAIL;
2628 edata->state = ENVSYS_SCRITICAL;
2629 break;
2630
2631 case BIOC_SVSCRUB:
2632 case BIOC_SVONLINE:
2633 edata->value_cur = ENVSYS_DRIVE_ONLINE;
2634 edata->state = ENVSYS_SVALID;
2635 break;
2636
2637 case BIOC_SVINVALID:
2638 /* FALLTHROUGH */
2639 default:
2640 edata->value_cur = 0; /* unknown */
2641 edata->state = ENVSYS_SINVALID;
2642 }
2643 }
2644
2645 #endif /* NBIO > 0 */
2646
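/*
 * Per-IOP register access routines.  Each controller family
 * (xscale, ppc, gen2, skinny, tbolt) exposes the firmware state,
 * the interrupt mask/status and the inbound frame queue through
 * slightly different registers.
 */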
2647 static uint32_t
2648 mfi_xscale_fw_state(struct mfi_softc *sc)
2649 {
2650 return mfi_read(sc, MFI_OMSG0);
2651 }
2652
2653 static void
2654 mfi_xscale_intr_dis(struct mfi_softc *sc)
2655 {
2656 mfi_write(sc, MFI_OMSK, 0);
2657 }
2658
2659 static void
2660 mfi_xscale_intr_ena(struct mfi_softc *sc)
2661 {
2662 mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
2663 }
2664
2665 static int
2666 mfi_xscale_intr(struct mfi_softc *sc)
2667 {
2668 uint32_t status;
2669
2670 status = mfi_read(sc, MFI_OSTS);
2671 if (!ISSET(status, MFI_OSTS_INTR_VALID))
2672 return 0;
2673
2674 /* write status back to acknowledge interrupt */
2675 mfi_write(sc, MFI_OSTS, status);
2676 return 1;
2677 }
2678
2679 static void
2680 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2681 {
2682 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2683 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
2684 sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2685 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
2686 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
2687 MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);
2688
2689 mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
2690 ccb->ccb_extra_frames);
2691 ccb->ccb_state = MFI_CCB_RUNNING;
2692 }
2693
2694 static uint32_t
2695 mfi_ppc_fw_state(struct mfi_softc *sc)
2696 {
2697 return mfi_read(sc, MFI_OSP);
2698 }
2699
2700 static void
2701 mfi_ppc_intr_dis(struct mfi_softc *sc)
2702 {
2703 /* Taking a wild guess --dyoung */
2704 mfi_write(sc, MFI_OMSK, ~(uint32_t)0x0);
2705 mfi_write(sc, MFI_ODC, 0xffffffff);
2706 }
2707
2708 static void
2709 mfi_ppc_intr_ena(struct mfi_softc *sc)
2710 {
2711 mfi_write(sc, MFI_ODC, 0xffffffff);
2712 mfi_write(sc, MFI_OMSK, ~0x80000004);
2713 }
2714
2715 static int
2716 mfi_ppc_intr(struct mfi_softc *sc)
2717 {
2718 uint32_t status;
2719
2720 status = mfi_read(sc, MFI_OSTS);
2721 if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2722 return 0;
2723
2724 /* write status back to acknowledge interrupt */
2725 mfi_write(sc, MFI_ODC, status);
2726 return 1;
2727 }
2728
2729 static void
2730 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2731 {
2732 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2733 (ccb->ccb_extra_frames << 1));
2734 ccb->ccb_state = MFI_CCB_RUNNING;
2735 }
2736
2737 uint32_t
2738 mfi_gen2_fw_state(struct mfi_softc *sc)
2739 {
2740 return (mfi_read(sc, MFI_OSP));
2741 }
2742
2743 void
2744 mfi_gen2_intr_dis(struct mfi_softc *sc)
2745 {
2746 mfi_write(sc, MFI_OMSK, 0xffffffff);
2747 mfi_write(sc, MFI_ODC, 0xffffffff);
2748 }
2749
2750 void
2751 mfi_gen2_intr_ena(struct mfi_softc *sc)
2752 {
2753 mfi_write(sc, MFI_ODC, 0xffffffff);
2754 mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
2755 }
2756
2757 int
2758 mfi_gen2_intr(struct mfi_softc *sc)
2759 {
2760 uint32_t status;
2761
2762 status = mfi_read(sc, MFI_OSTS);
2763 if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
2764 return (0);
2765
2766 /* write status back to acknowledge interrupt */
2767 mfi_write(sc, MFI_ODC, status);
2768
2769 return (1);
2770 }
2771
2772 void
2773 mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2774 {
2775 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2776 (ccb->ccb_extra_frames << 1));
2777 ccb->ccb_state = MFI_CCB_RUNNING;
2778 }
2779
2780 uint32_t
2781 mfi_skinny_fw_state(struct mfi_softc *sc)
2782 {
2783 return (mfi_read(sc, MFI_OSP));
2784 }
2785
2786 void
2787 mfi_skinny_intr_dis(struct mfi_softc *sc)
2788 {
2789 mfi_write(sc, MFI_OMSK, 0);
2790 }
2791
2792 void
2793 mfi_skinny_intr_ena(struct mfi_softc *sc)
2794 {
2795 mfi_write(sc, MFI_OMSK, ~0x00000001);
2796 }
2797
2798 int
2799 mfi_skinny_intr(struct mfi_softc *sc)
2800 {
2801 uint32_t status;
2802
2803 status = mfi_read(sc, MFI_OSTS);
2804 if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
2805 return (0);
2806
2807 /* write status back to acknowledge interrupt */
2808 mfi_write(sc, MFI_OSTS, status);
2809
2810 return (1);
2811 }
2812
2813 void
2814 mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2815 {
2816 mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
2817 (ccb->ccb_extra_frames << 1));
2818 mfi_write(sc, MFI_IQPH, 0x00000000);
2819 ccb->ccb_state = MFI_CCB_RUNNING;
2820 }
2821
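/*
 * ThunderBolt (MPT Fusion) register access.  Requests are posted
 * as 64-bit descriptors through the low/high inbound queue port
 * registers instead of a single frame address.
 */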
2822 #define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000008)
2823
2824 void
2825 mfi_tbolt_intr_ena(struct mfi_softc *sc)
2826 {
2827 mfi_write(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
2828 mfi_read(sc, MFI_OMSK);
2829 }
2830
2831 void
2832 mfi_tbolt_intr_dis(struct mfi_softc *sc)
2833 {
2834 mfi_write(sc, MFI_OMSK, 0xFFFFFFFF);
2835 mfi_read(sc, MFI_OMSK);
2836 }
2837
2838 int
2839 mfi_tbolt_intr(struct mfi_softc *sc)
2840 {
2841 uint32_t status;
2842
2843 status = mfi_read(sc, MFI_OSTS);
2844
2845 if (ISSET(status, 0x1)) {
2846 mfi_write(sc, MFI_OSTS, status);
2847 mfi_read(sc, MFI_OSTS);
2848 if (ISSET(status, MFI_STATE_CHANGE_INTERRUPT))
2849 return 0;
2850 return 1;
2851 }
2852 if (!ISSET(status, MFI_FUSION_ENABLE_INTERRUPT_MASK))
2853 return 0;
2854 mfi_read(sc, MFI_OSTS);
2855 return 1;
2856 }
2857
2858 uint32_t
2859 mfi_tbolt_fw_state(struct mfi_softc *sc)
2860 {
2861 return mfi_read(sc, MFI_OSP);
2862 }
2863
2864 void
2865 mfi_tbolt_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2866 {
2867 if (sc->sc_MFA_enabled) {
2868 if ((ccb->ccb_flags & MFI_CCB_F_TBOLT) == 0)
2869 mfi_tbolt_build_mpt_ccb(ccb);
2870 mfi_write(sc, MFI_IQPL,
2871 ccb->ccb_tb_request_desc.words & 0xFFFFFFFF);
2872 mfi_write(sc, MFI_IQPH,
2873 ccb->ccb_tb_request_desc.words >> 32);
2874 ccb->ccb_state = MFI_CCB_RUNNING;
2875 return;
2876 }
2877 uint64_t bus_add = ccb->ccb_pframe;
2878 bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
2879 << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2880 mfi_write(sc, MFI_IQPL, bus_add);
2881 mfi_write(sc, MFI_IQPH, bus_add >> 32);
2882 ccb->ccb_state = MFI_CCB_RUNNING;
2883 }
2884
2885 static void
2886 mfi_tbolt_build_mpt_ccb(struct mfi_ccb *ccb)
2887 {
2888 union mfi_mpi2_request_descriptor *req_desc = &ccb->ccb_tb_request_desc;
2889 struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request;
2890 struct mpi25_ieee_sge_chain64 *mpi25_ieee_chain;
2891
2892 io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
2893 io_req->SGLOffset0 =
2894 offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;
2895 io_req->ChainOffset =
2896 offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 16;
2897
2898 mpi25_ieee_chain =
2899 (struct mpi25_ieee_sge_chain64 *)&io_req->SGL.IeeeChain;
2900 mpi25_ieee_chain->Address = ccb->ccb_pframe;
2901
2902 /*
2903 * In MFI pass-through, nextChainOffset will always be zero to
2904 * indicate the end of the chain.
2905 */
2906 mpi25_ieee_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
2907 | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2908
2909 /* setting the length to the maximum length */
2910 mpi25_ieee_chain->Length = 1024;
2911
2912 req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2913 MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2914 ccb->ccb_flags |= MFI_CCB_F_TBOLT;
2915 bus_dmamap_sync(ccb->ccb_sc->sc_dmat,
2916 MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool),
2917 ccb->ccb_tb_pio_request -
2918 MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool),
2919 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
2920 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2921 }
2922
2923 /*
2924 * Description:
2925 * This function will prepare message pools for the Thunderbolt controller
2926 */
2927 static int
2928 mfi_tbolt_init_desc_pool(struct mfi_softc *sc)
2929 {
2930 uint32_t offset = 0;
2931 uint8_t *addr = MFIMEM_KVA(sc->sc_tbolt_reqmsgpool);
2932
2933 /* Request descriptor alignment restrictions */
2934 KASSERT(((uintptr_t)addr & 0xFF) == 0);
2935
2936 /* Skip request message pool */
2937 addr = &addr[MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1)];
2938
2939 /* the reply frame pool follows the request message pool */
2940 sc->sc_reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
2941 KASSERT(((uintptr_t)addr & 0xFF) == 0);
2942
2943 offset = (uintptr_t)sc->sc_reply_frame_pool
2944 - (uintptr_t)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool);
2945 sc->sc_reply_frame_busaddr =
2946 MFIMEM_DVA(sc->sc_tbolt_reqmsgpool) + offset;
2947
2948 /* initializing reply address to 0xFFFFFFFF */
2949 memset((uint8_t *)sc->sc_reply_frame_pool, 0xFF,
2950 (MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size));
2951
2952 /* Skip Reply Frame Pool */
2953 addr += MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;
2954 sc->sc_reply_pool_limit = (void *)addr;
2955
2956 offset = MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;
2957 sc->sc_sg_frame_busaddr = sc->sc_reply_frame_busaddr + offset;
2958
2959 /* initialize the last_reply_idx to 0 */
2960 sc->sc_last_reply_idx = 0;
2961 offset = (sc->sc_sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
2962 sc->sc_max_cmds)) - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool);
2963 KASSERT(offset <= sc->sc_tbolt_reqmsgpool->am_size);
2964 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), 0,
2965 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool)->dm_mapsize,
2966 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2967 return 0;
2968 }
2969
2970 /*
2971 * This routine prepares and issues the INIT2 frame to the firmware.
2972 */
2973
2974 static int
2975 mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
2976 {
2977 struct mpi2_ioc_init_request *mpi2IocInit;
2978 struct mfi_init_frame *mfi_init;
2979 struct mfi_ccb *ccb;
2980 bus_addr_t phyAddress;
2981 mfi_address *mfiAddressTemp;
2982 int s;
2983 char *verbuf;
2984 char wqbuf[10];
2985
2986 /* Check if initialization is already completed */
2987 if (sc->sc_MFA_enabled) {
2988 return 1;
2989 }
2990
2991 mpi2IocInit =
2992 (struct mpi2_ioc_init_request *)MFIMEM_KVA(sc->sc_tbolt_ioc_init);
2993
2994 s = splbio();
2995 if ((ccb = mfi_get_ccb(sc)) == NULL) {
2996 splx(s);
2997 return (EBUSY);
2998 }
2999
3001 mfi_init = &ccb->ccb_frame->mfr_init;
3002
3003 memset(mpi2IocInit, 0, sizeof(struct mpi2_ioc_init_request));
3004 mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT;
3005 mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
3006
3007 /* set MsgVersion and HeaderVersion host driver was built with */
3008 mpi2IocInit->MsgVersion = MPI2_VERSION;
3009 mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
3010 mpi2IocInit->SystemRequestFrameSize = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE / 4;
3011 mpi2IocInit->ReplyDescriptorPostQueueDepth =
3012 (uint16_t)sc->sc_reply_pool_size;
3013 mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */
3014
3015 /* Get physical address of reply frame pool */
3016 phyAddress = sc->sc_reply_frame_busaddr;
3017 mfiAddressTemp =
3018 (mfi_address *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
3019 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
3020 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
3021
3022 /* Get physical address of request message pool */
3023 phyAddress = MFIMEM_DVA(sc->sc_tbolt_reqmsgpool);
3024 mfiAddressTemp = (mfi_address *)&mpi2IocInit->SystemRequestFrameBaseAddress;
3025 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
3026 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
3027
3028 mpi2IocInit->ReplyFreeQueueAddress = 0; /* Not supported by MR. */
3029 mpi2IocInit->TimeStamp = time_uptime;
3030
3031 verbuf = MFIMEM_KVA(sc->sc_tbolt_verbuf);
3032 snprintf(verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
3033 MEGASAS_VERSION);
3034 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0,
3035 MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_PREWRITE);
3036 mfi_init->driver_ver_lo = htole32(MFIMEM_DVA(sc->sc_tbolt_verbuf));
3037 mfi_init->driver_ver_hi =
3038 htole32((uint64_t)MFIMEM_DVA(sc->sc_tbolt_verbuf) >> 32);
3039
3040 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0,
3041 MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize,
3042 BUS_DMASYNC_PREWRITE);
3043 /* Get the physical address of the mpi2 ioc init command */
3044 phyAddress = MFIMEM_DVA(sc->sc_tbolt_ioc_init);
3045 mfi_init->mif_qinfo_new_addr_lo = htole32(phyAddress);
3046 mfi_init->mif_qinfo_new_addr_hi = htole32((uint64_t)phyAddress >> 32);
3047
3048 mfi_init->mif_header.mfh_cmd = MFI_CMD_INIT;
3049 mfi_init->mif_header.mfh_data_len = sizeof(struct mpi2_ioc_init_request);
3050 if (mfi_poll(ccb) != 0) {
3051 aprint_error_dev(sc->sc_dev, "failed to send IOC init2 "
3052 "command at 0x%" PRIx64 "\n", (uint64_t)ccb->ccb_pframe);
3053 mfi_put_ccb(ccb);
3054 splx(s);
3055 return 1;
3056 }
3057 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0,
3058 MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3059 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0,
3060 MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize,
3061 BUS_DMASYNC_POSTWRITE);
3062 mfi_put_ccb(ccb);
3063 splx(s);
3064
3065 if (mfi_init->mif_header.mfh_cmd_status == 0) {
3066 sc->sc_MFA_enabled = 1;
3067 } else {
3068 aprint_error_dev(sc->sc_dev, "Init command Failed %x\n",
3069 mfi_init->mif_header.mfh_cmd_status);
3070 return 1;
3071 }
3073
3074 snprintf(wqbuf, sizeof(wqbuf), "%swq", DEVNAME(sc));
3075 if (workqueue_create(&sc->sc_ldsync_wq, wqbuf, mfi_tbolt_sync_map_info,
3076 sc, PRIBIO, IPL_BIO, 0) != 0) {
3077 aprint_error_dev(sc->sc_dev, "workqueue_create failed\n");
3078 return 1;
3079 }
3080 workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL);
3081 return 0;
3082 }
3083
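/*
 * ThunderBolt interrupt handler: walk the reply descriptor ring
 * starting at sc_last_reply_idx, complete the ccb named by each
 * descriptor's SMID, then write the final index to the reply post
 * register to hand the descriptors back to the firmware.
 */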
3084 int
3085 mfi_tbolt_intrh(void *arg)
3086 {
3087 struct mfi_softc *sc = arg;
3088 struct mfi_ccb *ccb;
3089 union mfi_mpi2_reply_descriptor *desc;
3090 int smid, num_completed;
3091
3092 if (!mfi_tbolt_intr(sc))
3093 return 0;
3094
3095 DNPRINTF(MFI_D_INTR, "%s: mfi_tbolt_intrh %#lx %#lx\n", DEVNAME(sc),
3096 (u_long)sc, (u_long)sc->sc_last_reply_idx);
3097
3098 KASSERT(sc->sc_last_reply_idx < sc->sc_reply_pool_size);
3099
3100 desc = (union mfi_mpi2_reply_descriptor *)
3101 ((uintptr_t)sc->sc_reply_frame_pool +
3102 sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE);
3103
3104 bus_dmamap_sync(sc->sc_dmat,
3105 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3106 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1),
3107 MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size,
3108 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3109 num_completed = 0;
3110 while ((desc->header.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK) !=
3111 MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
3112 smid = desc->header.SMID;
3113 KASSERT(smid > 0 && smid <= sc->sc_max_cmds);
3114 ccb = &sc->sc_ccb[smid - 1];
3115 DNPRINTF(MFI_D_INTR,
3116 "%s: mfi_tbolt_intr SMID %#x reply_idx %#x "
3117 "desc %#" PRIx64 " ccb %p\n", DEVNAME(sc), smid,
3118 sc->sc_last_reply_idx, desc->words, ccb);
3119 KASSERT(ccb->ccb_state == MFI_CCB_RUNNING);
3120 if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO &&
3121 ccb->ccb_tb_io_request->ChainOffset != 0) {
3122 bus_dmamap_sync(sc->sc_dmat,
3123 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3124 ccb->ccb_tb_psg_frame -
3125 MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
3126 MEGASAS_MAX_SZ_CHAIN_FRAME, BUS_DMASYNC_POSTREAD);
3127 }
3128 if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO) {
3129 bus_dmamap_sync(sc->sc_dmat,
3130 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3131 ccb->ccb_tb_pio_request -
3132 MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
3133 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
3134 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3135 }
3136 if (ccb->ccb_done)
3137 ccb->ccb_done(ccb);
3138 else
3139 ccb->ccb_state = MFI_CCB_DONE;
3140 sc->sc_last_reply_idx++;
3141 if (sc->sc_last_reply_idx >= sc->sc_reply_pool_size) {
3142 sc->sc_last_reply_idx = 0;
3143 }
3144 desc->words = ~0x0;
3145 /* Get the next reply descriptor */
3146 desc = (union mfi_mpi2_reply_descriptor *)
3147 ((uintptr_t)sc->sc_reply_frame_pool +
3148 sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE);
3149 num_completed++;
3150 }
3151 if (num_completed == 0)
3152 return 0;
3153
3154 bus_dmamap_sync(sc->sc_dmat,
3155 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3156 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1),
3157 MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size,
3158 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3159 mfi_write(sc, MFI_RPI, sc->sc_last_reply_idx);
3160 return 1;
3161 }
3162
3163
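/*
 * Build a ThunderBolt logical-drive I/O: fill the MPI2 RAID SCSI
 * I/O request with the CDB, target, data length, SGL and sense
 * buffer address for the given transfer.
 */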
3164 int
3165 mfi_tbolt_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
3166 uint64_t blockno, uint32_t blockcnt)
3167 {
3168 struct scsipi_periph *periph = xs->xs_periph;
3169 struct mfi_mpi2_request_raid_scsi_io *io_req;
3170 int sge_count;
3171
3172 DNPRINTF(MFI_D_CMD, "%s: mfi_tbolt_scsi_ld_io: %d\n",
3173 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
3174 periph->periph_target);
3175
3176 if (!xs->data)
3177 return 1;
3178
3179 ccb->ccb_done = mfi_tbolt_scsi_ld_done;
3180 ccb->ccb_xs = xs;
3181 ccb->ccb_data = xs->data;
3182 ccb->ccb_len = xs->datalen;
3183
3184 io_req = ccb->ccb_tb_io_request;
3185
3186 /* Just the CDB length; the rest of the flags are zero */
3187 io_req->IoFlags = xs->cmdlen;
3188 memset(io_req->CDB.CDB32, 0, 32);
3189 memcpy(io_req->CDB.CDB32, &xs->cmdstore, xs->cmdlen);
3190
3191 io_req->RaidContext.TargetID = periph->periph_target;
3192 io_req->RaidContext.Status = 0;
3193 io_req->RaidContext.exStatus = 0;
3194 io_req->RaidContext.timeoutValue = MFI_FUSION_FP_DEFAULT_TIMEOUT;
3195 io_req->Function = MPI2_FUNCTION_LD_IO_REQUEST;
3196 io_req->DevHandle = periph->periph_target;
3197
3198 ccb->ccb_tb_request_desc.header.RequestFlags =
3199 (MFI_REQ_DESCRIPT_FLAGS_LD_IO << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3200 io_req->DataLength = blockcnt * MFI_SECTOR_LEN;
3201
3202 if (xs->xs_control & XS_CTL_DATA_IN) {
3203 io_req->Control = MPI2_SCSIIO_CONTROL_READ;
3204 ccb->ccb_direction = MFI_DATA_IN;
3205 } else {
3206 io_req->Control = MPI2_SCSIIO_CONTROL_WRITE;
3207 ccb->ccb_direction = MFI_DATA_OUT;
3208 }
3209
3210 sge_count = mfi_tbolt_create_sgl(ccb,
3211 (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK
3212 );
3213 if (sge_count < 0)
3214 return 1;
3215 KASSERT(sge_count <= ccb->ccb_sc->sc_max_sgl);
3216 io_req->RaidContext.numSGE = sge_count;
3217 io_req->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
3218 io_req->SGLOffset0 =
3219 offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;
3220
3221 io_req->SenseBufferLowAddress = htole32(ccb->ccb_psense);
3222 io_req->SenseBufferLength = MFI_SENSE_SIZE;
3223
3224 ccb->ccb_flags |= MFI_CCB_F_TBOLT | MFI_CCB_F_TBOLT_IO;
3225 bus_dmamap_sync(ccb->ccb_sc->sc_dmat,
3226 MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool),
3227 ccb->ccb_tb_pio_request -
3228 MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool),
3229 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
3230 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3231
3232 return 0;
3233 }
3234
3235
3236 static void
3237 mfi_tbolt_scsi_ld_done(struct mfi_ccb *ccb)
3238 {
3239 struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request;
3240 mfi_scsi_xs_done(ccb, io_req->RaidContext.Status,
3241 io_req->RaidContext.exStatus);
3242 }
3243
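/*
 * Build the IEEE SGL for a ThunderBolt I/O.  Segments that do not
 * fit in the main message frame spill into the per-ccb SG chain
 * frame, linked in via a chain element.  Returns the number of
 * segments used, or -1 on error.
 */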
3244 static int
3245 mfi_tbolt_create_sgl(struct mfi_ccb *ccb, int flags)
3246 {
3247 struct mfi_softc *sc = ccb->ccb_sc;
3248 bus_dma_segment_t *sgd;
3249 int error, i, sge_idx, sge_count;
3250 struct mfi_mpi2_request_raid_scsi_io *io_req;
3251 struct mpi25_ieee_sge_chain64 *sgl_ptr;
3252
3253 DNPRINTF(MFI_D_DMA, "%s: mfi_tbolt_create_sgl %#lx\n", DEVNAME(sc),
3254 (u_long)ccb->ccb_data);
3255
3256 if (!ccb->ccb_data)
3257 return -1;
3258
3259 KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p());
3260 error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap,
3261 ccb->ccb_data, ccb->ccb_len, NULL, flags);
3262 if (error) {
3263 if (error == EFBIG)
3264 aprint_error_dev(sc->sc_dev, "more than %d dma segs\n",
3265 sc->sc_max_sgl);
3266 else
3267 aprint_error_dev(sc->sc_dev,
3268 "error %d loading dma map\n", error);
3269 return -1;
3270 }
3271
3272 io_req = ccb->ccb_tb_io_request;
3273 sgl_ptr = &io_req->SGL.IeeeChain.Chain64;
3274 sge_count = ccb->ccb_dmamap->dm_nsegs;
3275 sgd = ccb->ccb_dmamap->dm_segs;
3276 KASSERT(sge_count <= sc->sc_max_sgl);
3277 KASSERT(sge_count <=
3278 (MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1 +
3279 MEGASAS_THUNDERBOLT_MAX_SGE_IN_CHAINMSG));
3280
3281 if (sge_count > MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG) {
3282 /* One element to store the chain info */
3283 sge_idx = MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1;
3284 DNPRINTF(MFI_D_DMA,
3285 "mfi sge_idx %d sge_count %d io_req paddr 0x%" PRIx64 "\n",
3286 sge_idx, sge_count, ccb->ccb_tb_pio_request);
3287 } else {
3288 sge_idx = sge_count;
3289 }
3290
3291 for (i = 0; i < sge_idx; i++) {
3292 sgl_ptr->Address = htole64(sgd[i].ds_addr);
3293 sgl_ptr->Length = htole32(sgd[i].ds_len);
3294 sgl_ptr->Flags = 0;
3295 if (sge_idx < sge_count) {
3296 DNPRINTF(MFI_D_DMA,
3297 "sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32
3298 " flags 0x%x\n", sgl_ptr, i,
3299 sgl_ptr->Address, sgl_ptr->Length,
3300 sgl_ptr->Flags);
3301 }
3302 sgl_ptr++;
3303 }
3304 io_req->ChainOffset = 0;
3305 if (sge_idx < sge_count) {
3306 struct mpi25_ieee_sge_chain64 *sg_chain;
3307 io_req->ChainOffset = MEGASAS_THUNDERBOLT_CHAIN_OFF_MAINMSG;
3308 sg_chain = sgl_ptr;
3309 /* Prepare chain element */
3310 sg_chain->NextChainOffset = 0;
3311 sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3312 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
3313 sg_chain->Length = (sizeof(mpi2_sge_io_union) *
3314 (sge_count - sge_idx));
3315 sg_chain->Address = ccb->ccb_tb_psg_frame;
3316 DNPRINTF(MFI_D_DMA,
3317 "sgl %p chain 0x%" PRIx64 " len 0x%" PRIx32
3318 " flags 0x%x\n", sg_chain, sg_chain->Address,
3319 sg_chain->Length, sg_chain->Flags);
3320 sgl_ptr = &ccb->ccb_tb_sg_frame->IeeeChain.Chain64;
3321 for (; i < sge_count; i++) {
3322 sgl_ptr->Address = htole64(sgd[i].ds_addr);
3323 sgl_ptr->Length = htole32(sgd[i].ds_len);
3324 sgl_ptr->Flags = 0;
3325 DNPRINTF(MFI_D_DMA,
3326 "sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32
3327 " flags 0x%x\n", sgl_ptr, i, sgl_ptr->Address,
3328 sgl_ptr->Length, sgl_ptr->Flags);
3329 sgl_ptr++;
3330 }
3331 bus_dmamap_sync(sc->sc_dmat,
3332 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3333 ccb->ccb_tb_psg_frame - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
3334 MEGASAS_MAX_SZ_CHAIN_FRAME, BUS_DMASYNC_PREREAD);
3335 }
3336
3337 if (ccb->ccb_direction == MFI_DATA_IN) {
3338 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
3339 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3340 } else {
3341 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
3342 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
3343 }
3344 return sge_count;
3345 }
3346
3347 /*
3348 * The ThunderBolt HW has an option for the driver to directly
3349 * access the underlying disks and operate on the RAID. To
3350 * do this there needs to be a capability to keep the RAID controller
3351 * and driver in sync. The FreeBSD driver does not take advantage
3352 * of this feature since it adds a lot of complexity and slows down
3353 * performance. Performance is gained by using the controller's
3354 * cache etc.
3355 *
3356 * Even though this driver doesn't access the disks directly, an
3357 * AEN-like command is used to inform the RAID firmware to "sync"
3358 * with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command. This
3359 * command in write mode will return when the RAID firmware has
3360 * detected a change to the RAID state. An example of this type
3361 * of change is removing a disk. Once the command returns the
3362 * driver needs to acknowledge this and "sync" all LD's again.
3363 * This repeats until we shut down. Then we need to cancel this
3364 * pending command.
3365 *
3366 * If this is not done right, the RAID firmware will not remove a
3367 * pulled drive and the RAID won't go degraded, effectively
3368 * stopping all RAID management functions.
3369 *
3370 * Doing another LD sync requires the use of an event, since the
3371 * driver needs to do an mfi_wait_command and can't do that in an
3372 * interrupt thread.
3373 *
3374 * The driver could get the RAID state via MFI_DCMD_LD_MAP_GET_INFO,
3375 * but that requires a bunch of structures; it is simpler to just do
3376 * MFI_DCMD_LD_GET_LIST versus walking the RAID map.
3377 */
3378
3379 void
3380 mfi_tbolt_sync_map_info(struct work *w, void *v)
3381 {
3382 struct mfi_softc *sc = v;
3383 int i;
3384 struct mfi_ccb *ccb = NULL;
3385 uint8_t mbox[MFI_MBOX_SIZE];
3386 struct mfi_ld *ld_sync = NULL;
3387 size_t ld_size;
3388 int s;
3389
3390 DNPRINTF(MFI_D_SYNC, "%s: mfi_tbolt_sync_map_info\n", DEVNAME(sc));
3391 again:
3392 s = splbio();
3393 if (sc->sc_ldsync_ccb != NULL) {
3394 splx(s);
3395 return;
3396 }
3397
3398 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
3399 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false)) {
3400 aprint_error_dev(sc->sc_dev, "MR_DCMD_LD_GET_LIST failed\n");
3401 goto err;
3402 }
3403
3404 ld_size = sizeof(*ld_sync) * sc->sc_ld_list.mll_no_ld;
3405
3406 ld_sync = (struct mfi_ld *) malloc(ld_size, M_DEVBUF,
3407 M_WAITOK | M_ZERO);
3408 if (ld_sync == NULL) {
3409 aprint_error_dev(sc->sc_dev, "Failed to allocate sync\n");
3410 goto err;
3411 }
3412 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3413 ld_sync[i] = sc->sc_ld_list.mll_list[i].mll_ld;
3414 }
3415
3416 if ((ccb = mfi_get_ccb(sc)) == NULL) {
3417 aprint_error_dev(sc->sc_dev, "Failed to get sync command\n");
3418 free(ld_sync, M_DEVBUF);
3419 goto err;
3420 }
3421 sc->sc_ldsync_ccb = ccb;
3422
3423 memset(mbox, 0, MFI_MBOX_SIZE);
3424 mbox[0] = sc->sc_ld_list.mll_no_ld;
3425 mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;
3426 if (mfi_mgmt(ccb, NULL, MR_DCMD_LD_MAP_GET_INFO, MFI_DATA_OUT,
3427 ld_size, ld_sync, mbox)) {
3428 aprint_error_dev(sc->sc_dev, "Failed to create sync command\n");
3429 goto err;
3430 }
3431 /*
3432 * we won't sleep on this command, so we have to override
3433 * the callback set up by mfi_mgmt()
3434 */
3435 ccb->ccb_done = mfi_sync_map_complete;
3436
3437 mfi_post(sc, ccb);
3438 splx(s);
3439 return;
3440
3441 err:
3442 if (ld_sync)
3443 free(ld_sync, M_DEVBUF);
3444 if (ccb)
3445 mfi_put_ccb(ccb);
3446 sc->sc_ldsync_ccb = NULL;
3447 splx(s);
3448 kpause("ldsyncp", 0, hz, NULL);
3449 goto again;
3450 }
3451
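/*
 * Completion handler for the pending map-sync command: tear it
 * down and, unless the driver is shutting down or the command
 * failed, enqueue another sync so the next RAID state change is
 * also caught.
 */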
3452 static void
3453 mfi_sync_map_complete(struct mfi_ccb *ccb)
3454 {
3455 struct mfi_softc *sc = ccb->ccb_sc;
3456 bool aborted = !sc->sc_running;
3457
3458 DNPRINTF(MFI_D_SYNC, "%s: mfi_sync_map_complete\n",
3459 DEVNAME(ccb->ccb_sc));
3460 KASSERT(sc->sc_ldsync_ccb == ccb);
3461 mfi_mgmt_done(ccb);
3462 free(ccb->ccb_data, M_DEVBUF);
3463 if (ccb->ccb_flags & MFI_CCB_F_ERR) {
3464 aprint_error_dev(sc->sc_dev, "sync command failed\n");
3465 aborted = true;
3466 }
3467 mfi_put_ccb(ccb);
3468 sc->sc_ldsync_ccb = NULL;
3469
3470 /* set it up again so the driver can catch more events */
3471 if (!aborted) {
3472 workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL);
3473 }
3474 }
3475