/*	$NetBSD: mfi.c,v 1.45 2012/08/26 16:05:29 bouyer Exp $	*/
/* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */

/*
 * Copyright (c) 2012 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Marco Peereboom <marco (at) peereboom.us>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Copyright 1994-2009 The FreeBSD Project.
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.45 2012/08/26 16:05:29 bouyer Exp $");

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/cpu.h>

#include <uvm/uvm_param.h>

#include <sys/bus.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/mfireg.h>
#include <dev/ic/mfivar.h>

#if NBIO > 0
#include <dev/biovar.h>
#endif /* NBIO > 0 */

#ifdef MFI_DEBUG
uint32_t	mfi_debug = 0
/*		    | MFI_D_CMD */
/*		    | MFI_D_INTR */
/*		    | MFI_D_MISC */
/*		    | MFI_D_DMA */
/*		    | MFI_D_IOCTL */
/*		    | MFI_D_RW */
/*		    | MFI_D_MEM */
/*		    | MFI_D_CCB */
/*		    | MFI_D_SYNC */
		;
#endif

static void		mfi_scsipi_request(struct scsipi_channel *,
				scsipi_adapter_req_t, void *);
static void		mfiminphys(struct buf *bp);

static struct mfi_ccb	*mfi_get_ccb(struct mfi_softc *);
static void		mfi_put_ccb(struct mfi_ccb *);
static int		mfi_init_ccb(struct mfi_softc *);

static struct mfi_mem	*mfi_allocmem(struct mfi_softc *, size_t);
static void		mfi_freemem(struct mfi_softc *, struct mfi_mem **);

static int		mfi_transition_firmware(struct mfi_softc *);
static int		mfi_initialize_firmware(struct mfi_softc *);
static int		mfi_get_info(struct mfi_softc *);
static int		mfi_get_bbu(struct mfi_softc *,
			    struct mfi_bbu_status *);
/* return codes for mfi_get_bbu */
#define MFI_BBU_GOOD	0
#define MFI_BBU_BAD	1
#define MFI_BBU_UNKNOWN	2
static uint32_t		mfi_read(struct mfi_softc *, bus_size_t);
static void		mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
static int		mfi_poll(struct mfi_ccb *);
static int		mfi_create_sgl(struct mfi_ccb *, int);

/* commands */
static int		mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
static int		mfi_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *,
				uint64_t, uint32_t);
static void		mfi_scsi_ld_done(struct mfi_ccb *);
static void		mfi_scsi_xs_done(struct mfi_ccb *, int, int);
static int		mfi_mgmt_internal(struct mfi_softc *, uint32_t,
			    uint32_t, uint32_t, void *, uint8_t *, bool);
static int		mfi_mgmt(struct mfi_ccb *, struct scsipi_xfer *,
			    uint32_t, uint32_t, uint32_t, void *, uint8_t *);
static void		mfi_mgmt_done(struct mfi_ccb *);

#if NBIO > 0
static int		mfi_ioctl(device_t, u_long, void *);
static int		mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
static int		mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
static int		mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
static int		mfi_ioctl_alarm(struct mfi_softc *,
				struct bioc_alarm *);
static int		mfi_ioctl_blink(struct mfi_softc *sc,
				struct bioc_blink *);
static int		mfi_ioctl_setstate(struct mfi_softc *,
				struct bioc_setstate *);
static int		mfi_bio_hs(struct mfi_softc *, int, int, void *);
static int		mfi_create_sensors(struct mfi_softc *);
static int		mfi_destroy_sensors(struct mfi_softc *);
static void		mfi_sensor_refresh(struct sysmon_envsys *,
				envsys_data_t *);
#endif /* NBIO > 0 */
static bool		mfi_shutdown(device_t, int);
static bool		mfi_suspend(device_t, const pmf_qual_t *);
static bool		mfi_resume(device_t, const pmf_qual_t *);

static uint32_t		mfi_xscale_fw_state(struct mfi_softc *sc);
static void		mfi_xscale_intr_ena(struct mfi_softc *sc);
static void		mfi_xscale_intr_dis(struct mfi_softc *sc);
static int		mfi_xscale_intr(struct mfi_softc *sc);
static void		mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);

static const struct mfi_iop_ops mfi_iop_xscale = {
	mfi_xscale_fw_state,
	mfi_xscale_intr_dis,
	mfi_xscale_intr_ena,
	mfi_xscale_intr,
	mfi_xscale_post,
	mfi_scsi_ld_io,
};

static uint32_t		mfi_ppc_fw_state(struct mfi_softc *sc);
static void		mfi_ppc_intr_ena(struct mfi_softc *sc);
static void		mfi_ppc_intr_dis(struct mfi_softc *sc);
static int		mfi_ppc_intr(struct mfi_softc *sc);
static void		mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);

static const struct mfi_iop_ops mfi_iop_ppc = {
	mfi_ppc_fw_state,
	mfi_ppc_intr_dis,
	mfi_ppc_intr_ena,
	mfi_ppc_intr,
	mfi_ppc_post,
	mfi_scsi_ld_io,
};

uint32_t	mfi_gen2_fw_state(struct mfi_softc *sc);
void		mfi_gen2_intr_ena(struct mfi_softc *sc);
void		mfi_gen2_intr_dis(struct mfi_softc *sc);
int		mfi_gen2_intr(struct mfi_softc *sc);
void		mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb);

static const struct mfi_iop_ops mfi_iop_gen2 = {
	mfi_gen2_fw_state,
	mfi_gen2_intr_dis,
	mfi_gen2_intr_ena,
	mfi_gen2_intr,
	mfi_gen2_post,
	mfi_scsi_ld_io,
};

u_int32_t	mfi_skinny_fw_state(struct mfi_softc *);
void		mfi_skinny_intr_dis(struct mfi_softc *);
void		mfi_skinny_intr_ena(struct mfi_softc *);
int		mfi_skinny_intr(struct mfi_softc *);
void		mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);

static const struct mfi_iop_ops mfi_iop_skinny = {
	mfi_skinny_fw_state,
	mfi_skinny_intr_dis,
	mfi_skinny_intr_ena,
	mfi_skinny_intr,
	mfi_skinny_post,
	mfi_scsi_ld_io,
};

static int	mfi_tbolt_init_desc_pool(struct mfi_softc *);
static int	mfi_tbolt_init_MFI_queue(struct mfi_softc *);
static void	mfi_tbolt_build_mpt_ccb(struct mfi_ccb *);
int		mfi_tbolt_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *,
		    uint64_t, uint32_t);
static void	mfi_tbolt_scsi_ld_done(struct mfi_ccb *);
static int	mfi_tbolt_create_sgl(struct mfi_ccb *, int);
void		mfi_tbolt_sync_map_info(struct work *, void *);
static void	mfi_sync_map_complete(struct mfi_ccb *);

u_int32_t	mfi_tbolt_fw_state(struct mfi_softc *);
void		mfi_tbolt_intr_dis(struct mfi_softc *);
void		mfi_tbolt_intr_ena(struct mfi_softc *);
int		mfi_tbolt_intr(struct mfi_softc *sc);
void		mfi_tbolt_post(struct mfi_softc *, struct mfi_ccb *);

static const struct mfi_iop_ops mfi_iop_tbolt = {
	mfi_tbolt_fw_state,
	mfi_tbolt_intr_dis,
	mfi_tbolt_intr_ena,
	mfi_tbolt_intr,
	mfi_tbolt_post,
	mfi_tbolt_scsi_ld_io,
};

#define mfi_fw_state(_s)	((_s)->sc_iop->mio_fw_state(_s))
#define mfi_intr_enable(_s)	((_s)->sc_iop->mio_intr_ena(_s))
#define mfi_intr_disable(_s)	((_s)->sc_iop->mio_intr_dis(_s))
#define mfi_my_intr(_s)		((_s)->sc_iop->mio_intr(_s))
#define mfi_post(_s, _c)	((_s)->sc_iop->mio_post((_s), (_c)))

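/*
 * ccb free list management.  ccbs live on sc_ccb_freeq; the list is
 * manipulated at splbio() because completions return ccbs from
 * interrupt context.
 */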
static struct mfi_ccb *
mfi_get_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb	*ccb;
	int		s;

	s = splbio();
	ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
	if (ccb) {
		TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
		ccb->ccb_state = MFI_CCB_READY;
	}
	splx(s);

	DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
	if (__predict_false(ccb == NULL && sc->sc_running))
		aprint_error_dev(sc->sc_dev, "out of ccb\n");

	return ccb;
}

static void
mfi_put_ccb(struct mfi_ccb *ccb)
{
	struct mfi_softc	*sc = ccb->ccb_sc;
	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
	int			s;

	DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);

	hdr->mfh_cmd_status = 0x0;
	hdr->mfh_flags = 0x0;
	ccb->ccb_state = MFI_CCB_FREE;
	ccb->ccb_xs = NULL;
	ccb->ccb_flags = 0;
	ccb->ccb_done = NULL;
	ccb->ccb_direction = 0;
	ccb->ccb_frame_size = 0;
	ccb->ccb_extra_frames = 0;
	ccb->ccb_sgl = NULL;
	ccb->ccb_data = NULL;
	ccb->ccb_len = 0;
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		/* erase tb_request_desc but preserve SMID */
		int index = ccb->ccb_tb_request_desc.header.SMID;
		ccb->ccb_tb_request_desc.words = 0;
		ccb->ccb_tb_request_desc.header.SMID = index;
	}
	s = splbio();
	TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
	splx(s);
}

static int
mfi_destroy_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb		*ccb;
	uint32_t		i;

	DNPRINTF(MFI_D_CCB, "%s: mfi_destroy_ccb\n", DEVNAME(sc));

	for (i = 0; (ccb = mfi_get_ccb(sc)) != NULL; i++) {
		/* destroy the dma map created for this transfer */
		bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (i < sc->sc_max_cmds)
		return EBUSY;

	free(sc->sc_ccb, M_DEVBUF);

	return 0;
}

static int
mfi_init_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb		*ccb;
	uint32_t		i;
	int			error;
	bus_addr_t		io_req_base_phys;
	uint8_t			*io_req_base;
	int			offset;

	DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));

	sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
	    M_DEVBUF, M_WAITOK|M_ZERO);
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		/*
		 * The first 256 bytes (SMID 0) are not used.
		 * Don't add them to the cmd list.
		 */
		io_req_base = (uint8_t *)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool) +
		    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
		io_req_base_phys = MFIMEM_DVA(sc->sc_tbolt_reqmsgpool) +
		    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
	}

	for (i = 0; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccb[i];

		ccb->ccb_sc = sc;

		/* select i'th frame */
		ccb->ccb_frame = (union mfi_frame *)
		    ((char *)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
		ccb->ccb_pframe =
		    MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
		ccb->ccb_frame->mfr_header.mfh_context = i;

		/* select i'th sense */
		ccb->ccb_sense = (struct mfi_sense *)
		    ((char *)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
		ccb->ccb_psense =
		    (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);

		/* create a dma map for transfer */
		error = bus_dmamap_create(sc->sc_datadmat,
		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "cannot create ccb dmamap (%d)\n", error);
			goto destroy;
		}
		if (sc->sc_ioptype == MFI_IOP_TBOLT) {
			offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
			ccb->ccb_tb_io_request =
			    (struct mfi_mpi2_request_raid_scsi_io *)
			    (io_req_base + offset);
			ccb->ccb_tb_pio_request =
			    io_req_base_phys + offset;
			offset = MEGASAS_MAX_SZ_CHAIN_FRAME * i;
			ccb->ccb_tb_sg_frame =
			    (mpi2_sge_io_union *)(sc->sc_reply_pool_limit +
			    offset);
			ccb->ccb_tb_psg_frame = sc->sc_sg_frame_busaddr +
			    offset;
			/* SMID 0 is reserved. Set SMID/index from 1 */
			ccb->ccb_tb_request_desc.header.SMID = i + 1;
		}

		DNPRINTF(MFI_D_CCB,
		    "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
		    ccb->ccb_frame->mfr_header.mfh_context, ccb,
		    (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
		    (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
		    (u_long)ccb->ccb_dmamap);

		/* add ccb to queue */
		mfi_put_ccb(ccb);
	}

	return 0;
destroy:
	/* free dma maps and ccb memory */
	while (i) {
		i--;
		ccb = &sc->sc_ccb[i];
		bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	free(sc->sc_ccb, M_DEVBUF);

	return 1;
}

static uint32_t
mfi_read(struct mfi_softc *sc, bus_size_t r)
{
	uint32_t rv;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x%08x ", DEVNAME(sc), (u_long)r, rv);
	return rv;
}

static void
mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static struct mfi_mem *
mfi_allocmem(struct mfi_softc *sc, size_t size)
{
	struct mfi_mem		*mm;
	int			nsegs;

	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
	    (long)size);

	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mm == NULL)
		return NULL;

	mm->am_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
		goto amfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	DNPRINTF(MFI_D_MEM, "  kva: %p dva: %p map: %p\n",
	    mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);

	memset(mm->am_kva, 0, size);
	return mm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
amfree:
	free(mm, M_DEVBUF);

	return NULL;
}

static void
mfi_freemem(struct mfi_softc *sc, struct mfi_mem **mmp)
{
	struct mfi_mem *mm = *mmp;

	if (mm == NULL)
		return;

	*mmp = NULL;

	DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);

	bus_dmamap_unload(sc->sc_dmat, mm->am_map);
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
	free(mm, M_DEVBUF);
}

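/*
 * Walk the firmware through its bring-up state machine until it
 * reports MFI_STATE_READY.  Each intermediate state is given a
 * bounded wait (max_wait in seconds, polled in 100ms steps) before
 * we declare the firmware stuck.
 */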
static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t		fw_state, cur_state;
	int			max_wait, i;

	fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;

	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
	    fw_state);

	while (fw_state != MFI_STATE_READY) {
		DNPRINTF(MFI_D_MISC,
		    "%s: waiting for firmware to become ready\n",
		    DEVNAME(sc));
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			aprint_error_dev(sc->sc_dev, "firmware fault\n");
			return 1;
		case MFI_STATE_WAIT_HANDSHAKE:
			if (sc->sc_ioptype == MFI_IOP_SKINNY ||
			    sc->sc_ioptype == MFI_IOP_TBOLT)
				mfi_write(sc, MFI_SKINNY_IDB,
				    MFI_INIT_CLEAR_HANDSHAKE);
			else
				mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			if (sc->sc_ioptype == MFI_IOP_SKINNY ||
			    sc->sc_ioptype == MFI_IOP_TBOLT)
				mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			else
				mfi_write(sc, MFI_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			if (sc->sc_ioptype == MFI_IOP_TBOLT) {
				mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_HOTPLUG);
				max_wait = 180;
				break;
			}
			/* FALLTHROUGH */
		default:
			aprint_error_dev(sc->sc_dev,
			    "unknown firmware state %d\n", fw_state);
			return 1;
		}
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			aprint_error_dev(sc->sc_dev,
			    "firmware stuck in state %#x\n", fw_state);
			return 1;
		}
	}

	return 0;
}

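/*
 * Hand the reply queue and producer/consumer indexes to the firmware
 * via an INIT frame; the qinfo structure is placed in the frame
 * directly after the init header.
 */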
static int
mfi_initialize_firmware(struct mfi_softc *sc)
{
	struct mfi_ccb		*ccb;
	struct mfi_init_frame	*init;
	struct mfi_init_qinfo	*qinfo;

	DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));

	if ((ccb = mfi_get_ccb(sc)) == NULL)
		return 1;

	init = &ccb->ccb_frame->mfr_init;
	qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);

	memset(qinfo, 0, sizeof *qinfo);
	qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
	qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_reply_q));
	qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_producer));
	qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_consumer));

	init->mif_header.mfh_cmd = MFI_CMD_INIT;
	init->mif_header.mfh_data_len = sizeof *qinfo;
	init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);

	DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
	    DEVNAME(sc),
	    qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
	    qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);

	if (mfi_poll(ccb)) {
		aprint_error_dev(sc->sc_dev,
		    "mfi_initialize_firmware failed\n");
		return 1;
	}

	mfi_put_ccb(ccb);

	return 0;
}

static int
mfi_get_info(struct mfi_softc *sc)
{
#ifdef MFI_DEBUG
	int i;
#endif
	DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));

	if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
	    sizeof(sc->sc_info), &sc->sc_info, NULL, cold ? true : false))
		return 1;

#ifdef MFI_DEBUG

	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		printf("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		printf("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	printf("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	printf("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	printf("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		printf("%.0lx ", sc->sc_info.mci_host.mih_port_addr[i]);
	printf("\n");

	printf("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		printf("%.0lx ", sc->sc_info.mci_device.mid_port_addr[i]);
	printf("\n");
#endif /* MFI_DEBUG */

	return 0;
}

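/*
 * Query the battery backup unit and fold the firmware status into
 * one of the MFI_BBU_* codes defined above.
 */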
static int
mfi_get_bbu(struct mfi_softc *sc, struct mfi_bbu_status *stat)
{
	DNPRINTF(MFI_D_MISC, "%s: mfi_get_bbu\n", DEVNAME(sc));

	if (mfi_mgmt_internal(sc, MR_DCMD_BBU_GET_STATUS, MFI_DATA_IN,
	    sizeof(*stat), stat, NULL, cold ? true : false))
		return MFI_BBU_UNKNOWN;
#ifdef MFI_DEBUG
	printf("bbu type %d, voltage %d, current %d, temperature %d, "
	    "status 0x%x\n", stat->battery_type, stat->voltage, stat->current,
	    stat->temperature, stat->fw_status);
	printf("details: ");
	switch (stat->battery_type) {
	case MFI_BBU_TYPE_IBBU:
		printf("gauge %d relative charge %d charger state %d "
		    "charger ctrl %d\n", stat->detail.ibbu.gas_guage_status,
		    stat->detail.ibbu.relative_charge,
		    stat->detail.ibbu.charger_system_state,
		    stat->detail.ibbu.charger_system_ctrl);
		printf("\tcurrent %d abs charge %d max error %d\n",
		    stat->detail.ibbu.charging_current,
		    stat->detail.ibbu.absolute_charge,
		    stat->detail.ibbu.max_error);
		break;
	case MFI_BBU_TYPE_BBU:
		printf("gauge %d relative charge %d charger state %d\n",
		    stat->detail.ibbu.gas_guage_status,
		    stat->detail.bbu.relative_charge,
		    stat->detail.bbu.charger_status);
		printf("\trem capacity %d full capacity %d SOH %d\n",
		    stat->detail.bbu.remaining_capacity,
		    stat->detail.bbu.full_charge_capacity,
		    stat->detail.bbu.is_SOH_good);
		break;
	default:
		printf("\n");
	}
#endif
	switch (stat->battery_type) {
	case MFI_BBU_TYPE_BBU:
		return (stat->detail.bbu.is_SOH_good ?
		    MFI_BBU_GOOD : MFI_BBU_BAD);
	case MFI_BBU_TYPE_NONE:
		return MFI_BBU_UNKNOWN;
	default:
		if (stat->fw_status &
		    (MFI_BBU_STATE_PACK_MISSING |
		     MFI_BBU_STATE_VOLTAGE_LOW |
		     MFI_BBU_STATE_TEMPERATURE_HIGH |
		     MFI_BBU_STATE_LEARN_CYC_FAIL |
		     MFI_BBU_STATE_LEARN_CYC_TIMEOUT |
		     MFI_BBU_STATE_I2C_ERR_DETECT))
			return MFI_BBU_BAD;
		return MFI_BBU_GOOD;
	}
}

static void
mfiminphys(struct buf *bp)
{
	DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);

	/* XXX currently using MFI_MAXFER = MAXPHYS */
	if (bp->b_bcount > MFI_MAXFER)
		bp->b_bcount = MFI_MAXFER;
	minphys(bp);
}

int
mfi_rescan(device_t self, const char *ifattr, const int *locators)
{
	struct mfi_softc *sc = device_private(self);

	if (sc->sc_child != NULL)
		return 0;

	sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
	    scsiprint, NULL);

	return 0;
}

void
mfi_childdetached(device_t self, device_t child)
{
	struct mfi_softc *sc = device_private(self);

	KASSERT(self == sc->sc_dev);
	KASSERT(child == sc->sc_child);

	if (child == sc->sc_child)
		sc->sc_child = NULL;
}

int
mfi_detach(struct mfi_softc *sc, int flags)
{
	int			error;

	DNPRINTF(MFI_D_MISC, "%s: mfi_detach\n", DEVNAME(sc));

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

#if NBIO > 0
	mfi_destroy_sensors(sc);
	bio_unregister(sc->sc_dev);
#endif /* NBIO > 0 */

	mfi_intr_disable(sc);
	mfi_shutdown(sc->sc_dev, 0);

	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		workqueue_destroy(sc->sc_ldsync_wq);
		mfi_put_ccb(sc->sc_ldsync_ccb);
		mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool);
		mfi_freemem(sc, &sc->sc_tbolt_ioc_init);
		mfi_freemem(sc, &sc->sc_tbolt_verbuf);
	}

	if ((error = mfi_destroy_ccb(sc)) != 0)
		return error;

	mfi_freemem(sc, &sc->sc_sense);

	mfi_freemem(sc, &sc->sc_frames);

	mfi_freemem(sc, &sc->sc_pcq);

	return 0;
}

static bool
mfi_shutdown(device_t dev, int how)
{
	struct mfi_softc	*sc = device_private(dev);
	uint8_t			mbox[MFI_MBOX_SIZE];
	int			s = splbio();
	DNPRINTF(MFI_D_MISC, "%s: mfi_shutdown\n", DEVNAME(sc));
	if (sc->sc_running) {
		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
		if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_CACHE_FLUSH,
		    MFI_DATA_NONE, 0, NULL, mbox, true)) {
			aprint_error_dev(dev, "shutdown: cache flush failed\n");
			goto fail;
		}

		mbox[0] = 0;
		if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_SHUTDOWN,
		    MFI_DATA_NONE, 0, NULL, mbox, true)) {
			aprint_error_dev(dev, "shutdown: "
			    "firmware shutdown failed\n");
			goto fail;
		}
		sc->sc_running = false;
	}
	splx(s);
	return true;
fail:
	splx(s);
	return false;
}

static bool
mfi_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}

static bool
mfi_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}

int
mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
{
	struct scsipi_adapter *adapt = &sc->sc_adapt;
	struct scsipi_channel *chan = &sc->sc_chan;
	uint32_t status, frames, max_sgl;
	int i;

	DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));

	sc->sc_ioptype = iop;

	switch (iop) {
	case MFI_IOP_XSCALE:
		sc->sc_iop = &mfi_iop_xscale;
		break;
	case MFI_IOP_PPC:
		sc->sc_iop = &mfi_iop_ppc;
		break;
	case MFI_IOP_GEN2:
		sc->sc_iop = &mfi_iop_gen2;
		break;
	case MFI_IOP_SKINNY:
		sc->sc_iop = &mfi_iop_skinny;
		break;
	case MFI_IOP_TBOLT:
		sc->sc_iop = &mfi_iop_tbolt;
		break;
	default:
		panic("%s: unknown iop %d", DEVNAME(sc), iop);
	}

	if (mfi_transition_firmware(sc))
		return 1;

	TAILQ_INIT(&sc->sc_ccb_freeq);

	status = mfi_fw_state(sc);
	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
	max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
		sc->sc_sgl_size = sizeof(struct mfi_sg_ieee);
	} else if (sc->sc_64bit_dma) {
		sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
		sc->sc_sgl_size = sizeof(struct mfi_sg64);
	} else {
		sc->sc_max_sgl = max_sgl;
		sc->sc_sgl_size = sizeof(struct mfi_sg32);
	}
	DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
	    DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);

	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		uint32_t tb_mem_size;
		/* for alignment */
		tb_mem_size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT;

		tb_mem_size +=
		    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1);
		sc->sc_reply_pool_size =
		    ((sc->sc_max_cmds + 1 + 15) / 16) * 16;
		tb_mem_size +=
		    MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;

		/* this is for the SGLs */
		tb_mem_size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->sc_max_cmds;
		sc->sc_tbolt_reqmsgpool = mfi_allocmem(sc, tb_mem_size);
		if (sc->sc_tbolt_reqmsgpool == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to allocate thunderbolt "
			    "request message pool\n");
			goto nopcq;
		}
		if (mfi_tbolt_init_desc_pool(sc)) {
			aprint_error_dev(sc->sc_dev,
			    "Thunderbolt pool preparation error\n");
			goto nopcq;
		}

		/*
		 * Allocate the DMA memory for the MPI2 IOC Init descriptor
		 * separately from the request and reply descriptor pools,
		 * to avoid confusion later.
		 */
		sc->sc_tbolt_ioc_init = mfi_allocmem(sc,
		    sizeof(struct mpi2_ioc_init_request));
		if (sc->sc_tbolt_ioc_init == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to allocate thunderbolt IOC init memory\n");
			goto nopcq;
		}

		sc->sc_tbolt_verbuf = mfi_allocmem(sc,
		    MEGASAS_MAX_NAME * sizeof(bus_addr_t));
		if (sc->sc_tbolt_verbuf == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to allocate thunderbolt version buffer\n");
			goto nopcq;
		}
	}
	/* consumer/producer and reply queue memory */
	sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
	    sizeof(struct mfi_prod_cons));
	if (sc->sc_pcq == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate reply queue memory\n");
		goto nopcq;
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* frame memory */
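	/*
	 * Each command frame is sized so that a worst-case SGL
	 * (sc_max_sgl entries of sc_sgl_size bytes) still fits, rounded
	 * up to whole MFI_FRAME_SIZE frames, plus one frame for the
	 * header itself.
	 */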
	frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) /
	    MFI_FRAME_SIZE + 1;
	sc->sc_frames_size = frames * MFI_FRAME_SIZE;
	sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
	if (sc->sc_frames == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate frame memory\n");
		goto noframe;
	}
	/* XXX hack, fix this */
	if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
		aprint_error_dev(sc->sc_dev,
		    "improper frame alignment (%#llx) FIXME\n",
		    (long long int)MFIMEM_DVA(sc->sc_frames));
		goto noframe;
	}

	/* sense memory */
	sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
	if (sc->sc_sense == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate sense memory\n");
		goto nosense;
	}

	/* now that we have all memory bits go initialize ccbs */
	if (mfi_init_ccb(sc)) {
		aprint_error_dev(sc->sc_dev, "could not init ccb list\n");
		goto noinit;
	}

	/* kickstart firmware with all addresses and pointers */
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		if (mfi_tbolt_init_MFI_queue(sc)) {
			aprint_error_dev(sc->sc_dev,
			    "could not initialize firmware\n");
			goto noinit;
		}
	} else {
		if (mfi_initialize_firmware(sc)) {
			aprint_error_dev(sc->sc_dev,
			    "could not initialize firmware\n");
			goto noinit;
		}
	}
	sc->sc_running = true;

	if (mfi_get_info(sc)) {
		aprint_error_dev(sc->sc_dev,
		    "could not retrieve controller information\n");
		goto noinit;
	}
	aprint_normal_dev(sc->sc_dev,
	    "%s version %s\n",
	    sc->sc_info.mci_product_name,
	    sc->sc_info.mci_package_version);

	aprint_normal_dev(sc->sc_dev, "logical drives %d, %dMB RAM, ",
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_memory_size);
	sc->sc_bbuok = false;
	if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) {
		struct mfi_bbu_status bbu_stat;
		int mfi_bbu_status = mfi_get_bbu(sc, &bbu_stat);
		aprint_normal("BBU type ");
		switch (bbu_stat.battery_type) {
		case MFI_BBU_TYPE_BBU:
			aprint_normal("BBU");
			break;
		case MFI_BBU_TYPE_IBBU:
			aprint_normal("IBBU");
			break;
		default:
			aprint_normal("unknown type %d", bbu_stat.battery_type);
		}
		aprint_normal(", status ");
		switch (mfi_bbu_status) {
		case MFI_BBU_GOOD:
			aprint_normal("good\n");
			sc->sc_bbuok = true;
			break;
		case MFI_BBU_BAD:
			aprint_normal("bad\n");
			break;
		case MFI_BBU_UNKNOWN:
			aprint_normal("unknown\n");
			break;
		default:
			panic("mfi_bbu_status");
		}
	} else {
		aprint_normal("BBU not present\n");
	}

	sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
	sc->sc_max_ld = sc->sc_ld_cnt;
	for (i = 0; i < sc->sc_ld_cnt; i++)
		sc->sc_ld[i].ld_present = 1;

	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* keep a few commands for management */
	if (sc->sc_max_cmds > 4)
		adapt->adapt_openings = sc->sc_max_cmds - 4;
	else
		adapt->adapt_openings = sc->sc_max_cmds;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = mfi_scsipi_request;
	adapt->adapt_minphys = mfiminphys;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_sas_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = MFI_MAX_LD;
	chan->chan_id = MFI_MAX_LD;

	mfi_rescan(sc->sc_dev, "scsi", NULL);

	/* enable interrupts */
	mfi_intr_enable(sc);

#if NBIO > 0
	if (bio_register(sc->sc_dev, mfi_ioctl) != 0)
		panic("%s: controller registration failed", DEVNAME(sc));
	if (mfi_create_sensors(sc) != 0)
		aprint_error_dev(sc->sc_dev, "unable to create sensors\n");
#endif /* NBIO > 0 */
	if (!pmf_device_register1(sc->sc_dev, mfi_suspend, mfi_resume,
	    mfi_shutdown)) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");
	}

	return 0;
noinit:
	mfi_freemem(sc, &sc->sc_sense);
nosense:
	mfi_freemem(sc, &sc->sc_frames);
noframe:
	mfi_freemem(sc, &sc->sc_pcq);
nopcq:
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		if (sc->sc_tbolt_reqmsgpool)
			mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool);
		if (sc->sc_tbolt_verbuf)
			mfi_freemem(sc, &sc->sc_tbolt_verbuf);
	}
	return 1;
}

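/*
 * Synchronous command execution: post the ccb and busy-wait up to
 * ~5 seconds for completion.  On MFA (Thunderbolt) controllers the
 * completion is only visible through the reply queue, so the
 * interrupt handler is driven by hand.
 */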
static int
mfi_poll(struct mfi_ccb *ccb)
{
	struct mfi_softc *sc = ccb->ccb_sc;
	struct mfi_frame_header	*hdr;
	int			to = 0;
	int			rv = 0;

	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));

	hdr = &ccb->ccb_frame->mfr_header;
	hdr->mfh_cmd_status = 0xff;
	if (!sc->sc_MFA_enabled)
		hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* no callback, caller is supposed to do the cleanup */
	ccb->ccb_done = NULL;

	mfi_post(sc, ccb);
	if (sc->sc_MFA_enabled) {
		/*
		 * Depending on the command type, the result may or may
		 * not be posted to *hdr.  In addition there seems to be
		 * no way to avoid posting the SMID to the reply queue,
		 * so poll using the interrupt routine.
		 */
		while (ccb->ccb_state != MFI_CCB_DONE) {
			delay(1000);
			if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
				rv = 1;
				break;
			}
			mfi_tbolt_intrh(sc);
		}
	} else {
		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
		    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
		    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);

		while (hdr->mfh_cmd_status == 0xff) {
			delay(1000);
			if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
				rv = 1;
				break;
			}
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
		}
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "timeout on ccb %d\n",
		    hdr->mfh_context);
		ccb->ccb_flags |= MFI_CCB_F_ERR;
		return 1;
	}

	return 0;
}

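/*
 * Reply queue interrupt handler.  The firmware advances mpc_producer
 * as commands complete; consume completed contexts until we catch up,
 * then publish the new mpc_consumer index back to the firmware.
 */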
int
mfi_intr(void *arg)
{
	struct mfi_softc	*sc = arg;
	struct mfi_prod_cons	*pcq;
	struct mfi_ccb		*ccb;
	uint32_t		producer, consumer, ctx;
	int			claimed = 0;

	if (!mfi_my_intr(sc))
		return 0;

	pcq = MFIMEM_KVA(sc->sc_pcq);

	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
	    (u_long)sc, (u_long)pcq);

	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	producer = pcq->mpc_producer;
	consumer = pcq->mpc_consumer;

	while (consumer != producer) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
		    DEVNAME(sc), producer, consumer);

		ctx = pcq->mpc_reply_q[consumer];
		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
		if (ctx == MFI_INVALID_CTX)
			aprint_error_dev(sc->sc_dev,
			    "invalid context, p: %d c: %d\n",
			    producer, consumer);
		else {
			/* XXX remove from queue and call scsi_done */
			ccb = &sc->sc_ccb[ctx];
			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
			    DEVNAME(sc), ctx);
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			ccb->ccb_done(ccb);

			claimed = 1;
		}
		consumer++;
		if (consumer == (sc->sc_max_cmds + 1))
			consumer = 0;
	}

	pcq->mpc_consumer = consumer;
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return claimed;
}

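/* Build a direct LD READ/WRITE frame for a block-level transfer. */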
static int
mfi_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint64_t blockno,
    uint32_t blockcnt)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mfi_io_frame	*io;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld_io: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	if (!xs->data)
		return 1;

	io = &ccb->ccb_frame->mfr_io;
	if (xs->xs_control & XS_CTL_DATA_IN) {
		io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
		ccb->ccb_direction = MFI_DATA_IN;
	} else {
		io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
		ccb->ccb_direction = MFI_DATA_OUT;
	}
	io->mif_header.mfh_target_id = periph->periph_target;
	io->mif_header.mfh_timeout = 0;
	io->mif_header.mfh_flags = 0;
	io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
	io->mif_header.mfh_data_len = blockcnt;
	io->mif_lba_hi = (blockno >> 32);
	io->mif_lba_lo = (blockno & 0xffffffff);
	io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
	io->mif_sense_addr_hi = 0;

	ccb->ccb_done = mfi_scsi_ld_done;
	ccb->ccb_xs = xs;
	ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
	ccb->ccb_sgl = &io->mif_sgl;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
		return 1;

	return 0;
}

static void
mfi_scsi_ld_done(struct mfi_ccb *ccb)
{
	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
	mfi_scsi_xs_done(ccb, hdr->mfh_cmd_status, hdr->mfh_scsi_status);
}

static void
mfi_scsi_xs_done(struct mfi_ccb *ccb, int status, int scsi_status)
{
	struct scsipi_xfer	*xs = ccb->ccb_xs;
	struct mfi_softc	*sc = ccb->ccb_sc;

	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
	    DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);

	if (xs->data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (status != MFI_STAT_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
		    DEVNAME(sc), status);

		if (scsi_status != 0) {
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
			    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
			    MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
			DNPRINTF(MFI_D_INTR,
			    "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
			    DEVNAME(sc), scsi_status,
			    (u_long)&xs->sense, (u_long)ccb->ccb_sense);
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, ccb->ccb_sense,
			    sizeof(struct scsi_sense_data));
			xs->error = XS_SENSE;
		}
	} else {
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
	}

	mfi_put_ccb(ccb);
	scsipi_done(xs);
}

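/*
 * Wrap a non-I/O CDB in a pass-through frame and hand it to the
 * firmware unmodified.
 */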
static int
mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
{
	struct mfi_pass_frame	*pf;
	struct scsipi_periph *periph = xs->xs_periph;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	pf = &ccb->ccb_frame->mfr_pass;
	pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
	pf->mpf_header.mfh_target_id = periph->periph_target;
	pf->mpf_header.mfh_lun_id = 0;
	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
	pf->mpf_header.mfh_timeout = 0;
	pf->mpf_header.mfh_data_len = xs->datalen; /* XXX */
	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;

	pf->mpf_sense_addr_hi = 0;
	pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);

	memset(pf->mpf_cdb, 0, 16);
	memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);

	ccb->ccb_done = mfi_scsi_ld_done;
	ccb->ccb_xs = xs;
	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
	ccb->ccb_sgl = &pf->mpf_sgl;

	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
		ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
		    MFI_DATA_IN : MFI_DATA_OUT;
	else
		ccb->ccb_direction = MFI_DATA_NONE;

	if (xs->data) {
		ccb->ccb_data = xs->data;
		ccb->ccb_len = xs->datalen;

		if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
			return 1;
	}

	return 0;
}

static void
mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph	*periph;
	struct scsipi_xfer	*xs;
	struct scsipi_adapter	*adapt = chan->chan_adapter;
	struct mfi_softc	*sc = device_private(adapt->adapt_dev);
	struct mfi_ccb		*ccb;
	struct scsi_rw_6	*rw;
	struct scsipi_rw_10	*rwb;
	struct scsipi_rw_12	*rw12;
	struct scsipi_rw_16	*rw16;
	uint64_t		blockno;
	uint32_t		blockcnt;
	uint8_t			target;
	uint8_t			mbox[MFI_MBOX_SIZE];
	int			s;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;

	periph = xs->xs_periph;
	target = periph->periph_target;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x "
	    "target %d lun %d\n", DEVNAME(sc), req, xs->cmd->opcode,
	    periph->periph_target, periph->periph_lun);

	s = splbio();
	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
	    periph->periph_lun != 0) {
		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
		    DEVNAME(sc), target);
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		splx(s);
		return;
	}
	if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
	    xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
		/* the cache is stable storage, don't flush */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		scsipi_done(xs);
		splx(s);
		return;
	}

	if ((ccb = mfi_get_ccb(sc)) == NULL) {
		DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n",
		    DEVNAME(sc));
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}

	switch (xs->cmd->opcode) {
	/* IO path */
	case READ_16:
	case WRITE_16:
		rw16 = (struct scsipi_rw_16 *)xs->cmd;
		blockno = _8btol(rw16->addr);
		blockcnt = _4btol(rw16->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case READ_12:
	case WRITE_12:
		rw12 = (struct scsipi_rw_12 *)xs->cmd;
		blockno = _4btol(rw12->addr);
		blockcnt = _4btol(rw12->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case READ_10:
	case WRITE_10:
		rwb = (struct scsipi_rw_10 *)xs->cmd;
		blockno = _4btol(rwb->addr);
		blockcnt = _2btol(rwb->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case SCSI_READ_6_COMMAND:
	case SCSI_WRITE_6_COMMAND:
		rw = (struct scsi_rw_6 *)xs->cmd;
		blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		blockcnt = rw->length ? rw->length : 0x100;
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case SCSI_SYNCHRONIZE_CACHE_10:
	case SCSI_SYNCHRONIZE_CACHE_16:
		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
		if (mfi_mgmt(ccb, xs,
		    MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, mbox)) {
			goto stuffup;
		}
		break;

	/* hand it off to the firmware and let it deal with it */
	case SCSI_TEST_UNIT_READY:
		/* save off sd? after autoconf */
		if (!cold)	/* XXX bogus */
			strlcpy(sc->sc_ld[target].ld_dev,
			    device_xname(sc->sc_dev),
			    sizeof(sc->sc_ld[target].ld_dev));
		/* FALLTHROUGH */

	default:
		if (mfi_scsi_ld(ccb, xs)) {
			goto stuffup;
		}
		break;
	}

	DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);

	if (xs->xs_control & XS_CTL_POLL) {
		if (mfi_poll(ccb)) {
			/* XXX check for sense in ccb->ccb_sense? */
			aprint_error_dev(sc->sc_dev,
			    "mfi_scsipi_request poll failed\n");
			memset(&xs->sense, 0, sizeof(xs->sense));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | SSD_RCODE_CURRENT;
			xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
			xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
			xs->error = XS_SENSE;
			xs->status = SCSI_CHECK;
		} else {
			DNPRINTF(MFI_D_DMA,
			    "%s: mfi_scsipi_request poll complete %d\n",
			    DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
			xs->error = XS_NOERROR;
			xs->status = SCSI_OK;
			xs->resid = 0;
		}
		mfi_put_ccb(ccb);
		scsipi_done(xs);
		splx(s);
		return;
	}

	mfi_post(sc, ccb);

	DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
	    ccb->ccb_dmamap->dm_nsegs);

	splx(s);
	return;

stuffup:
	mfi_put_ccb(ccb);
	xs->error = XS_DRIVER_STUFFUP;
	scsipi_done(xs);
	splx(s);
}

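/*
 * Load the data buffer into the ccb's dmamap and translate the
 * resulting segment list into the SGL flavour the controller expects:
 * IEEE SGEs for Thunderbolt I/O frames, 64-bit SGEs when 64-bit DMA
 * is enabled, 32-bit SGEs otherwise.
 */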
static int
mfi_create_sgl(struct mfi_ccb *ccb, int flags)
{
	struct mfi_softc	*sc = ccb->ccb_sc;
	struct mfi_frame_header	*hdr;
	bus_dma_segment_t	*sgd;
	union mfi_sgl		*sgl;
	int			error, i;

	DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
	    (u_long)ccb->ccb_data);

	if (!ccb->ccb_data)
		return 1;

	KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p());
	error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap,
	    ccb->ccb_data, ccb->ccb_len, NULL, flags);
	if (error) {
		if (error == EFBIG) {
			aprint_error_dev(sc->sc_dev, "more than %d dma segs\n",
			    sc->sc_max_sgl);
		} else {
			aprint_error_dev(sc->sc_dev,
			    "error %d loading dma map\n", error);
		}
		return 1;
	}

	hdr = &ccb->ccb_frame->mfr_header;
	sgl = ccb->ccb_sgl;
	sgd = ccb->ccb_dmamap->dm_segs;
	for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
		if (sc->sc_ioptype == MFI_IOP_TBOLT &&
		    (hdr->mfh_cmd == MFI_CMD_PD_SCSI_IO ||
		     hdr->mfh_cmd == MFI_CMD_LD_READ ||
		     hdr->mfh_cmd == MFI_CMD_LD_WRITE)) {
			sgl->sg_ieee[i].addr = htole64(sgd[i].ds_addr);
			sgl->sg_ieee[i].len = htole32(sgd[i].ds_len);
			sgl->sg_ieee[i].flags = 0;
			DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#"
			    PRIx32 "\n",
			    DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
			hdr->mfh_flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
		} else if (sc->sc_64bit_dma) {
			sgl->sg64[i].addr = htole64(sgd[i].ds_addr);
			sgl->sg64[i].len = htole32(sgd[i].ds_len);
			DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#"
			    PRIx32 "\n",
			    DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
			hdr->mfh_flags |= MFI_FRAME_SGL64;
		} else {
			sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
			sgl->sg32[i].len = htole32(sgd[i].ds_len);
			DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
			    DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
			hdr->mfh_flags |= MFI_FRAME_SGL32;
		}
	}

	if (ccb->ccb_direction == MFI_DATA_IN) {
		hdr->mfh_flags |= MFI_FRAME_DIR_READ;
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	} else {
		hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}

	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
	ccb->ccb_frame_size += sc->sc_sgl_size * ccb->ccb_dmamap->dm_nsegs;
	ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;

	DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
	    " dm_nsegs: %d extra_frames: %d\n",
	    DEVNAME(sc),
	    hdr->mfh_sg_count,
	    ccb->ccb_frame_size,
	    sc->sc_frames_size,
	    ccb->ccb_dmamap->dm_nsegs,
	    ccb->ccb_extra_frames);

	return 0;
}

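/*
 * Issue a DCMD on the adapter's behalf (no scsipi_xfer attached).
 * With poll=true the command is spun on synchronously, which is the
 * only safe mode while cold or during shutdown; otherwise sleep until
 * mfi_mgmt_done() wakes us up.
 */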
static int
mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t dir,
    uint32_t len, void *buf, uint8_t *mbox, bool poll)
{
	struct mfi_ccb		*ccb;
	int			rv = 1;

	if ((ccb = mfi_get_ccb(sc)) == NULL)
		return rv;
	rv = mfi_mgmt(ccb, NULL, opc, dir, len, buf, mbox);
	if (rv)
		goto done;	/* release the ccb on failure */

	if (poll) {
		rv = 1;
		if (mfi_poll(ccb))
			goto done;
	} else {
		mfi_post(sc, ccb);

		DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt_internal sleeping\n",
		    DEVNAME(sc));
		while (ccb->ccb_state != MFI_CCB_DONE)
			tsleep(ccb, PRIBIO, "mfi_mgmt", 0);

		if (ccb->ccb_flags & MFI_CCB_F_ERR)
			goto done;
	}
	rv = 0;

done:
	mfi_put_ccb(ccb);
	return rv;
}

static int
mfi_mgmt(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
    uint32_t opc, uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
{
	struct mfi_dcmd_frame	*dcmd;

	DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(ccb->ccb_sc), opc);

	dcmd = &ccb->ccb_frame->mfr_dcmd;
	memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
	dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
	dcmd->mdf_header.mfh_timeout = 0;

	dcmd->mdf_opcode = opc;
	dcmd->mdf_header.mfh_data_len = 0;
	ccb->ccb_direction = dir;
	ccb->ccb_xs = xs;
	ccb->ccb_done = mfi_mgmt_done;

	ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;

	/* handle special opcodes */
	if (mbox)
		memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);

	if (dir != MFI_DATA_NONE) {
		dcmd->mdf_header.mfh_data_len = len;
		ccb->ccb_data = buf;
		ccb->ccb_len = len;
		ccb->ccb_sgl = &dcmd->mdf_sgl;

		if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
			return 1;
	}
	return 0;
}

1880 static void
1881 mfi_mgmt_done(struct mfi_ccb *ccb)
1882 {
1883 struct scsipi_xfer *xs = ccb->ccb_xs;
1884 struct mfi_softc *sc = ccb->ccb_sc;
1885 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1886
1887 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
1888 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1889
1890 if (ccb->ccb_data != NULL) {
1891 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
1892 DEVNAME(sc));
1893 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
1894 ccb->ccb_dmamap->dm_mapsize,
1895 (ccb->ccb_direction & MFI_DATA_IN) ?
1896 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1897
1898 bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
1899 }
1900
1901 if (hdr->mfh_cmd_status != MFI_STAT_OK)
1902 ccb->ccb_flags |= MFI_CCB_F_ERR;
1903
1904 ccb->ccb_state = MFI_CCB_DONE;
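	/* Internal commands have no scsipi xfer; wake the sleeping caller. */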
1905 if (xs) {
1906 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
1907 xs->error = XS_DRIVER_STUFFUP;
1908 } else {
1909 xs->error = XS_NOERROR;
1910 xs->status = SCSI_OK;
1911 xs->resid = 0;
1912 }
1913 mfi_put_ccb(ccb);
1914 scsipi_done(xs);
1915 } else
1916 wakeup(ccb);
1917 }
1918
1919 #if NBIO > 0
1920 int
1921 mfi_ioctl(device_t dev, u_long cmd, void *addr)
1922 {
1923 struct mfi_softc *sc = device_private(dev);
1924 int error = 0;
1925 int s;
1926
1927 KERNEL_LOCK(1, curlwp);
1928 s = splbio();
1929
1930 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1931
1932 switch (cmd) {
1933 case BIOCINQ:
1934 DNPRINTF(MFI_D_IOCTL, "inq\n");
1935 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1936 break;
1937
1938 case BIOCVOL:
1939 DNPRINTF(MFI_D_IOCTL, "vol\n");
1940 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1941 break;
1942
1943 case BIOCDISK:
1944 DNPRINTF(MFI_D_IOCTL, "disk\n");
1945 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1946 break;
1947
1948 case BIOCALARM:
1949 DNPRINTF(MFI_D_IOCTL, "alarm\n");
1950 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1951 break;
1952
1953 case BIOCBLINK:
1954 DNPRINTF(MFI_D_IOCTL, "blink\n");
1955 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1956 break;
1957
1958 case BIOCSETSTATE:
1959 DNPRINTF(MFI_D_IOCTL, "setstate\n");
1960 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1961 break;
1962
1963 default:
1964 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1965 error = EINVAL;
1966 }
1967 splx(s);
1968 KERNEL_UNLOCK_ONE(curlwp);
1969
1970 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
1971 return error;
1972 }
1973
1974 static int
1975 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1976 {
1977 struct mfi_conf *cfg;
1978 int rv = EINVAL;
1979
1980 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1981
1982 if (mfi_get_info(sc)) {
1983 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
1984 DEVNAME(sc));
1985 return EIO;
1986 }
1987
1988 	/* gather the volume and disk counts */
1989 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1990 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1991 sizeof *cfg, cfg, NULL, false))
1992 goto freeme;
1993
1994 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1995 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1996 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1997
1998 rv = 0;
1999 freeme:
2000 free(cfg, M_DEVBUF);
2001 return rv;
2002 }
2003
2004 static int
2005 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
2006 {
2007 int i, per, rv = EINVAL;
2008 uint8_t mbox[MFI_MBOX_SIZE];
2009
2010 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
2011 DEVNAME(sc), bv->bv_volid);
2012
2013 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
2014 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false))
2015 goto done;
2016
2017 	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
2018 		/* go do hotspares */
2019 		rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
2020 		goto done;
2021 	}
2022 
2023 	i = bv->bv_volid;
2024 	mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
2025 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
2026 	    DEVNAME(sc), mbox[0]);
2027 
2028 	if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
2029 	    sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox, false))
2030 		goto done;
2031
2032 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
2033
2034 	switch (sc->sc_ld_list.mll_list[i].mll_state) {
2035 case MFI_LD_OFFLINE:
2036 bv->bv_status = BIOC_SVOFFLINE;
2037 break;
2038
2039 case MFI_LD_PART_DEGRADED:
2040 case MFI_LD_DEGRADED:
2041 bv->bv_status = BIOC_SVDEGRADED;
2042 break;
2043
2044 case MFI_LD_ONLINE:
2045 bv->bv_status = BIOC_SVONLINE;
2046 break;
2047
2048 default:
2049 bv->bv_status = BIOC_SVINVALID;
2050 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
2051 DEVNAME(sc),
2052 sc->sc_ld_list.mll_list[i].mll_state);
2053 }
2054
2055 /* additional status can modify MFI status */
2056 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
2057 case MFI_LD_PROG_CC:
2058 case MFI_LD_PROG_BGI:
2059 bv->bv_status = BIOC_SVSCRUB;
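		/* the firmware reports progress as a fraction of 0xffff */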
2060 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
2061 bv->bv_percent = (per * 100) / 0xffff;
2062 bv->bv_seconds =
2063 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
2064 break;
2065
2066 case MFI_LD_PROG_FGI:
2067 case MFI_LD_PROG_RECONSTRUCT:
2068 /* nothing yet */
2069 break;
2070 }
2071
2072 /*
2073 	 * The RAID levels are determined per the SNIA DDF spec; this is only
2074 	 * a subset that is valid for the MFI controller.
2075 */
2076 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
2077 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
2078 MFI_DDF_SRL_SPANNED)
2079 bv->bv_level *= 10;
2080
2081 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
2082 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
2083
2084 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
2085
2086 rv = 0;
2087 done:
2088 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
2089 DEVNAME(sc), rv);
2090 return rv;
2091 }
2092
2093 static int
2094 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
2095 {
2096 struct mfi_conf *cfg;
2097 struct mfi_array *ar;
2098 struct mfi_ld_cfg *ld;
2099 struct mfi_pd_details *pd;
2100 struct scsipi_inquiry_data *inqbuf;
2101 char vend[8+16+4+1];
2102 int i, rv = EINVAL;
2103 int arr, vol, disk;
2104 uint32_t size;
2105 uint8_t mbox[MFI_MBOX_SIZE];
2106
2107 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
2108 DEVNAME(sc), bd->bd_diskid);
2109
2110 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
2111
2112 /* send single element command to retrieve size for full structure */
2113 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2114 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2115 sizeof *cfg, cfg, NULL, false))
2116 goto freeme;
2117
2118 size = cfg->mfc_size;
2119 free(cfg, M_DEVBUF);
2120
2121 /* memory for read config */
2122 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
2123 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2124 size, cfg, NULL, false))
2125 goto freeme;
2126
2127 ar = cfg->mfc_array;
2128
2129 /* calculate offset to ld structure */
2130 ld = (struct mfi_ld_cfg *)(
2131 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
2132 cfg->mfc_array_size * cfg->mfc_no_array);
2133
2134 vol = bd->bd_volid;
2135
2136 if (vol >= cfg->mfc_no_ld) {
2137 /* do hotspares */
2138 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
2139 goto freeme;
2140 }
2141
2142 /* find corresponding array for ld */
2143 for (i = 0, arr = 0; i < vol; i++)
2144 arr += ld[i].mlc_parm.mpa_span_depth;
2145
2146 /* offset disk into pd list */
2147 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
2148
2149 /* offset array index into the next spans */
2150 arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
2151
2152 bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
2153 	switch (ar[arr].pd[disk].mar_pd_state) {
2154 case MFI_PD_UNCONFIG_GOOD:
2155 bd->bd_status = BIOC_SDUNUSED;
2156 break;
2157
2158 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
2159 bd->bd_status = BIOC_SDHOTSPARE;
2160 break;
2161
2162 case MFI_PD_OFFLINE:
2163 bd->bd_status = BIOC_SDOFFLINE;
2164 break;
2165
2166 case MFI_PD_FAILED:
2167 bd->bd_status = BIOC_SDFAILED;
2168 break;
2169
2170 case MFI_PD_REBUILD:
2171 bd->bd_status = BIOC_SDREBUILD;
2172 break;
2173
2174 case MFI_PD_ONLINE:
2175 bd->bd_status = BIOC_SDONLINE;
2176 break;
2177
2178 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
2179 default:
2180 bd->bd_status = BIOC_SDINVALID;
2181 break;
2182
2183 }
2184
2185 /* get the remaining fields */
2186 *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
2187 memset(pd, 0, sizeof(*pd));
2188 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2189 sizeof *pd, pd, mbox, false))
2190 goto freeme;
2191
2192 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
2193
2194 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
2195 bd->bd_channel = pd->mpd_enc_idx;
2196
2197 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
2198 memcpy(vend, inqbuf->vendor, sizeof vend - 1);
2199 vend[sizeof vend - 1] = '\0';
2200 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
2201
2202 /* XXX find a way to retrieve serial nr from drive */
2203 /* XXX find a way to get bd_procdev */
2204
2205 rv = 0;
2206 freeme:
2207 free(pd, M_DEVBUF);
2208 free(cfg, M_DEVBUF);
2209
2210 return rv;
2211 }
2212
2213 static int
2214 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
2215 {
2216 uint32_t opc, dir = MFI_DATA_NONE;
2217 int rv = 0;
2218 int8_t ret;
2219
2220 	switch (ba->ba_opcode) {
2221 case BIOC_SADISABLE:
2222 opc = MR_DCMD_SPEAKER_DISABLE;
2223 break;
2224
2225 case BIOC_SAENABLE:
2226 opc = MR_DCMD_SPEAKER_ENABLE;
2227 break;
2228
2229 case BIOC_SASILENCE:
2230 opc = MR_DCMD_SPEAKER_SILENCE;
2231 break;
2232
2233 case BIOC_GASTATUS:
2234 opc = MR_DCMD_SPEAKER_GET;
2235 dir = MFI_DATA_IN;
2236 break;
2237
2238 case BIOC_SATEST:
2239 opc = MR_DCMD_SPEAKER_TEST;
2240 break;
2241
2242 default:
2243 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
2244 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
2245 return EINVAL;
2246 }
2247
2248 if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL, false))
2249 rv = EINVAL;
2250 	else if (ba->ba_opcode == BIOC_GASTATUS)
2252 		ba->ba_status = ret;
2253 	else
2254 		ba->ba_status = 0;
2255
2256 return rv;
2257 }
2258
2259 static int
2260 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
2261 {
2262 int i, found, rv = EINVAL;
2263 uint8_t mbox[MFI_MBOX_SIZE];
2264 uint32_t cmd;
2265 struct mfi_pd_list *pd;
2266
2267 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
2268 bb->bb_status);
2269
2270 /* channel 0 means not in an enclosure so can't be blinked */
2271 if (bb->bb_channel == 0)
2272 return EINVAL;
2273
2274 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
2275
2276 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
2277 MFI_PD_LIST_SIZE, pd, NULL, false))
2278 goto done;
2279
2280 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
2281 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
2282 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
2283 found = 1;
2284 break;
2285 }
2286
2287 if (!found)
2288 goto done;
2289
2290 memset(mbox, 0, sizeof mbox);
2291
2292 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
2293
2294 switch (bb->bb_status) {
2295 case BIOC_SBUNBLINK:
2296 cmd = MR_DCMD_PD_UNBLINK;
2297 break;
2298
2299 case BIOC_SBBLINK:
2300 cmd = MR_DCMD_PD_BLINK;
2301 break;
2302
2303 case BIOC_SBALARM:
2304 default:
2305 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
2306 "opcode %x\n", DEVNAME(sc), bb->bb_status);
2307 goto done;
2308 }
2309 
2311 if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox, false))
2312 goto done;
2313
2314 rv = 0;
2315 done:
2316 free(pd, M_DEVBUF);
2317 return rv;
2318 }
2319
2320 static int
2321 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
2322 {
2323 struct mfi_pd_list *pd;
2324 int i, found, rv = EINVAL;
2325 uint8_t mbox[MFI_MBOX_SIZE];
2326 uint32_t cmd;
2327
2328 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
2329 bs->bs_status);
2330
2331 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
2332
2333 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
2334 MFI_PD_LIST_SIZE, pd, NULL, false))
2335 goto done;
2336
2337 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
2338 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
2339 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
2340 found = 1;
2341 break;
2342 }
2343
2344 if (!found)
2345 goto done;
2346
2347 memset(mbox, 0, sizeof mbox);
2348
2349 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
2350
2351 switch (bs->bs_status) {
2352 case BIOC_SSONLINE:
2353 mbox[2] = MFI_PD_ONLINE;
2354 cmd = MD_DCMD_PD_SET_STATE;
2355 break;
2356
2357 case BIOC_SSOFFLINE:
2358 mbox[2] = MFI_PD_OFFLINE;
2359 cmd = MD_DCMD_PD_SET_STATE;
2360 break;
2361
2362 case BIOC_SSHOTSPARE:
2363 mbox[2] = MFI_PD_HOTSPARE;
2364 cmd = MD_DCMD_PD_SET_STATE;
2365 break;
2366 /*
2367 case BIOC_SSREBUILD:
2368 cmd = MD_DCMD_PD_REBUILD;
2369 break;
2370 */
2371 default:
2372 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
2373 "opcode %x\n", DEVNAME(sc), bs->bs_status);
2374 goto done;
2375 }
2376 
2378 	if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE,
2379 0, NULL, mbox, false))
2380 goto done;
2381
2382 rv = 0;
2383 done:
2384 free(pd, M_DEVBUF);
2385 return rv;
2386 }
2387
2388 static int
2389 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
2390 {
2391 struct mfi_conf *cfg;
2392 struct mfi_hotspare *hs;
2393 struct mfi_pd_details *pd;
2394 struct bioc_disk *sdhs;
2395 struct bioc_vol *vdhs;
2396 struct scsipi_inquiry_data *inqbuf;
2397 char vend[8+16+4+1];
2398 int i, rv = EINVAL;
2399 uint32_t size;
2400 uint8_t mbox[MFI_MBOX_SIZE];
2401
2402 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
2403
2404 if (!bio_hs)
2405 return EINVAL;
2406
2407 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
2408
2409 /* send single element command to retrieve size for full structure */
2410 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2411 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2412 sizeof *cfg, cfg, NULL, false))
2413 goto freeme;
2414
2415 size = cfg->mfc_size;
2416 free(cfg, M_DEVBUF);
2417
2418 /* memory for read config */
2419 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
2420 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2421 size, cfg, NULL, false))
2422 goto freeme;
2423
2424 /* calculate offset to hs structure */
2425 hs = (struct mfi_hotspare *)(
2426 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
2427 cfg->mfc_array_size * cfg->mfc_no_array +
2428 cfg->mfc_ld_size * cfg->mfc_no_ld);
2429
2430 if (volid < cfg->mfc_no_ld)
2431 goto freeme; /* not a hotspare */
2432
2433 	if (volid >= (cfg->mfc_no_ld + cfg->mfc_no_hs))
2434 goto freeme; /* not a hotspare */
2435
2436 /* offset into hotspare structure */
2437 i = volid - cfg->mfc_no_ld;
2438
2439 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
2440 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
2441 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
2442
2443 /* get pd fields */
2444 memset(mbox, 0, sizeof mbox);
2445 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
2446 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2447 sizeof *pd, pd, mbox, false)) {
2448 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
2449 DEVNAME(sc));
2450 goto freeme;
2451 }
2452
2453 switch (type) {
2454 case MFI_MGMT_VD:
2455 vdhs = bio_hs;
2456 vdhs->bv_status = BIOC_SVONLINE;
2457 vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */
2458 vdhs->bv_level = -1; /* hotspare */
2459 vdhs->bv_nodisk = 1;
2460 break;
2461
2462 case MFI_MGMT_SD:
2463 sdhs = bio_hs;
2464 sdhs->bd_status = BIOC_SDHOTSPARE;
2465 sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */
2466 sdhs->bd_channel = pd->mpd_enc_idx;
2467 sdhs->bd_target = pd->mpd_enc_slot;
2468 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
2469 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
2470 vend[sizeof vend - 1] = '\0';
2471 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
2472 break;
2473
2474 default:
2475 goto freeme;
2476 }
2477
2478 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
2479 rv = 0;
2480 freeme:
2481 free(pd, M_DEVBUF);
2482 free(cfg, M_DEVBUF);
2483
2484 return rv;
2485 }
2486
2487 static int
2488 mfi_destroy_sensors(struct mfi_softc *sc)
2489 {
2490 if (sc->sc_sme == NULL)
2491 return 0;
2492 sysmon_envsys_unregister(sc->sc_sme);
2493 sc->sc_sme = NULL;
2494 free(sc->sc_sensor, M_DEVBUF);
2495 return 0;
2496 }
2497
2498 static int
2499 mfi_create_sensors(struct mfi_softc *sc)
2500 {
2501 int i;
2502 int nsensors = sc->sc_ld_cnt + 1;
2503 int rv;
2504
2505 sc->sc_sme = sysmon_envsys_create();
2506 sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
2507 M_DEVBUF, M_NOWAIT | M_ZERO);
2508 if (sc->sc_sensor == NULL) {
2509 aprint_error_dev(sc->sc_dev, "can't allocate envsys_data_t\n");
2510 return ENOMEM;
2511 }
2512
2513 /* BBU */
2514 sc->sc_sensor[0].units = ENVSYS_INDICATOR;
2515 sc->sc_sensor[0].state = ENVSYS_SINVALID;
2516 sc->sc_sensor[0].value_cur = 0;
2517 /* Enable monitoring for BBU state changes, if present */
2518 if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU)
2519 sc->sc_sensor[0].flags |= ENVSYS_FMONCRITICAL;
2520 snprintf(sc->sc_sensor[0].desc,
2521 sizeof(sc->sc_sensor[0].desc), "%s BBU", DEVNAME(sc));
2522 if (sysmon_envsys_sensor_attach(sc->sc_sme, &sc->sc_sensor[0]))
2523 goto out;
2524
2525 for (i = 1; i < nsensors; i++) {
2526 sc->sc_sensor[i].units = ENVSYS_DRIVE;
2527 sc->sc_sensor[i].state = ENVSYS_SINVALID;
2528 sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY;
2529 /* Enable monitoring for drive state changes */
2530 sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
2531 /* logical drives */
2532 snprintf(sc->sc_sensor[i].desc,
2533 sizeof(sc->sc_sensor[i].desc), "%s:%d",
2534 DEVNAME(sc), i - 1);
2535 if (sysmon_envsys_sensor_attach(sc->sc_sme,
2536 &sc->sc_sensor[i]))
2537 goto out;
2538 }
2539
2540 sc->sc_sme->sme_name = DEVNAME(sc);
2541 sc->sc_sme->sme_cookie = sc;
2542 sc->sc_sme->sme_refresh = mfi_sensor_refresh;
2543 rv = sysmon_envsys_register(sc->sc_sme);
2544 if (rv != 0) {
2545 aprint_error_dev(sc->sc_dev,
2546 "unable to register with sysmon (rv = %d)\n", rv);
2547 goto out;
2548 }
2549 return 0;
2550
2551 out:
2552 free(sc->sc_sensor, M_DEVBUF);
2553 sysmon_envsys_destroy(sc->sc_sme);
2554 sc->sc_sme = NULL;
2555 return EINVAL;
2556 }
2557
2558 static void
2559 mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
2560 {
2561 struct mfi_softc *sc = sme->sme_cookie;
2562 struct bioc_vol bv;
2563 int s;
2564 int error;
2565
2566 if (edata->sensor >= sc->sc_ld_cnt + 1)
2567 return;
2568
2569 if (edata->sensor == 0) {
2570 /* BBU */
2571 struct mfi_bbu_status bbu_stat;
2572 int bbu_status;
2573 if ((sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) == 0)
2574 return;
2575
2576 KERNEL_LOCK(1, curlwp);
2577 s = splbio();
2578 bbu_status = mfi_get_bbu(sc, &bbu_stat);
2579 splx(s);
2580 KERNEL_UNLOCK_ONE(curlwp);
2581 		switch (bbu_status) {
2582 case MFI_BBU_GOOD:
2583 edata->value_cur = 1;
2584 edata->state = ENVSYS_SVALID;
2585 sc->sc_bbuok = true;
2586 break;
2587 case MFI_BBU_BAD:
2588 edata->value_cur = 0;
2589 edata->state = ENVSYS_SCRITICAL;
2590 sc->sc_bbuok = false;
2591 break;
2592 case MFI_BBU_UNKNOWN:
2593 default:
2594 edata->value_cur = 0;
2595 edata->state = ENVSYS_SINVALID;
2596 sc->sc_bbuok = false;
2597 break;
2598 }
2599 return;
2600 }
2601
2602 memset(&bv, 0, sizeof(bv));
2603 bv.bv_volid = edata->sensor - 1;
2604 KERNEL_LOCK(1, curlwp);
2605 s = splbio();
2606 error = mfi_ioctl_vol(sc, &bv);
2607 splx(s);
2608 KERNEL_UNLOCK_ONE(curlwp);
2609 if (error)
2610 return;
2611
2612 	switch (bv.bv_status) {
2613 case BIOC_SVOFFLINE:
2614 edata->value_cur = ENVSYS_DRIVE_FAIL;
2615 edata->state = ENVSYS_SCRITICAL;
2616 break;
2617
2618 case BIOC_SVDEGRADED:
2619 edata->value_cur = ENVSYS_DRIVE_PFAIL;
2620 edata->state = ENVSYS_SCRITICAL;
2621 break;
2622
2623 case BIOC_SVSCRUB:
2624 case BIOC_SVONLINE:
2625 edata->value_cur = ENVSYS_DRIVE_ONLINE;
2626 edata->state = ENVSYS_SVALID;
2627 break;
2628
2629 case BIOC_SVINVALID:
2630 		/* FALLTHROUGH */
2631 default:
2632 edata->value_cur = 0; /* unknown */
2633 edata->state = ENVSYS_SINVALID;
2634 }
2635 }
2636
2637 #endif /* NBIO > 0 */
2638
2639 static uint32_t
2640 mfi_xscale_fw_state(struct mfi_softc *sc)
2641 {
2642 return mfi_read(sc, MFI_OMSG0);
2643 }
2644
2645 static void
2646 mfi_xscale_intr_dis(struct mfi_softc *sc)
2647 {
2648 mfi_write(sc, MFI_OMSK, 0);
2649 }
2650
2651 static void
2652 mfi_xscale_intr_ena(struct mfi_softc *sc)
2653 {
2654 mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
2655 }
2656
2657 static int
2658 mfi_xscale_intr(struct mfi_softc *sc)
2659 {
2660 uint32_t status;
2661
2662 status = mfi_read(sc, MFI_OSTS);
2663 if (!ISSET(status, MFI_OSTS_INTR_VALID))
2664 return 0;
2665
2666 /* write status back to acknowledge interrupt */
2667 mfi_write(sc, MFI_OSTS, status);
2668 return 1;
2669 }
2670
2671 static void
2672 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2673 {
2674 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2675 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
2676 sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2677 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
2678 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
2679 MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);
2680
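	/*
	 * The frame address is handed to the hardware shifted right by 3,
	 * with the extra frame count packed into the low bits.
	 */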
2681 mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
2682 ccb->ccb_extra_frames);
2683 ccb->ccb_state = MFI_CCB_RUNNING;
2684 }
2685
2686 static uint32_t
2687 mfi_ppc_fw_state(struct mfi_softc *sc)
2688 {
2689 return mfi_read(sc, MFI_OSP);
2690 }
2691
2692 static void
2693 mfi_ppc_intr_dis(struct mfi_softc *sc)
2694 {
2695 /* Taking a wild guess --dyoung */
2696 mfi_write(sc, MFI_OMSK, ~(uint32_t)0x0);
2697 mfi_write(sc, MFI_ODC, 0xffffffff);
2698 }
2699
2700 static void
2701 mfi_ppc_intr_ena(struct mfi_softc *sc)
2702 {
2703 mfi_write(sc, MFI_ODC, 0xffffffff);
2704 mfi_write(sc, MFI_OMSK, ~0x80000004);
2705 }
2706
2707 static int
2708 mfi_ppc_intr(struct mfi_softc *sc)
2709 {
2710 uint32_t status;
2711
2712 status = mfi_read(sc, MFI_OSTS);
2713 if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2714 return 0;
2715
2716 /* write status back to acknowledge interrupt */
2717 mfi_write(sc, MFI_ODC, status);
2718 return 1;
2719 }
2720
2721 static void
2722 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2723 {
2724 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2725 (ccb->ccb_extra_frames << 1));
2726 ccb->ccb_state = MFI_CCB_RUNNING;
2727 }
2728
2729 u_int32_t
2730 mfi_gen2_fw_state(struct mfi_softc *sc)
2731 {
2732 return (mfi_read(sc, MFI_OSP));
2733 }
2734
2735 void
2736 mfi_gen2_intr_dis(struct mfi_softc *sc)
2737 {
2738 mfi_write(sc, MFI_OMSK, 0xffffffff);
2739 mfi_write(sc, MFI_ODC, 0xffffffff);
2740 }
2741
2742 void
2743 mfi_gen2_intr_ena(struct mfi_softc *sc)
2744 {
2745 mfi_write(sc, MFI_ODC, 0xffffffff);
2746 mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
2747 }
2748
2749 int
2750 mfi_gen2_intr(struct mfi_softc *sc)
2751 {
2752 u_int32_t status;
2753
2754 status = mfi_read(sc, MFI_OSTS);
2755 if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
2756 return (0);
2757
2758 /* write status back to acknowledge interrupt */
2759 mfi_write(sc, MFI_ODC, status);
2760
2761 return (1);
2762 }
2763
2764 void
2765 mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2766 {
2767 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2768 (ccb->ccb_extra_frames << 1));
2769 ccb->ccb_state = MFI_CCB_RUNNING;
2770 }
2771
2772 u_int32_t
2773 mfi_skinny_fw_state(struct mfi_softc *sc)
2774 {
2775 return (mfi_read(sc, MFI_OSP));
2776 }
2777
2778 void
2779 mfi_skinny_intr_dis(struct mfi_softc *sc)
2780 {
2781 mfi_write(sc, MFI_OMSK, 0);
2782 }
2783
2784 void
2785 mfi_skinny_intr_ena(struct mfi_softc *sc)
2786 {
2787 mfi_write(sc, MFI_OMSK, ~0x00000001);
2788 }
2789
2790 int
2791 mfi_skinny_intr(struct mfi_softc *sc)
2792 {
2793 u_int32_t status;
2794
2795 status = mfi_read(sc, MFI_OSTS);
2796 if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
2797 return (0);
2798
2799 /* write status back to acknowledge interrupt */
2800 mfi_write(sc, MFI_OSTS, status);
2801
2802 return (1);
2803 }
2804
2805 void
2806 mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2807 {
2808 mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
2809 (ccb->ccb_extra_frames << 1));
2810 mfi_write(sc, MFI_IQPH, 0x00000000);
2811 ccb->ccb_state = MFI_CCB_RUNNING;
2812 }
2813
2814 #define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000008)
2815
2816 void
2817 mfi_tbolt_intr_ena(struct mfi_softc *sc)
2818 {
2819 mfi_write(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
2820 mfi_read(sc, MFI_OMSK);
2821 }
2822
2823 void
2824 mfi_tbolt_intr_dis(struct mfi_softc *sc)
2825 {
2826 mfi_write(sc, MFI_OMSK, 0xFFFFFFFF);
2827 mfi_read(sc, MFI_OMSK);
2828 }
2829
2830 int
2831 mfi_tbolt_intr(struct mfi_softc *sc)
2832 {
2833 	uint32_t status;
2834
2835 status = mfi_read(sc, MFI_OSTS);
2836
2837 if (ISSET(status, 0x1)) {
2838 mfi_write(sc, MFI_OSTS, status);
2839 mfi_read(sc, MFI_OSTS);
2840 if (ISSET(status, MFI_STATE_CHANGE_INTERRUPT))
2841 return 0;
2842 return 1;
2843 }
2844 if (!ISSET(status, MFI_FUSION_ENABLE_INTERRUPT_MASK))
2845 return 0;
2846 mfi_read(sc, MFI_OSTS);
2847 return 1;
2848 }
2849
2850 u_int32_t
2851 mfi_tbolt_fw_state(struct mfi_softc *sc)
2852 {
2853 return mfi_read(sc, MFI_OSP);
2854 }
2855
2856 void
2857 mfi_tbolt_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2858 {
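	/*
	 * Once the IOC has been initialized (sc_MFA_enabled), commands are
	 * posted as 64-bit MPT request descriptors; until then, the legacy
	 * MFI frame address is posted with the MFA descriptor flags.
	 */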
2859 if (sc->sc_MFA_enabled) {
2860 if ((ccb->ccb_flags & MFI_CCB_F_TBOLT) == 0)
2861 mfi_tbolt_build_mpt_ccb(ccb);
2862 mfi_write(sc, MFI_IQPL,
2863 ccb->ccb_tb_request_desc.words & 0xFFFFFFFF);
2864 mfi_write(sc, MFI_IQPH,
2865 ccb->ccb_tb_request_desc.words >> 32);
2866 ccb->ccb_state = MFI_CCB_RUNNING;
2867 return;
2868 }
2869 uint64_t bus_add = ccb->ccb_pframe;
2870 bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
2871 << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2872 mfi_write(sc, MFI_IQPL, bus_add);
2873 mfi_write(sc, MFI_IQPH, bus_add >> 32);
2874 ccb->ccb_state = MFI_CCB_RUNNING;
2875 }
2876
2877 static void
2878 mfi_tbolt_build_mpt_ccb(struct mfi_ccb *ccb)
2879 {
2880 union mfi_mpi2_request_descriptor *req_desc = &ccb->ccb_tb_request_desc;
2881 struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request;
2882 struct mpi25_ieee_sge_chain64 *mpi25_ieee_chain;
2883
2884 io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
2885 io_req->SGLOffset0 =
2886 offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;
2887 io_req->ChainOffset =
2888 offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 16;
2889
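	/*
	 * Point the MPT frame's IEEE chain element at the legacy MFI frame,
	 * so the firmware fetches the wrapped command from host memory.
	 */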
2890 mpi25_ieee_chain =
2891 (struct mpi25_ieee_sge_chain64 *)&io_req->SGL.IeeeChain;
2892 mpi25_ieee_chain->Address = ccb->ccb_pframe;
2893
2894 	/*
2895 	 * In MFI pass-thru, nextChainOffset will always be zero to
2896 	 * indicate the end of the chain.
2897 	 */
2898 	mpi25_ieee_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
2899 | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2900
2901 /* setting the length to the maximum length */
2902 mpi25_ieee_chain->Length = 1024;
2903
2904 req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2905 MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2906 ccb->ccb_flags |= MFI_CCB_F_TBOLT;
2907 bus_dmamap_sync(ccb->ccb_sc->sc_dmat,
2908 MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool),
2909 ccb->ccb_tb_pio_request -
2910 MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool),
2911 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
2912 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2913 }
2914
2915 /*
2916 * Description:
2917 * This function will prepare message pools for the Thunderbolt controller
2918 */
2919 static int
2920 mfi_tbolt_init_desc_pool(struct mfi_softc *sc)
2921 {
2922 uint32_t offset = 0;
2923 uint8_t *addr = MFIMEM_KVA(sc->sc_tbolt_reqmsgpool);
2924
2925 	/* Request Descriptor alignment restrictions */
2926 KASSERT(((uintptr_t)addr & 0xFF) == 0);
2927
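	/*
	 * The pool is laid out as the request message frames, followed by
	 * the reply descriptor ring, followed by the SG chain frames.
	 */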
2928 /* Skip request message pool */
2929 addr = &addr[MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1)];
2930
2931 /* Reply Frame Pool is initialized */
2932 sc->sc_reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
2933 KASSERT(((uintptr_t)addr & 0xFF) == 0);
2934
2935 offset = (uintptr_t)sc->sc_reply_frame_pool
2936 - (uintptr_t)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool);
2937 sc->sc_reply_frame_busaddr =
2938 MFIMEM_DVA(sc->sc_tbolt_reqmsgpool) + offset;
2939
2940 	/* mark all reply descriptors unused (initialize pool to all ones) */
2941 memset((uint8_t *)sc->sc_reply_frame_pool, 0xFF,
2942 (MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size));
2943
2944 /* Skip Reply Frame Pool */
2945 addr += MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;
2946 sc->sc_reply_pool_limit = (void *)addr;
2947
2948 offset = MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;
2949 sc->sc_sg_frame_busaddr = sc->sc_reply_frame_busaddr + offset;
2950
2951 /* initialize the last_reply_idx to 0 */
2952 sc->sc_last_reply_idx = 0;
2953 offset = (sc->sc_sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
2954 sc->sc_max_cmds)) - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool);
2955 KASSERT(offset <= sc->sc_tbolt_reqmsgpool->am_size);
2956 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), 0,
2957 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool)->dm_mapsize,
2958 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2959 return 0;
2960 }
2961
2962 /*
2963  * This routine prepares and issues the INIT2 frame to the firmware.
2964 */
2965
2966 static int
2967 mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
2968 {
2969 struct mpi2_ioc_init_request *mpi2IocInit;
2970 struct mfi_init_frame *mfi_init;
2971 struct mfi_ccb *ccb;
2972 bus_addr_t phyAddress;
2973 mfi_address *mfiAddressTemp;
2974 int s;
2975 char *verbuf;
2976 char wqbuf[10];
2977
2978 /* Check if initialization is already completed */
2979 if (sc->sc_MFA_enabled) {
2980 return 1;
2981 }
2982
2983 mpi2IocInit =
2984 (struct mpi2_ioc_init_request *)MFIMEM_KVA(sc->sc_tbolt_ioc_init);
2985
2986 s = splbio();
2987 if ((ccb = mfi_get_ccb(sc)) == NULL) {
2988 splx(s);
2989 return (EBUSY);
2990 }
2991 
2993 mfi_init = &ccb->ccb_frame->mfr_init;
2994
2995 memset(mpi2IocInit, 0, sizeof(struct mpi2_ioc_init_request));
2996 mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT;
2997 mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
2998
2999 /* set MsgVersion and HeaderVersion host driver was built with */
3000 mpi2IocInit->MsgVersion = MPI2_VERSION;
3001 mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
3002 mpi2IocInit->SystemRequestFrameSize = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE/4;
3003 mpi2IocInit->ReplyDescriptorPostQueueDepth =
3004 (uint16_t)sc->sc_reply_pool_size;
3005 mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */
3006
3007 /* Get physical address of reply frame pool */
3008 phyAddress = sc->sc_reply_frame_busaddr;
3009 mfiAddressTemp =
3010 (mfi_address *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
3011 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
3012 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
3013
3014 /* Get physical address of request message pool */
3015 phyAddress = MFIMEM_DVA(sc->sc_tbolt_reqmsgpool);
3016 mfiAddressTemp = (mfi_address *)&mpi2IocInit->SystemRequestFrameBaseAddress;
3017 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
3018 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
3019
3020 mpi2IocInit->ReplyFreeQueueAddress = 0; /* Not supported by MR. */
3021 mpi2IocInit->TimeStamp = time_uptime;
3022
3023 verbuf = MFIMEM_KVA(sc->sc_tbolt_verbuf);
3024 snprintf(verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
3025 MEGASAS_VERSION);
3026 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0,
3027 MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_PREWRITE);
3028 mfi_init->driver_ver_lo = htole32(MFIMEM_DVA(sc->sc_tbolt_verbuf));
3029 mfi_init->driver_ver_hi =
3030 htole32((uint64_t)MFIMEM_DVA(sc->sc_tbolt_verbuf) >> 32);
3031
3032 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0,
3033 MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize,
3034 BUS_DMASYNC_PREWRITE);
3035 /* Get the physical address of the mpi2 ioc init command */
3036 phyAddress = MFIMEM_DVA(sc->sc_tbolt_ioc_init);
3037 mfi_init->mif_qinfo_new_addr_lo = htole32(phyAddress);
3038 mfi_init->mif_qinfo_new_addr_hi = htole32((uint64_t)phyAddress >> 32);
3039
3040 mfi_init->mif_header.mfh_cmd = MFI_CMD_INIT;
3041 mfi_init->mif_header.mfh_data_len = sizeof(struct mpi2_ioc_init_request);
3042 if (mfi_poll(ccb) != 0) {
3043 aprint_error_dev(sc->sc_dev, "failed to send IOC init2 "
3044 "command at 0x%" PRIx64 "\n",
3045 (uint64_t)ccb->ccb_pframe);
3046 splx(s);
3047 return 1;
3048 }
3049 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0,
3050 MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3051 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0,
3052 MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize,
3053 BUS_DMASYNC_POSTWRITE);
3054 mfi_put_ccb(ccb);
3055 splx(s);
3056
3057 if (mfi_init->mif_header.mfh_cmd_status == 0) {
3058 sc->sc_MFA_enabled = 1;
3059 	} else {
3061 aprint_error_dev(sc->sc_dev, "Init command Failed %x\n",
3062 mfi_init->mif_header.mfh_cmd_status);
3063 return 1;
3064 }
3065
3066 snprintf(wqbuf, sizeof(wqbuf), "%swq", DEVNAME(sc));
3067 if (workqueue_create(&sc->sc_ldsync_wq, wqbuf, mfi_tbolt_sync_map_info,
3068 sc, PRIBIO, IPL_BIO, 0) != 0) {
3069 aprint_error_dev(sc->sc_dev, "workqueue_create failed\n");
3070 return 1;
3071 }
3072 workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL);
3073 return 0;
3074 }
3075
3076 int
3077 mfi_tbolt_intrh(void *arg)
3078 {
3079 struct mfi_softc *sc = arg;
3080 struct mfi_ccb *ccb;
3081 union mfi_mpi2_reply_descriptor *desc;
3082 int smid, num_completed;
3083
3084 if (!mfi_tbolt_intr(sc))
3085 return 0;
3086
3087 DNPRINTF(MFI_D_INTR, "%s: mfi_tbolt_intrh %#lx %#lx\n", DEVNAME(sc),
3088 (u_long)sc, (u_long)sc->sc_last_reply_idx);
3089
3090 KASSERT(sc->sc_last_reply_idx < sc->sc_reply_pool_size);
3091
3092 desc = (union mfi_mpi2_reply_descriptor *)
3093 ((uintptr_t)sc->sc_reply_frame_pool +
3094 sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE);
3095
3096 bus_dmamap_sync(sc->sc_dmat,
3097 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3098 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1),
3099 MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size,
3100 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3101 num_completed = 0;
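	/*
	 * Walk the reply ring until we hit a descriptor the firmware has
	 * not written yet (flagged UNUSED); each entry's SMID maps back to
	 * the ccb that produced it.
	 */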
3102 while ((desc->header.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK) !=
3103 MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
3104 smid = desc->header.SMID;
3105 KASSERT(smid > 0 && smid <= sc->sc_max_cmds);
3106 ccb = &sc->sc_ccb[smid - 1];
3107 DNPRINTF(MFI_D_INTR,
3108 "%s: mfi_tbolt_intr SMID %#x reply_idx %#x "
3109 "desc %#" PRIx64 " ccb %p\n", DEVNAME(sc), smid,
3110 sc->sc_last_reply_idx, desc->words, ccb);
3111 KASSERT(ccb->ccb_state == MFI_CCB_RUNNING);
3112 if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO &&
3113 ccb->ccb_tb_io_request->ChainOffset != 0) {
3114 bus_dmamap_sync(sc->sc_dmat,
3115 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3116 ccb->ccb_tb_psg_frame -
3117 MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
3118 MEGASAS_MAX_SZ_CHAIN_FRAME, BUS_DMASYNC_POSTREAD);
3119 }
3120 if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO) {
3121 bus_dmamap_sync(sc->sc_dmat,
3122 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3123 ccb->ccb_tb_pio_request -
3124 MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
3125 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
3126 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3127 }
3128 if (ccb->ccb_done)
3129 ccb->ccb_done(ccb);
3130 else
3131 ccb->ccb_state = MFI_CCB_DONE;
3132 sc->sc_last_reply_idx++;
3133 if (sc->sc_last_reply_idx >= sc->sc_reply_pool_size) {
3134 sc->sc_last_reply_idx = 0;
3135 }
3136 desc->words = ~0x0;
3137 /* Get the next reply descriptor */
3138 desc = (union mfi_mpi2_reply_descriptor *)
3139 ((uintptr_t)sc->sc_reply_frame_pool +
3140 sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE);
3141 num_completed++;
3142 }
3143 if (num_completed == 0)
3144 return 0;
3145
3146 bus_dmamap_sync(sc->sc_dmat,
3147 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3148 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1),
3149 MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size,
3150 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3151 mfi_write(sc, MFI_RPI, sc->sc_last_reply_idx);
3152 return 1;
3153 }
3154 
3156 int
3157 mfi_tbolt_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
3158 uint64_t blockno, uint32_t blockcnt)
3159 {
3160 struct scsipi_periph *periph = xs->xs_periph;
3161 struct mfi_mpi2_request_raid_scsi_io *io_req;
3162 int sge_count;
3163
3164 DNPRINTF(MFI_D_CMD, "%s: mfi_tbolt_scsi_ld_io: %d\n",
3165 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
3166 periph->periph_target);
3167
3168 if (!xs->data)
3169 return 1;
3170
3171 ccb->ccb_done = mfi_tbolt_scsi_ld_done;
3172 ccb->ccb_xs = xs;
3173 ccb->ccb_data = xs->data;
3174 ccb->ccb_len = xs->datalen;
3175
3176 io_req = ccb->ccb_tb_io_request;
3177
3178 	/* Just the CDB length, rest of the Flags are zero */
3179 io_req->IoFlags = xs->cmdlen;
3180 memset(io_req->CDB.CDB32, 0, 32);
3181 memcpy(io_req->CDB.CDB32, &xs->cmdstore, xs->cmdlen);
3182
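	/* Logical volumes are addressed by target id, also used as DevHandle. */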
3183 io_req->RaidContext.TargetID = periph->periph_target;
3184 io_req->RaidContext.Status = 0;
3185 io_req->RaidContext.exStatus = 0;
3186 io_req->RaidContext.timeoutValue = MFI_FUSION_FP_DEFAULT_TIMEOUT;
3187 io_req->Function = MPI2_FUNCTION_LD_IO_REQUEST;
3188 io_req->DevHandle = periph->periph_target;
3189
3190 ccb->ccb_tb_request_desc.header.RequestFlags =
3191 (MFI_REQ_DESCRIPT_FLAGS_LD_IO << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3192 io_req->DataLength = blockcnt * MFI_SECTOR_LEN;
3193
3194 if (xs->xs_control & XS_CTL_DATA_IN) {
3195 io_req->Control = MPI2_SCSIIO_CONTROL_READ;
3196 ccb->ccb_direction = MFI_DATA_IN;
3197 } else {
3198 io_req->Control = MPI2_SCSIIO_CONTROL_WRITE;
3199 ccb->ccb_direction = MFI_DATA_OUT;
3200 }
3201
3202 sge_count = mfi_tbolt_create_sgl(ccb,
3203 (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK
3204 );
3205 if (sge_count < 0)
3206 return 1;
3207 KASSERT(sge_count <= ccb->ccb_sc->sc_max_sgl);
3208 io_req->RaidContext.numSGE = sge_count;
3209 io_req->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
3210 io_req->SGLOffset0 =
3211 offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;
3212
3213 io_req->SenseBufferLowAddress = htole32(ccb->ccb_psense);
3214 io_req->SenseBufferLength = MFI_SENSE_SIZE;
3215
3216 ccb->ccb_flags |= MFI_CCB_F_TBOLT | MFI_CCB_F_TBOLT_IO;
3217 bus_dmamap_sync(ccb->ccb_sc->sc_dmat,
3218 MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool),
3219 ccb->ccb_tb_pio_request -
3220 MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool),
3221 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
3222 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3223
3224 return 0;
3225 }
3226 
3228 static void
3229 mfi_tbolt_scsi_ld_done(struct mfi_ccb *ccb)
3230 {
3231 struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request;
3232 mfi_scsi_xs_done(ccb, io_req->RaidContext.Status,
3233 io_req->RaidContext.exStatus);
3234 }
3235
3236 static int
3237 mfi_tbolt_create_sgl(struct mfi_ccb *ccb, int flags)
3238 {
3239 struct mfi_softc *sc = ccb->ccb_sc;
3240 bus_dma_segment_t *sgd;
3241 int error, i, sge_idx, sge_count;
3242 struct mfi_mpi2_request_raid_scsi_io *io_req;
3243 struct mpi25_ieee_sge_chain64 *sgl_ptr;
3244
3245 DNPRINTF(MFI_D_DMA, "%s: mfi_tbolt_create_sgl %#lx\n", DEVNAME(sc),
3246 (u_long)ccb->ccb_data);
3247
3248 if (!ccb->ccb_data)
3249 return -1;
3250
3251 KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p());
3252 error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap,
3253 ccb->ccb_data, ccb->ccb_len, NULL, flags);
3254 if (error) {
3255 if (error == EFBIG)
3256 aprint_error_dev(sc->sc_dev, "more than %d dma segs\n",
3257 sc->sc_max_sgl);
3258 else
3259 aprint_error_dev(sc->sc_dev,
3260 "error %d loading dma map\n", error);
3261 return -1;
3262 }
3263
3264 io_req = ccb->ccb_tb_io_request;
3265 sgl_ptr = &io_req->SGL.IeeeChain.Chain64;
3266 sge_count = ccb->ccb_dmamap->dm_nsegs;
3267 sgd = ccb->ccb_dmamap->dm_segs;
3268 KASSERT(sge_count <= sc->sc_max_sgl);
3269 KASSERT(sge_count <=
3270 (MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1 +
3271 MEGASAS_THUNDERBOLT_MAX_SGE_IN_CHAINMSG));
3272
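	/*
	 * SGEs that do not fit in the main message frame spill into a
	 * separate chain frame; the last main-frame slot is reserved for
	 * the chain element that points at it.
	 */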
3273 if (sge_count > MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG) {
3274 /* One element to store the chain info */
3275 sge_idx = MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1;
3276 DNPRINTF(MFI_D_DMA,
3277 "mfi sge_idx %d sge_count %d io_req paddr 0x%" PRIx64 "\n",
3278 sge_idx, sge_count, ccb->ccb_tb_pio_request);
3279 } else {
3280 sge_idx = sge_count;
3281 }
3282
3283 for (i = 0; i < sge_idx; i++) {
3284 sgl_ptr->Address = htole64(sgd[i].ds_addr);
3285 sgl_ptr->Length = htole32(sgd[i].ds_len);
3286 sgl_ptr->Flags = 0;
3287 if (sge_idx < sge_count) {
3288 DNPRINTF(MFI_D_DMA,
3289 "sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32
3290 " flags 0x%x\n", sgl_ptr, i,
3291 sgl_ptr->Address, sgl_ptr->Length,
3292 sgl_ptr->Flags);
3293 }
3294 sgl_ptr++;
3295 }
3296 io_req->ChainOffset = 0;
3297 if (sge_idx < sge_count) {
3298 struct mpi25_ieee_sge_chain64 *sg_chain;
3299 io_req->ChainOffset = MEGASAS_THUNDERBOLT_CHAIN_OFF_MAINMSG;
3300 sg_chain = sgl_ptr;
3301 /* Prepare chain element */
3302 sg_chain->NextChainOffset = 0;
3303 sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3304 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
3305 sg_chain->Length = (sizeof(mpi2_sge_io_union) *
3306 (sge_count - sge_idx));
3307 sg_chain->Address = ccb->ccb_tb_psg_frame;
3308 DNPRINTF(MFI_D_DMA,
3309 "sgl %p chain 0x%" PRIx64 " len 0x%" PRIx32
3310 " flags 0x%x\n", sg_chain, sg_chain->Address,
3311 sg_chain->Length, sg_chain->Flags);
3312 sgl_ptr = &ccb->ccb_tb_sg_frame->IeeeChain.Chain64;
3313 for (; i < sge_count; i++) {
3314 sgl_ptr->Address = htole64(sgd[i].ds_addr);
3315 sgl_ptr->Length = htole32(sgd[i].ds_len);
3316 sgl_ptr->Flags = 0;
3317 DNPRINTF(MFI_D_DMA,
3318 "sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32
3319 " flags 0x%x\n", sgl_ptr, i, sgl_ptr->Address,
3320 sgl_ptr->Length, sgl_ptr->Flags);
3321 sgl_ptr++;
3322 }
3323 bus_dmamap_sync(sc->sc_dmat,
3324 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3325 ccb->ccb_tb_psg_frame - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
3326 MEGASAS_MAX_SZ_CHAIN_FRAME, BUS_DMASYNC_PREREAD);
3327 }
3328
3329 if (ccb->ccb_direction == MFI_DATA_IN) {
3330 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
3331 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3332 } else {
3333 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
3334 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
3335 }
3336 return sge_count;
3337 }
3338
3339 /*
3340 * The ThunderBolt HW has an option for the driver to directly
3341 * access the underlying disks and operate on the RAID. To
3342 * do this there needs to be a capability to keep the RAID controller
3343 * and driver in sync. The FreeBSD driver does not take advantage
3344 * of this feature since it adds a lot of complexity and slows down
3345 * performance. Performance is gained by using the controller's
3346 * cache etc.
3347 *
3348 * Even though this driver doesn't access the disks directly, an
3349  * AEN-like command is used to inform the RAID firmware to "sync"
3350 * with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command. This
3351 * command in write mode will return when the RAID firmware has
3352  * detected a change to the RAID state. An example of this type
3353  * of change is removing a disk. Once the command returns,
3354  * the driver needs to acknowledge this and "sync" all LD's again.
3355  * This repeats until we shut down. Then we need to cancel this
3356 * pending command.
3357 *
3358 * If this is not done right the RAID firmware will not remove a
3359  * pulled drive and the RAID won't go degraded, etc., effectively
3360  * stopping any RAID management functions.
3361 *
3362  * Doing another LD sync requires the use of an event since the
3363 * driver needs to do a mfi_wait_command and can't do that in an
3364 * interrupt thread.
3365 *
3366  * The driver could get the RAID state via MFI_DCMD_LD_MAP_GET_INFO,
3367  * but that requires a bunch of structures and it is simpler to just do
3368 * the MFI_DCMD_LD_GET_LIST versus walking the RAID map.
3369 */
3370
3371 void
3372 mfi_tbolt_sync_map_info(struct work *w, void *v)
3373 {
3374 struct mfi_softc *sc = v;
3375 int i;
3376 struct mfi_ccb *ccb = NULL;
3377 uint8_t mbox[MFI_MBOX_SIZE];
3378 struct mfi_ld *ld_sync = NULL;
3379 size_t ld_size;
3380 int s;
3381
3382 DNPRINTF(MFI_D_SYNC, "%s: mfi_tbolt_sync_map_info\n", DEVNAME(sc));
3383 again:
3384 s = splbio();
3385 if (sc->sc_ldsync_ccb != NULL) {
3386 splx(s);
3387 return;
3388 }
3389
3390 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
3391 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false)) {
3392 aprint_error_dev(sc->sc_dev, "MR_DCMD_LD_GET_LIST failed\n");
3393 goto err;
3394 }
3395
3396 ld_size = sizeof(*ld_sync) * sc->sc_ld_list.mll_no_ld;
3397
3398 ld_sync = (struct mfi_ld *) malloc(ld_size, M_DEVBUF,
3399 M_WAITOK | M_ZERO);
3400 if (ld_sync == NULL) {
3401 aprint_error_dev(sc->sc_dev, "Failed to allocate sync\n");
3402 goto err;
3403 }
3404 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3405 ld_sync[i] = sc->sc_ld_list.mll_list[i].mll_ld;
3406 }
3407
3408 if ((ccb = mfi_get_ccb(sc)) == NULL) {
3409 aprint_error_dev(sc->sc_dev, "Failed to get sync command\n");
3410 free(ld_sync, M_DEVBUF);
3411 goto err;
3412 }
3413 sc->sc_ldsync_ccb = ccb;
3414
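	/*
	 * The pend flag makes the firmware hold this DCMD until the LD map
	 * changes, effectively turning it into an event notification (see
	 * the comment above mfi_tbolt_sync_map_info()).
	 */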
3415 memset(mbox, 0, MFI_MBOX_SIZE);
3416 mbox[0] = sc->sc_ld_list.mll_no_ld;
3417 mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;
3418 if (mfi_mgmt(ccb, NULL, MR_DCMD_LD_MAP_GET_INFO, MFI_DATA_OUT,
3419 ld_size, ld_sync, mbox)) {
3420 aprint_error_dev(sc->sc_dev, "Failed to create sync command\n");
3421 goto err;
3422 }
3423 /*
3424 * we won't sleep on this command, so we have to override
3425 * the callback set up by mfi_mgmt()
3426 */
3427 ccb->ccb_done = mfi_sync_map_complete;
3428
3429 mfi_post(sc, ccb);
3430 splx(s);
3431 return;
3432
3433 err:
3434 if (ld_sync)
3435 free(ld_sync, M_DEVBUF);
3436 if (ccb)
3437 mfi_put_ccb(ccb);
3438 sc->sc_ldsync_ccb = NULL;
3439 splx(s);
3440 kpause("ldsyncp", 0, hz, NULL);
3441 goto again;
3442 }
3443
3444 static void
3445 mfi_sync_map_complete(struct mfi_ccb *ccb)
3446 {
3447 struct mfi_softc *sc = ccb->ccb_sc;
3448 bool aborted = !sc->sc_running;
3449
3450 DNPRINTF(MFI_D_SYNC, "%s: mfi_sync_map_complete\n",
3451 DEVNAME(ccb->ccb_sc));
3452 KASSERT(sc->sc_ldsync_ccb == ccb);
3453 mfi_mgmt_done(ccb);
3454 free(ccb->ccb_data, M_DEVBUF);
3455 if (ccb->ccb_flags & MFI_CCB_F_ERR) {
3456 aprint_error_dev(sc->sc_dev, "sync command failed\n");
3457 aborted = true;
3458 }
3459 mfi_put_ccb(ccb);
3460 sc->sc_ldsync_ccb = NULL;
3461
3462 /* set it up again so the driver can catch more events */
3463 if (!aborted) {
3464 workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL);
3465 }
3466 }
3467