/* $NetBSD: mfi.c,v 1.52 2014/03/16 05:20:27 dholland Exp $ */
/* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */

/*
 * Copyright (c) 2012 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Marco Peereboom <marco (at) peereboom.us>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Copyright 1994-2009 The FreeBSD Project.
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.52 2014/03/16 05:20:27 dholland Exp $");

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/conf.h>
#include <sys/kauth.h>

#include <uvm/uvm_param.h>

#include <sys/bus.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/mfireg.h>
#include <dev/ic/mfivar.h>
#include <dev/ic/mfiio.h>

#if NBIO > 0
#include <dev/biovar.h>
#endif /* NBIO > 0 */

#ifdef MFI_DEBUG
uint32_t	mfi_debug = 0
/*		| MFI_D_CMD */
/*		| MFI_D_INTR */
/*		| MFI_D_MISC */
/*		| MFI_D_DMA */
/*		| MFI_D_IOCTL */
/*		| MFI_D_RW */
/*		| MFI_D_MEM */
/*		| MFI_D_CCB */
/*		| MFI_D_SYNC */
		;
#endif

static void	mfi_scsipi_request(struct scsipi_channel *,
			scsipi_adapter_req_t, void *);
static void	mfiminphys(struct buf *bp);

static struct mfi_ccb	*mfi_get_ccb(struct mfi_softc *);
static void	mfi_put_ccb(struct mfi_ccb *);
static int	mfi_init_ccb(struct mfi_softc *);

static struct mfi_mem	*mfi_allocmem(struct mfi_softc *, size_t);
static void	mfi_freemem(struct mfi_softc *, struct mfi_mem **);

static int	mfi_transition_firmware(struct mfi_softc *);
static int	mfi_initialize_firmware(struct mfi_softc *);
static int	mfi_get_info(struct mfi_softc *);
static int	mfi_get_bbu(struct mfi_softc *,
		    struct mfi_bbu_status *);
/* return codes for mfi_get_bbu */
#define MFI_BBU_GOOD	0
#define MFI_BBU_BAD	1
#define MFI_BBU_UNKNOWN	2
static uint32_t	mfi_read(struct mfi_softc *, bus_size_t);
static void	mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
static int	mfi_poll(struct mfi_ccb *);
static int	mfi_create_sgl(struct mfi_ccb *, int);

/* commands */
static int	mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
static int	mfi_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *,
			uint64_t, uint32_t);
static void	mfi_scsi_ld_done(struct mfi_ccb *);
static void	mfi_scsi_xs_done(struct mfi_ccb *, int, int);
static int	mfi_mgmt_internal(struct mfi_softc *, uint32_t,
		    uint32_t, uint32_t, void *, uint8_t *, bool);
static int	mfi_mgmt(struct mfi_ccb *, struct scsipi_xfer *,
		    uint32_t, uint32_t, uint32_t, void *, uint8_t *);
static void	mfi_mgmt_done(struct mfi_ccb *);

#if NBIO > 0
static int	mfi_ioctl(device_t, u_long, void *);
static int	mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
static int	mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
static int	mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
static int	mfi_ioctl_alarm(struct mfi_softc *,
		    struct bioc_alarm *);
static int	mfi_ioctl_blink(struct mfi_softc *sc,
		    struct bioc_blink *);
static int	mfi_ioctl_setstate(struct mfi_softc *,
		    struct bioc_setstate *);
static int	mfi_bio_hs(struct mfi_softc *, int, int, void *);
static int	mfi_create_sensors(struct mfi_softc *);
static int	mfi_destroy_sensors(struct mfi_softc *);
static void	mfi_sensor_refresh(struct sysmon_envsys *,
		    envsys_data_t *);
#endif /* NBIO > 0 */
static bool	mfi_shutdown(device_t, int);
static bool	mfi_suspend(device_t, const pmf_qual_t *);
static bool	mfi_resume(device_t, const pmf_qual_t *);

static dev_type_open(mfifopen);
static dev_type_close(mfifclose);
static dev_type_ioctl(mfifioctl);
const struct cdevsw mfi_cdevsw = {
	.d_open = mfifopen,
	.d_close = mfifclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = mfifioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_flag = D_OTHER
};

extern struct cfdriver mfi_cd;

static uint32_t	mfi_xscale_fw_state(struct mfi_softc *sc);
static void	mfi_xscale_intr_ena(struct mfi_softc *sc);
static void	mfi_xscale_intr_dis(struct mfi_softc *sc);
static int	mfi_xscale_intr(struct mfi_softc *sc);
static void	mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);

static const struct mfi_iop_ops mfi_iop_xscale = {
	mfi_xscale_fw_state,
	mfi_xscale_intr_dis,
	mfi_xscale_intr_ena,
	mfi_xscale_intr,
	mfi_xscale_post,
	mfi_scsi_ld_io,
};

static uint32_t	mfi_ppc_fw_state(struct mfi_softc *sc);
static void	mfi_ppc_intr_ena(struct mfi_softc *sc);
static void	mfi_ppc_intr_dis(struct mfi_softc *sc);
static int	mfi_ppc_intr(struct mfi_softc *sc);
static void	mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);

static const struct mfi_iop_ops mfi_iop_ppc = {
	mfi_ppc_fw_state,
	mfi_ppc_intr_dis,
	mfi_ppc_intr_ena,
	mfi_ppc_intr,
	mfi_ppc_post,
	mfi_scsi_ld_io,
};

uint32_t	mfi_gen2_fw_state(struct mfi_softc *sc);
void		mfi_gen2_intr_ena(struct mfi_softc *sc);
void		mfi_gen2_intr_dis(struct mfi_softc *sc);
int		mfi_gen2_intr(struct mfi_softc *sc);
void		mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb);

static const struct mfi_iop_ops mfi_iop_gen2 = {
	mfi_gen2_fw_state,
	mfi_gen2_intr_dis,
	mfi_gen2_intr_ena,
	mfi_gen2_intr,
	mfi_gen2_post,
	mfi_scsi_ld_io,
};

u_int32_t	mfi_skinny_fw_state(struct mfi_softc *);
void		mfi_skinny_intr_dis(struct mfi_softc *);
void		mfi_skinny_intr_ena(struct mfi_softc *);
int		mfi_skinny_intr(struct mfi_softc *);
void		mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);

static const struct mfi_iop_ops mfi_iop_skinny = {
	mfi_skinny_fw_state,
	mfi_skinny_intr_dis,
	mfi_skinny_intr_ena,
	mfi_skinny_intr,
	mfi_skinny_post,
	mfi_scsi_ld_io,
};

static int	mfi_tbolt_init_desc_pool(struct mfi_softc *);
static int	mfi_tbolt_init_MFI_queue(struct mfi_softc *);
static void	mfi_tbolt_build_mpt_ccb(struct mfi_ccb *);
int		mfi_tbolt_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *,
		    uint64_t, uint32_t);
static void	mfi_tbolt_scsi_ld_done(struct mfi_ccb *);
static int	mfi_tbolt_create_sgl(struct mfi_ccb *, int);
void		mfi_tbolt_sync_map_info(struct work *, void *);
static void	mfi_sync_map_complete(struct mfi_ccb *);

u_int32_t	mfi_tbolt_fw_state(struct mfi_softc *);
void		mfi_tbolt_intr_dis(struct mfi_softc *);
void		mfi_tbolt_intr_ena(struct mfi_softc *);
int		mfi_tbolt_intr(struct mfi_softc *sc);
void		mfi_tbolt_post(struct mfi_softc *, struct mfi_ccb *);

static const struct mfi_iop_ops mfi_iop_tbolt = {
	mfi_tbolt_fw_state,
	mfi_tbolt_intr_dis,
	mfi_tbolt_intr_ena,
	mfi_tbolt_intr,
	mfi_tbolt_post,
	mfi_tbolt_scsi_ld_io,
};

#define mfi_fw_state(_s)	((_s)->sc_iop->mio_fw_state(_s))
#define mfi_intr_enable(_s)	((_s)->sc_iop->mio_intr_ena(_s))
#define mfi_intr_disable(_s)	((_s)->sc_iop->mio_intr_dis(_s))
#define mfi_my_intr(_s)		((_s)->sc_iop->mio_intr(_s))
#define mfi_post(_s, _c)	((_s)->sc_iop->mio_post((_s), (_c)))
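
/*
 * ccb pool (a descriptive sketch of the protocol used below): free ccbs
 * live on sc_ccb_freeq, protected by splbio().  mfi_get_ccb() pops one
 * and marks it READY; mfi_put_ccb() scrubs the frame header and all
 * per-command state before returning it to the queue, so a recycled ccb
 * never carries stale flags, callbacks or SGL pointers.
 */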
static struct mfi_ccb *
mfi_get_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb	*ccb;
	int		s;

	s = splbio();
	ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
	if (ccb) {
		TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
		ccb->ccb_state = MFI_CCB_READY;
	}
	splx(s);

	DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
	if (__predict_false(ccb == NULL && sc->sc_running))
		aprint_error_dev(sc->sc_dev, "out of ccb\n");

	return ccb;
}

static void
mfi_put_ccb(struct mfi_ccb *ccb)
{
	struct mfi_softc	*sc = ccb->ccb_sc;
	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
	int			s;

	DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);

	hdr->mfh_cmd_status = 0x0;
	hdr->mfh_flags = 0x0;
	ccb->ccb_state = MFI_CCB_FREE;
	ccb->ccb_xs = NULL;
	ccb->ccb_flags = 0;
	ccb->ccb_done = NULL;
	ccb->ccb_direction = 0;
	ccb->ccb_frame_size = 0;
	ccb->ccb_extra_frames = 0;
	ccb->ccb_sgl = NULL;
	ccb->ccb_data = NULL;
	ccb->ccb_len = 0;
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		/* erase tb_request_desc but preserve SMID */
		int index = ccb->ccb_tb_request_desc.header.SMID;
		ccb->ccb_tb_request_desc.words = 0;
		ccb->ccb_tb_request_desc.header.SMID = index;
	}
	s = splbio();
	TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
	splx(s);
}

static int
mfi_destroy_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb	*ccb;
	uint32_t	i;

	DNPRINTF(MFI_D_CCB, "%s: mfi_destroy_ccb\n", DEVNAME(sc));

	for (i = 0; (ccb = mfi_get_ccb(sc)) != NULL; i++) {
		/* destroy the dma map used for transfers */
		bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (i < sc->sc_max_cmds)
		return EBUSY;

	free(sc->sc_ccb, M_DEVBUF);

	return 0;
}

static int
mfi_init_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb	*ccb;
	uint32_t	i;
	int		error;
	bus_addr_t	io_req_base_phys;
	uint8_t		*io_req_base;
	int		offset;

	DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));

	sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
	    M_DEVBUF, M_WAITOK|M_ZERO);
	io_req_base = (uint8_t *)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool);
	io_req_base_phys = MFIMEM_DVA(sc->sc_tbolt_reqmsgpool);
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		/*
		 * The first 256 bytes (SMID 0) are not used.
		 * Don't add them to the cmd list.
		 */
		io_req_base += MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
		io_req_base_phys += MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
	}

	for (i = 0; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccb[i];

		ccb->ccb_sc = sc;

		/* select i'th frame */
		ccb->ccb_frame = (union mfi_frame *)
		    ((char *)MFIMEM_KVA(sc->sc_frames) +
		    sc->sc_frames_size * i);
		ccb->ccb_pframe =
		    MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
		ccb->ccb_frame->mfr_header.mfh_context = i;

		/* select i'th sense */
		ccb->ccb_sense = (struct mfi_sense *)
		    ((char *)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
		ccb->ccb_psense =
		    (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);

		/* create a dma map for transfer */
		error = bus_dmamap_create(sc->sc_datadmat,
		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "cannot create ccb dmamap (%d)\n", error);
			goto destroy;
		}
		if (sc->sc_ioptype == MFI_IOP_TBOLT) {
			offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
			ccb->ccb_tb_io_request =
			    (struct mfi_mpi2_request_raid_scsi_io *)
			    (io_req_base + offset);
			ccb->ccb_tb_pio_request =
			    io_req_base_phys + offset;
			offset = MEGASAS_MAX_SZ_CHAIN_FRAME * i;
			ccb->ccb_tb_sg_frame =
			    (mpi2_sge_io_union *)(sc->sc_reply_pool_limit +
			    offset);
			ccb->ccb_tb_psg_frame = sc->sc_sg_frame_busaddr +
			    offset;
			/* SMID 0 is reserved. Set SMID/index from 1 */
			ccb->ccb_tb_request_desc.header.SMID = i + 1;
		}

		DNPRINTF(MFI_D_CCB,
		    "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
		    ccb->ccb_frame->mfr_header.mfh_context, ccb,
		    (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
		    (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
		    (u_long)ccb->ccb_dmamap);

		/* add ccb to queue */
		mfi_put_ccb(ccb);
	}

	return 0;
destroy:
	/* free dma maps and ccb memory */
	while (i) {
		i--;
		ccb = &sc->sc_ccb[i];
		bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	free(sc->sc_ccb, M_DEVBUF);

	return 1;
}

static uint32_t
mfi_read(struct mfi_softc *sc, bus_size_t r)
{
	uint32_t rv;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x%08x ", DEVNAME(sc), (u_long)r, rv);
	return rv;
}

static void
mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
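
/*
 * mfi_allocmem() bundles the usual four-step bus_dma dance for a
 * single-segment, page-aligned DMA area: bus_dmamap_create, then
 * bus_dmamem_alloc, bus_dmamem_map and bus_dmamap_load.  On any failure
 * it unwinds the steps completed so far and returns NULL; mfi_freemem()
 * below is the exact inverse sequence.
 */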
static struct mfi_mem *
mfi_allocmem(struct mfi_softc *sc, size_t size)
{
	struct mfi_mem	*mm;
	int		nsegs;

	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
	    (long)size);

	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mm == NULL)
		return NULL;

	mm->am_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
		goto amfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n",
	    mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);

	memset(mm->am_kva, 0, size);
	return mm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
amfree:
	free(mm, M_DEVBUF);

	return NULL;
}

static void
mfi_freemem(struct mfi_softc *sc, struct mfi_mem **mmp)
{
	struct mfi_mem *mm = *mmp;

	if (mm == NULL)
		return;

	*mmp = NULL;

	DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);

	bus_dmamap_unload(sc->sc_dmat, mm->am_map);
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
	free(mm, M_DEVBUF);
}
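
/*
 * Drive the adapter towards MFI_STATE_READY.  For each intermediate
 * state the firmware may need a nudge through the inbound doorbell
 * (skinny/tbolt parts use a different doorbell register), after which
 * we poll the state register every 100ms for up to max_wait seconds.
 * A FAULT state or an unrecognized state is treated as fatal.
 */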
static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t	fw_state, cur_state;
	int		max_wait, i;

	fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;

	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
	    fw_state);

	while (fw_state != MFI_STATE_READY) {
		DNPRINTF(MFI_D_MISC,
		    "%s: waiting for firmware to become ready\n",
		    DEVNAME(sc));
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			aprint_error_dev(sc->sc_dev, "firmware fault\n");
			return 1;
		case MFI_STATE_WAIT_HANDSHAKE:
			if (sc->sc_ioptype == MFI_IOP_SKINNY ||
			    sc->sc_ioptype == MFI_IOP_TBOLT)
				mfi_write(sc, MFI_SKINNY_IDB,
				    MFI_INIT_CLEAR_HANDSHAKE);
			else
				mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			if (sc->sc_ioptype == MFI_IOP_SKINNY ||
			    sc->sc_ioptype == MFI_IOP_TBOLT)
				mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			else
				mfi_write(sc, MFI_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			if (sc->sc_ioptype == MFI_IOP_SKINNY ||
			    sc->sc_ioptype == MFI_IOP_TBOLT) {
				mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_HOTPLUG);
			} else {
				mfi_write(sc, MFI_IDB, MFI_INIT_HOTPLUG);
			}
			max_wait = 180;
			break;
		default:
			aprint_error_dev(sc->sc_dev,
			    "unknown firmware state %d\n", fw_state);
			return 1;
		}
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			aprint_error_dev(sc->sc_dev,
			    "firmware stuck in state %#x\n", fw_state);
			return 1;
		}
	}

	return 0;
}
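
/*
 * Hand the firmware the addresses of the reply queue and of the
 * producer/consumer indices via an INIT frame.  The queue info
 * structure lives in the frame slot immediately following the INIT
 * frame itself, and the reply queue holds sc_max_cmds + 1 entries
 * (one more than can ever be outstanding, so it cannot fill up).
 */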
static int
mfi_initialize_firmware(struct mfi_softc *sc)
{
	struct mfi_ccb		*ccb;
	struct mfi_init_frame	*init;
	struct mfi_init_qinfo	*qinfo;

	DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));

	if ((ccb = mfi_get_ccb(sc)) == NULL)
		return 1;

	init = &ccb->ccb_frame->mfr_init;
	qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);

	memset(qinfo, 0, sizeof *qinfo);
	qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
	qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_reply_q));
	qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_producer));
	qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_consumer));

	init->mif_header.mfh_cmd = MFI_CMD_INIT;
	init->mif_header.mfh_data_len = sizeof *qinfo;
	init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);

	DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
	    DEVNAME(sc),
	    qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
	    qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);

	if (mfi_poll(ccb)) {
		aprint_error_dev(sc->sc_dev,
		    "mfi_initialize_firmware failed\n");
		return 1;
	}

	mfi_put_ccb(ccb);

	return 0;
}

static int
mfi_get_info(struct mfi_softc *sc)
{
#ifdef MFI_DEBUG
	int i;
#endif
	DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));

	if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
	    sizeof(sc->sc_info), &sc->sc_info, NULL, cold ? true : false))
		return 1;

#ifdef MFI_DEBUG
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		printf("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		printf("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	printf("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	printf("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	printf("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		printf("%.0" PRIx64 " ", sc->sc_info.mci_host.mih_port_addr[i]);
	printf("\n");

	printf("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++) {
		printf("%.0" PRIx64 " ",
		    sc->sc_info.mci_device.mid_port_addr[i]);
	}
	printf("\n");
#endif /* MFI_DEBUG */

	return 0;
}

static int
mfi_get_bbu(struct mfi_softc *sc, struct mfi_bbu_status *stat)
{
	DNPRINTF(MFI_D_MISC, "%s: mfi_get_bbu\n", DEVNAME(sc));

	if (mfi_mgmt_internal(sc, MR_DCMD_BBU_GET_STATUS, MFI_DATA_IN,
	    sizeof(*stat), stat, NULL, cold ? true : false))
		return MFI_BBU_UNKNOWN;
#ifdef MFI_DEBUG
	printf("bbu type %d, voltage %d, current %d, temperature %d, "
	    "status 0x%x\n", stat->battery_type, stat->voltage, stat->current,
	    stat->temperature, stat->fw_status);
	printf("details: ");
	switch (stat->battery_type) {
	case MFI_BBU_TYPE_IBBU:
		printf("gauge %d relative charge %d charger state %d "
		    "charger ctrl %d\n", stat->detail.ibbu.gas_guage_status,
		    stat->detail.ibbu.relative_charge,
		    stat->detail.ibbu.charger_system_state,
		    stat->detail.ibbu.charger_system_ctrl);
		printf("\tcurrent %d abs charge %d max error %d\n",
		    stat->detail.ibbu.charging_current,
		    stat->detail.ibbu.absolute_charge,
		    stat->detail.ibbu.max_error);
		break;
	case MFI_BBU_TYPE_BBU:
		printf("gauge %d relative charge %d charger state %d\n",
		    stat->detail.ibbu.gas_guage_status,
		    stat->detail.bbu.relative_charge,
		    stat->detail.bbu.charger_status);
		printf("\trem capacity %d full capacity %d SOH %d\n",
		    stat->detail.bbu.remaining_capacity,
		    stat->detail.bbu.full_charge_capacity,
		    stat->detail.bbu.is_SOH_good);
		break;
	default:
		printf("\n");
	}
#endif
	switch (stat->battery_type) {
	case MFI_BBU_TYPE_BBU:
		return (stat->detail.bbu.is_SOH_good ?
		    MFI_BBU_GOOD : MFI_BBU_BAD);
	case MFI_BBU_TYPE_NONE:
		return MFI_BBU_UNKNOWN;
	default:
		if (stat->fw_status &
		    (MFI_BBU_STATE_PACK_MISSING |
		     MFI_BBU_STATE_VOLTAGE_LOW |
		     MFI_BBU_STATE_TEMPERATURE_HIGH |
		     MFI_BBU_STATE_LEARN_CYC_FAIL |
		     MFI_BBU_STATE_LEARN_CYC_TIMEOUT |
		     MFI_BBU_STATE_I2C_ERR_DETECT))
			return MFI_BBU_BAD;
		return MFI_BBU_GOOD;
	}
}

static void
mfiminphys(struct buf *bp)
{
	DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);

	/* XXX currently using MFI_MAXFER = MAXPHYS */
	if (bp->b_bcount > MFI_MAXFER)
		bp->b_bcount = MFI_MAXFER;
	minphys(bp);
}

int
mfi_rescan(device_t self, const char *ifattr, const int *locators)
{
	struct mfi_softc *sc = device_private(self);

	if (sc->sc_child != NULL)
		return 0;

	sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
	    scsiprint, NULL);

	return 0;
}

void
mfi_childdetached(device_t self, device_t child)
{
	struct mfi_softc *sc = device_private(self);

	KASSERT(self == sc->sc_dev);
	KASSERT(child == sc->sc_child);

	if (child == sc->sc_child)
		sc->sc_child = NULL;
}

int
mfi_detach(struct mfi_softc *sc, int flags)
{
	int error;

	DNPRINTF(MFI_D_MISC, "%s: mfi_detach\n", DEVNAME(sc));

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

#if NBIO > 0
	mfi_destroy_sensors(sc);
	bio_unregister(sc->sc_dev);
#endif /* NBIO > 0 */

	mfi_intr_disable(sc);
	mfi_shutdown(sc->sc_dev, 0);

	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		workqueue_destroy(sc->sc_ldsync_wq);
		mfi_put_ccb(sc->sc_ldsync_ccb);
		mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool);
		mfi_freemem(sc, &sc->sc_tbolt_ioc_init);
		mfi_freemem(sc, &sc->sc_tbolt_verbuf);
	}

	if ((error = mfi_destroy_ccb(sc)) != 0)
		return error;

	mfi_freemem(sc, &sc->sc_sense);

	mfi_freemem(sc, &sc->sc_frames);

	mfi_freemem(sc, &sc->sc_pcq);

	return 0;
}

static bool
mfi_shutdown(device_t dev, int how)
{
	struct mfi_softc	*sc = device_private(dev);
	uint8_t			mbox[MFI_MBOX_SIZE];
	int			s = splbio();

	DNPRINTF(MFI_D_MISC, "%s: mfi_shutdown\n", DEVNAME(sc));
	if (sc->sc_running) {
		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
		if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_CACHE_FLUSH,
		    MFI_DATA_NONE, 0, NULL, mbox, true)) {
			aprint_error_dev(dev, "shutdown: cache flush failed\n");
			goto fail;
		}

		mbox[0] = 0;
		if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_SHUTDOWN,
		    MFI_DATA_NONE, 0, NULL, mbox, true)) {
			aprint_error_dev(dev, "shutdown: "
			    "firmware shutdown failed\n");
			goto fail;
		}
		sc->sc_running = false;
	}
	splx(s);
	return true;
fail:
	splx(s);
	return false;
}

static bool
mfi_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}

static bool
mfi_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
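
/*
 * Attach glue, roughly in order: bring the firmware to READY, size the
 * command/SGL limits from the firmware state register, allocate the DMA
 * areas (reply queue, frames, sense buffers, plus the Thunderbolt pools
 * where applicable), initialize the ccbs, kick the firmware with the
 * queue addresses, then hook up scsipi, bio(4) and the power handler.
 */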
int
mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
{
	struct scsipi_adapter *adapt = &sc->sc_adapt;
	struct scsipi_channel *chan = &sc->sc_chan;
	uint32_t status, frames, max_sgl;
	int i;

	DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));

	sc->sc_ioptype = iop;

	switch (iop) {
	case MFI_IOP_XSCALE:
		sc->sc_iop = &mfi_iop_xscale;
		break;
	case MFI_IOP_PPC:
		sc->sc_iop = &mfi_iop_ppc;
		break;
	case MFI_IOP_GEN2:
		sc->sc_iop = &mfi_iop_gen2;
		break;
	case MFI_IOP_SKINNY:
		sc->sc_iop = &mfi_iop_skinny;
		break;
	case MFI_IOP_TBOLT:
		sc->sc_iop = &mfi_iop_tbolt;
		break;
	default:
		panic("%s: unknown iop %d", DEVNAME(sc), iop);
	}

	if (mfi_transition_firmware(sc))
		return 1;

	TAILQ_INIT(&sc->sc_ccb_freeq);

	status = mfi_fw_state(sc);
	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
	max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
		sc->sc_sgl_size = sizeof(struct mfi_sg_ieee);
	} else if (sc->sc_64bit_dma) {
		sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
		sc->sc_sgl_size = sizeof(struct mfi_sg64);
	} else {
		sc->sc_max_sgl = max_sgl;
		sc->sc_sgl_size = sizeof(struct mfi_sg32);
	}
	DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
	    DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);

	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		uint32_t tb_mem_size;
		/* for alignment */
		tb_mem_size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT;

		tb_mem_size +=
		    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1);
		sc->sc_reply_pool_size =
		    ((sc->sc_max_cmds + 1 + 15) / 16) * 16;
		tb_mem_size +=
		    MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;

		/* this is for the SGLs */
		tb_mem_size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->sc_max_cmds;
		sc->sc_tbolt_reqmsgpool = mfi_allocmem(sc, tb_mem_size);
		if (sc->sc_tbolt_reqmsgpool == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to allocate thunderbolt "
			    "request message pool\n");
			goto nopcq;
		}
		if (mfi_tbolt_init_desc_pool(sc)) {
			aprint_error_dev(sc->sc_dev,
			    "Thunderbolt pool preparation error\n");
			goto nopcq;
		}

		/*
		 * Allocate a separate DMA mapping for the MPI2 IOC Init
		 * descriptor; we keep it apart from the request and reply
		 * descriptor pool to avoid confusion later.
		 */
		sc->sc_tbolt_ioc_init = mfi_allocmem(sc,
		    sizeof(struct mpi2_ioc_init_request));
		if (sc->sc_tbolt_ioc_init == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to allocate thunderbolt IOC init memory\n");
			goto nopcq;
		}

		sc->sc_tbolt_verbuf = mfi_allocmem(sc,
		    MEGASAS_MAX_NAME * sizeof(bus_addr_t));
		if (sc->sc_tbolt_verbuf == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to allocate thunderbolt version buffer\n");
			goto nopcq;
		}
	}
	/* consumer/producer and reply queue memory */
	sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
	    sizeof(struct mfi_prod_cons));
	if (sc->sc_pcq == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate reply queue memory\n");
		goto nopcq;
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* frame memory */
	frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) /
	    MFI_FRAME_SIZE + 1;
	sc->sc_frames_size = frames * MFI_FRAME_SIZE;
	sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
	if (sc->sc_frames == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate frame memory\n");
		goto noframe;
	}
	/* XXX hack, fix this */
	if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
		aprint_error_dev(sc->sc_dev,
		    "improper frame alignment (%#llx) FIXME\n",
		    (long long int)MFIMEM_DVA(sc->sc_frames));
		goto noframe;
	}

	/* sense memory */
	sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
	if (sc->sc_sense == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate sense memory\n");
		goto nosense;
	}

	/* now that we have all memory bits go initialize ccbs */
	if (mfi_init_ccb(sc)) {
		aprint_error_dev(sc->sc_dev, "could not init ccb list\n");
		goto noinit;
	}

	/* kickstart firmware with all addresses and pointers */
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		if (mfi_tbolt_init_MFI_queue(sc)) {
			aprint_error_dev(sc->sc_dev,
			    "could not initialize firmware\n");
			goto noinit;
		}
	} else {
		if (mfi_initialize_firmware(sc)) {
			aprint_error_dev(sc->sc_dev,
			    "could not initialize firmware\n");
			goto noinit;
		}
	}
	sc->sc_running = true;

	if (mfi_get_info(sc)) {
		aprint_error_dev(sc->sc_dev,
		    "could not retrieve controller information\n");
		goto noinit;
	}
	aprint_normal_dev(sc->sc_dev,
	    "%s version %s\n",
	    sc->sc_info.mci_product_name,
	    sc->sc_info.mci_package_version);

	aprint_normal_dev(sc->sc_dev, "logical drives %d, %dMB RAM, ",
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_memory_size);
	sc->sc_bbuok = false;
	if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) {
		struct mfi_bbu_status bbu_stat;
		int mfi_bbu_status = mfi_get_bbu(sc, &bbu_stat);
		aprint_normal("BBU type ");
		switch (bbu_stat.battery_type) {
		case MFI_BBU_TYPE_BBU:
			aprint_normal("BBU");
			break;
		case MFI_BBU_TYPE_IBBU:
			aprint_normal("IBBU");
			break;
		default:
			aprint_normal("unknown type %d", bbu_stat.battery_type);
		}
		aprint_normal(", status ");
		switch (mfi_bbu_status) {
		case MFI_BBU_GOOD:
			aprint_normal("good\n");
			sc->sc_bbuok = true;
			break;
		case MFI_BBU_BAD:
			aprint_normal("bad\n");
			break;
		case MFI_BBU_UNKNOWN:
			aprint_normal("unknown\n");
			break;
		default:
			panic("mfi_bbu_status");
		}
	} else {
		aprint_normal("BBU not present\n");
	}

	sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
	sc->sc_max_ld = sc->sc_ld_cnt;
	for (i = 0; i < sc->sc_ld_cnt; i++)
		sc->sc_ld[i].ld_present = 1;

	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* keep a few commands for management */
	if (sc->sc_max_cmds > 4)
		adapt->adapt_openings = sc->sc_max_cmds - 4;
	else
		adapt->adapt_openings = sc->sc_max_cmds;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = mfi_scsipi_request;
	adapt->adapt_minphys = mfiminphys;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_sas_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = MFI_MAX_LD;
	chan->chan_id = MFI_MAX_LD;

	mfi_rescan(sc->sc_dev, "scsi", NULL);

	/* enable interrupts */
	mfi_intr_enable(sc);

#if NBIO > 0
	if (bio_register(sc->sc_dev, mfi_ioctl) != 0)
		panic("%s: controller registration failed", DEVNAME(sc));
	if (mfi_create_sensors(sc) != 0)
		aprint_error_dev(sc->sc_dev, "unable to create sensors\n");
#endif /* NBIO > 0 */
	if (!pmf_device_register1(sc->sc_dev, mfi_suspend, mfi_resume,
	    mfi_shutdown)) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");
	}

	return 0;
noinit:
	mfi_freemem(sc, &sc->sc_sense);
nosense:
	mfi_freemem(sc, &sc->sc_frames);
noframe:
	mfi_freemem(sc, &sc->sc_pcq);
nopcq:
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		if (sc->sc_tbolt_reqmsgpool)
			mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool);
		if (sc->sc_tbolt_verbuf)
			mfi_freemem(sc, &sc->sc_tbolt_verbuf);
	}
	return 1;
}
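
/*
 * Synchronous command execution.  On Thunderbolt (sc_MFA_enabled) the
 * completion is only visible through the reply queue, so the interrupt
 * handler is called by hand; on older parts it suffices to watch
 * mfh_cmd_status flip away from 0xff.  Either way we busy-wait in 1ms
 * steps for at most 5 seconds.
 */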
static int
mfi_poll(struct mfi_ccb *ccb)
{
	struct mfi_softc	*sc = ccb->ccb_sc;
	struct mfi_frame_header	*hdr;
	int			to = 0;
	int			rv = 0;

	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));

	hdr = &ccb->ccb_frame->mfr_header;
	hdr->mfh_cmd_status = 0xff;
	if (!sc->sc_MFA_enabled)
		hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* no callback, caller is supposed to do the cleanup */
	ccb->ccb_done = NULL;

	mfi_post(sc, ccb);
	if (sc->sc_MFA_enabled) {
		/*
		 * Depending on the command type the result may or may not
		 * be posted to *hdr.  In addition there seems to be no way
		 * to avoid posting the SMID to the reply queue, so poll
		 * using the interrupt routine.
		 */
		while (ccb->ccb_state != MFI_CCB_DONE) {
			delay(1000);
			if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
				rv = 1;
				break;
			}
			mfi_tbolt_intrh(sc);
		}
	} else {
		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
		    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
		    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);

		while (hdr->mfh_cmd_status == 0xff) {
			delay(1000);
			if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
				rv = 1;
				break;
			}
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
		}
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "timeout on ccb %d\n",
		    hdr->mfh_context);
		ccb->ccb_flags |= MFI_CCB_F_ERR;
		return 1;
	}

	return 0;
}
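
/*
 * Interrupt handler: walk the reply queue from the consumer index to
 * the producer index, map each posted context back to its ccb and call
 * its ccb_done callback, then publish the new consumer index back to
 * the firmware.  Consumed slots are overwritten with MFI_INVALID_CTX.
 */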
int
mfi_intr(void *arg)
{
	struct mfi_softc	*sc = arg;
	struct mfi_prod_cons	*pcq;
	struct mfi_ccb		*ccb;
	uint32_t		producer, consumer, ctx;
	int			claimed = 0;

	if (!mfi_my_intr(sc))
		return 0;

	pcq = MFIMEM_KVA(sc->sc_pcq);

	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
	    (u_long)sc, (u_long)pcq);

	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	producer = pcq->mpc_producer;
	consumer = pcq->mpc_consumer;

	while (consumer != producer) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
		    DEVNAME(sc), producer, consumer);

		ctx = pcq->mpc_reply_q[consumer];
		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
		if (ctx == MFI_INVALID_CTX)
			aprint_error_dev(sc->sc_dev,
			    "invalid context, p: %d c: %d\n",
			    producer, consumer);
		else {
			/* XXX remove from queue and call scsi_done */
			ccb = &sc->sc_ccb[ctx];
			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
			    DEVNAME(sc), ctx);
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			ccb->ccb_done(ccb);

			claimed = 1;
		}
		consumer++;
		if (consumer == (sc->sc_max_cmds + 1))
			consumer = 0;
	}

	pcq->mpc_consumer = consumer;
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return claimed;
}
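
/*
 * Build an LD READ/WRITE frame for an already-decoded block address and
 * count, and attach the data SGL.  blockcnt is in sectors, taken
 * straight from the CDB by mfi_scsipi_request() further down.
 */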
static int
mfi_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint64_t blockno,
    uint32_t blockcnt)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mfi_io_frame *io;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld_io: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	if (!xs->data)
		return 1;

	io = &ccb->ccb_frame->mfr_io;
	if (xs->xs_control & XS_CTL_DATA_IN) {
		io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
		ccb->ccb_direction = MFI_DATA_IN;
	} else {
		io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
		ccb->ccb_direction = MFI_DATA_OUT;
	}
	io->mif_header.mfh_target_id = periph->periph_target;
	io->mif_header.mfh_timeout = 0;
	io->mif_header.mfh_flags = 0;
	io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
	io->mif_header.mfh_data_len = blockcnt;
	io->mif_lba_hi = (blockno >> 32);
	io->mif_lba_lo = (blockno & 0xffffffff);
	io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
	io->mif_sense_addr_hi = 0;

	ccb->ccb_done = mfi_scsi_ld_done;
	ccb->ccb_xs = xs;
	ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
	ccb->ccb_sgl = &io->mif_sgl;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
		return 1;

	return 0;
}

static void
mfi_scsi_ld_done(struct mfi_ccb *ccb)
{
	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
	mfi_scsi_xs_done(ccb, hdr->mfh_cmd_status, hdr->mfh_scsi_status);
}

static void
mfi_scsi_xs_done(struct mfi_ccb *ccb, int status, int scsi_status)
{
	struct scsipi_xfer	*xs = ccb->ccb_xs;
	struct mfi_softc	*sc = ccb->ccb_sc;

	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
	    DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);

	if (xs->data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (status != MFI_STAT_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
		    DEVNAME(sc), status);

		if (scsi_status != 0) {
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
			    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
			    MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
			DNPRINTF(MFI_D_INTR,
			    "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
			    DEVNAME(sc), scsi_status,
			    (u_long)&xs->sense, (u_long)ccb->ccb_sense);
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, ccb->ccb_sense,
			    sizeof(struct scsi_sense_data));
			xs->error = XS_SENSE;
		}
	} else {
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
	}

	mfi_put_ccb(ccb);
	scsipi_done(xs);
}

static int
mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
{
	struct mfi_pass_frame	*pf;
	struct scsipi_periph	*periph = xs->xs_periph;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	pf = &ccb->ccb_frame->mfr_pass;
	pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
	pf->mpf_header.mfh_target_id = periph->periph_target;
	pf->mpf_header.mfh_lun_id = 0;
	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
	pf->mpf_header.mfh_timeout = 0;
	pf->mpf_header.mfh_data_len = xs->datalen; /* XXX */
	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;

	pf->mpf_sense_addr_hi = 0;
	pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);

	memset(pf->mpf_cdb, 0, 16);
	memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);

	ccb->ccb_done = mfi_scsi_ld_done;
	ccb->ccb_xs = xs;
	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
	ccb->ccb_sgl = &pf->mpf_sgl;

	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
		ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
		    MFI_DATA_IN : MFI_DATA_OUT;
	else
		ccb->ccb_direction = MFI_DATA_NONE;

	if (xs->data) {
		ccb->ccb_data = xs->data;
		ccb->ccb_len = xs->datalen;

		if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
			return 1;
	}

	return 0;
}

static void
mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph	*periph;
	struct scsipi_xfer	*xs;
	struct scsipi_adapter	*adapt = chan->chan_adapter;
	struct mfi_softc	*sc = device_private(adapt->adapt_dev);
	struct mfi_ccb		*ccb;
	struct scsi_rw_6	*rw;
	struct scsipi_rw_10	*rwb;
	struct scsipi_rw_12	*rw12;
	struct scsipi_rw_16	*rw16;
	uint64_t		blockno;
	uint32_t		blockcnt;
	uint8_t			target;
	uint8_t			mbox[MFI_MBOX_SIZE];
	int			s;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;

	periph = xs->xs_periph;
	target = periph->periph_target;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x "
	    "target %d lun %d\n", DEVNAME(sc), req, xs->cmd->opcode,
	    periph->periph_target, periph->periph_lun);

	s = splbio();
	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
	    periph->periph_lun != 0) {
		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
		    DEVNAME(sc), target);
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		splx(s);
		return;
	}
	if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
	    xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
		/* the cache is stable storage, don't flush */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		scsipi_done(xs);
		splx(s);
		return;
	}

	if ((ccb = mfi_get_ccb(sc)) == NULL) {
		DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n",
		    DEVNAME(sc));
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}

	switch (xs->cmd->opcode) {
	/* IO path */
	case READ_16:
	case WRITE_16:
		rw16 = (struct scsipi_rw_16 *)xs->cmd;
		blockno = _8btol(rw16->addr);
		blockcnt = _4btol(rw16->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case READ_12:
	case WRITE_12:
		rw12 = (struct scsipi_rw_12 *)xs->cmd;
		blockno = _4btol(rw12->addr);
		blockcnt = _4btol(rw12->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case READ_10:
	case WRITE_10:
		rwb = (struct scsipi_rw_10 *)xs->cmd;
		blockno = _4btol(rwb->addr);
		blockcnt = _2btol(rwb->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case SCSI_READ_6_COMMAND:
	case SCSI_WRITE_6_COMMAND:
		rw = (struct scsi_rw_6 *)xs->cmd;
		blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		blockcnt = rw->length ? rw->length : 0x100;
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case SCSI_SYNCHRONIZE_CACHE_10:
	case SCSI_SYNCHRONIZE_CACHE_16:
		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
		if (mfi_mgmt(ccb, xs,
		    MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, mbox)) {
			goto stuffup;
		}
		break;

	/* hand it off to the firmware and let it deal with it */
	case SCSI_TEST_UNIT_READY:
		/* save off sd? after autoconf */
		if (!cold)	/* XXX bogus */
			strlcpy(sc->sc_ld[target].ld_dev,
			    device_xname(sc->sc_dev),
			    sizeof(sc->sc_ld[target].ld_dev));
		/* FALLTHROUGH */

	default:
		if (mfi_scsi_ld(ccb, xs)) {
			goto stuffup;
		}
		break;
	}

	DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);

	if (xs->xs_control & XS_CTL_POLL) {
		if (mfi_poll(ccb)) {
			/* XXX check for sense in ccb->ccb_sense? */
			aprint_error_dev(sc->sc_dev,
			    "mfi_scsipi_request poll failed\n");
			memset(&xs->sense, 0, sizeof(xs->sense));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | SSD_RCODE_CURRENT;
			xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
			xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
			xs->error = XS_SENSE;
			xs->status = SCSI_CHECK;
		} else {
			DNPRINTF(MFI_D_DMA,
			    "%s: mfi_scsipi_request poll complete %d\n",
			    DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
			xs->error = XS_NOERROR;
			xs->status = SCSI_OK;
			xs->resid = 0;
		}
		mfi_put_ccb(ccb);
		scsipi_done(xs);
		splx(s);
		return;
	}

	mfi_post(sc, ccb);

	DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
	    ccb->ccb_dmamap->dm_nsegs);

	splx(s);
	return;

stuffup:
	mfi_put_ccb(ccb);
	xs->error = XS_DRIVER_STUFFUP;
	scsipi_done(xs);
	splx(s);
}
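
/*
 * Load the data buffer into the ccb's dmamap and translate the
 * resulting segment list into whichever SGL flavour the frame needs:
 * IEEE SGEs for Thunderbolt I/O frames, 64-bit SGEs when sc_64bit_dma
 * is set, plain 32-bit SGEs otherwise.  The frame grows by one SGL
 * entry per segment; anything beyond the first frame is accounted for
 * in ccb_extra_frames.
 */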
1748 static int
1749 mfi_create_sgl(struct mfi_ccb *ccb, int flags)
1750 {
1751 struct mfi_softc *sc = ccb->ccb_sc;
1752 struct mfi_frame_header *hdr;
1753 bus_dma_segment_t *sgd;
1754 union mfi_sgl *sgl;
1755 int error, i;
1756
1757 DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
1758 (u_long)ccb->ccb_data);
1759
1760 if (!ccb->ccb_data)
1761 return 1;
1762
1763 KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p());
1764 error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap,
1765 ccb->ccb_data, ccb->ccb_len, NULL, flags);
1766 if (error) {
1767 if (error == EFBIG) {
1768 aprint_error_dev(sc->sc_dev, "more than %d dma segs\n",
1769 sc->sc_max_sgl);
1770 } else {
1771 aprint_error_dev(sc->sc_dev,
1772 "error %d loading dma map\n", error);
1773 }
1774 return 1;
1775 }
1776
1777 hdr = &ccb->ccb_frame->mfr_header;
1778 sgl = ccb->ccb_sgl;
1779 sgd = ccb->ccb_dmamap->dm_segs;
1780 for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1781 if (sc->sc_ioptype == MFI_IOP_TBOLT &&
1782 (hdr->mfh_cmd == MFI_CMD_PD_SCSI_IO ||
1783 hdr->mfh_cmd == MFI_CMD_LD_READ ||
1784 hdr->mfh_cmd == MFI_CMD_LD_WRITE)) {
1785 sgl->sg_ieee[i].addr = htole64(sgd[i].ds_addr);
1786 sgl->sg_ieee[i].len = htole32(sgd[i].ds_len);
1787 sgl->sg_ieee[i].flags = 0;
1788 DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#"
1789 PRIx32 "\n",
1790 DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
1791 hdr->mfh_flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
1792 } else if (sc->sc_64bit_dma) {
1793 sgl->sg64[i].addr = htole64(sgd[i].ds_addr);
1794 sgl->sg64[i].len = htole32(sgd[i].ds_len);
1795 DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#"
1796 PRIx32 "\n",
1797 DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
1798 hdr->mfh_flags |= MFI_FRAME_SGL64;
1799 } else {
1800 sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1801 sgl->sg32[i].len = htole32(sgd[i].ds_len);
1802 DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
1803 DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1804 hdr->mfh_flags |= MFI_FRAME_SGL32;
1805 }
1806 }
1807
1808 if (ccb->ccb_direction == MFI_DATA_IN) {
1809 hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1810 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
1811 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1812 } else {
1813 hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1814 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
1815 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1816 }
1817
1818 hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1819 ccb->ccb_frame_size += sc->sc_sgl_size * ccb->ccb_dmamap->dm_nsegs;
1820 ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1821
1822 DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
1823 " dm_nsegs: %d extra_frames: %d\n",
1824 DEVNAME(sc),
1825 hdr->mfh_sg_count,
1826 ccb->ccb_frame_size,
1827 sc->sc_frames_size,
1828 ccb->ccb_dmamap->dm_nsegs,
1829 ccb->ccb_extra_frames);
1830
1831 return 0;
1832 }
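
/*
 * Illustrative helper (not called by the driver): the extra-frame count
 * computed in mfi_create_sgl() above is ceil(frame_size / MFI_FRAME_SIZE) - 1,
 * i.e. the number of additional hardware frames needed once a command
 * spills past its first frame.  For example, with 64-byte frames a
 * frame_size of 200 gives (200 - 1) / 64 == 3 extra frames.
 */
static inline int
mfi_extra_frames_sketch(int frame_size)
{
	/* same arithmetic as the ccb_extra_frames computation above */
	return (frame_size - 1) / MFI_FRAME_SIZE;
}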
1833
1834 static int
1835 mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t dir,
1836 uint32_t len, void *buf, uint8_t *mbox, bool poll)
1837 {
1838 struct mfi_ccb *ccb;
1839 int rv = 1;
1840
1841 if ((ccb = mfi_get_ccb(sc)) == NULL)
1842 return rv;
1843 rv = mfi_mgmt(ccb, NULL, opc, dir, len, buf, mbox);
	if (rv)
		goto done;	/* don't leak the ccb if setup failed */
1846
1847 if (poll) {
1848 rv = 1;
1849 if (mfi_poll(ccb))
1850 goto done;
1851 } else {
1852 mfi_post(sc, ccb);
1853
1854 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt_internal sleeping\n",
1855 DEVNAME(sc));
1856 while (ccb->ccb_state != MFI_CCB_DONE)
1857 tsleep(ccb, PRIBIO, "mfi_mgmt", 0);
1858
1859 if (ccb->ccb_flags & MFI_CCB_F_ERR)
1860 goto done;
1861 }
1862 rv = 0;
1863
1864 done:
1865 mfi_put_ccb(ccb);
1866 return rv;
1867 }
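
/*
 * Usage sketch (illustration only; mirrors what mfi_ioctl_inq() below
 * does with a malloc'd buffer): a blocking DCMD asking the controller
 * to fill a caller-supplied buffer.  With poll == false the calling
 * thread tsleep()s until mfi_mgmt_done() fires; poll == true spins on
 * the frame instead, for callers that cannot sleep.
 */
static inline int
mfi_conf_get_sketch(struct mfi_softc *sc, struct mfi_conf *cfg)
{
	return mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
	    sizeof(*cfg), cfg, NULL, false) ? EIO : 0;
}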
1868
1869 static int
1870 mfi_mgmt(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
1871 uint32_t opc, uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
1872 {
1873 struct mfi_dcmd_frame *dcmd;
1874
1875 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(ccb->ccb_sc), opc);
1876
1877 dcmd = &ccb->ccb_frame->mfr_dcmd;
1878 memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1879 dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1880 dcmd->mdf_header.mfh_timeout = 0;
1881
1882 dcmd->mdf_opcode = opc;
1883 dcmd->mdf_header.mfh_data_len = 0;
1884 ccb->ccb_direction = dir;
1885 ccb->ccb_xs = xs;
1886 ccb->ccb_done = mfi_mgmt_done;
1887
1888 ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1889
1890 /* handle special opcodes */
1891 if (mbox)
1892 memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1893
1894 if (dir != MFI_DATA_NONE) {
1895 dcmd->mdf_header.mfh_data_len = len;
1896 ccb->ccb_data = buf;
1897 ccb->ccb_len = len;
1898 ccb->ccb_sgl = &dcmd->mdf_sgl;
1899
1900 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
1901 return 1;
1902 }
1903 return 0;
1904 }
1905
1906 static void
1907 mfi_mgmt_done(struct mfi_ccb *ccb)
1908 {
1909 struct scsipi_xfer *xs = ccb->ccb_xs;
1910 struct mfi_softc *sc = ccb->ccb_sc;
1911 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1912
1913 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
1914 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1915
1916 if (ccb->ccb_data != NULL) {
1917 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
1918 DEVNAME(sc));
1919 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
1920 ccb->ccb_dmamap->dm_mapsize,
1921 (ccb->ccb_direction & MFI_DATA_IN) ?
1922 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1923
1924 bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
1925 }
1926
1927 if (hdr->mfh_cmd_status != MFI_STAT_OK)
1928 ccb->ccb_flags |= MFI_CCB_F_ERR;
1929
1930 ccb->ccb_state = MFI_CCB_DONE;
1931 if (xs) {
1932 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
1933 xs->error = XS_DRIVER_STUFFUP;
1934 } else {
1935 xs->error = XS_NOERROR;
1936 xs->status = SCSI_OK;
1937 xs->resid = 0;
1938 }
1939 mfi_put_ccb(ccb);
1940 scsipi_done(xs);
1941 } else
1942 wakeup(ccb);
1943 }
1944
1945 #if NBIO > 0
1946 int
1947 mfi_ioctl(device_t dev, u_long cmd, void *addr)
1948 {
1949 struct mfi_softc *sc = device_private(dev);
1950 int error = 0;
1951 int s;
1952
1953 KERNEL_LOCK(1, curlwp);
1954 s = splbio();
1955
1956 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1957
1958 switch (cmd) {
1959 case BIOCINQ:
1960 DNPRINTF(MFI_D_IOCTL, "inq\n");
1961 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1962 break;
1963
1964 case BIOCVOL:
1965 DNPRINTF(MFI_D_IOCTL, "vol\n");
1966 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1967 break;
1968
1969 case BIOCDISK:
1970 DNPRINTF(MFI_D_IOCTL, "disk\n");
1971 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1972 break;
1973
1974 case BIOCALARM:
1975 DNPRINTF(MFI_D_IOCTL, "alarm\n");
1976 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1977 break;
1978
1979 case BIOCBLINK:
1980 DNPRINTF(MFI_D_IOCTL, "blink\n");
1981 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1982 break;
1983
1984 case BIOCSETSTATE:
1985 DNPRINTF(MFI_D_IOCTL, "setstate\n");
1986 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1987 break;
1988
1989 default:
1990 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1991 error = EINVAL;
1992 }
1993 splx(s);
1994 KERNEL_UNLOCK_ONE(curlwp);
1995
1996 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
1997 return error;
1998 }
1999
2000 static int
2001 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
2002 {
2003 struct mfi_conf *cfg;
2004 int rv = EINVAL;
2005
2006 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
2007
2008 if (mfi_get_info(sc)) {
2009 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
2010 DEVNAME(sc));
2011 return EIO;
2012 }
2013
	/* fetch the controller configuration to count volumes and disks */
2015 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2016 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2017 sizeof *cfg, cfg, NULL, false))
2018 goto freeme;
2019
2020 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
2021 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
2022 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
2023
2024 rv = 0;
2025 freeme:
2026 free(cfg, M_DEVBUF);
2027 return rv;
2028 }
2029
2030 static int
2031 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
2032 {
2033 int i, per, rv = EINVAL;
2034 uint8_t mbox[MFI_MBOX_SIZE];
2035
2036 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
2037 DEVNAME(sc), bv->bv_volid);
2038
2039 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
2040 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false))
2041 goto done;
2042
	/* hotspares are reported past the LD range; check before indexing */
	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
	    DEVNAME(sc), mbox[0]);

	if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
	    sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox, false))
		goto done;
2057
2058 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
2059
2060 switch(sc->sc_ld_list.mll_list[i].mll_state) {
2061 case MFI_LD_OFFLINE:
2062 bv->bv_status = BIOC_SVOFFLINE;
2063 break;
2064
2065 case MFI_LD_PART_DEGRADED:
2066 case MFI_LD_DEGRADED:
2067 bv->bv_status = BIOC_SVDEGRADED;
2068 break;
2069
2070 case MFI_LD_ONLINE:
2071 bv->bv_status = BIOC_SVONLINE;
2072 break;
2073
2074 default:
2075 bv->bv_status = BIOC_SVINVALID;
2076 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
2077 DEVNAME(sc),
2078 sc->sc_ld_list.mll_list[i].mll_state);
2079 }
2080
2081 /* additional status can modify MFI status */
2082 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
2083 case MFI_LD_PROG_CC:
2084 case MFI_LD_PROG_BGI:
2085 bv->bv_status = BIOC_SVSCRUB;
2086 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
2087 bv->bv_percent = (per * 100) / 0xffff;
2088 bv->bv_seconds =
2089 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
2090 break;
2091
2092 case MFI_LD_PROG_FGI:
2093 case MFI_LD_PROG_RECONSTRUCT:
2094 /* nothing yet */
2095 break;
2096 }
2097
	/*
	 * The RAID levels are determined per the SNIA DDF spec; this is
	 * only the subset that is valid for the MFI controller.
	 */
2102 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
2103 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
2104 MFI_DDF_SRL_SPANNED)
2105 bv->bv_level *= 10;
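	/*
	 * Worked example of the DDF mapping above: primary RAID level 1
	 * with a spanned secondary level reports bv_level = 10 (RAID 10),
	 * primary 5 spanned reports 50; non-spanned volumes report the
	 * primary level unchanged.
	 */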
2106
2107 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
2108 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
2109
2110 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
2111
2112 rv = 0;
2113 done:
2114 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
2115 DEVNAME(sc), rv);
2116 return rv;
2117 }
2118
2119 static int
2120 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
2121 {
2122 struct mfi_conf *cfg;
2123 struct mfi_array *ar;
2124 struct mfi_ld_cfg *ld;
2125 struct mfi_pd_details *pd;
2126 struct scsipi_inquiry_data *inqbuf;
2127 char vend[8+16+4+1];
2128 int i, rv = EINVAL;
2129 int arr, vol, disk;
2130 uint32_t size;
2131 uint8_t mbox[MFI_MBOX_SIZE];
2132
2133 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
2134 DEVNAME(sc), bd->bd_diskid);
2135
2136 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
2137
2138 /* send single element command to retrieve size for full structure */
2139 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2140 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2141 sizeof *cfg, cfg, NULL, false))
2142 goto freeme;
2143
2144 size = cfg->mfc_size;
2145 free(cfg, M_DEVBUF);
2146
2147 /* memory for read config */
2148 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
2149 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2150 size, cfg, NULL, false))
2151 goto freeme;
2152
2153 ar = cfg->mfc_array;
2154
2155 /* calculate offset to ld structure */
2156 ld = (struct mfi_ld_cfg *)(
2157 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
2158 cfg->mfc_array_size * cfg->mfc_no_array);
2159
2160 vol = bd->bd_volid;
2161
2162 if (vol >= cfg->mfc_no_ld) {
2163 /* do hotspares */
2164 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
2165 goto freeme;
2166 }
2167
2168 /* find corresponding array for ld */
2169 for (i = 0, arr = 0; i < vol; i++)
2170 arr += ld[i].mlc_parm.mpa_span_depth;
2171
2172 /* offset disk into pd list */
2173 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
2174
2175 /* offset array index into the next spans */
2176 arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
2177
2178 bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
2179 switch (ar[arr].pd[disk].mar_pd_state){
2180 case MFI_PD_UNCONFIG_GOOD:
2181 bd->bd_status = BIOC_SDUNUSED;
2182 break;
2183
2184 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
2185 bd->bd_status = BIOC_SDHOTSPARE;
2186 break;
2187
2188 case MFI_PD_OFFLINE:
2189 bd->bd_status = BIOC_SDOFFLINE;
2190 break;
2191
2192 case MFI_PD_FAILED:
2193 bd->bd_status = BIOC_SDFAILED;
2194 break;
2195
2196 case MFI_PD_REBUILD:
2197 bd->bd_status = BIOC_SDREBUILD;
2198 break;
2199
2200 case MFI_PD_ONLINE:
2201 bd->bd_status = BIOC_SDONLINE;
2202 break;
2203
2204 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
2205 default:
2206 bd->bd_status = BIOC_SDINVALID;
2207 break;
2208
2209 }
2210
2211 /* get the remaining fields */
2212 *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
2213 memset(pd, 0, sizeof(*pd));
2214 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2215 sizeof *pd, pd, mbox, false))
2216 goto freeme;
2217
2218 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
2219
2220 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
2221 bd->bd_channel = pd->mpd_enc_idx;
2222
2223 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
2224 memcpy(vend, inqbuf->vendor, sizeof vend - 1);
2225 vend[sizeof vend - 1] = '\0';
2226 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
2227
2228 /* XXX find a way to retrieve serial nr from drive */
2229 /* XXX find a way to get bd_procdev */
2230
2231 rv = 0;
2232 freeme:
2233 free(pd, M_DEVBUF);
2234 free(cfg, M_DEVBUF);
2235
2236 return rv;
2237 }
2238
2239 static int
2240 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
2241 {
2242 uint32_t opc, dir = MFI_DATA_NONE;
2243 int rv = 0;
2244 int8_t ret;
2245
2246 switch(ba->ba_opcode) {
2247 case BIOC_SADISABLE:
2248 opc = MR_DCMD_SPEAKER_DISABLE;
2249 break;
2250
2251 case BIOC_SAENABLE:
2252 opc = MR_DCMD_SPEAKER_ENABLE;
2253 break;
2254
2255 case BIOC_SASILENCE:
2256 opc = MR_DCMD_SPEAKER_SILENCE;
2257 break;
2258
2259 case BIOC_GASTATUS:
2260 opc = MR_DCMD_SPEAKER_GET;
2261 dir = MFI_DATA_IN;
2262 break;
2263
2264 case BIOC_SATEST:
2265 opc = MR_DCMD_SPEAKER_TEST;
2266 break;
2267
2268 default:
2269 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
2270 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
2271 return EINVAL;
2272 }
2273
2274 if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL, false))
2275 rv = EINVAL;
2276 else
2277 if (ba->ba_opcode == BIOC_GASTATUS)
2278 ba->ba_status = ret;
2279 else
2280 ba->ba_status = 0;
2281
2282 return rv;
2283 }
2284
2285 static int
2286 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
2287 {
2288 int i, found, rv = EINVAL;
2289 uint8_t mbox[MFI_MBOX_SIZE];
2290 uint32_t cmd;
2291 struct mfi_pd_list *pd;
2292
2293 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
2294 bb->bb_status);
2295
	/*
	 * Channel 0 means the disk is not in an enclosure, so it
	 * cannot be blinked.
	 */
	if (bb->bb_channel == 0)
2298 return EINVAL;
2299
2300 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
2301
2302 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
2303 MFI_PD_LIST_SIZE, pd, NULL, false))
2304 goto done;
2305
2306 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
2307 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
2308 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
2309 found = 1;
2310 break;
2311 }
2312
2313 if (!found)
2314 goto done;
2315
2316 memset(mbox, 0, sizeof mbox);
2317
2318 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
2319
2320 switch (bb->bb_status) {
2321 case BIOC_SBUNBLINK:
2322 cmd = MR_DCMD_PD_UNBLINK;
2323 break;
2324
2325 case BIOC_SBBLINK:
2326 cmd = MR_DCMD_PD_BLINK;
2327 break;
2328
2329 case BIOC_SBALARM:
2330 default:
2331 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
2332 "opcode %x\n", DEVNAME(sc), bb->bb_status);
2333 goto done;
2334 }
2335
	if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox, false))
2338 goto done;
2339
2340 rv = 0;
2341 done:
2342 free(pd, M_DEVBUF);
2343 return rv;
2344 }
2345
2346 static int
2347 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
2348 {
2349 struct mfi_pd_list *pd;
2350 int i, found, rv = EINVAL;
2351 uint8_t mbox[MFI_MBOX_SIZE];
2352
2353 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
2354 bs->bs_status);
2355
2356 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
2357
2358 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
2359 MFI_PD_LIST_SIZE, pd, NULL, false))
2360 goto done;
2361
2362 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
2363 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
2364 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
2365 found = 1;
2366 break;
2367 }
2368
2369 if (!found)
2370 goto done;
2371
2372 memset(mbox, 0, sizeof mbox);
2373
2374 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
2375
2376 switch (bs->bs_status) {
2377 case BIOC_SSONLINE:
2378 mbox[2] = MFI_PD_ONLINE;
2379 break;
2380
2381 case BIOC_SSOFFLINE:
2382 mbox[2] = MFI_PD_OFFLINE;
2383 break;
2384
2385 case BIOC_SSHOTSPARE:
2386 mbox[2] = MFI_PD_HOTSPARE;
2387 break;
2388 /*
2389 case BIOC_SSREBUILD:
2390 break;
2391 */
2392 default:
2393 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
2394 "opcode %x\n", DEVNAME(sc), bs->bs_status);
2395 goto done;
2396 }
2397
	if (mfi_mgmt_internal(sc, MD_DCMD_PD_SET_STATE, MFI_DATA_NONE,
2400 0, NULL, mbox, false))
2401 goto done;
2402
2403 rv = 0;
2404 done:
2405 free(pd, M_DEVBUF);
2406 return rv;
2407 }
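
/*
 * Mailbox layout used by the physical-drive DCMDs above (sketch; it
 * assumes the firmware's little-endian view matches the host's, as the
 * raw uint16_t store does):
 *
 *	mbox[0..1] = mpa_pd_id (16-bit physical drive id)
 *	mbox[2]    = MFI_PD_* state, for MD_DCMD_PD_SET_STATE only
 *
 * with the remaining mailbox bytes left zero.
 */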
2408
2409 static int
2410 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
2411 {
2412 struct mfi_conf *cfg;
2413 struct mfi_hotspare *hs;
2414 struct mfi_pd_details *pd;
2415 struct bioc_disk *sdhs;
2416 struct bioc_vol *vdhs;
2417 struct scsipi_inquiry_data *inqbuf;
2418 char vend[8+16+4+1];
2419 int i, rv = EINVAL;
2420 uint32_t size;
2421 uint8_t mbox[MFI_MBOX_SIZE];
2422
2423 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
2424
2425 if (!bio_hs)
2426 return EINVAL;
2427
2428 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
2429
2430 /* send single element command to retrieve size for full structure */
2431 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2432 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2433 sizeof *cfg, cfg, NULL, false))
2434 goto freeme;
2435
2436 size = cfg->mfc_size;
2437 free(cfg, M_DEVBUF);
2438
2439 /* memory for read config */
2440 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
2441 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2442 size, cfg, NULL, false))
2443 goto freeme;
2444
2445 /* calculate offset to hs structure */
2446 hs = (struct mfi_hotspare *)(
2447 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
2448 cfg->mfc_array_size * cfg->mfc_no_array +
2449 cfg->mfc_ld_size * cfg->mfc_no_ld);
2450
2451 if (volid < cfg->mfc_no_ld)
2452 goto freeme; /* not a hotspare */
2453
	if (volid >= (cfg->mfc_no_ld + cfg->mfc_no_hs))
		goto freeme; /* beyond the last hotspare */
2456
2457 /* offset into hotspare structure */
2458 i = volid - cfg->mfc_no_ld;
2459
2460 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
2461 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
2462 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
2463
2464 /* get pd fields */
2465 memset(mbox, 0, sizeof mbox);
2466 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
2467 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2468 sizeof *pd, pd, mbox, false)) {
2469 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
2470 DEVNAME(sc));
2471 goto freeme;
2472 }
2473
2474 switch (type) {
2475 case MFI_MGMT_VD:
2476 vdhs = bio_hs;
2477 vdhs->bv_status = BIOC_SVONLINE;
2478 vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */
2479 vdhs->bv_level = -1; /* hotspare */
2480 vdhs->bv_nodisk = 1;
2481 break;
2482
2483 case MFI_MGMT_SD:
2484 sdhs = bio_hs;
2485 sdhs->bd_status = BIOC_SDHOTSPARE;
2486 sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */
2487 sdhs->bd_channel = pd->mpd_enc_idx;
2488 sdhs->bd_target = pd->mpd_enc_slot;
2489 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
2490 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
2491 vend[sizeof vend - 1] = '\0';
2492 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
2493 break;
2494
2495 default:
2496 goto freeme;
2497 }
2498
2499 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
2500 rv = 0;
2501 freeme:
2502 free(pd, M_DEVBUF);
2503 free(cfg, M_DEVBUF);
2504
2505 return rv;
2506 }
2507
2508 static int
2509 mfi_destroy_sensors(struct mfi_softc *sc)
2510 {
2511 if (sc->sc_sme == NULL)
2512 return 0;
2513 sysmon_envsys_unregister(sc->sc_sme);
2514 sc->sc_sme = NULL;
2515 free(sc->sc_sensor, M_DEVBUF);
2516 return 0;
2517 }
2518
2519 static int
2520 mfi_create_sensors(struct mfi_softc *sc)
2521 {
2522 int i;
2523 int nsensors = sc->sc_ld_cnt + 1;
2524 int rv;
2525
2526 sc->sc_sme = sysmon_envsys_create();
2527 sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
2528 M_DEVBUF, M_NOWAIT | M_ZERO);
2529 if (sc->sc_sensor == NULL) {
2530 aprint_error_dev(sc->sc_dev, "can't allocate envsys_data_t\n");
2531 return ENOMEM;
2532 }
2533
2534 /* BBU */
2535 sc->sc_sensor[0].units = ENVSYS_INDICATOR;
2536 sc->sc_sensor[0].state = ENVSYS_SINVALID;
2537 sc->sc_sensor[0].value_cur = 0;
2538 /* Enable monitoring for BBU state changes, if present */
2539 if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU)
2540 sc->sc_sensor[0].flags |= ENVSYS_FMONCRITICAL;
2541 snprintf(sc->sc_sensor[0].desc,
2542 sizeof(sc->sc_sensor[0].desc), "%s BBU", DEVNAME(sc));
2543 if (sysmon_envsys_sensor_attach(sc->sc_sme, &sc->sc_sensor[0]))
2544 goto out;
2545
2546 for (i = 1; i < nsensors; i++) {
2547 sc->sc_sensor[i].units = ENVSYS_DRIVE;
2548 sc->sc_sensor[i].state = ENVSYS_SINVALID;
2549 sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY;
2550 /* Enable monitoring for drive state changes */
2551 sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
2552 /* logical drives */
2553 snprintf(sc->sc_sensor[i].desc,
2554 sizeof(sc->sc_sensor[i].desc), "%s:%d",
2555 DEVNAME(sc), i - 1);
2556 if (sysmon_envsys_sensor_attach(sc->sc_sme,
2557 &sc->sc_sensor[i]))
2558 goto out;
2559 }
2560
2561 sc->sc_sme->sme_name = DEVNAME(sc);
2562 sc->sc_sme->sme_cookie = sc;
2563 sc->sc_sme->sme_refresh = mfi_sensor_refresh;
2564 rv = sysmon_envsys_register(sc->sc_sme);
2565 if (rv != 0) {
2566 aprint_error_dev(sc->sc_dev,
2567 "unable to register with sysmon (rv = %d)\n", rv);
2568 goto out;
2569 }
2570 return 0;
2571
2572 out:
2573 free(sc->sc_sensor, M_DEVBUF);
2574 sysmon_envsys_destroy(sc->sc_sme);
2575 sc->sc_sme = NULL;
2576 return EINVAL;
2577 }
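
/*
 * Sensor numbering used above and consumed by mfi_sensor_refresh()
 * below: index 0 is the BBU indicator and index i (1 <= i <= sc_ld_cnt)
 * is logical drive i - 1, which is why the refresh path sets
 * bv.bv_volid = edata->sensor - 1.
 */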
2578
2579 static void
2580 mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
2581 {
2582 struct mfi_softc *sc = sme->sme_cookie;
2583 struct bioc_vol bv;
2584 int s;
2585 int error;
2586
2587 if (edata->sensor >= sc->sc_ld_cnt + 1)
2588 return;
2589
2590 if (edata->sensor == 0) {
2591 /* BBU */
2592 struct mfi_bbu_status bbu_stat;
2593 int bbu_status;
2594 if ((sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) == 0)
2595 return;
2596
2597 KERNEL_LOCK(1, curlwp);
2598 s = splbio();
2599 bbu_status = mfi_get_bbu(sc, &bbu_stat);
2600 splx(s);
2601 KERNEL_UNLOCK_ONE(curlwp);
2602 switch(bbu_status) {
2603 case MFI_BBU_GOOD:
2604 edata->value_cur = 1;
2605 edata->state = ENVSYS_SVALID;
2606 if (!sc->sc_bbuok)
2607 aprint_normal_dev(sc->sc_dev,
2608 "BBU state changed to good\n");
2609 sc->sc_bbuok = true;
2610 break;
2611 case MFI_BBU_BAD:
2612 edata->value_cur = 0;
2613 edata->state = ENVSYS_SCRITICAL;
2614 if (sc->sc_bbuok)
2615 aprint_normal_dev(sc->sc_dev,
2616 "BBU state changed to bad\n");
2617 sc->sc_bbuok = false;
2618 break;
2619 case MFI_BBU_UNKNOWN:
2620 default:
2621 edata->value_cur = 0;
2622 edata->state = ENVSYS_SINVALID;
2623 sc->sc_bbuok = false;
2624 break;
2625 }
2626 return;
2627 }
2628
2629 memset(&bv, 0, sizeof(bv));
2630 bv.bv_volid = edata->sensor - 1;
2631 KERNEL_LOCK(1, curlwp);
2632 s = splbio();
2633 error = mfi_ioctl_vol(sc, &bv);
2634 splx(s);
2635 KERNEL_UNLOCK_ONE(curlwp);
2636 if (error)
2637 return;
2638
2639 switch(bv.bv_status) {
2640 case BIOC_SVOFFLINE:
2641 edata->value_cur = ENVSYS_DRIVE_FAIL;
2642 edata->state = ENVSYS_SCRITICAL;
2643 break;
2644
2645 case BIOC_SVDEGRADED:
2646 edata->value_cur = ENVSYS_DRIVE_PFAIL;
2647 edata->state = ENVSYS_SCRITICAL;
2648 break;
2649
2650 case BIOC_SVSCRUB:
2651 case BIOC_SVONLINE:
2652 edata->value_cur = ENVSYS_DRIVE_ONLINE;
2653 edata->state = ENVSYS_SVALID;
2654 break;
2655
	case BIOC_SVINVALID:
		/* FALLTHROUGH */
2658 default:
2659 edata->value_cur = 0; /* unknown */
2660 edata->state = ENVSYS_SINVALID;
2661 }
2662 }
2663
2664 #endif /* NBIO > 0 */
2665
2666 static uint32_t
2667 mfi_xscale_fw_state(struct mfi_softc *sc)
2668 {
2669 return mfi_read(sc, MFI_OMSG0);
2670 }
2671
2672 static void
2673 mfi_xscale_intr_dis(struct mfi_softc *sc)
2674 {
2675 mfi_write(sc, MFI_OMSK, 0);
2676 }
2677
2678 static void
2679 mfi_xscale_intr_ena(struct mfi_softc *sc)
2680 {
2681 mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
2682 }
2683
2684 static int
2685 mfi_xscale_intr(struct mfi_softc *sc)
2686 {
2687 uint32_t status;
2688
2689 status = mfi_read(sc, MFI_OSTS);
2690 if (!ISSET(status, MFI_OSTS_INTR_VALID))
2691 return 0;
2692
2693 /* write status back to acknowledge interrupt */
2694 mfi_write(sc, MFI_OSTS, status);
2695 return 1;
2696 }
2697
2698 static void
2699 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2700 {
2701 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2702 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
2703 sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2704 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
2705 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
2706 MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);
2707
2708 mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
2709 ccb->ccb_extra_frames);
2710 ccb->ccb_state = MFI_CCB_RUNNING;
2711 }
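
/*
 * Worked example of the xscale doorbell encoding above (illustrative
 * address): frames are 64-byte aligned, so ccb_pframe >> 3 leaves the
 * low bits of MFI_IQP free for the extra-frame count.  With
 * ccb_pframe == 0x10000 and 2 extra frames the register is written as
 * (0x10000 >> 3) | 2 == 0x2002.
 */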
2712
2713 static uint32_t
2714 mfi_ppc_fw_state(struct mfi_softc *sc)
2715 {
2716 return mfi_read(sc, MFI_OSP);
2717 }
2718
2719 static void
2720 mfi_ppc_intr_dis(struct mfi_softc *sc)
2721 {
2722 /* Taking a wild guess --dyoung */
2723 mfi_write(sc, MFI_OMSK, ~(uint32_t)0x0);
2724 mfi_write(sc, MFI_ODC, 0xffffffff);
2725 }
2726
2727 static void
2728 mfi_ppc_intr_ena(struct mfi_softc *sc)
2729 {
2730 mfi_write(sc, MFI_ODC, 0xffffffff);
2731 mfi_write(sc, MFI_OMSK, ~0x80000004);
2732 }
2733
2734 static int
2735 mfi_ppc_intr(struct mfi_softc *sc)
2736 {
2737 uint32_t status;
2738
2739 status = mfi_read(sc, MFI_OSTS);
2740 if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2741 return 0;
2742
2743 /* write status back to acknowledge interrupt */
2744 mfi_write(sc, MFI_ODC, status);
2745 return 1;
2746 }
2747
2748 static void
2749 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2750 {
2751 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2752 (ccb->ccb_extra_frames << 1));
2753 ccb->ccb_state = MFI_CCB_RUNNING;
2754 }
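
/*
 * Worked example of the doorbell encoding above (illustrative address;
 * the gen2 and skinny variants below reuse the same format): bit 0 is
 * the busy flag and the extra-frame count sits at bit 1, which fits
 * because frames are 64-byte aligned.  With ccb_pframe == 0x10000 and
 * 2 extra frames the register is written as
 * 0x10000 | 0x1 | (2 << 1) == 0x10005.
 */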
2755
2756 u_int32_t
2757 mfi_gen2_fw_state(struct mfi_softc *sc)
2758 {
2759 return (mfi_read(sc, MFI_OSP));
2760 }
2761
2762 void
2763 mfi_gen2_intr_dis(struct mfi_softc *sc)
2764 {
2765 mfi_write(sc, MFI_OMSK, 0xffffffff);
2766 mfi_write(sc, MFI_ODC, 0xffffffff);
2767 }
2768
2769 void
2770 mfi_gen2_intr_ena(struct mfi_softc *sc)
2771 {
2772 mfi_write(sc, MFI_ODC, 0xffffffff);
2773 mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
2774 }
2775
2776 int
2777 mfi_gen2_intr(struct mfi_softc *sc)
2778 {
2779 u_int32_t status;
2780
2781 status = mfi_read(sc, MFI_OSTS);
2782 if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
2783 return (0);
2784
2785 /* write status back to acknowledge interrupt */
2786 mfi_write(sc, MFI_ODC, status);
2787
2788 return (1);
2789 }
2790
2791 void
2792 mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2793 {
2794 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2795 (ccb->ccb_extra_frames << 1));
2796 ccb->ccb_state = MFI_CCB_RUNNING;
2797 }
2798
2799 u_int32_t
2800 mfi_skinny_fw_state(struct mfi_softc *sc)
2801 {
2802 return (mfi_read(sc, MFI_OSP));
2803 }
2804
2805 void
2806 mfi_skinny_intr_dis(struct mfi_softc *sc)
2807 {
2808 mfi_write(sc, MFI_OMSK, 0);
2809 }
2810
2811 void
2812 mfi_skinny_intr_ena(struct mfi_softc *sc)
2813 {
2814 mfi_write(sc, MFI_OMSK, ~0x00000001);
2815 }
2816
2817 int
2818 mfi_skinny_intr(struct mfi_softc *sc)
2819 {
2820 u_int32_t status;
2821
2822 status = mfi_read(sc, MFI_OSTS);
2823 if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
2824 return (0);
2825
2826 /* write status back to acknowledge interrupt */
2827 mfi_write(sc, MFI_OSTS, status);
2828
2829 return (1);
2830 }
2831
2832 void
2833 mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2834 {
2835 mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
2836 (ccb->ccb_extra_frames << 1));
2837 mfi_write(sc, MFI_IQPH, 0x00000000);
2838 ccb->ccb_state = MFI_CCB_RUNNING;
2839 }
2840
2841 #define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000008)
2842
2843 void
2844 mfi_tbolt_intr_ena(struct mfi_softc *sc)
2845 {
2846 mfi_write(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
2847 mfi_read(sc, MFI_OMSK);
2848 }
2849
2850 void
2851 mfi_tbolt_intr_dis(struct mfi_softc *sc)
2852 {
2853 mfi_write(sc, MFI_OMSK, 0xFFFFFFFF);
2854 mfi_read(sc, MFI_OMSK);
2855 }
2856
2857 int
2858 mfi_tbolt_intr(struct mfi_softc *sc)
2859 {
2860 int32_t status;
2861
2862 status = mfi_read(sc, MFI_OSTS);
2863
2864 if (ISSET(status, 0x1)) {
2865 mfi_write(sc, MFI_OSTS, status);
2866 mfi_read(sc, MFI_OSTS);
2867 if (ISSET(status, MFI_STATE_CHANGE_INTERRUPT))
2868 return 0;
2869 return 1;
2870 }
2871 if (!ISSET(status, MFI_FUSION_ENABLE_INTERRUPT_MASK))
2872 return 0;
2873 mfi_read(sc, MFI_OSTS);
2874 return 1;
2875 }
2876
2877 u_int32_t
2878 mfi_tbolt_fw_state(struct mfi_softc *sc)
2879 {
2880 return mfi_read(sc, MFI_OSP);
2881 }
2882
2883 void
2884 mfi_tbolt_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2885 {
2886 if (sc->sc_MFA_enabled) {
2887 if ((ccb->ccb_flags & MFI_CCB_F_TBOLT) == 0)
2888 mfi_tbolt_build_mpt_ccb(ccb);
2889 mfi_write(sc, MFI_IQPL,
2890 ccb->ccb_tb_request_desc.words & 0xFFFFFFFF);
2891 mfi_write(sc, MFI_IQPH,
2892 ccb->ccb_tb_request_desc.words >> 32);
2893 ccb->ccb_state = MFI_CCB_RUNNING;
2894 return;
2895 }
2896 uint64_t bus_add = ccb->ccb_pframe;
2897 bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
2898 << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2899 mfi_write(sc, MFI_IQPL, bus_add);
2900 mfi_write(sc, MFI_IQPH, bus_add >> 32);
2901 ccb->ccb_state = MFI_CCB_RUNNING;
2902 }
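
/*
 * Sketch of the descriptor split above: the 64-bit request descriptor
 * is posted as two 32-bit writes, low word to MFI_IQPL and high word
 * to MFI_IQPH, with the descriptor type (MFA pass-through here, LD I/O
 * for the fast path in mfi_tbolt_scsi_ld_io() below) encoded in the
 * low bits via MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT.
 */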
2903
2904 static void
2905 mfi_tbolt_build_mpt_ccb(struct mfi_ccb *ccb)
2906 {
2907 union mfi_mpi2_request_descriptor *req_desc = &ccb->ccb_tb_request_desc;
2908 struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request;
2909 struct mpi25_ieee_sge_chain64 *mpi25_ieee_chain;
2910
2911 io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
2912 io_req->SGLOffset0 =
2913 offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;
2914 io_req->ChainOffset =
2915 offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 16;
2916
2917 mpi25_ieee_chain =
2918 (struct mpi25_ieee_sge_chain64 *)&io_req->SGL.IeeeChain;
2919 mpi25_ieee_chain->Address = ccb->ccb_pframe;
2920
	/*
	 * In MFI pass-through, NextChainOffset is always zero to
	 * indicate the end of the chain.
	 */
	mpi25_ieee_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
	    | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2927
	/* set the length to the maximum chain-frame length */
2929 mpi25_ieee_chain->Length = 1024;
2930
2931 req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2932 MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2933 ccb->ccb_flags |= MFI_CCB_F_TBOLT;
2934 bus_dmamap_sync(ccb->ccb_sc->sc_dmat,
2935 MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool),
2936 ccb->ccb_tb_pio_request -
2937 MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool),
2938 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
2939 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2940 }
2941
/*
 * Prepare the message pools for the ThunderBolt controller.
 */
2946 static int
2947 mfi_tbolt_init_desc_pool(struct mfi_softc *sc)
2948 {
2949 uint32_t offset = 0;
2950 uint8_t *addr = MFIMEM_KVA(sc->sc_tbolt_reqmsgpool);
2951
	/* Request descriptor alignment restrictions */
2953 KASSERT(((uintptr_t)addr & 0xFF) == 0);
2954
2955 /* Skip request message pool */
2956 addr = &addr[MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1)];
2957
	/* The reply frame pool starts here */
2959 sc->sc_reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
2960 KASSERT(((uintptr_t)addr & 0xFF) == 0);
2961
2962 offset = (uintptr_t)sc->sc_reply_frame_pool
2963 - (uintptr_t)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool);
2964 sc->sc_reply_frame_busaddr =
2965 MFIMEM_DVA(sc->sc_tbolt_reqmsgpool) + offset;
2966
	/* mark every reply descriptor as unused (all ones) */
2968 memset((uint8_t *)sc->sc_reply_frame_pool, 0xFF,
2969 (MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size));
2970
2971 /* Skip Reply Frame Pool */
2972 addr += MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;
2973 sc->sc_reply_pool_limit = (void *)addr;
2974
2975 offset = MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;
2976 sc->sc_sg_frame_busaddr = sc->sc_reply_frame_busaddr + offset;
2977
2978 /* initialize the last_reply_idx to 0 */
2979 sc->sc_last_reply_idx = 0;
2980 offset = (sc->sc_sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
2981 sc->sc_max_cmds)) - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool);
2982 KASSERT(offset <= sc->sc_tbolt_reqmsgpool->am_size);
2983 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), 0,
2984 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool)->dm_mapsize,
2985 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2986 return 0;
2987 }
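
/*
 * Resulting layout of sc_tbolt_reqmsgpool (sketch; the sizes are the
 * MEGASAS_THUNDERBOLT_* constants used above):
 *
 *	offset 0:		request messages,
 *				NEW_MSG_SIZE * (sc_max_cmds + 1) bytes
 *	sc_reply_frame_pool:	reply descriptors,
 *				REPLY_SIZE * sc_reply_pool_size bytes
 *	sc_reply_pool_limit:	SG chain frames,
 *				MAX_SZ_CHAIN_FRAME * sc_max_cmds bytes
 */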
2988
/*
 * This routine prepares and issues the INIT2 frame to the firmware.
 */
2992
2993 static int
2994 mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
2995 {
2996 struct mpi2_ioc_init_request *mpi2IocInit;
2997 struct mfi_init_frame *mfi_init;
2998 struct mfi_ccb *ccb;
2999 bus_addr_t phyAddress;
3000 mfi_address *mfiAddressTemp;
3001 int s;
3002 char *verbuf;
3003 char wqbuf[10];
3004
3005 /* Check if initialization is already completed */
3006 if (sc->sc_MFA_enabled) {
3007 return 1;
3008 }
3009
3010 mpi2IocInit =
3011 (struct mpi2_ioc_init_request *)MFIMEM_KVA(sc->sc_tbolt_ioc_init);
3012
3013 s = splbio();
3014 if ((ccb = mfi_get_ccb(sc)) == NULL) {
3015 splx(s);
3016 return (EBUSY);
3017 }
3018
	mfi_init = &ccb->ccb_frame->mfr_init;
3021
3022 memset(mpi2IocInit, 0, sizeof(struct mpi2_ioc_init_request));
3023 mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT;
3024 mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
3025
3026 /* set MsgVersion and HeaderVersion host driver was built with */
3027 mpi2IocInit->MsgVersion = MPI2_VERSION;
3028 mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
3029 mpi2IocInit->SystemRequestFrameSize = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE/4;
3030 mpi2IocInit->ReplyDescriptorPostQueueDepth =
3031 (uint16_t)sc->sc_reply_pool_size;
3032 mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */
3033
3034 /* Get physical address of reply frame pool */
3035 phyAddress = sc->sc_reply_frame_busaddr;
3036 mfiAddressTemp =
3037 (mfi_address *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
3038 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
3039 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
3040
3041 /* Get physical address of request message pool */
3042 phyAddress = MFIMEM_DVA(sc->sc_tbolt_reqmsgpool);
3043 mfiAddressTemp = (mfi_address *)&mpi2IocInit->SystemRequestFrameBaseAddress;
3044 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
3045 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
3046
3047 mpi2IocInit->ReplyFreeQueueAddress = 0; /* Not supported by MR. */
3048 mpi2IocInit->TimeStamp = time_uptime;
3049
3050 verbuf = MFIMEM_KVA(sc->sc_tbolt_verbuf);
3051 snprintf(verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
3052 MEGASAS_VERSION);
3053 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0,
3054 MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_PREWRITE);
3055 mfi_init->driver_ver_lo = htole32(MFIMEM_DVA(sc->sc_tbolt_verbuf));
3056 mfi_init->driver_ver_hi =
3057 htole32((uint64_t)MFIMEM_DVA(sc->sc_tbolt_verbuf) >> 32);
3058
3059 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0,
3060 MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize,
3061 BUS_DMASYNC_PREWRITE);
3062 /* Get the physical address of the mpi2 ioc init command */
3063 phyAddress = MFIMEM_DVA(sc->sc_tbolt_ioc_init);
3064 mfi_init->mif_qinfo_new_addr_lo = htole32(phyAddress);
3065 mfi_init->mif_qinfo_new_addr_hi = htole32((uint64_t)phyAddress >> 32);
3066
3067 mfi_init->mif_header.mfh_cmd = MFI_CMD_INIT;
3068 mfi_init->mif_header.mfh_data_len = sizeof(struct mpi2_ioc_init_request);
3069 if (mfi_poll(ccb) != 0) {
3070 aprint_error_dev(sc->sc_dev, "failed to send IOC init2 "
3071 "command at 0x%" PRIx64 "\n",
3072 (uint64_t)ccb->ccb_pframe);
3073 splx(s);
3074 return 1;
3075 }
3076 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0,
3077 MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3078 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0,
3079 MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize,
3080 BUS_DMASYNC_POSTWRITE);
3081 mfi_put_ccb(ccb);
3082 splx(s);
3083
	if (mfi_init->mif_header.mfh_cmd_status == 0) {
		sc->sc_MFA_enabled = 1;
	} else {
		aprint_error_dev(sc->sc_dev, "init command failed %x\n",
		    mfi_init->mif_header.mfh_cmd_status);
		return 1;
	}
3092
3093 snprintf(wqbuf, sizeof(wqbuf), "%swq", DEVNAME(sc));
3094 if (workqueue_create(&sc->sc_ldsync_wq, wqbuf, mfi_tbolt_sync_map_info,
3095 sc, PRIBIO, IPL_BIO, 0) != 0) {
3096 aprint_error_dev(sc->sc_dev, "workqueue_create failed\n");
3097 return 1;
3098 }
3099 workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL);
3100 return 0;
3101 }
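
/*
 * Summary of the INIT2 handshake above (sketch): the MPI2 IOC_INIT
 * payload lives in its own DMA buffer (sc_tbolt_ioc_init); the legacy
 * MFI init frame only carries that buffer's physical address in
 * mif_qinfo_new_addr_{lo,hi}, so the firmware fetches the MPI2 request
 * indirectly while the driver polls the MFI frame for completion.
 */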
3102
3103 int
3104 mfi_tbolt_intrh(void *arg)
3105 {
3106 struct mfi_softc *sc = arg;
3107 struct mfi_ccb *ccb;
3108 union mfi_mpi2_reply_descriptor *desc;
3109 int smid, num_completed;
3110
3111 if (!mfi_tbolt_intr(sc))
3112 return 0;
3113
3114 DNPRINTF(MFI_D_INTR, "%s: mfi_tbolt_intrh %#lx %#lx\n", DEVNAME(sc),
3115 (u_long)sc, (u_long)sc->sc_last_reply_idx);
3116
3117 KASSERT(sc->sc_last_reply_idx < sc->sc_reply_pool_size);
3118
3119 desc = (union mfi_mpi2_reply_descriptor *)
3120 ((uintptr_t)sc->sc_reply_frame_pool +
3121 sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE);
3122
3123 bus_dmamap_sync(sc->sc_dmat,
3124 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3125 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1),
3126 MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size,
3127 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3128 num_completed = 0;
3129 while ((desc->header.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK) !=
3130 MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
3131 smid = desc->header.SMID;
3132 KASSERT(smid > 0 && smid <= sc->sc_max_cmds);
3133 ccb = &sc->sc_ccb[smid - 1];
3134 DNPRINTF(MFI_D_INTR,
3135 "%s: mfi_tbolt_intr SMID %#x reply_idx %#x "
3136 "desc %#" PRIx64 " ccb %p\n", DEVNAME(sc), smid,
3137 sc->sc_last_reply_idx, desc->words, ccb);
3138 KASSERT(ccb->ccb_state == MFI_CCB_RUNNING);
3139 if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO &&
3140 ccb->ccb_tb_io_request->ChainOffset != 0) {
3141 bus_dmamap_sync(sc->sc_dmat,
3142 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3143 ccb->ccb_tb_psg_frame -
3144 MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
3145 MEGASAS_MAX_SZ_CHAIN_FRAME, BUS_DMASYNC_POSTREAD);
3146 }
3147 if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO) {
3148 bus_dmamap_sync(sc->sc_dmat,
3149 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3150 ccb->ccb_tb_pio_request -
3151 MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
3152 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
3153 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3154 }
3155 if (ccb->ccb_done)
3156 ccb->ccb_done(ccb);
3157 else
3158 ccb->ccb_state = MFI_CCB_DONE;
3159 sc->sc_last_reply_idx++;
3160 if (sc->sc_last_reply_idx >= sc->sc_reply_pool_size) {
3161 sc->sc_last_reply_idx = 0;
3162 }
3163 desc->words = ~0x0;
3164 /* Get the next reply descriptor */
3165 desc = (union mfi_mpi2_reply_descriptor *)
3166 ((uintptr_t)sc->sc_reply_frame_pool +
3167 sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE);
3168 num_completed++;
3169 }
3170 if (num_completed == 0)
3171 return 0;
3172
3173 bus_dmamap_sync(sc->sc_dmat,
3174 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3175 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1),
3176 MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size,
3177 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3178 mfi_write(sc, MFI_RPI, sc->sc_last_reply_idx);
3179 return 1;
3180 }
3181
3182
3183 int
3184 mfi_tbolt_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
3185 uint64_t blockno, uint32_t blockcnt)
3186 {
3187 struct scsipi_periph *periph = xs->xs_periph;
3188 struct mfi_mpi2_request_raid_scsi_io *io_req;
3189 int sge_count;
3190
3191 DNPRINTF(MFI_D_CMD, "%s: mfi_tbolt_scsi_ld_io: %d\n",
3192 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
3193 periph->periph_target);
3194
3195 if (!xs->data)
3196 return 1;
3197
3198 ccb->ccb_done = mfi_tbolt_scsi_ld_done;
3199 ccb->ccb_xs = xs;
3200 ccb->ccb_data = xs->data;
3201 ccb->ccb_len = xs->datalen;
3202
3203 io_req = ccb->ccb_tb_io_request;
3204
	/* Just the CDB length; the rest of the flags are zero */
3206 io_req->IoFlags = xs->cmdlen;
3207 memset(io_req->CDB.CDB32, 0, 32);
3208 memcpy(io_req->CDB.CDB32, &xs->cmdstore, xs->cmdlen);
3209
3210 io_req->RaidContext.TargetID = periph->periph_target;
3211 io_req->RaidContext.Status = 0;
3212 io_req->RaidContext.exStatus = 0;
3213 io_req->RaidContext.timeoutValue = MFI_FUSION_FP_DEFAULT_TIMEOUT;
3214 io_req->Function = MPI2_FUNCTION_LD_IO_REQUEST;
3215 io_req->DevHandle = periph->periph_target;
3216
3217 ccb->ccb_tb_request_desc.header.RequestFlags =
3218 (MFI_REQ_DESCRIPT_FLAGS_LD_IO << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3219 io_req->DataLength = blockcnt * MFI_SECTOR_LEN;
3220
3221 if (xs->xs_control & XS_CTL_DATA_IN) {
3222 io_req->Control = MPI2_SCSIIO_CONTROL_READ;
3223 ccb->ccb_direction = MFI_DATA_IN;
3224 } else {
3225 io_req->Control = MPI2_SCSIIO_CONTROL_WRITE;
3226 ccb->ccb_direction = MFI_DATA_OUT;
3227 }
3228
	sge_count = mfi_tbolt_create_sgl(ccb,
	    (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
3232 if (sge_count < 0)
3233 return 1;
3234 KASSERT(sge_count <= ccb->ccb_sc->sc_max_sgl);
3235 io_req->RaidContext.numSGE = sge_count;
3236 io_req->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
3237 io_req->SGLOffset0 =
3238 offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;
3239
3240 io_req->SenseBufferLowAddress = htole32(ccb->ccb_psense);
3241 io_req->SenseBufferLength = MFI_SENSE_SIZE;
3242
3243 ccb->ccb_flags |= MFI_CCB_F_TBOLT | MFI_CCB_F_TBOLT_IO;
3244 bus_dmamap_sync(ccb->ccb_sc->sc_dmat,
3245 MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool),
3246 ccb->ccb_tb_pio_request -
3247 MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool),
3248 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
3249 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3250
3251 return 0;
3252 }
3253
3254
3255 static void
3256 mfi_tbolt_scsi_ld_done(struct mfi_ccb *ccb)
3257 {
3258 struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request;
3259 mfi_scsi_xs_done(ccb, io_req->RaidContext.Status,
3260 io_req->RaidContext.exStatus);
3261 }
3262
3263 static int
3264 mfi_tbolt_create_sgl(struct mfi_ccb *ccb, int flags)
3265 {
3266 struct mfi_softc *sc = ccb->ccb_sc;
3267 bus_dma_segment_t *sgd;
3268 int error, i, sge_idx, sge_count;
3269 struct mfi_mpi2_request_raid_scsi_io *io_req;
3270 struct mpi25_ieee_sge_chain64 *sgl_ptr;
3271
3272 DNPRINTF(MFI_D_DMA, "%s: mfi_tbolt_create_sgl %#lx\n", DEVNAME(sc),
3273 (u_long)ccb->ccb_data);
3274
3275 if (!ccb->ccb_data)
3276 return -1;
3277
3278 KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p());
3279 error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap,
3280 ccb->ccb_data, ccb->ccb_len, NULL, flags);
3281 if (error) {
3282 if (error == EFBIG)
3283 aprint_error_dev(sc->sc_dev, "more than %d dma segs\n",
3284 sc->sc_max_sgl);
3285 else
3286 aprint_error_dev(sc->sc_dev,
3287 "error %d loading dma map\n", error);
3288 return -1;
3289 }
3290
3291 io_req = ccb->ccb_tb_io_request;
3292 sgl_ptr = &io_req->SGL.IeeeChain.Chain64;
3293 sge_count = ccb->ccb_dmamap->dm_nsegs;
3294 sgd = ccb->ccb_dmamap->dm_segs;
3295 KASSERT(sge_count <= sc->sc_max_sgl);
3296 KASSERT(sge_count <=
3297 (MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1 +
3298 MEGASAS_THUNDERBOLT_MAX_SGE_IN_CHAINMSG));
3299
3300 if (sge_count > MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG) {
3301 /* One element to store the chain info */
3302 sge_idx = MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1;
3303 DNPRINTF(MFI_D_DMA,
3304 "mfi sge_idx %d sge_count %d io_req paddr 0x%" PRIx64 "\n",
3305 sge_idx, sge_count, ccb->ccb_tb_pio_request);
3306 } else {
3307 sge_idx = sge_count;
3308 }
3309
3310 for (i = 0; i < sge_idx; i++) {
3311 sgl_ptr->Address = htole64(sgd[i].ds_addr);
3312 sgl_ptr->Length = htole32(sgd[i].ds_len);
3313 sgl_ptr->Flags = 0;
3314 if (sge_idx < sge_count) {
3315 DNPRINTF(MFI_D_DMA,
3316 "sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32
3317 " flags 0x%x\n", sgl_ptr, i,
3318 sgl_ptr->Address, sgl_ptr->Length,
3319 sgl_ptr->Flags);
3320 }
3321 sgl_ptr++;
3322 }
3323 io_req->ChainOffset = 0;
3324 if (sge_idx < sge_count) {
3325 struct mpi25_ieee_sge_chain64 *sg_chain;
3326 io_req->ChainOffset = MEGASAS_THUNDERBOLT_CHAIN_OFF_MAINMSG;
3327 sg_chain = sgl_ptr;
3328 /* Prepare chain element */
3329 sg_chain->NextChainOffset = 0;
3330 sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3331 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
3332 sg_chain->Length = (sizeof(mpi2_sge_io_union) *
3333 (sge_count - sge_idx));
3334 sg_chain->Address = ccb->ccb_tb_psg_frame;
3335 DNPRINTF(MFI_D_DMA,
3336 "sgl %p chain 0x%" PRIx64 " len 0x%" PRIx32
3337 " flags 0x%x\n", sg_chain, sg_chain->Address,
3338 sg_chain->Length, sg_chain->Flags);
3339 sgl_ptr = &ccb->ccb_tb_sg_frame->IeeeChain.Chain64;
3340 for (; i < sge_count; i++) {
3341 sgl_ptr->Address = htole64(sgd[i].ds_addr);
3342 sgl_ptr->Length = htole32(sgd[i].ds_len);
3343 sgl_ptr->Flags = 0;
3344 DNPRINTF(MFI_D_DMA,
3345 "sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32
3346 " flags 0x%x\n", sgl_ptr, i, sgl_ptr->Address,
3347 sgl_ptr->Length, sgl_ptr->Flags);
3348 sgl_ptr++;
3349 }
3350 bus_dmamap_sync(sc->sc_dmat,
3351 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
3352 ccb->ccb_tb_psg_frame - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
3353 MEGASAS_MAX_SZ_CHAIN_FRAME, BUS_DMASYNC_PREREAD);
3354 }
3355
3356 if (ccb->ccb_direction == MFI_DATA_IN) {
3357 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
3358 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3359 } else {
3360 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
3361 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
3362 }
3363 return sge_count;
3364 }
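
/*
 * Worked example of the main-message/chain split above (illustrative
 * limit; the real value is MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG):
 * with a main-message limit of 8, a 20-segment transfer places 7 data
 * SGEs in the main message (sge_idx == 8 - 1), a chain element in the
 * eighth slot, and the remaining 13 SGEs in the chain frame.
 */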
3365
/*
 * The ThunderBolt hardware gives the driver the option of accessing
 * the underlying disks directly and operating on the RAID itself.  To
 * do this the RAID controller and the driver must be kept in sync.
 * Like the FreeBSD driver, this driver does not take advantage of the
 * feature, since it adds a lot of complexity and actually costs
 * performance; performance is gained by using the controller's cache
 * and related machinery instead.
 *
 * Even though this driver doesn't access the disks directly, an
 * AEN-like command is used to ask the RAID firmware to "sync" with
 * all LDs via the MR_DCMD_LD_MAP_GET_INFO command.  Issued in write
 * mode, the command returns only when the RAID firmware has detected
 * a change to the RAID state, for example a disk being removed.  Once
 * the command returns, the driver must acknowledge it and "sync" all
 * LDs again.  This repeats until shutdown, at which point the pending
 * command must be cancelled.
 *
 * If this is not done correctly the RAID firmware will not drop a
 * pulled drive and the RAID will never go degraded, effectively
 * stopping all RAID management functions from working.
 *
 * Re-issuing the LD sync requires a workqueue event, because the
 * driver needs to wait for the command and cannot do that from an
 * interrupt thread.
 *
 * The driver could obtain the RAID state from the
 * MR_DCMD_LD_MAP_GET_INFO reply itself, but that requires a pile of
 * extra structures; it is simpler to issue MR_DCMD_LD_GET_LIST than
 * to walk the RAID map.
 */
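
/*
 * Sketch of the resulting cycle (summary of the comment above):
 *
 *	workqueue -> mfi_tbolt_sync_map_info()
 *	    MR_DCMD_LD_GET_LIST             snapshot the current LDs
 *	    MR_DCMD_LD_MAP_GET_INFO (write) pends in the firmware until
 *	                                    the RAID state changes
 *	interrupt -> mfi_sync_map_complete()
 *	    free the snapshot, then re-enqueue the work unless the
 *	    command failed or the driver is shutting down
 */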
3397
3398 void
3399 mfi_tbolt_sync_map_info(struct work *w, void *v)
3400 {
3401 struct mfi_softc *sc = v;
3402 int i;
3403 struct mfi_ccb *ccb = NULL;
3404 uint8_t mbox[MFI_MBOX_SIZE];
3405 struct mfi_ld *ld_sync = NULL;
3406 size_t ld_size;
3407 int s;
3408
3409 DNPRINTF(MFI_D_SYNC, "%s: mfi_tbolt_sync_map_info\n", DEVNAME(sc));
3410 again:
3411 s = splbio();
3412 if (sc->sc_ldsync_ccb != NULL) {
3413 splx(s);
3414 return;
3415 }
3416
3417 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
3418 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false)) {
3419 aprint_error_dev(sc->sc_dev, "MR_DCMD_LD_GET_LIST failed\n");
3420 goto err;
3421 }
3422
3423 ld_size = sizeof(*ld_sync) * sc->sc_ld_list.mll_no_ld;
3424
3425 ld_sync = (struct mfi_ld *) malloc(ld_size, M_DEVBUF,
3426 M_WAITOK | M_ZERO);
3427 if (ld_sync == NULL) {
3428 aprint_error_dev(sc->sc_dev, "Failed to allocate sync\n");
3429 goto err;
3430 }
3431 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3432 ld_sync[i] = sc->sc_ld_list.mll_list[i].mll_ld;
3433 }
3434
3435 if ((ccb = mfi_get_ccb(sc)) == NULL) {
3436 aprint_error_dev(sc->sc_dev, "Failed to get sync command\n");
3437 free(ld_sync, M_DEVBUF);
3438 goto err;
3439 }
3440 sc->sc_ldsync_ccb = ccb;
3441
3442 memset(mbox, 0, MFI_MBOX_SIZE);
3443 mbox[0] = sc->sc_ld_list.mll_no_ld;
3444 mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;
3445 if (mfi_mgmt(ccb, NULL, MR_DCMD_LD_MAP_GET_INFO, MFI_DATA_OUT,
3446 ld_size, ld_sync, mbox)) {
3447 aprint_error_dev(sc->sc_dev, "Failed to create sync command\n");
3448 goto err;
3449 }
3450 /*
3451 * we won't sleep on this command, so we have to override
3452 * the callback set up by mfi_mgmt()
3453 */
3454 ccb->ccb_done = mfi_sync_map_complete;
3455
3456 mfi_post(sc, ccb);
3457 splx(s);
3458 return;
3459
3460 err:
3461 if (ld_sync)
3462 free(ld_sync, M_DEVBUF);
3463 if (ccb)
3464 mfi_put_ccb(ccb);
3465 sc->sc_ldsync_ccb = NULL;
3466 splx(s);
3467 kpause("ldsyncp", 0, hz, NULL);
3468 goto again;
3469 }
3470
3471 static void
3472 mfi_sync_map_complete(struct mfi_ccb *ccb)
3473 {
3474 struct mfi_softc *sc = ccb->ccb_sc;
3475 bool aborted = !sc->sc_running;
3476
3477 DNPRINTF(MFI_D_SYNC, "%s: mfi_sync_map_complete\n",
3478 DEVNAME(ccb->ccb_sc));
3479 KASSERT(sc->sc_ldsync_ccb == ccb);
3480 mfi_mgmt_done(ccb);
3481 free(ccb->ccb_data, M_DEVBUF);
3482 if (ccb->ccb_flags & MFI_CCB_F_ERR) {
3483 aprint_error_dev(sc->sc_dev, "sync command failed\n");
3484 aborted = true;
3485 }
3486 mfi_put_ccb(ccb);
3487 sc->sc_ldsync_ccb = NULL;
3488
3489 /* set it up again so the driver can catch more events */
3490 if (!aborted) {
3491 workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL);
3492 }
3493 }
3494
3495 static int
3496 mfifopen(dev_t dev, int flag, int mode, struct lwp *l)
3497 {
3498 struct mfi_softc *sc;
3499
3500 if ((sc = device_lookup_private(&mfi_cd, minor(dev))) == NULL)
3501 return (ENXIO);
3502 return (0);
3503 }
3504
3505 static int
3506 mfifclose(dev_t dev, int flag, int mode, struct lwp *l)
3507 {
3508 return (0);
3509 }
3510
3511 static int
3512 mfifioctl(dev_t dev, u_long cmd, void *data, int flag,
3513 struct lwp *l)
3514 {
3515 struct mfi_softc *sc;
3516 struct mfi_ioc_packet *ioc = data;
3517 uint8_t *udata;
3518 struct mfi_ccb *ccb = NULL;
3519 int ctx, i, s, error;
3520 union mfi_sense_ptr sense_ptr;
3521
3522 switch(cmd) {
3523 case MFI_CMD:
3524 sc = device_lookup_private(&mfi_cd, ioc->mfi_adapter_no);
3525 break;
3526 default:
3527 return ENOTTY;
3528 }
3529 if (sc == NULL)
3530 return (ENXIO);
3531 if (sc->sc_opened)
3532 return (EBUSY);
3533
3534 switch(cmd) {
3535 case MFI_CMD:
3536 error = kauth_authorize_device_passthru(l->l_cred, dev,
3537 KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
3538 if (error)
3539 return error;
3540 if (ioc->mfi_sge_count > MAX_IOCTL_SGE)
3541 return EINVAL;
		s = splbio();
		if ((ccb = mfi_get_ccb(sc)) == NULL) {
			splx(s);	/* don't return at raised ipl */
			return ENOMEM;
		}
3545 ccb->ccb_data = NULL;
3546 ctx = ccb->ccb_frame->mfr_header.mfh_context;
3547 memcpy(ccb->ccb_frame, ioc->mfi_frame.raw,
3548 sizeof(*ccb->ccb_frame));
3549 ccb->ccb_frame->mfr_header.mfh_context = ctx;
3550 ccb->ccb_frame->mfr_header.mfh_scsi_status = 0;
3551 ccb->ccb_frame->mfr_header.mfh_pad0 = 0;
3552 ccb->ccb_frame_size =
3553 (sizeof(union mfi_sgl) * ioc->mfi_sge_count) +
3554 ioc->mfi_sgl_off;
3555 if (ioc->mfi_sge_count > 0) {
3556 ccb->ccb_sgl = (union mfi_sgl *)
3557 &ccb->ccb_frame->mfr_bytes[ioc->mfi_sgl_off];
3558 }
3559 if (ccb->ccb_frame->mfr_header.mfh_flags & MFI_FRAME_DIR_READ)
3560 ccb->ccb_direction = MFI_DATA_IN;
3561 if (ccb->ccb_frame->mfr_header.mfh_flags & MFI_FRAME_DIR_WRITE)
3562 ccb->ccb_direction = MFI_DATA_OUT;
3563 ccb->ccb_len = ccb->ccb_frame->mfr_header.mfh_data_len;
3564 if (ccb->ccb_len > MAXPHYS) {
3565 error = ENOMEM;
3566 goto out;
3567 }
3568 if (ccb->ccb_len &&
3569 (ccb->ccb_direction & (MFI_DATA_IN | MFI_DATA_OUT)) != 0) {
3570 udata = malloc(ccb->ccb_len, M_DEVBUF, M_WAITOK|M_ZERO);
3571 if (udata == NULL) {
3572 error = ENOMEM;
3573 goto out;
3574 }
3575 ccb->ccb_data = udata;
3576 if (ccb->ccb_direction & MFI_DATA_OUT) {
3577 for (i = 0; i < ioc->mfi_sge_count; i++) {
3578 error = copyin(ioc->mfi_sgl[i].iov_base,
3579 udata, ioc->mfi_sgl[i].iov_len);
3580 if (error)
3581 goto out;
3582 udata = &udata[
3583 ioc->mfi_sgl[i].iov_len];
3584 }
3585 }
3586 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK)) {
3587 error = EIO;
3588 goto out;
3589 }
3590 }
3591 if (ccb->ccb_frame->mfr_header.mfh_cmd == MFI_CMD_PD_SCSI_IO) {
3592 ccb->ccb_frame->mfr_io.mif_sense_addr_lo =
3593 htole32(ccb->ccb_psense);
3594 ccb->ccb_frame->mfr_io.mif_sense_addr_hi = 0;
3595 }
3596 ccb->ccb_done = mfi_mgmt_done;
3597 mfi_post(sc, ccb);
3598 while (ccb->ccb_state != MFI_CCB_DONE)
3599 tsleep(ccb, PRIBIO, "mfi_fioc", 0);
3600
3601 if (ccb->ccb_direction & MFI_DATA_IN) {
3602 udata = ccb->ccb_data;
3603 for (i = 0; i < ioc->mfi_sge_count; i++) {
3604 error = copyout(udata,
3605 ioc->mfi_sgl[i].iov_base,
3606 ioc->mfi_sgl[i].iov_len);
3607 if (error)
3608 goto out;
3609 udata = &udata[
3610 ioc->mfi_sgl[i].iov_len];
3611 }
3612 }
3613 if (ioc->mfi_sense_len) {
3614 memcpy(&sense_ptr.sense_ptr_data[0],
3615 &ioc->mfi_frame.raw[ioc->mfi_sense_off],
3616 sizeof(sense_ptr.sense_ptr_data));
3617 error = copyout(ccb->ccb_sense,
3618 sense_ptr.user_space,
3619 sizeof(sense_ptr.sense_ptr_data));
3620 if (error)
3621 goto out;
3622 }
3623 memcpy(ioc->mfi_frame.raw, ccb->ccb_frame,
3624 sizeof(*ccb->ccb_frame));
3625 break;
3626 default:
3627 printf("mfifioctl unhandled cmd 0x%lx\n", cmd);
3628 return ENOTTY;
3629 }
3630
out:
	/* ccb is always valid here; the allocation-failure path returns above */
	if (ccb->ccb_data)
		free(ccb->ccb_data, M_DEVBUF);
	mfi_put_ccb(ccb);
3636 splx(s);
3637 return error;
3638 }
3639