1 /* $NetBSD: mfi.c,v 1.42 2012/08/05 22:47:36 bouyer Exp $ */
2 /* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
3 /*
4 * Copyright (c) 2006 Marco Peereboom <marco (at) peereboom.us>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include <sys/cdefs.h>
20 __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.42 2012/08/05 22:47:36 bouyer Exp $");
21
22 #include "bio.h"
23
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/buf.h>
27 #include <sys/ioctl.h>
28 #include <sys/device.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/proc.h>
32
33 #include <uvm/uvm_param.h>
34
35 #include <sys/bus.h>
36
37 #include <dev/scsipi/scsipi_all.h>
38 #include <dev/scsipi/scsi_all.h>
39 #include <dev/scsipi/scsi_spc.h>
40 #include <dev/scsipi/scsipi_disk.h>
41 #include <dev/scsipi/scsi_disk.h>
42 #include <dev/scsipi/scsiconf.h>
43
44 #include <dev/ic/mfireg.h>
45 #include <dev/ic/mfivar.h>
46
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #endif /* NBIO > 0 */
50
51 #ifdef MFI_DEBUG
52 uint32_t mfi_debug = 0
53 /* | MFI_D_CMD */
54 /* | MFI_D_INTR */
55 /* | MFI_D_MISC */
56 /* | MFI_D_DMA */
57 | MFI_D_IOCTL
58 /* | MFI_D_RW */
59 /* | MFI_D_MEM */
60 /* | MFI_D_CCB */
61 ;
62 #endif
63
64 static void mfi_scsipi_request(struct scsipi_channel *,
65 scsipi_adapter_req_t, void *);
66 static void mfiminphys(struct buf *bp);
67
68 static struct mfi_ccb *mfi_get_ccb(struct mfi_softc *);
69 static void mfi_put_ccb(struct mfi_ccb *);
70 static int mfi_init_ccb(struct mfi_softc *);
71
72 static struct mfi_mem *mfi_allocmem(struct mfi_softc *, size_t);
73 static void mfi_freemem(struct mfi_softc *, struct mfi_mem **);
74
75 static int mfi_transition_firmware(struct mfi_softc *);
76 static int mfi_initialize_firmware(struct mfi_softc *);
77 static int mfi_get_info(struct mfi_softc *);
78 static uint32_t mfi_read(struct mfi_softc *, bus_size_t);
79 static void mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
80 static int mfi_poll(struct mfi_ccb *);
81 static int mfi_create_sgl(struct mfi_ccb *, int);
82
83 /* commands */
84 static int mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
85 static int mfi_scsi_io(struct mfi_ccb *, struct scsipi_xfer *,
86 uint32_t, uint32_t);
87 static void mfi_scsi_xs_done(struct mfi_ccb *);
88 static int mfi_mgmt_internal(struct mfi_softc *,
89 uint32_t, uint32_t, uint32_t, void *, uint8_t *);
90 static int mfi_mgmt(struct mfi_ccb *,struct scsipi_xfer *,
91 uint32_t, uint32_t, uint32_t, void *, uint8_t *);
92 static void mfi_mgmt_done(struct mfi_ccb *);
93
94 #if NBIO > 0
95 static int mfi_ioctl(device_t, u_long, void *);
96 static int mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
97 static int mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
98 static int mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
99 static int mfi_ioctl_alarm(struct mfi_softc *,
100 struct bioc_alarm *);
101 static int mfi_ioctl_blink(struct mfi_softc *sc,
102 struct bioc_blink *);
103 static int mfi_ioctl_setstate(struct mfi_softc *,
104 struct bioc_setstate *);
105 static int mfi_bio_hs(struct mfi_softc *, int, int, void *);
106 static int mfi_create_sensors(struct mfi_softc *);
107 static int mfi_destroy_sensors(struct mfi_softc *);
108 static void mfi_sensor_refresh(struct sysmon_envsys *,
109 envsys_data_t *);
110 #endif /* NBIO > 0 */
111
112 static uint32_t mfi_xscale_fw_state(struct mfi_softc *sc);
113 static void mfi_xscale_intr_ena(struct mfi_softc *sc);
114 static void mfi_xscale_intr_dis(struct mfi_softc *sc);
115 static int mfi_xscale_intr(struct mfi_softc *sc);
116 static void mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
117
118 static const struct mfi_iop_ops mfi_iop_xscale = {
119 mfi_xscale_fw_state,
120 mfi_xscale_intr_dis,
121 mfi_xscale_intr_ena,
122 mfi_xscale_intr,
123 mfi_xscale_post
124 };
125
126 static uint32_t mfi_ppc_fw_state(struct mfi_softc *sc);
127 static void mfi_ppc_intr_ena(struct mfi_softc *sc);
128 static void mfi_ppc_intr_dis(struct mfi_softc *sc);
129 static int mfi_ppc_intr(struct mfi_softc *sc);
130 static void mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
131
132 static const struct mfi_iop_ops mfi_iop_ppc = {
133 mfi_ppc_fw_state,
134 mfi_ppc_intr_dis,
135 mfi_ppc_intr_ena,
136 mfi_ppc_intr,
137 mfi_ppc_post
138 };
139
140 uint32_t mfi_gen2_fw_state(struct mfi_softc *sc);
141 void mfi_gen2_intr_ena(struct mfi_softc *sc);
142 void mfi_gen2_intr_dis(struct mfi_softc *sc);
143 int mfi_gen2_intr(struct mfi_softc *sc);
144 void mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
145
146 static const struct mfi_iop_ops mfi_iop_gen2 = {
147 mfi_gen2_fw_state,
148 mfi_gen2_intr_dis,
149 mfi_gen2_intr_ena,
150 mfi_gen2_intr,
151 mfi_gen2_post
152 };
153
154 u_int32_t mfi_skinny_fw_state(struct mfi_softc *);
155 void mfi_skinny_intr_dis(struct mfi_softc *);
156 void mfi_skinny_intr_ena(struct mfi_softc *);
157 int mfi_skinny_intr(struct mfi_softc *);
158 void mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);
159
160 static const struct mfi_iop_ops mfi_iop_skinny = {
161 mfi_skinny_fw_state,
162 mfi_skinny_intr_dis,
163 mfi_skinny_intr_ena,
164 mfi_skinny_intr,
165 mfi_skinny_post
166 };
167
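/* dispatch helpers that call through to the per-IOP register access routines */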
168 #define mfi_fw_state(_s) ((_s)->sc_iop->mio_fw_state(_s))
169 #define mfi_intr_enable(_s) ((_s)->sc_iop->mio_intr_ena(_s))
170 #define mfi_intr_disable(_s) ((_s)->sc_iop->mio_intr_dis(_s))
171 #define mfi_my_intr(_s) ((_s)->sc_iop->mio_intr(_s))
172 #define mfi_post(_s, _c) ((_s)->sc_iop->mio_post((_s), (_c)))
173
174 static struct mfi_ccb *
175 mfi_get_ccb(struct mfi_softc *sc)
176 {
177 struct mfi_ccb *ccb;
178 int s;
179
180 s = splbio();
181 ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
182 if (ccb) {
183 TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
184 ccb->ccb_state = MFI_CCB_READY;
185 }
186 splx(s);
187
188 DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
189
190 return ccb;
191 }
192
193 static void
194 mfi_put_ccb(struct mfi_ccb *ccb)
195 {
196 struct mfi_softc *sc = ccb->ccb_sc;
197 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
198 int s;
199
200 DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
201
202 hdr->mfh_cmd_status = 0x0;
203 hdr->mfh_flags = 0x0;
204 ccb->ccb_state = MFI_CCB_FREE;
205 ccb->ccb_xs = NULL;
206 ccb->ccb_flags = 0;
207 ccb->ccb_done = NULL;
208 ccb->ccb_direction = 0;
209 ccb->ccb_frame_size = 0;
210 ccb->ccb_extra_frames = 0;
211 ccb->ccb_sgl = NULL;
212 ccb->ccb_data = NULL;
213 ccb->ccb_len = 0;
214
215 s = splbio();
216 TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
217 splx(s);
218 }
219
220 static int
221 mfi_destroy_ccb(struct mfi_softc *sc)
222 {
223 struct mfi_ccb *ccb;
224 uint32_t i;
225
226 	DNPRINTF(MFI_D_CCB, "%s: mfi_destroy_ccb\n", DEVNAME(sc));
227
228
229 for (i = 0; (ccb = mfi_get_ccb(sc)) != NULL; i++) {
230 /* create a dma map for transfer */
231 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
232 }
233
234 if (i < sc->sc_max_cmds)
235 return EBUSY;
236
237 free(sc->sc_ccb, M_DEVBUF);
238
239 return 0;
240 }
241
242 static int
243 mfi_init_ccb(struct mfi_softc *sc)
244 {
245 struct mfi_ccb *ccb;
246 uint32_t i;
247 int error;
248
249 DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
250
251 sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
252 M_DEVBUF, M_WAITOK|M_ZERO);
253
254 for (i = 0; i < sc->sc_max_cmds; i++) {
255 ccb = &sc->sc_ccb[i];
256
257 ccb->ccb_sc = sc;
258
259 /* select i'th frame */
260 ccb->ccb_frame = (union mfi_frame *)
261 ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
262 ccb->ccb_pframe =
263 MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
264 ccb->ccb_frame->mfr_header.mfh_context = i;
265
266 /* select i'th sense */
267 ccb->ccb_sense = (struct mfi_sense *)
268 ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
269 ccb->ccb_psense =
270 (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
271
272 /* create a dma map for transfer */
273 error = bus_dmamap_create(sc->sc_dmat,
274 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
275 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
276 if (error) {
277 printf("%s: cannot create ccb dmamap (%d)\n",
278 DEVNAME(sc), error);
279 goto destroy;
280 }
281
282 DNPRINTF(MFI_D_CCB,
283 "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
284 ccb->ccb_frame->mfr_header.mfh_context, ccb,
285 (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
286 (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
287 (u_long)ccb->ccb_dmamap);
288
289 /* add ccb to queue */
290 mfi_put_ccb(ccb);
291 }
292
293 return 0;
294 destroy:
295 /* free dma maps and ccb memory */
296 while (i) {
297 i--;
298 ccb = &sc->sc_ccb[i];
299 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
300 }
301
302 free(sc->sc_ccb, M_DEVBUF);
303
304 return 1;
305 }
306
307 static uint32_t
308 mfi_read(struct mfi_softc *sc, bus_size_t r)
309 {
310 uint32_t rv;
311
312 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
313 BUS_SPACE_BARRIER_READ);
314 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
315
316 	DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x%08x ", DEVNAME(sc), (u_long)r, rv);
317 return rv;
318 }
319
320 static void
321 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
322 {
323 DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);
324
325 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
326 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
327 BUS_SPACE_BARRIER_WRITE);
328 }
329
330 static struct mfi_mem *
331 mfi_allocmem(struct mfi_softc *sc, size_t size)
332 {
333 struct mfi_mem *mm;
334 int nsegs;
335
336 DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
337 (long)size);
338
339 mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
340 if (mm == NULL)
341 return NULL;
342
343 mm->am_size = size;
344
345 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
346 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
347 goto amfree;
348
349 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
350 &nsegs, BUS_DMA_NOWAIT) != 0)
351 goto destroy;
352
353 if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
354 BUS_DMA_NOWAIT) != 0)
355 goto free;
356
357 if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
358 BUS_DMA_NOWAIT) != 0)
359 goto unmap;
360
361 DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n",
362 mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);
363
364 memset(mm->am_kva, 0, size);
365 return mm;
366
367 unmap:
368 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
369 free:
370 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
371 destroy:
372 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
373 amfree:
374 free(mm, M_DEVBUF);
375
376 return NULL;
377 }
378
379 static void
380 mfi_freemem(struct mfi_softc *sc, struct mfi_mem **mmp)
381 {
382 struct mfi_mem *mm = *mmp;
383
384 if (mm == NULL)
385 return;
386
387 *mmp = NULL;
388
389 DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
390
391 bus_dmamap_unload(sc->sc_dmat, mm->am_map);
392 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
393 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
394 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
395 free(mm, M_DEVBUF);
396 }
397
398 static int
399 mfi_transition_firmware(struct mfi_softc *sc)
400 {
401 uint32_t fw_state, cur_state;
402 int max_wait, i;
403
404 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
405
406 DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
407 fw_state);
408
409 while (fw_state != MFI_STATE_READY) {
410 DNPRINTF(MFI_D_MISC,
411 "%s: waiting for firmware to become ready\n",
412 DEVNAME(sc));
413 cur_state = fw_state;
414 switch (fw_state) {
415 case MFI_STATE_FAULT:
416 printf("%s: firmware fault\n", DEVNAME(sc));
417 return 1;
418 case MFI_STATE_WAIT_HANDSHAKE:
419 if (sc->sc_ioptype == MFI_IOP_SKINNY)
420 mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_CLEAR_HANDSHAKE);
421 else
422 mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
423 max_wait = 2;
424 break;
425 case MFI_STATE_OPERATIONAL:
426 if (sc->sc_ioptype == MFI_IOP_SKINNY)
427 mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
428 else
429 mfi_write(sc, MFI_IDB, MFI_INIT_READY);
430 max_wait = 10;
431 break;
432 case MFI_STATE_UNDEFINED:
433 case MFI_STATE_BB_INIT:
434 max_wait = 2;
435 break;
436 case MFI_STATE_FW_INIT:
437 case MFI_STATE_DEVICE_SCAN:
438 case MFI_STATE_FLUSH_CACHE:
439 max_wait = 20;
440 break;
441 default:
442 printf("%s: unknown firmware state %d\n",
443 DEVNAME(sc), fw_state);
444 return 1;
445 }
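		/* max_wait is in seconds; poll the firmware state every 100ms */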
446 for (i = 0; i < (max_wait * 10); i++) {
447 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
448 if (fw_state == cur_state)
449 DELAY(100000);
450 else
451 break;
452 }
453 if (fw_state == cur_state) {
454 printf("%s: firmware stuck in state %#x\n",
455 DEVNAME(sc), fw_state);
456 return 1;
457 }
458 }
459
460 return 0;
461 }
462
463 static int
464 mfi_initialize_firmware(struct mfi_softc *sc)
465 {
466 struct mfi_ccb *ccb;
467 struct mfi_init_frame *init;
468 struct mfi_init_qinfo *qinfo;
469
470 DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
471
472 if ((ccb = mfi_get_ccb(sc)) == NULL)
473 return 1;
474
475 init = &ccb->ccb_frame->mfr_init;
476 qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
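	/* the queue init info block occupies the frame slot right after the init frame */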
477
478 memset(qinfo, 0, sizeof *qinfo);
479 qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
480 qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
481 offsetof(struct mfi_prod_cons, mpc_reply_q));
482 qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
483 offsetof(struct mfi_prod_cons, mpc_producer));
484 qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
485 offsetof(struct mfi_prod_cons, mpc_consumer));
486
487 init->mif_header.mfh_cmd = MFI_CMD_INIT;
488 init->mif_header.mfh_data_len = sizeof *qinfo;
489 init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);
490
491 DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
492 DEVNAME(sc),
493 qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
494 qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);
495
496 if (mfi_poll(ccb)) {
497 printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc));
498 return 1;
499 }
500
501 mfi_put_ccb(ccb);
502
503 return 0;
504 }
505
506 static int
507 mfi_get_info(struct mfi_softc *sc)
508 {
509 #ifdef MFI_DEBUG
510 int i;
511 #endif
512 DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
513
514 if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
515 sizeof(sc->sc_info), &sc->sc_info, NULL))
516 return 1;
517
518 #ifdef MFI_DEBUG
519
520 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
521 printf("%s: active FW %s Version %s date %s time %s\n",
522 DEVNAME(sc),
523 sc->sc_info.mci_image_component[i].mic_name,
524 sc->sc_info.mci_image_component[i].mic_version,
525 sc->sc_info.mci_image_component[i].mic_build_date,
526 sc->sc_info.mci_image_component[i].mic_build_time);
527 }
528
529 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
530 printf("%s: pending FW %s Version %s date %s time %s\n",
531 DEVNAME(sc),
532 sc->sc_info.mci_pending_image_component[i].mic_name,
533 sc->sc_info.mci_pending_image_component[i].mic_version,
534 sc->sc_info.mci_pending_image_component[i].mic_build_date,
535 sc->sc_info.mci_pending_image_component[i].mic_build_time);
536 }
537
538 printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
539 DEVNAME(sc),
540 sc->sc_info.mci_max_arms,
541 sc->sc_info.mci_max_spans,
542 sc->sc_info.mci_max_arrays,
543 sc->sc_info.mci_max_lds,
544 sc->sc_info.mci_product_name);
545
546 printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
547 DEVNAME(sc),
548 sc->sc_info.mci_serial_number,
549 sc->sc_info.mci_hw_present,
550 sc->sc_info.mci_current_fw_time,
551 sc->sc_info.mci_max_cmds,
552 sc->sc_info.mci_max_sg_elements);
553
554 printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
555 DEVNAME(sc),
556 sc->sc_info.mci_max_request_size,
557 sc->sc_info.mci_lds_present,
558 sc->sc_info.mci_lds_degraded,
559 sc->sc_info.mci_lds_offline,
560 sc->sc_info.mci_pd_present);
561
562 printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
563 DEVNAME(sc),
564 sc->sc_info.mci_pd_disks_present,
565 sc->sc_info.mci_pd_disks_pred_failure,
566 sc->sc_info.mci_pd_disks_failed);
567
568 printf("%s: nvram %d mem %d flash %d\n",
569 DEVNAME(sc),
570 sc->sc_info.mci_nvram_size,
571 sc->sc_info.mci_memory_size,
572 sc->sc_info.mci_flash_size);
573
574 printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
575 DEVNAME(sc),
576 sc->sc_info.mci_ram_correctable_errors,
577 sc->sc_info.mci_ram_uncorrectable_errors,
578 sc->sc_info.mci_cluster_allowed,
579 sc->sc_info.mci_cluster_active);
580
581 printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
582 DEVNAME(sc),
583 sc->sc_info.mci_max_strips_per_io,
584 sc->sc_info.mci_raid_levels,
585 sc->sc_info.mci_adapter_ops,
586 sc->sc_info.mci_ld_ops);
587
588 printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
589 DEVNAME(sc),
590 sc->sc_info.mci_stripe_sz_ops.min,
591 sc->sc_info.mci_stripe_sz_ops.max,
592 sc->sc_info.mci_pd_ops,
593 sc->sc_info.mci_pd_mix_support);
594
595 printf("%s: ecc_bucket %d pckg_prop %s\n",
596 DEVNAME(sc),
597 sc->sc_info.mci_ecc_bucket_count,
598 sc->sc_info.mci_package_version);
599
600 printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
601 DEVNAME(sc),
602 sc->sc_info.mci_properties.mcp_seq_num,
603 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
604 sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
605 sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
606
607 printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
608 DEVNAME(sc),
609 sc->sc_info.mci_properties.mcp_rebuild_rate,
610 sc->sc_info.mci_properties.mcp_patrol_read_rate,
611 sc->sc_info.mci_properties.mcp_bgi_rate,
612 sc->sc_info.mci_properties.mcp_cc_rate);
613
614 printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
615 DEVNAME(sc),
616 sc->sc_info.mci_properties.mcp_recon_rate,
617 sc->sc_info.mci_properties.mcp_cache_flush_interval,
618 sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
619 sc->sc_info.mci_properties.mcp_spinup_delay,
620 sc->sc_info.mci_properties.mcp_cluster_enable);
621
622 printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
623 DEVNAME(sc),
624 sc->sc_info.mci_properties.mcp_coercion_mode,
625 sc->sc_info.mci_properties.mcp_alarm_enable,
626 sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
627 sc->sc_info.mci_properties.mcp_disable_battery_warn,
628 sc->sc_info.mci_properties.mcp_ecc_bucket_size);
629
630 printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
631 DEVNAME(sc),
632 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
633 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
634 sc->sc_info.mci_properties.mcp_expose_encl_devices);
635
636 printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
637 DEVNAME(sc),
638 sc->sc_info.mci_pci.mip_vendor,
639 sc->sc_info.mci_pci.mip_device,
640 sc->sc_info.mci_pci.mip_subvendor,
641 sc->sc_info.mci_pci.mip_subdevice);
642
643 printf("%s: type %#x port_count %d port_addr ",
644 DEVNAME(sc),
645 sc->sc_info.mci_host.mih_type,
646 sc->sc_info.mci_host.mih_port_count);
647
648 for (i = 0; i < 8; i++)
649 printf("%.0lx ", sc->sc_info.mci_host.mih_port_addr[i]);
650 printf("\n");
651
652 	printf("%s: type %#x port_count %d port_addr ",
653 DEVNAME(sc),
654 sc->sc_info.mci_device.mid_type,
655 sc->sc_info.mci_device.mid_port_count);
656
657 for (i = 0; i < 8; i++)
658 printf("%.0lx ", sc->sc_info.mci_device.mid_port_addr[i]);
659 printf("\n");
660 #endif /* MFI_DEBUG */
661
662 return 0;
663 }
664
665 static void
666 mfiminphys(struct buf *bp)
667 {
668 DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
669
670 /* XXX currently using MFI_MAXFER = MAXPHYS */
671 if (bp->b_bcount > MFI_MAXFER)
672 bp->b_bcount = MFI_MAXFER;
673 minphys(bp);
674 }
675
676 int
677 mfi_rescan(device_t self, const char *ifattr, const int *locators)
678 {
679 struct mfi_softc *sc = device_private(self);
680
681 if (sc->sc_child != NULL)
682 return 0;
683
684 sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
685 scsiprint, NULL);
686
687 return 0;
688 }
689
690 void
691 mfi_childdetached(device_t self, device_t child)
692 {
693 struct mfi_softc *sc = device_private(self);
694
695 KASSERT(self == sc->sc_dev);
696 KASSERT(child == sc->sc_child);
697
698 if (child == sc->sc_child)
699 sc->sc_child = NULL;
700 }
701
702 int
703 mfi_detach(struct mfi_softc *sc, int flags)
704 {
705 int error;
706
707 DNPRINTF(MFI_D_MISC, "%s: mfi_detach\n", DEVNAME(sc));
708
709 if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
710 return error;
711
712 #if NBIO > 0
713 mfi_destroy_sensors(sc);
714 bio_unregister(sc->sc_dev);
715 #endif /* NBIO > 0 */
716
717 mfi_intr_disable(sc);
718
719 /* TBD: shutdown firmware */
720
721 if ((error = mfi_destroy_ccb(sc)) != 0)
722 return error;
723
724 mfi_freemem(sc, &sc->sc_sense);
725
726 mfi_freemem(sc, &sc->sc_frames);
727
728 mfi_freemem(sc, &sc->sc_pcq);
729
730 return 0;
731 }
732
733 int
734 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
735 {
736 struct scsipi_adapter *adapt = &sc->sc_adapt;
737 struct scsipi_channel *chan = &sc->sc_chan;
738 uint32_t status, frames, max_sgl;
739 int i;
740
741 DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
742
743 sc->sc_ioptype = iop;
744
745 switch (iop) {
746 case MFI_IOP_XSCALE:
747 sc->sc_iop = &mfi_iop_xscale;
748 break;
749 case MFI_IOP_PPC:
750 sc->sc_iop = &mfi_iop_ppc;
751 break;
752 case MFI_IOP_GEN2:
753 sc->sc_iop = &mfi_iop_gen2;
754 break;
755 case MFI_IOP_SKINNY:
756 sc->sc_iop = &mfi_iop_skinny;
757 break;
758 default:
759 panic("%s: unknown iop %d", DEVNAME(sc), iop);
760 }
761
762 if (mfi_transition_firmware(sc))
763 return 1;
764
765 TAILQ_INIT(&sc->sc_ccb_freeq);
766
767 status = mfi_fw_state(sc);
768 sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
769 max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
770 if (sc->sc_64bit_dma) {
771 sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
772 sc->sc_sgl_size = sizeof(struct mfi_sg64);
773 sc->sc_sgl_flags = MFI_FRAME_SGL64;
774 } else {
775 sc->sc_max_sgl = max_sgl;
776 sc->sc_sgl_size = sizeof(struct mfi_sg32);
777 sc->sc_sgl_flags = MFI_FRAME_SGL32;
778 }
779 DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
780 DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);
781
782 /* consumer/producer and reply queue memory */
783 sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
784 sizeof(struct mfi_prod_cons));
785 if (sc->sc_pcq == NULL) {
786 aprint_error("%s: unable to allocate reply queue memory\n",
787 DEVNAME(sc));
788 goto nopcq;
789 }
790 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
791 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
792 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
793
794 /* frame memory */
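	/* worst-case SGL for one command, rounded up to whole frames, plus the command frame itself */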
795 frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) /
796 MFI_FRAME_SIZE + 1;
797 sc->sc_frames_size = frames * MFI_FRAME_SIZE;
798 sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
799 if (sc->sc_frames == NULL) {
800 aprint_error("%s: unable to allocate frame memory\n",
801 DEVNAME(sc));
802 goto noframe;
803 }
804 /* XXX hack, fix this */
805 if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
806 aprint_error("%s: improper frame alignment (%#llx) FIXME\n",
807 DEVNAME(sc), (long long int)MFIMEM_DVA(sc->sc_frames));
808 goto noframe;
809 }
810
811 /* sense memory */
812 sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
813 if (sc->sc_sense == NULL) {
814 aprint_error("%s: unable to allocate sense memory\n",
815 DEVNAME(sc));
816 goto nosense;
817 }
818
819 /* now that we have all memory bits go initialize ccbs */
820 if (mfi_init_ccb(sc)) {
821 aprint_error("%s: could not init ccb list\n", DEVNAME(sc));
822 goto noinit;
823 }
824
825 /* kickstart firmware with all addresses and pointers */
826 if (mfi_initialize_firmware(sc)) {
827 aprint_error("%s: could not initialize firmware\n",
828 DEVNAME(sc));
829 goto noinit;
830 }
831
832 if (mfi_get_info(sc)) {
833 aprint_error("%s: could not retrieve controller information\n",
834 DEVNAME(sc));
835 goto noinit;
836 }
837
838 aprint_normal("%s: logical drives %d, version %s, %dMB RAM\n",
839 DEVNAME(sc),
840 sc->sc_info.mci_lds_present,
841 sc->sc_info.mci_package_version,
842 sc->sc_info.mci_memory_size);
843
844 sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
845 sc->sc_max_ld = sc->sc_ld_cnt;
846 for (i = 0; i < sc->sc_ld_cnt; i++)
847 sc->sc_ld[i].ld_present = 1;
848
849 memset(adapt, 0, sizeof(*adapt));
850 adapt->adapt_dev = sc->sc_dev;
851 adapt->adapt_nchannels = 1;
852 if (sc->sc_ld_cnt)
853 adapt->adapt_openings = sc->sc_max_cmds / sc->sc_ld_cnt;
854 else
855 adapt->adapt_openings = sc->sc_max_cmds;
856 adapt->adapt_max_periph = adapt->adapt_openings;
857 adapt->adapt_request = mfi_scsipi_request;
858 adapt->adapt_minphys = mfiminphys;
859
860 memset(chan, 0, sizeof(*chan));
861 chan->chan_adapter = adapt;
862 chan->chan_bustype = &scsi_bustype;
863 chan->chan_channel = 0;
864 chan->chan_flags = 0;
865 chan->chan_nluns = 8;
866 chan->chan_ntargets = MFI_MAX_LD;
867 chan->chan_id = MFI_MAX_LD;
868
869 mfi_rescan(sc->sc_dev, "scsi", NULL);
870
871 /* enable interrupts */
872 mfi_intr_enable(sc);
873
874 #if NBIO > 0
875 if (bio_register(sc->sc_dev, mfi_ioctl) != 0)
876 panic("%s: controller registration failed", DEVNAME(sc));
877 if (mfi_create_sensors(sc) != 0)
878 aprint_error("%s: unable to create sensors\n", DEVNAME(sc));
879 #endif /* NBIO > 0 */
880
881 return 0;
882 noinit:
883 mfi_freemem(sc, &sc->sc_sense);
884 nosense:
885 mfi_freemem(sc, &sc->sc_frames);
886 noframe:
887 mfi_freemem(sc, &sc->sc_pcq);
888 nopcq:
889 return 1;
890 }
891
892 static int
893 mfi_poll(struct mfi_ccb *ccb)
894 {
895 struct mfi_softc *sc = ccb->ccb_sc;
896 struct mfi_frame_header *hdr;
897 int to = 0;
898
899 DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));
900
901 hdr = &ccb->ccb_frame->mfr_header;
902 hdr->mfh_cmd_status = 0xff;
903 hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
904
905 mfi_post(sc, ccb);
906 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
907 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
908 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
909
910 while (hdr->mfh_cmd_status == 0xff) {
911 delay(1000);
912 if (to++ > 5000) /* XXX 5 seconds busywait sucks */
913 break;
914 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
915 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
916 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
917 }
918 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
919 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
920 sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
921
922 if (ccb->ccb_data != NULL) {
923 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
924 DEVNAME(sc));
925 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
926 ccb->ccb_dmamap->dm_mapsize,
927 (ccb->ccb_direction & MFI_DATA_IN) ?
928 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
929
930 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
931 }
932
933 if (hdr->mfh_cmd_status == 0xff) {
934 printf("%s: timeout on ccb %d\n", DEVNAME(sc),
935 hdr->mfh_context);
936 ccb->ccb_flags |= MFI_CCB_F_ERR;
937 return 1;
938 }
939
940 return 0;
941 }
942
943 int
944 mfi_intr(void *arg)
945 {
946 struct mfi_softc *sc = arg;
947 struct mfi_prod_cons *pcq;
948 struct mfi_ccb *ccb;
949 uint32_t producer, consumer, ctx;
950 int claimed = 0;
951
952 if (!mfi_my_intr(sc))
953 return 0;
954
955 pcq = MFIMEM_KVA(sc->sc_pcq);
956
957 DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
958 (u_long)sc, (u_long)pcq);
959
960 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
961 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
962 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
963
964 producer = pcq->mpc_producer;
965 consumer = pcq->mpc_consumer;
966
967 while (consumer != producer) {
968 DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
969 DEVNAME(sc), producer, consumer);
970
971 ctx = pcq->mpc_reply_q[consumer];
972 pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
973 if (ctx == MFI_INVALID_CTX)
974 printf("%s: invalid context, p: %d c: %d\n",
975 DEVNAME(sc), producer, consumer);
976 else {
977 /* XXX remove from queue and call scsi_done */
978 ccb = &sc->sc_ccb[ctx];
979 DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
980 DEVNAME(sc), ctx);
981 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
982 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
983 sc->sc_frames_size,
984 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
985 ccb->ccb_done(ccb);
986
987 claimed = 1;
988 }
989 consumer++;
990 if (consumer == (sc->sc_max_cmds + 1))
991 consumer = 0;
992 }
993
994 pcq->mpc_consumer = consumer;
995 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
996 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
997 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
998
999 return claimed;
1000 }
1001
1002 static int
1003 mfi_scsi_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint32_t blockno,
1004 uint32_t blockcnt)
1005 {
1006 struct scsipi_periph *periph = xs->xs_periph;
1007 struct mfi_io_frame *io;
1008
1009 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
1010 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
1011 periph->periph_target);
1012
1013 if (!xs->data)
1014 return 1;
1015
1016 io = &ccb->ccb_frame->mfr_io;
1017 if (xs->xs_control & XS_CTL_DATA_IN) {
1018 io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
1019 ccb->ccb_direction = MFI_DATA_IN;
1020 } else {
1021 io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
1022 ccb->ccb_direction = MFI_DATA_OUT;
1023 }
1024 io->mif_header.mfh_target_id = periph->periph_target;
1025 io->mif_header.mfh_timeout = 0;
1026 io->mif_header.mfh_flags = 0;
1027 io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
1028 	io->mif_header.mfh_data_len = blockcnt;
1029 io->mif_lba_hi = 0;
1030 io->mif_lba_lo = blockno;
1031 io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
1032 io->mif_sense_addr_hi = 0;
1033
1034 ccb->ccb_done = mfi_scsi_xs_done;
1035 ccb->ccb_xs = xs;
1036 ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
1037 ccb->ccb_sgl = &io->mif_sgl;
1038 ccb->ccb_data = xs->data;
1039 ccb->ccb_len = xs->datalen;
1040
1041 if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
1042 BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1043 return 1;
1044
1045 return 0;
1046 }
1047
1048 static void
1049 mfi_scsi_xs_done(struct mfi_ccb *ccb)
1050 {
1051 struct scsipi_xfer *xs = ccb->ccb_xs;
1052 struct mfi_softc *sc = ccb->ccb_sc;
1053 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1054
1055 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
1056 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1057
1058 if (xs->data != NULL) {
1059 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
1060 DEVNAME(sc));
1061 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1062 ccb->ccb_dmamap->dm_mapsize,
1063 (xs->xs_control & XS_CTL_DATA_IN) ?
1064 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1065
1066 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1067 }
1068
1069 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
1070 xs->error = XS_DRIVER_STUFFUP;
1071 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
1072 DEVNAME(sc), hdr->mfh_cmd_status);
1073
1074 if (hdr->mfh_scsi_status != 0) {
1075 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
1076 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
1077 MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
1078 DNPRINTF(MFI_D_INTR,
1079 "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
1080 DEVNAME(sc), hdr->mfh_scsi_status,
1081 (u_long)&xs->sense, (u_long)ccb->ccb_sense);
1082 memset(&xs->sense, 0, sizeof(xs->sense));
1083 memcpy(&xs->sense, ccb->ccb_sense,
1084 sizeof(struct scsi_sense_data));
1085 xs->error = XS_SENSE;
1086 }
1087 } else {
1088 xs->error = XS_NOERROR;
1089 xs->status = SCSI_OK;
1090 xs->resid = 0;
1091 }
1092
1093 mfi_put_ccb(ccb);
1094 scsipi_done(xs);
1095 }
1096
1097 static int
1098 mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
1099 {
1100 struct mfi_pass_frame *pf;
1101 struct scsipi_periph *periph = xs->xs_periph;
1102
1103 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
1104 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
1105 periph->periph_target);
1106
1107 pf = &ccb->ccb_frame->mfr_pass;
1108 pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
1109 pf->mpf_header.mfh_target_id = periph->periph_target;
1110 pf->mpf_header.mfh_lun_id = 0;
1111 pf->mpf_header.mfh_cdb_len = xs->cmdlen;
1112 pf->mpf_header.mfh_timeout = 0;
1113 	pf->mpf_header.mfh_data_len = xs->datalen; /* XXX */
1114 pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
1115
1116 pf->mpf_sense_addr_hi = 0;
1117 pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);
1118
1119 memset(pf->mpf_cdb, 0, 16);
1120 memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);
1121
1122 ccb->ccb_done = mfi_scsi_xs_done;
1123 ccb->ccb_xs = xs;
1124 ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
1125 ccb->ccb_sgl = &pf->mpf_sgl;
1126
1127 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
1128 ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
1129 MFI_DATA_IN : MFI_DATA_OUT;
1130 else
1131 ccb->ccb_direction = MFI_DATA_NONE;
1132
1133 if (xs->data) {
1134 ccb->ccb_data = xs->data;
1135 ccb->ccb_len = xs->datalen;
1136
1137 if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
1138 BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1139 return 1;
1140 }
1141
1142 return 0;
1143 }
1144
1145 static void
1146 mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1147 void *arg)
1148 {
1149 struct scsipi_periph *periph;
1150 struct scsipi_xfer *xs;
1151 struct scsipi_adapter *adapt = chan->chan_adapter;
1152 struct mfi_softc *sc = device_private(adapt->adapt_dev);
1153 struct mfi_ccb *ccb;
1154 struct scsi_rw_6 *rw;
1155 struct scsipi_rw_10 *rwb;
1156 uint32_t blockno, blockcnt;
1157 uint8_t target;
1158 uint8_t mbox[MFI_MBOX_SIZE];
1159 int s;
1160
1161 switch (req) {
1162 case ADAPTER_REQ_GROW_RESOURCES:
1163 /* Not supported. */
1164 return;
1165 case ADAPTER_REQ_SET_XFER_MODE:
1166 /* Not supported. */
1167 return;
1168 case ADAPTER_REQ_RUN_XFER:
1169 break;
1170 }
1171
1172 xs = arg;
1173
1174 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x\n",
1175 DEVNAME(sc), req, xs->cmd->opcode);
1176
1177 periph = xs->xs_periph;
1178 target = periph->periph_target;
1179
1180 s = splbio();
1181 if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
1182 periph->periph_lun != 0) {
1183 DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1184 DEVNAME(sc), target);
1185 xs->error = XS_SELTIMEOUT;
1186 scsipi_done(xs);
1187 splx(s);
1188 return;
1189 }
1190
1191 if ((ccb = mfi_get_ccb(sc)) == NULL) {
1192 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
1193 xs->error = XS_RESOURCE_SHORTAGE;
1194 scsipi_done(xs);
1195 splx(s);
1196 return;
1197 }
1198
1199 switch (xs->cmd->opcode) {
1200 /* IO path */
1201 case READ_10:
1202 case WRITE_10:
1203 rwb = (struct scsipi_rw_10 *)xs->cmd;
1204 blockno = _4btol(rwb->addr);
1205 blockcnt = _2btol(rwb->length);
1206 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1207 mfi_put_ccb(ccb);
1208 goto stuffup;
1209 }
1210 break;
1211
1212 case SCSI_READ_6_COMMAND:
1213 case SCSI_WRITE_6_COMMAND:
1214 rw = (struct scsi_rw_6 *)xs->cmd;
1215 blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
1216 blockcnt = rw->length ? rw->length : 0x100;
1217 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1218 mfi_put_ccb(ccb);
1219 goto stuffup;
1220 }
1221 break;
1222
1223 case SCSI_SYNCHRONIZE_CACHE_10:
1224 mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1225 if (mfi_mgmt(ccb, xs,
1226 MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, mbox)) {
1227 mfi_put_ccb(ccb);
1228 goto stuffup;
1229 }
1230 break;
1231
1232 	/* hand it off to the firmware and let it deal with it */
1233 case SCSI_TEST_UNIT_READY:
1234 /* save off sd? after autoconf */
1235 if (!cold) /* XXX bogus */
1236 strlcpy(sc->sc_ld[target].ld_dev, device_xname(sc->sc_dev),
1237 sizeof(sc->sc_ld[target].ld_dev));
1238 /* FALLTHROUGH */
1239
1240 default:
1241 if (mfi_scsi_ld(ccb, xs)) {
1242 mfi_put_ccb(ccb);
1243 goto stuffup;
1244 }
1245 break;
1246 }
1247
1248 DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1249
1250 if (xs->xs_control & XS_CTL_POLL) {
1251 if (mfi_poll(ccb)) {
1252 /* XXX check for sense in ccb->ccb_sense? */
1253 printf("%s: mfi_scsipi_request poll failed\n",
1254 DEVNAME(sc));
1255 memset(&xs->sense, 0, sizeof(xs->sense));
1256 xs->sense.scsi_sense.response_code =
1257 SSD_RCODE_VALID | SSD_RCODE_CURRENT;
1258 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
1259 xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
1260 xs->error = XS_SENSE;
1261 xs->status = SCSI_CHECK;
1262 } else {
1263 DNPRINTF(MFI_D_DMA,
1264 "%s: mfi_scsipi_request poll complete %d\n",
1265 DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
1266 xs->error = XS_NOERROR;
1267 xs->status = SCSI_OK;
1268 xs->resid = 0;
1269 }
1270 mfi_put_ccb(ccb);
1271 scsipi_done(xs);
1272 splx(s);
1273 return;
1274 }
1275
1276 mfi_post(sc, ccb);
1277
1278 DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
1279 ccb->ccb_dmamap->dm_nsegs);
1280
1281 splx(s);
1282 return;
1283
1284 stuffup:
1285 xs->error = XS_DRIVER_STUFFUP;
1286 scsipi_done(xs);
1287 splx(s);
1288 }
1289
1290 static int
1291 mfi_create_sgl(struct mfi_ccb *ccb, int flags)
1292 {
1293 struct mfi_softc *sc = ccb->ccb_sc;
1294 struct mfi_frame_header *hdr;
1295 bus_dma_segment_t *sgd;
1296 union mfi_sgl *sgl;
1297 int error, i;
1298
1299 DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
1300 (u_long)ccb->ccb_data);
1301
1302 if (!ccb->ccb_data)
1303 return 1;
1304
1305 error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
1306 ccb->ccb_data, ccb->ccb_len, NULL, flags);
1307 if (error) {
1308 if (error == EFBIG)
1309 printf("more than %d dma segs\n",
1310 sc->sc_max_sgl);
1311 else
1312 printf("error %d loading dma map\n", error);
1313 return 1;
1314 }
1315
1316 hdr = &ccb->ccb_frame->mfr_header;
1317 sgl = ccb->ccb_sgl;
1318 sgd = ccb->ccb_dmamap->dm_segs;
1319 for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1320 if (sc->sc_64bit_dma) {
1321 sgl->sg64[i].addr = htole64(sgd[i].ds_addr);
1322 sgl->sg64[i].len = htole32(sgd[i].ds_len);
1323 DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#"
1324 PRIx64 "\n",
1325 DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
1326 } else {
1327 sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1328 sgl->sg32[i].len = htole32(sgd[i].ds_len);
1329 DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
1330 DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1331 }
1332 }
1333
1334 if (ccb->ccb_direction == MFI_DATA_IN) {
1335 hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1336 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1337 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1338 } else {
1339 hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1340 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1341 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1342 }
1343
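	/* extra frames hold the SG entries that do not fit in the main command frame */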
1344 hdr->mfh_flags |= sc->sc_sgl_flags;
1345 hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1346 ccb->ccb_frame_size += sc->sc_sgl_size * ccb->ccb_dmamap->dm_nsegs;
1347 ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1348
1349 DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
1350 " dm_nsegs: %d extra_frames: %d\n",
1351 DEVNAME(sc),
1352 hdr->mfh_sg_count,
1353 ccb->ccb_frame_size,
1354 sc->sc_frames_size,
1355 ccb->ccb_dmamap->dm_nsegs,
1356 ccb->ccb_extra_frames);
1357
1358 return 0;
1359 }
1360
1361 static int
1362 mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t dir,
1363 uint32_t len, void *buf, uint8_t *mbox)
1364 {
1365 struct mfi_ccb *ccb;
1366 int rv = 1;
1367
1368 if ((ccb = mfi_get_ccb(sc)) == NULL)
1369 return rv;
1370 rv = mfi_mgmt(ccb, NULL, opc, dir, len, buf, mbox);
1371 if (rv)
1372 return rv;
1373
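	/* cannot sleep during autoconfiguration, so poll for completion */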
1374 if (cold) {
1375 if (mfi_poll(ccb))
1376 goto done;
1377 } else {
1378 mfi_post(sc, ccb);
1379
1380 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt_internal sleeping\n",
1381 DEVNAME(sc));
1382 while (ccb->ccb_state != MFI_CCB_DONE)
1383 tsleep(ccb, PRIBIO, "mfi_mgmt", 0);
1384
1385 if (ccb->ccb_flags & MFI_CCB_F_ERR)
1386 goto done;
1387 }
1388 rv = 0;
1389
1390 done:
1391 mfi_put_ccb(ccb);
1392 return rv;
1393 }
1394
1395 static int
1396 mfi_mgmt(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
1397 uint32_t opc, uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
1398 {
1399 struct mfi_dcmd_frame *dcmd;
1400
1401 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(ccb->ccb_sc), opc);
1402
1403 dcmd = &ccb->ccb_frame->mfr_dcmd;
1404 memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1405 dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1406 dcmd->mdf_header.mfh_timeout = 0;
1407
1408 dcmd->mdf_opcode = opc;
1409 dcmd->mdf_header.mfh_data_len = 0;
1410 ccb->ccb_direction = dir;
1411 ccb->ccb_xs = xs;
1412 ccb->ccb_done = mfi_mgmt_done;
1413
1414 ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1415
1416 /* handle special opcodes */
1417 if (mbox)
1418 memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1419
1420 if (dir != MFI_DATA_NONE) {
1421 dcmd->mdf_header.mfh_data_len = len;
1422 ccb->ccb_data = buf;
1423 ccb->ccb_len = len;
1424 ccb->ccb_sgl = &dcmd->mdf_sgl;
1425
1426 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
1427 return 1;
1428 }
1429 return 0;
1430 }
1431
1432 static void
1433 mfi_mgmt_done(struct mfi_ccb *ccb)
1434 {
1435 struct scsipi_xfer *xs = ccb->ccb_xs;
1436 struct mfi_softc *sc = ccb->ccb_sc;
1437 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1438
1439 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
1440 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1441
1442 if (ccb->ccb_data != NULL) {
1443 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
1444 DEVNAME(sc));
1445 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1446 ccb->ccb_dmamap->dm_mapsize,
1447 (ccb->ccb_direction & MFI_DATA_IN) ?
1448 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1449
1450 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1451 }
1452
1453 if (hdr->mfh_cmd_status != MFI_STAT_OK)
1454 ccb->ccb_flags |= MFI_CCB_F_ERR;
1455
1456 ccb->ccb_state = MFI_CCB_DONE;
1457 if (xs) {
1458 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
1459 xs->error = XS_DRIVER_STUFFUP;
1460 } else {
1461 xs->error = XS_NOERROR;
1462 xs->status = SCSI_OK;
1463 xs->resid = 0;
1464 }
1465 mfi_put_ccb(ccb);
1466 scsipi_done(xs);
1467 } else
1468 wakeup(ccb);
1469 }
1470
1471 #if NBIO > 0
1472 int
1473 mfi_ioctl(device_t dev, u_long cmd, void *addr)
1474 {
1475 struct mfi_softc *sc = device_private(dev);
1476 int error = 0;
1477 int s;
1478
1479 KERNEL_LOCK(1, curlwp);
1480 s = splbio();
1481
1482 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1483
1484 switch (cmd) {
1485 case BIOCINQ:
1486 DNPRINTF(MFI_D_IOCTL, "inq\n");
1487 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1488 break;
1489
1490 case BIOCVOL:
1491 DNPRINTF(MFI_D_IOCTL, "vol\n");
1492 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1493 break;
1494
1495 case BIOCDISK:
1496 DNPRINTF(MFI_D_IOCTL, "disk\n");
1497 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1498 break;
1499
1500 case BIOCALARM:
1501 DNPRINTF(MFI_D_IOCTL, "alarm\n");
1502 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1503 break;
1504
1505 case BIOCBLINK:
1506 DNPRINTF(MFI_D_IOCTL, "blink\n");
1507 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1508 break;
1509
1510 case BIOCSETSTATE:
1511 DNPRINTF(MFI_D_IOCTL, "setstate\n");
1512 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1513 break;
1514
1515 default:
1516 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1517 error = EINVAL;
1518 }
1519 splx(s);
1520 KERNEL_UNLOCK_ONE(curlwp);
1521
1522 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
1523 return error;
1524 }
1525
1526 static int
1527 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1528 {
1529 struct mfi_conf *cfg;
1530 int rv = EINVAL;
1531
1532 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1533
1534 if (mfi_get_info(sc)) {
1535 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
1536 DEVNAME(sc));
1537 return EIO;
1538 }
1539
1540 /* get figures */
1541 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1542 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1543 sizeof *cfg, cfg, NULL))
1544 goto freeme;
1545
1546 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1547 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1548 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1549
1550 rv = 0;
1551 freeme:
1552 free(cfg, M_DEVBUF);
1553 return rv;
1554 }
1555
1556 static int
1557 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
1558 {
1559 int i, per, rv = EINVAL;
1560 uint8_t mbox[MFI_MBOX_SIZE];
1561
1562 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
1563 DEVNAME(sc), bv->bv_volid);
1564
1565 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1566 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1567 goto done;
1568
1569 i = bv->bv_volid;
1570 mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1571 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
1572 DEVNAME(sc), mbox[0]);
1573
1574 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
1575 sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox))
1576 goto done;
1577
1578 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
1579 /* go do hotspares */
1580 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
1581 goto done;
1582 }
1583
1584 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
1585
1586 switch(sc->sc_ld_list.mll_list[i].mll_state) {
1587 case MFI_LD_OFFLINE:
1588 bv->bv_status = BIOC_SVOFFLINE;
1589 break;
1590
1591 case MFI_LD_PART_DEGRADED:
1592 case MFI_LD_DEGRADED:
1593 bv->bv_status = BIOC_SVDEGRADED;
1594 break;
1595
1596 case MFI_LD_ONLINE:
1597 bv->bv_status = BIOC_SVONLINE;
1598 break;
1599
1600 default:
1601 bv->bv_status = BIOC_SVINVALID;
1602 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
1603 DEVNAME(sc),
1604 sc->sc_ld_list.mll_list[i].mll_state);
1605 }
1606
1607 /* additional status can modify MFI status */
1608 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
1609 case MFI_LD_PROG_CC:
1610 case MFI_LD_PROG_BGI:
1611 bv->bv_status = BIOC_SVSCRUB;
1612 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
1613 bv->bv_percent = (per * 100) / 0xffff;
1614 bv->bv_seconds =
1615 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
1616 break;
1617
1618 case MFI_LD_PROG_FGI:
1619 case MFI_LD_PROG_RECONSTRUCT:
1620 /* nothing yet */
1621 break;
1622 }
1623
1624 /*
1625 	 * The RAID levels are determined per the SNIA DDF spec; this is only
1626 	 * the subset that is valid for the MFI controller.
1627 */
1628 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
1629 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
1630 MFI_DDF_SRL_SPANNED)
1631 bv->bv_level *= 10;
1632
1633 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
1634 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
1635
1636 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
1637
1638 rv = 0;
1639 done:
1640 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
1641 DEVNAME(sc), rv);
1642 return rv;
1643 }
1644
1645 static int
1646 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
1647 {
1648 struct mfi_conf *cfg;
1649 struct mfi_array *ar;
1650 struct mfi_ld_cfg *ld;
1651 struct mfi_pd_details *pd;
1652 struct scsipi_inquiry_data *inqbuf;
1653 char vend[8+16+4+1];
1654 int i, rv = EINVAL;
1655 int arr, vol, disk;
1656 uint32_t size;
1657 uint8_t mbox[MFI_MBOX_SIZE];
1658
1659 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
1660 DEVNAME(sc), bd->bd_diskid);
1661
1662 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1663
1664 /* send single element command to retrieve size for full structure */
1665 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1666 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1667 sizeof *cfg, cfg, NULL))
1668 goto freeme;
1669
1670 size = cfg->mfc_size;
1671 free(cfg, M_DEVBUF);
1672
1673 /* memory for read config */
1674 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1675 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1676 size, cfg, NULL))
1677 goto freeme;
1678
1679 ar = cfg->mfc_array;
1680
1681 /* calculate offset to ld structure */
1682 ld = (struct mfi_ld_cfg *)(
1683 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1684 cfg->mfc_array_size * cfg->mfc_no_array);
1685
1686 vol = bd->bd_volid;
1687
1688 if (vol >= cfg->mfc_no_ld) {
1689 /* do hotspares */
1690 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
1691 goto freeme;
1692 }
1693
1694 /* find corresponding array for ld */
1695 for (i = 0, arr = 0; i < vol; i++)
1696 arr += ld[i].mlc_parm.mpa_span_depth;
1697
1698 /* offset disk into pd list */
1699 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
1700
1701 /* offset array index into the next spans */
1702 arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
1703
1704 bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
1705 switch (ar[arr].pd[disk].mar_pd_state){
1706 case MFI_PD_UNCONFIG_GOOD:
1707 bd->bd_status = BIOC_SDUNUSED;
1708 break;
1709
1710 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
1711 bd->bd_status = BIOC_SDHOTSPARE;
1712 break;
1713
1714 case MFI_PD_OFFLINE:
1715 bd->bd_status = BIOC_SDOFFLINE;
1716 break;
1717
1718 case MFI_PD_FAILED:
1719 bd->bd_status = BIOC_SDFAILED;
1720 break;
1721
1722 case MFI_PD_REBUILD:
1723 bd->bd_status = BIOC_SDREBUILD;
1724 break;
1725
1726 case MFI_PD_ONLINE:
1727 bd->bd_status = BIOC_SDONLINE;
1728 break;
1729
1730 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
1731 default:
1732 bd->bd_status = BIOC_SDINVALID;
1733 break;
1734
1735 }
1736
1737 /* get the remaining fields */
1738 *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
1739 memset(pd, 0, sizeof(*pd));
1740 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1741 sizeof *pd, pd, mbox))
1742 goto freeme;
1743
1744 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
1745
1746 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
1747 bd->bd_channel = pd->mpd_enc_idx;
1748
1749 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1750 memcpy(vend, inqbuf->vendor, sizeof vend - 1);
1751 vend[sizeof vend - 1] = '\0';
1752 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
1753
1754 /* XXX find a way to retrieve serial nr from drive */
1755 /* XXX find a way to get bd_procdev */
1756
1757 rv = 0;
1758 freeme:
1759 free(pd, M_DEVBUF);
1760 free(cfg, M_DEVBUF);
1761
1762 return rv;
1763 }
1764
1765 static int
1766 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1767 {
1768 uint32_t opc, dir = MFI_DATA_NONE;
1769 int rv = 0;
1770 int8_t ret;
1771
1772 switch(ba->ba_opcode) {
1773 case BIOC_SADISABLE:
1774 opc = MR_DCMD_SPEAKER_DISABLE;
1775 break;
1776
1777 case BIOC_SAENABLE:
1778 opc = MR_DCMD_SPEAKER_ENABLE;
1779 break;
1780
1781 case BIOC_SASILENCE:
1782 opc = MR_DCMD_SPEAKER_SILENCE;
1783 break;
1784
1785 case BIOC_GASTATUS:
1786 opc = MR_DCMD_SPEAKER_GET;
1787 dir = MFI_DATA_IN;
1788 break;
1789
1790 case BIOC_SATEST:
1791 opc = MR_DCMD_SPEAKER_TEST;
1792 break;
1793
1794 default:
1795 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1796 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1797 return EINVAL;
1798 }
1799
1800 if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL))
1801 rv = EINVAL;
1802 else
1803 if (ba->ba_opcode == BIOC_GASTATUS)
1804 ba->ba_status = ret;
1805 else
1806 ba->ba_status = 0;
1807
1808 return rv;
1809 }
1810
1811 static int
1812 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1813 {
1814 int i, found, rv = EINVAL;
1815 uint8_t mbox[MFI_MBOX_SIZE];
1816 uint32_t cmd;
1817 struct mfi_pd_list *pd;
1818
1819 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1820 bb->bb_status);
1821
1822 /* channel 0 means not in an enclosure so can't be blinked */
1823 if (bb->bb_channel == 0)
1824 return EINVAL;
1825
1826 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1827
1828 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1829 MFI_PD_LIST_SIZE, pd, NULL))
1830 goto done;
1831
1832 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1833 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1834 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1835 found = 1;
1836 break;
1837 }
1838
1839 if (!found)
1840 goto done;
1841
1842 memset(mbox, 0, sizeof mbox);
1843
1844 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1845
1846 switch (bb->bb_status) {
1847 case BIOC_SBUNBLINK:
1848 cmd = MR_DCMD_PD_UNBLINK;
1849 break;
1850
1851 case BIOC_SBBLINK:
1852 cmd = MR_DCMD_PD_BLINK;
1853 break;
1854
1855 case BIOC_SBALARM:
1856 default:
1857 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1858 "opcode %x\n", DEVNAME(sc), bb->bb_status);
1859 goto done;
1860 }
1861
1862
1863 if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1864 goto done;
1865
1866 rv = 0;
1867 done:
1868 free(pd, M_DEVBUF);
1869 return rv;
1870 }
1871
1872 static int
1873 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
1874 {
1875 struct mfi_pd_list *pd;
1876 int i, found, rv = EINVAL;
1877 uint8_t mbox[MFI_MBOX_SIZE];
1878 uint32_t cmd;
1879
1880 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
1881 bs->bs_status);
1882
1883 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1884
1885 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1886 MFI_PD_LIST_SIZE, pd, NULL))
1887 goto done;
1888
1889 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1890 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
1891 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
1892 found = 1;
1893 break;
1894 }
1895
1896 if (!found)
1897 goto done;
1898
1899 memset(mbox, 0, sizeof mbox);
1900
1901 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1902
1903 switch (bs->bs_status) {
1904 case BIOC_SSONLINE:
1905 mbox[2] = MFI_PD_ONLINE;
1906 cmd = MD_DCMD_PD_SET_STATE;
1907 break;
1908
1909 case BIOC_SSOFFLINE:
1910 mbox[2] = MFI_PD_OFFLINE;
1911 cmd = MD_DCMD_PD_SET_STATE;
1912 break;
1913
1914 case BIOC_SSHOTSPARE:
1915 mbox[2] = MFI_PD_HOTSPARE;
1916 cmd = MD_DCMD_PD_SET_STATE;
1917 break;
1918 /*
1919 case BIOC_SSREBUILD:
1920 cmd = MD_DCMD_PD_REBUILD;
1921 break;
1922 */
1923 default:
1924 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
1925 "opcode %x\n", DEVNAME(sc), bs->bs_status);
1926 goto done;
1927 }
1928
1929
1930 	if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE,
1931 0, NULL, mbox))
1932 goto done;
1933
1934 rv = 0;
1935 done:
1936 free(pd, M_DEVBUF);
1937 return rv;
1938 }
1939
1940 static int
1941 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
1942 {
1943 struct mfi_conf *cfg;
1944 struct mfi_hotspare *hs;
1945 struct mfi_pd_details *pd;
1946 struct bioc_disk *sdhs;
1947 struct bioc_vol *vdhs;
1948 struct scsipi_inquiry_data *inqbuf;
1949 char vend[8+16+4+1];
1950 int i, rv = EINVAL;
1951 uint32_t size;
1952 uint8_t mbox[MFI_MBOX_SIZE];
1953
1954 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
1955
1956 if (!bio_hs)
1957 return EINVAL;
1958
1959 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1960
1961 /* send single element command to retrieve size for full structure */
1962 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1963 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1964 sizeof *cfg, cfg, NULL))
1965 goto freeme;
1966
1967 size = cfg->mfc_size;
1968 free(cfg, M_DEVBUF);
1969
1970 /* memory for read config */
1971 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1972 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1973 size, cfg, NULL))
1974 goto freeme;
1975
1976 /* calculate offset to hs structure */
1977 hs = (struct mfi_hotspare *)(
1978 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1979 cfg->mfc_array_size * cfg->mfc_no_array +
1980 cfg->mfc_ld_size * cfg->mfc_no_ld);
1981
1982 if (volid < cfg->mfc_no_ld)
1983 goto freeme; /* not a hotspare */
1984
1985 	if (volid >= (cfg->mfc_no_ld + cfg->mfc_no_hs))
1986 goto freeme; /* not a hotspare */
1987
1988 /* offset into hotspare structure */
1989 i = volid - cfg->mfc_no_ld;
1990
1991 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
1992 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
1993 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
1994
1995 /* get pd fields */
1996 memset(mbox, 0, sizeof mbox);
1997 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
1998 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1999 sizeof *pd, pd, mbox)) {
2000 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
2001 DEVNAME(sc));
2002 goto freeme;
2003 }
2004
2005 switch (type) {
2006 case MFI_MGMT_VD:
2007 vdhs = bio_hs;
2008 vdhs->bv_status = BIOC_SVONLINE;
2009 vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */
2010 vdhs->bv_level = -1; /* hotspare */
2011 vdhs->bv_nodisk = 1;
2012 break;
2013
2014 case MFI_MGMT_SD:
2015 sdhs = bio_hs;
2016 sdhs->bd_status = BIOC_SDHOTSPARE;
2017 sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */
2018 sdhs->bd_channel = pd->mpd_enc_idx;
2019 sdhs->bd_target = pd->mpd_enc_slot;
2020 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
2021 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
2022 vend[sizeof vend - 1] = '\0';
2023 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
2024 break;
2025
2026 default:
2027 goto freeme;
2028 }
2029
2030 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
2031 rv = 0;
2032 freeme:
2033 free(pd, M_DEVBUF);
2034 free(cfg, M_DEVBUF);
2035
2036 return rv;
2037 }
2038
2039 static int
2040 mfi_destroy_sensors(struct mfi_softc *sc)
2041 {
2042 if (sc->sc_sme == NULL)
2043 return 0;
2044 sysmon_envsys_unregister(sc->sc_sme);
2045 sc->sc_sme = NULL;
2046 free(sc->sc_sensor, M_DEVBUF);
2047 return 0;
2048 }
2049
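/*
 * mfi_create_sensors: register one ENVSYS_DRIVE sensor per logical
 * drive with sysmon_envsys(9); mfi_sensor_refresh() maps the bioctl
 * volume status onto the drive-state values reported to userland.
 */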
2050 static int
2051 mfi_create_sensors(struct mfi_softc *sc)
2052 {
2053 int i;
2054 int nsensors = sc->sc_ld_cnt;
2055 int rv;
2056
2057 	sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
2058 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2059 	if (sc->sc_sensor == NULL) {
2060 		aprint_error("%s: can't allocate envsys_data_t\n",
2061 		    DEVNAME(sc));
2062 		return ENOMEM;
2063 	}
2064 	sc->sc_sme = sysmon_envsys_create();
2065
2066 for (i = 0; i < nsensors; i++) {
2067 sc->sc_sensor[i].units = ENVSYS_DRIVE;
2068 sc->sc_sensor[i].state = ENVSYS_SINVALID;
2069 sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY;
2070 /* Enable monitoring for drive state changes */
2071 sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
2072 /* logical drives */
2073 snprintf(sc->sc_sensor[i].desc,
2074 sizeof(sc->sc_sensor[i].desc), "%s:%d",
2075 DEVNAME(sc), i);
2076 if (sysmon_envsys_sensor_attach(sc->sc_sme,
2077 &sc->sc_sensor[i]))
2078 goto out;
2079 }
2080
2081 sc->sc_sme->sme_name = DEVNAME(sc);
2082 sc->sc_sme->sme_cookie = sc;
2083 sc->sc_sme->sme_refresh = mfi_sensor_refresh;
2084 rv = sysmon_envsys_register(sc->sc_sme);
2085 if (rv != 0) {
2086 aprint_error("%s: unable to register with sysmon (rv = %d)\n",
2087 DEVNAME(sc), rv);
2088 goto out;
2089 }
2090 return 0;
2091
2092 out:
2093 free(sc->sc_sensor, M_DEVBUF);
2094 sysmon_envsys_destroy(sc->sc_sme);
2095 sc->sc_sme = NULL;
2096 return EINVAL;
2097 }
2098
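/*
 * mfi_sensor_refresh: envsys(4) refresh callback.  Reuse the bioctl
 * volume query under the kernel lock and splbio(), then map the
 * reported volume status onto a drive sensor state.
 */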
2099 static void
2100 mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
2101 {
2102 struct mfi_softc *sc = sme->sme_cookie;
2103 struct bioc_vol bv;
2104 int s;
2105 int error;
2106
2107 if (edata->sensor >= sc->sc_ld_cnt)
2108 return;
2109
2110 memset(&bv, 0, sizeof(bv));
2111 bv.bv_volid = edata->sensor;
2112 KERNEL_LOCK(1, curlwp);
2113 s = splbio();
2114 error = mfi_ioctl_vol(sc, &bv);
2115 splx(s);
2116 KERNEL_UNLOCK_ONE(curlwp);
2117 if (error)
2118 return;
2119
2120 switch(bv.bv_status) {
2121 case BIOC_SVOFFLINE:
2122 edata->value_cur = ENVSYS_DRIVE_FAIL;
2123 edata->state = ENVSYS_SCRITICAL;
2124 break;
2125
2126 case BIOC_SVDEGRADED:
2127 edata->value_cur = ENVSYS_DRIVE_PFAIL;
2128 edata->state = ENVSYS_SCRITICAL;
2129 break;
2130
2131 case BIOC_SVSCRUB:
2132 case BIOC_SVONLINE:
2133 edata->value_cur = ENVSYS_DRIVE_ONLINE;
2134 edata->state = ENVSYS_SVALID;
2135 break;
2136
2137 case BIOC_SVINVALID:
2138 		/* FALLTHROUGH */
2139 default:
2140 edata->value_cur = 0; /* unknown */
2141 edata->state = ENVSYS_SINVALID;
2142 }
2143 }
2144
2145 #endif /* NBIO > 0 */
2146
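/*
 * Controller-family specific register access: firmware state read,
 * interrupt enable/disable/acknowledge and frame posting.  The xscale,
 * ppc, gen2 and skinny variants differ in which status/doorbell
 * registers they use, in the interrupt valid bits and in how the frame
 * address is encoded when it is posted.
 */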
2147 static uint32_t
2148 mfi_xscale_fw_state(struct mfi_softc *sc)
2149 {
2150 return mfi_read(sc, MFI_OMSG0);
2151 }
2152
2153 static void
2154 mfi_xscale_intr_dis(struct mfi_softc *sc)
2155 {
2156 mfi_write(sc, MFI_OMSK, 0);
2157 }
2158
2159 static void
2160 mfi_xscale_intr_ena(struct mfi_softc *sc)
2161 {
2162 mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
2163 }
2164
2165 static int
2166 mfi_xscale_intr(struct mfi_softc *sc)
2167 {
2168 uint32_t status;
2169
2170 status = mfi_read(sc, MFI_OSTS);
2171 if (!ISSET(status, MFI_OSTS_INTR_VALID))
2172 return 0;
2173
2174 /* write status back to acknowledge interrupt */
2175 mfi_write(sc, MFI_OSTS, status);
2176 return 1;
2177 }
2178
2179 static void
2180 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2181 {
2182 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2183 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
2184 sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2185 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
2186 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
2187 MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);
2188
2189 mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
2190 ccb->ccb_extra_frames);
2191 }
2192
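/*
 * PPC-class controllers read the firmware state from MFI_OSP and
 * acknowledge interrupts by writing the status back to MFI_ODC rather
 * than MFI_OSTS.
 */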
2193 static uint32_t
2194 mfi_ppc_fw_state(struct mfi_softc *sc)
2195 {
2196 return mfi_read(sc, MFI_OSP);
2197 }
2198
2199 static void
2200 mfi_ppc_intr_dis(struct mfi_softc *sc)
2201 {
2202 /* Taking a wild guess --dyoung */
2203 mfi_write(sc, MFI_OMSK, ~(uint32_t)0x0);
2204 mfi_write(sc, MFI_ODC, 0xffffffff);
2205 }
2206
2207 static void
2208 mfi_ppc_intr_ena(struct mfi_softc *sc)
2209 {
2210 mfi_write(sc, MFI_ODC, 0xffffffff);
2211 mfi_write(sc, MFI_OMSK, ~0x80000004);
2212 }
2213
2214 static int
2215 mfi_ppc_intr(struct mfi_softc *sc)
2216 {
2217 uint32_t status;
2218
2219 status = mfi_read(sc, MFI_OSTS);
2220 if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2221 return 0;
2222
2223 /* write status back to acknowledge interrupt */
2224 mfi_write(sc, MFI_ODC, status);
2225 return 1;
2226 }
2227
2228 static void
2229 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2230 {
2231 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2232 (ccb->ccb_extra_frames << 1));
2233 }
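/*
 * Gen2 controllers follow the ppc model but signal pending interrupts
 * with a different valid bit (MFI_OSTS_GEN2_INTR_VALID).
 */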
2234
2235 u_int32_t
2236 mfi_gen2_fw_state(struct mfi_softc *sc)
2237 {
2238 return (mfi_read(sc, MFI_OSP));
2239 }
2240
2241 void
2242 mfi_gen2_intr_dis(struct mfi_softc *sc)
2243 {
2244 mfi_write(sc, MFI_OMSK, 0xffffffff);
2245 mfi_write(sc, MFI_ODC, 0xffffffff);
2246 }
2247
2248 void
2249 mfi_gen2_intr_ena(struct mfi_softc *sc)
2250 {
2251 mfi_write(sc, MFI_ODC, 0xffffffff);
2252 mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
2253 }
2254
2255 int
2256 mfi_gen2_intr(struct mfi_softc *sc)
2257 {
2258 u_int32_t status;
2259
2260 status = mfi_read(sc, MFI_OSTS);
2261 if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
2262 return (0);
2263
2264 /* write status back to acknowledge interrupt */
2265 mfi_write(sc, MFI_ODC, status);
2266
2267 return (1);
2268 }
2269
2270 void
2271 mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2272 {
2273 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2274 (ccb->ccb_extra_frames << 1));
2275 }
2276
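/*
 * Skinny controllers acknowledge interrupts through MFI_OSTS like
 * xscale, but post frames through the MFI_IQPL/MFI_IQPH pair (the high
 * word is always zero here).
 */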
2277 u_int32_t
2278 mfi_skinny_fw_state(struct mfi_softc *sc)
2279 {
2280 return (mfi_read(sc, MFI_OSP));
2281 }
2282
2283 void
2284 mfi_skinny_intr_dis(struct mfi_softc *sc)
2285 {
2286 mfi_write(sc, MFI_OMSK, 0);
2287 }
2288
2289 void
2290 mfi_skinny_intr_ena(struct mfi_softc *sc)
2291 {
2292 mfi_write(sc, MFI_OMSK, ~0x00000001);
2293 }
2294
2295 int
2296 mfi_skinny_intr(struct mfi_softc *sc)
2297 {
2298 u_int32_t status;
2299
2300 status = mfi_read(sc, MFI_OSTS);
2301 if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
2302 return (0);
2303
2304 /* write status back to acknowledge interrupt */
2305 mfi_write(sc, MFI_OSTS, status);
2306
2307 return (1);
2308 }
2309
2310 void
2311 mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2312 {
2313 mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
2314 (ccb->ccb_extra_frames << 1));
2315 mfi_write(sc, MFI_IQPH, 0x00000000);
2316 }
2317