1 /*	$NetBSD: mfi.c,v 1.39 2012/08/05 14:54:01 bouyer Exp $ */
2 /* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
3 /*
4 * Copyright (c) 2006 Marco Peereboom <marco (at) peereboom.us>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include <sys/cdefs.h>
20 __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.39 2012/08/05 14:54:01 bouyer Exp $");
21
22 #include "bio.h"
23
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/buf.h>
27 #include <sys/ioctl.h>
28 #include <sys/device.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/proc.h>
32
33 #include <uvm/uvm_param.h>
34
35 #include <sys/bus.h>
36
37 #include <dev/scsipi/scsipi_all.h>
38 #include <dev/scsipi/scsi_all.h>
39 #include <dev/scsipi/scsi_spc.h>
40 #include <dev/scsipi/scsipi_disk.h>
41 #include <dev/scsipi/scsi_disk.h>
42 #include <dev/scsipi/scsiconf.h>
43
44 #include <dev/ic/mfireg.h>
45 #include <dev/ic/mfivar.h>
46
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #endif /* NBIO > 0 */
50
51 #ifdef MFI_DEBUG
52 uint32_t mfi_debug = 0
53 /* | MFI_D_CMD */
54 /* | MFI_D_INTR */
55 /* | MFI_D_MISC */
56 /* | MFI_D_DMA */
57 | MFI_D_IOCTL
58 /* | MFI_D_RW */
59 /* | MFI_D_MEM */
60 /* | MFI_D_CCB */
61 ;
62 #endif
63
64 static void mfi_scsipi_request(struct scsipi_channel *,
65 scsipi_adapter_req_t, void *);
66 static void mfiminphys(struct buf *bp);
67
68 static struct mfi_ccb *mfi_get_ccb(struct mfi_softc *);
69 static void mfi_put_ccb(struct mfi_ccb *);
70 static int mfi_init_ccb(struct mfi_softc *);
71
72 static struct mfi_mem *mfi_allocmem(struct mfi_softc *, size_t);
73 static void mfi_freemem(struct mfi_softc *, struct mfi_mem **);
74
75 static int mfi_transition_firmware(struct mfi_softc *);
76 static int mfi_initialize_firmware(struct mfi_softc *);
77 static int mfi_get_info(struct mfi_softc *);
78 static uint32_t mfi_read(struct mfi_softc *, bus_size_t);
79 static void mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
80 static int mfi_poll(struct mfi_ccb *);
81 static int mfi_create_sgl(struct mfi_ccb *, int);
82
83 /* commands */
84 static int mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
85 static int mfi_scsi_io(struct mfi_ccb *, struct scsipi_xfer *,
86 uint32_t, uint32_t);
87 static void mfi_scsi_xs_done(struct mfi_ccb *);
88 static int mfi_mgmt_internal(struct mfi_softc *,
89 uint32_t, uint32_t, uint32_t, void *, uint8_t *);
90 static int mfi_mgmt(struct mfi_ccb *,struct scsipi_xfer *,
91 uint32_t, uint32_t, uint32_t, void *, uint8_t *);
92 static void mfi_mgmt_done(struct mfi_ccb *);
93
94 #if NBIO > 0
95 static int mfi_ioctl(device_t, u_long, void *);
96 static int mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
97 static int mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
98 static int mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
99 static int mfi_ioctl_alarm(struct mfi_softc *,
100 struct bioc_alarm *);
101 static int mfi_ioctl_blink(struct mfi_softc *sc,
102 struct bioc_blink *);
103 static int mfi_ioctl_setstate(struct mfi_softc *,
104 struct bioc_setstate *);
105 static int mfi_bio_hs(struct mfi_softc *, int, int, void *);
106 static int mfi_create_sensors(struct mfi_softc *);
107 static int mfi_destroy_sensors(struct mfi_softc *);
108 static void mfi_sensor_refresh(struct sysmon_envsys *,
109 envsys_data_t *);
110 #endif /* NBIO > 0 */
111
112 static uint32_t mfi_xscale_fw_state(struct mfi_softc *sc);
113 static void mfi_xscale_intr_ena(struct mfi_softc *sc);
114 static void mfi_xscale_intr_dis(struct mfi_softc *sc);
115 static int mfi_xscale_intr(struct mfi_softc *sc);
116 static void mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
117
118 static const struct mfi_iop_ops mfi_iop_xscale = {
119 mfi_xscale_fw_state,
120 mfi_xscale_intr_dis,
121 mfi_xscale_intr_ena,
122 mfi_xscale_intr,
123 mfi_xscale_post
124 };
125
126 static uint32_t mfi_ppc_fw_state(struct mfi_softc *sc);
127 static void mfi_ppc_intr_ena(struct mfi_softc *sc);
128 static void mfi_ppc_intr_dis(struct mfi_softc *sc);
129 static int mfi_ppc_intr(struct mfi_softc *sc);
130 static void mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
131
132 static const struct mfi_iop_ops mfi_iop_ppc = {
133 mfi_ppc_fw_state,
134 mfi_ppc_intr_dis,
135 mfi_ppc_intr_ena,
136 mfi_ppc_intr,
137 mfi_ppc_post
138 };
139
140 uint32_t mfi_gen2_fw_state(struct mfi_softc *sc);
141 void mfi_gen2_intr_ena(struct mfi_softc *sc);
142 void mfi_gen2_intr_dis(struct mfi_softc *sc);
143 int mfi_gen2_intr(struct mfi_softc *sc);
144 void mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
145
146 static const struct mfi_iop_ops mfi_iop_gen2 = {
147 mfi_gen2_fw_state,
148 mfi_gen2_intr_dis,
149 mfi_gen2_intr_ena,
150 mfi_gen2_intr,
151 mfi_gen2_post
152 };
153
154 u_int32_t mfi_skinny_fw_state(struct mfi_softc *);
155 void mfi_skinny_intr_dis(struct mfi_softc *);
156 void mfi_skinny_intr_ena(struct mfi_softc *);
157 int mfi_skinny_intr(struct mfi_softc *);
158 void mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);
159
160 static const struct mfi_iop_ops mfi_iop_skinny = {
161 mfi_skinny_fw_state,
162 mfi_skinny_intr_dis,
163 mfi_skinny_intr_ena,
164 mfi_skinny_intr,
165 mfi_skinny_post
166 };
167
168 #define mfi_fw_state(_s) ((_s)->sc_iop->mio_fw_state(_s))
169 #define mfi_intr_enable(_s) ((_s)->sc_iop->mio_intr_ena(_s))
170 #define mfi_intr_disable(_s) ((_s)->sc_iop->mio_intr_dis(_s))
171 #define mfi_my_intr(_s) ((_s)->sc_iop->mio_intr(_s))
172 #define mfi_post(_s, _c) ((_s)->sc_iop->mio_post((_s), (_c)))
173
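/*
 * ccb management: commands are pre-allocated at attach time and kept
 * on a free list.  mfi_get_ccb() pops one at splbio and marks it
 * ready; mfi_put_ccb() clears the per-command state and returns it.
 */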
174 static struct mfi_ccb *
175 mfi_get_ccb(struct mfi_softc *sc)
176 {
177 struct mfi_ccb *ccb;
178 int s;
179
180 s = splbio();
181 ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
182 if (ccb) {
183 TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
184 ccb->ccb_state = MFI_CCB_READY;
185 }
186 splx(s);
187
188 DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
189
190 return ccb;
191 }
192
193 static void
194 mfi_put_ccb(struct mfi_ccb *ccb)
195 {
196 struct mfi_softc *sc = ccb->ccb_sc;
197 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
198 int s;
199
200 DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
201
202 hdr->mfh_cmd_status = 0x0;
203 hdr->mfh_flags = 0x0;
204 ccb->ccb_state = MFI_CCB_FREE;
205 ccb->ccb_xs = NULL;
206 ccb->ccb_flags = 0;
207 ccb->ccb_done = NULL;
208 ccb->ccb_direction = 0;
209 ccb->ccb_frame_size = 0;
210 ccb->ccb_extra_frames = 0;
211 ccb->ccb_sgl = NULL;
212 ccb->ccb_data = NULL;
213 ccb->ccb_len = 0;
214
215 s = splbio();
216 TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
217 splx(s);
218 }
219
220 static int
221 mfi_destroy_ccb(struct mfi_softc *sc)
222 {
223 struct mfi_ccb *ccb;
224 uint32_t i;
225
226 	DNPRINTF(MFI_D_CCB, "%s: mfi_destroy_ccb\n", DEVNAME(sc));
227
228
229 for (i = 0; (ccb = mfi_get_ccb(sc)) != NULL; i++) {
230 		/* destroy the dma map used for transfers */
231 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
232 }
233
234 if (i < sc->sc_max_cmds)
235 return EBUSY;
236
237 free(sc->sc_ccb, M_DEVBUF);
238
239 return 0;
240 }
241
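/*
 * Carve the pre-allocated frame and sense pools into per-command
 * slices, create a DMA map for each command and put every ccb on the
 * free list.  The frame header's context field holds the ccb index so
 * completions can be matched up in mfi_intr().
 */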
242 static int
243 mfi_init_ccb(struct mfi_softc *sc)
244 {
245 struct mfi_ccb *ccb;
246 uint32_t i;
247 int error;
248
249 DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
250
251 sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
252 M_DEVBUF, M_WAITOK|M_ZERO);
253
254 for (i = 0; i < sc->sc_max_cmds; i++) {
255 ccb = &sc->sc_ccb[i];
256
257 ccb->ccb_sc = sc;
258
259 /* select i'th frame */
260 ccb->ccb_frame = (union mfi_frame *)
261 ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
262 ccb->ccb_pframe =
263 MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
264 ccb->ccb_frame->mfr_header.mfh_context = i;
265
266 /* select i'th sense */
267 ccb->ccb_sense = (struct mfi_sense *)
268 ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
269 ccb->ccb_psense =
270 (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
271
272 /* create a dma map for transfer */
273 error = bus_dmamap_create(sc->sc_dmat,
274 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
275 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
276 if (error) {
277 printf("%s: cannot create ccb dmamap (%d)\n",
278 DEVNAME(sc), error);
279 goto destroy;
280 }
281
282 DNPRINTF(MFI_D_CCB,
283 "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
284 ccb->ccb_frame->mfr_header.mfh_context, ccb,
285 (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
286 (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
287 (u_long)ccb->ccb_dmamap);
288
289 /* add ccb to queue */
290 mfi_put_ccb(ccb);
291 }
292
293 return 0;
294 destroy:
295 /* free dma maps and ccb memory */
296 while (i) {
297 i--;
298 ccb = &sc->sc_ccb[i];
299 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
300 }
301
302 free(sc->sc_ccb, M_DEVBUF);
303
304 return 1;
305 }
306
307 static uint32_t
308 mfi_read(struct mfi_softc *sc, bus_size_t r)
309 {
310 uint32_t rv;
311
312 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
313 BUS_SPACE_BARRIER_READ);
314 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
315
316 	DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x%08x ", DEVNAME(sc), (u_long)r, rv);
317 return rv;
318 }
319
320 static void
321 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
322 {
323 DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);
324
325 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
326 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
327 BUS_SPACE_BARRIER_WRITE);
328 }
329
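/*
 * Allocate a physically contiguous, DMA-addressable buffer (map
 * creation, memory allocation, kva mapping and map load in one go) and
 * bundle the pieces in a struct mfi_mem so mfi_freemem() can undo them
 * in reverse order.
 */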
330 static struct mfi_mem *
331 mfi_allocmem(struct mfi_softc *sc, size_t size)
332 {
333 struct mfi_mem *mm;
334 int nsegs;
335
336 DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
337 (long)size);
338
339 mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
340 if (mm == NULL)
341 return NULL;
342
343 mm->am_size = size;
344
345 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
346 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
347 goto amfree;
348
349 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
350 &nsegs, BUS_DMA_NOWAIT) != 0)
351 goto destroy;
352
353 if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
354 BUS_DMA_NOWAIT) != 0)
355 goto free;
356
357 if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
358 BUS_DMA_NOWAIT) != 0)
359 goto unmap;
360
361 DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n",
362 mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);
363
364 memset(mm->am_kva, 0, size);
365 return mm;
366
367 unmap:
368 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
369 free:
370 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
371 destroy:
372 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
373 amfree:
374 free(mm, M_DEVBUF);
375
376 return NULL;
377 }
378
379 static void
380 mfi_freemem(struct mfi_softc *sc, struct mfi_mem **mmp)
381 {
382 struct mfi_mem *mm = *mmp;
383
384 if (mm == NULL)
385 return;
386
387 *mmp = NULL;
388
389 DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
390
391 bus_dmamap_unload(sc->sc_dmat, mm->am_map);
392 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
393 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
394 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
395 free(mm, M_DEVBUF);
396 }
397
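/*
 * Step the firmware through its boot states until it reports
 * MFI_STATE_READY.  Handshake and operational states are acknowledged
 * through the inbound doorbell (skinny IOPs use a separate doorbell
 * register) and every state gets a bounded busy-wait before the
 * controller is declared stuck.
 */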
398 static int
399 mfi_transition_firmware(struct mfi_softc *sc)
400 {
401 uint32_t fw_state, cur_state;
402 int max_wait, i;
403
404 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
405
406 DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
407 fw_state);
408
409 while (fw_state != MFI_STATE_READY) {
410 DNPRINTF(MFI_D_MISC,
411 "%s: waiting for firmware to become ready\n",
412 DEVNAME(sc));
413 cur_state = fw_state;
414 switch (fw_state) {
415 case MFI_STATE_FAULT:
416 printf("%s: firmware fault\n", DEVNAME(sc));
417 return 1;
418 case MFI_STATE_WAIT_HANDSHAKE:
419 if (sc->sc_flags & MFI_IOP_SKINNY)
420 mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_CLEAR_HANDSHAKE);
421 else
422 mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
423 max_wait = 2;
424 break;
425 case MFI_STATE_OPERATIONAL:
426 if (sc->sc_flags & MFI_IOP_SKINNY)
427 mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
428 else
429 mfi_write(sc, MFI_IDB, MFI_INIT_READY);
430 max_wait = 10;
431 break;
432 case MFI_STATE_UNDEFINED:
433 case MFI_STATE_BB_INIT:
434 max_wait = 2;
435 break;
436 case MFI_STATE_FW_INIT:
437 case MFI_STATE_DEVICE_SCAN:
438 case MFI_STATE_FLUSH_CACHE:
439 max_wait = 20;
440 break;
441 default:
442 printf("%s: unknown firmware state %d\n",
443 DEVNAME(sc), fw_state);
444 return 1;
445 }
446 for (i = 0; i < (max_wait * 10); i++) {
447 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
448 if (fw_state == cur_state)
449 DELAY(100000);
450 else
451 break;
452 }
453 if (fw_state == cur_state) {
454 printf("%s: firmware stuck in state %#x\n",
455 DEVNAME(sc), fw_state);
456 return 1;
457 }
458 }
459
460 return 0;
461 }
462
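/*
 * Send the INIT frame that hands the firmware the DVAs of the reply
 * queue and of the producer/consumer indices.  The queue description
 * is written directly behind the INIT frame within the same
 * pre-allocated frame slot, so no extra DMA memory is required.
 */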
463 static int
464 mfi_initialize_firmware(struct mfi_softc *sc)
465 {
466 struct mfi_ccb *ccb;
467 struct mfi_init_frame *init;
468 struct mfi_init_qinfo *qinfo;
469
470 DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
471
472 if ((ccb = mfi_get_ccb(sc)) == NULL)
473 return 1;
474
475 init = &ccb->ccb_frame->mfr_init;
476 qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
477
478 memset(qinfo, 0, sizeof *qinfo);
479 qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
480 qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
481 offsetof(struct mfi_prod_cons, mpc_reply_q));
482 qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
483 offsetof(struct mfi_prod_cons, mpc_producer));
484 qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
485 offsetof(struct mfi_prod_cons, mpc_consumer));
486
487 init->mif_header.mfh_cmd = MFI_CMD_INIT;
488 init->mif_header.mfh_data_len = sizeof *qinfo;
489 init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);
490
491 DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
492 DEVNAME(sc),
493 qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
494 qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);
495
496 if (mfi_poll(ccb)) {
497 printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc));
498 return 1;
499 }
500
501 mfi_put_ccb(ccb);
502
503 return 0;
504 }
505
506 static int
507 mfi_get_info(struct mfi_softc *sc)
508 {
509 #ifdef MFI_DEBUG
510 int i;
511 #endif
512 DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
513
514 if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
515 sizeof(sc->sc_info), &sc->sc_info, NULL))
516 return 1;
517
518 #ifdef MFI_DEBUG
519
520 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
521 printf("%s: active FW %s Version %s date %s time %s\n",
522 DEVNAME(sc),
523 sc->sc_info.mci_image_component[i].mic_name,
524 sc->sc_info.mci_image_component[i].mic_version,
525 sc->sc_info.mci_image_component[i].mic_build_date,
526 sc->sc_info.mci_image_component[i].mic_build_time);
527 }
528
529 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
530 printf("%s: pending FW %s Version %s date %s time %s\n",
531 DEVNAME(sc),
532 sc->sc_info.mci_pending_image_component[i].mic_name,
533 sc->sc_info.mci_pending_image_component[i].mic_version,
534 sc->sc_info.mci_pending_image_component[i].mic_build_date,
535 sc->sc_info.mci_pending_image_component[i].mic_build_time);
536 }
537
538 printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
539 DEVNAME(sc),
540 sc->sc_info.mci_max_arms,
541 sc->sc_info.mci_max_spans,
542 sc->sc_info.mci_max_arrays,
543 sc->sc_info.mci_max_lds,
544 sc->sc_info.mci_product_name);
545
546 printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
547 DEVNAME(sc),
548 sc->sc_info.mci_serial_number,
549 sc->sc_info.mci_hw_present,
550 sc->sc_info.mci_current_fw_time,
551 sc->sc_info.mci_max_cmds,
552 sc->sc_info.mci_max_sg_elements);
553
554 printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
555 DEVNAME(sc),
556 sc->sc_info.mci_max_request_size,
557 sc->sc_info.mci_lds_present,
558 sc->sc_info.mci_lds_degraded,
559 sc->sc_info.mci_lds_offline,
560 sc->sc_info.mci_pd_present);
561
562 printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
563 DEVNAME(sc),
564 sc->sc_info.mci_pd_disks_present,
565 sc->sc_info.mci_pd_disks_pred_failure,
566 sc->sc_info.mci_pd_disks_failed);
567
568 printf("%s: nvram %d mem %d flash %d\n",
569 DEVNAME(sc),
570 sc->sc_info.mci_nvram_size,
571 sc->sc_info.mci_memory_size,
572 sc->sc_info.mci_flash_size);
573
574 printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
575 DEVNAME(sc),
576 sc->sc_info.mci_ram_correctable_errors,
577 sc->sc_info.mci_ram_uncorrectable_errors,
578 sc->sc_info.mci_cluster_allowed,
579 sc->sc_info.mci_cluster_active);
580
581 printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
582 DEVNAME(sc),
583 sc->sc_info.mci_max_strips_per_io,
584 sc->sc_info.mci_raid_levels,
585 sc->sc_info.mci_adapter_ops,
586 sc->sc_info.mci_ld_ops);
587
588 printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
589 DEVNAME(sc),
590 sc->sc_info.mci_stripe_sz_ops.min,
591 sc->sc_info.mci_stripe_sz_ops.max,
592 sc->sc_info.mci_pd_ops,
593 sc->sc_info.mci_pd_mix_support);
594
595 printf("%s: ecc_bucket %d pckg_prop %s\n",
596 DEVNAME(sc),
597 sc->sc_info.mci_ecc_bucket_count,
598 sc->sc_info.mci_package_version);
599
600 printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
601 DEVNAME(sc),
602 sc->sc_info.mci_properties.mcp_seq_num,
603 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
604 sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
605 sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
606
607 printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
608 DEVNAME(sc),
609 sc->sc_info.mci_properties.mcp_rebuild_rate,
610 sc->sc_info.mci_properties.mcp_patrol_read_rate,
611 sc->sc_info.mci_properties.mcp_bgi_rate,
612 sc->sc_info.mci_properties.mcp_cc_rate);
613
614 printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
615 DEVNAME(sc),
616 sc->sc_info.mci_properties.mcp_recon_rate,
617 sc->sc_info.mci_properties.mcp_cache_flush_interval,
618 sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
619 sc->sc_info.mci_properties.mcp_spinup_delay,
620 sc->sc_info.mci_properties.mcp_cluster_enable);
621
622 printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
623 DEVNAME(sc),
624 sc->sc_info.mci_properties.mcp_coercion_mode,
625 sc->sc_info.mci_properties.mcp_alarm_enable,
626 sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
627 sc->sc_info.mci_properties.mcp_disable_battery_warn,
628 sc->sc_info.mci_properties.mcp_ecc_bucket_size);
629
630 printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
631 DEVNAME(sc),
632 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
633 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
634 sc->sc_info.mci_properties.mcp_expose_encl_devices);
635
636 printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
637 DEVNAME(sc),
638 sc->sc_info.mci_pci.mip_vendor,
639 sc->sc_info.mci_pci.mip_device,
640 sc->sc_info.mci_pci.mip_subvendor,
641 sc->sc_info.mci_pci.mip_subdevice);
642
643 printf("%s: type %#x port_count %d port_addr ",
644 DEVNAME(sc),
645 sc->sc_info.mci_host.mih_type,
646 sc->sc_info.mci_host.mih_port_count);
647
648 for (i = 0; i < 8; i++)
649 printf("%.0lx ", sc->sc_info.mci_host.mih_port_addr[i]);
650 printf("\n");
651
652 	printf("%s: type %#x port_count %d port_addr ",
653 DEVNAME(sc),
654 sc->sc_info.mci_device.mid_type,
655 sc->sc_info.mci_device.mid_port_count);
656
657 for (i = 0; i < 8; i++)
658 printf("%.0lx ", sc->sc_info.mci_device.mid_port_addr[i]);
659 printf("\n");
660 #endif /* MFI_DEBUG */
661
662 return 0;
663 }
664
665 static void
666 mfiminphys(struct buf *bp)
667 {
668 DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
669
670 /* XXX currently using MFI_MAXFER = MAXPHYS */
671 if (bp->b_bcount > MFI_MAXFER)
672 bp->b_bcount = MFI_MAXFER;
673 minphys(bp);
674 }
675
676 int
677 mfi_rescan(device_t self, const char *ifattr, const int *locators)
678 {
679 struct mfi_softc *sc = device_private(self);
680
681 if (sc->sc_child != NULL)
682 return 0;
683
684 sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
685 scsiprint, NULL);
686
687 return 0;
688 }
689
690 void
691 mfi_childdetached(device_t self, device_t child)
692 {
693 struct mfi_softc *sc = device_private(self);
694
695 KASSERT(self == sc->sc_dev);
696 KASSERT(child == sc->sc_child);
697
698 if (child == sc->sc_child)
699 sc->sc_child = NULL;
700 }
701
702 int
703 mfi_detach(struct mfi_softc *sc, int flags)
704 {
705 int error;
706
707 DNPRINTF(MFI_D_MISC, "%s: mfi_detach\n", DEVNAME(sc));
708
709 if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
710 return error;
711
712 #if NBIO > 0
713 mfi_destroy_sensors(sc);
714 bio_unregister(sc->sc_dev);
715 #endif /* NBIO > 0 */
716
717 mfi_intr_disable(sc);
718
719 /* TBD: shutdown firmware */
720
721 if ((error = mfi_destroy_ccb(sc)) != 0)
722 return error;
723
724 mfi_freemem(sc, &sc->sc_sense);
725
726 mfi_freemem(sc, &sc->sc_frames);
727
728 mfi_freemem(sc, &sc->sc_pcq);
729
730 return 0;
731 }
732
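/*
 * Common attach code for all bus front-ends: bring the firmware to the
 * ready state, derive the command and scatter/gather limits from the
 * firmware status register, allocate the reply queue, frame and sense
 * pools, initialize the ccbs and the firmware, and finally register a
 * scsipi channel whose targets correspond to the logical drives.
 */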
733 int
734 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
735 {
736 struct scsipi_adapter *adapt = &sc->sc_adapt;
737 struct scsipi_channel *chan = &sc->sc_chan;
738 uint32_t status, frames, max_sgl;
739 int i;
740
741 DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
742
743 switch (iop) {
744 case MFI_IOP_XSCALE:
745 sc->sc_iop = &mfi_iop_xscale;
746 break;
747 case MFI_IOP_PPC:
748 sc->sc_iop = &mfi_iop_ppc;
749 break;
750 case MFI_IOP_GEN2:
751 sc->sc_iop = &mfi_iop_gen2;
752 break;
753 case MFI_IOP_SKINNY:
754 sc->sc_iop = &mfi_iop_skinny;
755 break;
756 default:
757 panic("%s: unknown iop %d", DEVNAME(sc), iop);
758 }
759
760 if (mfi_transition_firmware(sc))
761 return 1;
762
763 TAILQ_INIT(&sc->sc_ccb_freeq);
764
765 status = mfi_fw_state(sc);
766 sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
767 max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
768 if (sc->sc_64bit_dma) {
769 sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
770 sc->sc_sgl_size = sizeof(struct mfi_sg64);
771 sc->sc_sgl_flags = MFI_FRAME_SGL64;
772 } else {
773 sc->sc_max_sgl = max_sgl;
774 sc->sc_sgl_size = sizeof(struct mfi_sg32);
775 sc->sc_sgl_flags = MFI_FRAME_SGL32;
776 }
777 DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
778 DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);
779
780 /* consumer/producer and reply queue memory */
781 sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
782 sizeof(struct mfi_prod_cons));
783 if (sc->sc_pcq == NULL) {
784 aprint_error("%s: unable to allocate reply queue memory\n",
785 DEVNAME(sc));
786 goto nopcq;
787 }
788 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
789 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
790 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
791
792 /* frame memory */
793 frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) /
794 MFI_FRAME_SIZE + 1;
795 sc->sc_frames_size = frames * MFI_FRAME_SIZE;
796 sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
797 if (sc->sc_frames == NULL) {
798 aprint_error("%s: unable to allocate frame memory\n",
799 DEVNAME(sc));
800 goto noframe;
801 }
802 /* XXX hack, fix this */
803 if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
804 aprint_error("%s: improper frame alignment (%#llx) FIXME\n",
805 DEVNAME(sc), (long long int)MFIMEM_DVA(sc->sc_frames));
806 goto noframe;
807 }
808
809 /* sense memory */
810 sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
811 if (sc->sc_sense == NULL) {
812 aprint_error("%s: unable to allocate sense memory\n",
813 DEVNAME(sc));
814 goto nosense;
815 }
816
817 /* now that we have all memory bits go initialize ccbs */
818 if (mfi_init_ccb(sc)) {
819 aprint_error("%s: could not init ccb list\n", DEVNAME(sc));
820 goto noinit;
821 }
822
823 /* kickstart firmware with all addresses and pointers */
824 if (mfi_initialize_firmware(sc)) {
825 aprint_error("%s: could not initialize firmware\n",
826 DEVNAME(sc));
827 goto noinit;
828 }
829
830 if (mfi_get_info(sc)) {
831 aprint_error("%s: could not retrieve controller information\n",
832 DEVNAME(sc));
833 goto noinit;
834 }
835
836 aprint_normal("%s: logical drives %d, version %s, %dMB RAM\n",
837 DEVNAME(sc),
838 sc->sc_info.mci_lds_present,
839 sc->sc_info.mci_package_version,
840 sc->sc_info.mci_memory_size);
841
842 sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
843 sc->sc_max_ld = sc->sc_ld_cnt;
844 for (i = 0; i < sc->sc_ld_cnt; i++)
845 sc->sc_ld[i].ld_present = 1;
846
847 memset(adapt, 0, sizeof(*adapt));
848 adapt->adapt_dev = sc->sc_dev;
849 adapt->adapt_nchannels = 1;
850 if (sc->sc_ld_cnt)
851 adapt->adapt_openings = sc->sc_max_cmds / sc->sc_ld_cnt;
852 else
853 adapt->adapt_openings = sc->sc_max_cmds;
854 adapt->adapt_max_periph = adapt->adapt_openings;
855 adapt->adapt_request = mfi_scsipi_request;
856 adapt->adapt_minphys = mfiminphys;
857
858 memset(chan, 0, sizeof(*chan));
859 chan->chan_adapter = adapt;
860 chan->chan_bustype = &scsi_bustype;
861 chan->chan_channel = 0;
862 chan->chan_flags = 0;
863 chan->chan_nluns = 8;
864 chan->chan_ntargets = MFI_MAX_LD;
865 chan->chan_id = MFI_MAX_LD;
866
867 mfi_rescan(sc->sc_dev, "scsi", NULL);
868
869 /* enable interrupts */
870 mfi_intr_enable(sc);
871
872 #if NBIO > 0
873 if (bio_register(sc->sc_dev, mfi_ioctl) != 0)
874 panic("%s: controller registration failed", DEVNAME(sc));
875 if (mfi_create_sensors(sc) != 0)
876 aprint_error("%s: unable to create sensors\n", DEVNAME(sc));
877 #endif /* NBIO > 0 */
878
879 return 0;
880 noinit:
881 mfi_freemem(sc, &sc->sc_sense);
882 nosense:
883 mfi_freemem(sc, &sc->sc_frames);
884 noframe:
885 mfi_freemem(sc, &sc->sc_pcq);
886 nopcq:
887 return 1;
888 }
889
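/*
 * Issue a command and busy-wait for completion.  The frame is flagged
 * so the firmware does not post it to the reply queue; instead the
 * command status byte in the frame header is watched directly.  Gives
 * up after roughly five seconds.
 */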
890 static int
891 mfi_poll(struct mfi_ccb *ccb)
892 {
893 struct mfi_softc *sc = ccb->ccb_sc;
894 struct mfi_frame_header *hdr;
895 int to = 0;
896
897 DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));
898
899 hdr = &ccb->ccb_frame->mfr_header;
900 hdr->mfh_cmd_status = 0xff;
901 hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
902
903 mfi_post(sc, ccb);
904 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
905 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
906 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
907
908 while (hdr->mfh_cmd_status == 0xff) {
909 delay(1000);
910 if (to++ > 5000) /* XXX 5 seconds busywait sucks */
911 break;
912 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
913 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
914 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
915 }
916 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
917 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
918 sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
919
920 if (ccb->ccb_data != NULL) {
921 		DNPRINTF(MFI_D_INTR, "%s: mfi_poll sync\n",
922 DEVNAME(sc));
923 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
924 ccb->ccb_dmamap->dm_mapsize,
925 (ccb->ccb_direction & MFI_DATA_IN) ?
926 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
927
928 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
929 }
930
931 if (hdr->mfh_cmd_status == 0xff) {
932 printf("%s: timeout on ccb %d\n", DEVNAME(sc),
933 hdr->mfh_context);
934 ccb->ccb_flags |= MFI_CCB_F_ERR;
935 return 1;
936 }
937
938 return 0;
939 }
940
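/*
 * Interrupt handler: drain the reply queue.  Each entry holds a
 * command context (an index into sc_ccb); consumed entries are
 * overwritten with MFI_INVALID_CTX and the ccb's completion callback
 * runs.  The consumer index is written back so the firmware can reuse
 * the slots.
 */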
941 int
942 mfi_intr(void *arg)
943 {
944 struct mfi_softc *sc = arg;
945 struct mfi_prod_cons *pcq;
946 struct mfi_ccb *ccb;
947 uint32_t producer, consumer, ctx;
948 int claimed = 0;
949
950 if (!mfi_my_intr(sc))
951 return 0;
952
953 pcq = MFIMEM_KVA(sc->sc_pcq);
954
955 DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
956 (u_long)sc, (u_long)pcq);
957
958 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
959 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
960 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
961
962 producer = pcq->mpc_producer;
963 consumer = pcq->mpc_consumer;
964
965 while (consumer != producer) {
966 DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
967 DEVNAME(sc), producer, consumer);
968
969 ctx = pcq->mpc_reply_q[consumer];
970 pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
971 if (ctx == MFI_INVALID_CTX)
972 printf("%s: invalid context, p: %d c: %d\n",
973 DEVNAME(sc), producer, consumer);
974 else {
975 /* XXX remove from queue and call scsi_done */
976 ccb = &sc->sc_ccb[ctx];
977 DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
978 DEVNAME(sc), ctx);
979 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
980 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
981 sc->sc_frames_size,
982 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
983 ccb->ccb_done(ccb);
984
985 claimed = 1;
986 }
987 consumer++;
988 if (consumer == (sc->sc_max_cmds + 1))
989 consumer = 0;
990 }
991
992 pcq->mpc_consumer = consumer;
993 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
994 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
995 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
996
997 return claimed;
998 }
999
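/*
 * Build an LD READ/WRITE frame for a block I/O request and attach a
 * scatter/gather list for the data buffer.
 */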
1000 static int
1001 mfi_scsi_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint32_t blockno,
1002 uint32_t blockcnt)
1003 {
1004 struct scsipi_periph *periph = xs->xs_periph;
1005 struct mfi_io_frame *io;
1006
1007 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
1008 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
1009 periph->periph_target);
1010
1011 if (!xs->data)
1012 return 1;
1013
1014 io = &ccb->ccb_frame->mfr_io;
1015 if (xs->xs_control & XS_CTL_DATA_IN) {
1016 io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
1017 ccb->ccb_direction = MFI_DATA_IN;
1018 } else {
1019 io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
1020 ccb->ccb_direction = MFI_DATA_OUT;
1021 }
1022 io->mif_header.mfh_target_id = periph->periph_target;
1023 io->mif_header.mfh_timeout = 0;
1024 io->mif_header.mfh_flags = 0;
1025 io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
1026 	io->mif_header.mfh_data_len = blockcnt;
1027 io->mif_lba_hi = 0;
1028 io->mif_lba_lo = blockno;
1029 io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
1030 io->mif_sense_addr_hi = 0;
1031
1032 ccb->ccb_done = mfi_scsi_xs_done;
1033 ccb->ccb_xs = xs;
1034 ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
1035 ccb->ccb_sgl = &io->mif_sgl;
1036 ccb->ccb_data = xs->data;
1037 ccb->ccb_len = xs->datalen;
1038
1039 if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
1040 BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1041 return 1;
1042
1043 return 0;
1044 }
1045
1046 static void
1047 mfi_scsi_xs_done(struct mfi_ccb *ccb)
1048 {
1049 struct scsipi_xfer *xs = ccb->ccb_xs;
1050 struct mfi_softc *sc = ccb->ccb_sc;
1051 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1052
1053 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
1054 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1055
1056 if (xs->data != NULL) {
1057 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
1058 DEVNAME(sc));
1059 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1060 ccb->ccb_dmamap->dm_mapsize,
1061 (xs->xs_control & XS_CTL_DATA_IN) ?
1062 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1063
1064 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1065 }
1066
1067 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
1068 xs->error = XS_DRIVER_STUFFUP;
1069 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
1070 DEVNAME(sc), hdr->mfh_cmd_status);
1071
1072 if (hdr->mfh_scsi_status != 0) {
1073 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
1074 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
1075 MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
1076 DNPRINTF(MFI_D_INTR,
1077 "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
1078 DEVNAME(sc), hdr->mfh_scsi_status,
1079 (u_long)&xs->sense, (u_long)ccb->ccb_sense);
1080 memset(&xs->sense, 0, sizeof(xs->sense));
1081 memcpy(&xs->sense, ccb->ccb_sense,
1082 sizeof(struct scsi_sense_data));
1083 xs->error = XS_SENSE;
1084 }
1085 } else {
1086 xs->error = XS_NOERROR;
1087 xs->status = SCSI_OK;
1088 xs->resid = 0;
1089 }
1090
1091 mfi_put_ccb(ccb);
1092 scsipi_done(xs);
1093 }
1094
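/*
 * Build an LD SCSI pass-through frame for non-I/O CDBs (INQUIRY, MODE
 * SENSE, ...).  The CDB is copied verbatim and the firmware executes
 * it against the logical drive.
 */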
1095 static int
1096 mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
1097 {
1098 struct mfi_pass_frame *pf;
1099 struct scsipi_periph *periph = xs->xs_periph;
1100
1101 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
1102 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
1103 periph->periph_target);
1104
1105 pf = &ccb->ccb_frame->mfr_pass;
1106 pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
1107 pf->mpf_header.mfh_target_id = periph->periph_target;
1108 pf->mpf_header.mfh_lun_id = 0;
1109 pf->mpf_header.mfh_cdb_len = xs->cmdlen;
1110 pf->mpf_header.mfh_timeout = 0;
1111 	pf->mpf_header.mfh_data_len = xs->datalen; /* XXX */
1112 pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
1113
1114 pf->mpf_sense_addr_hi = 0;
1115 pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);
1116
1117 memset(pf->mpf_cdb, 0, 16);
1118 memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);
1119
1120 ccb->ccb_done = mfi_scsi_xs_done;
1121 ccb->ccb_xs = xs;
1122 ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
1123 ccb->ccb_sgl = &pf->mpf_sgl;
1124
1125 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
1126 ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
1127 MFI_DATA_IN : MFI_DATA_OUT;
1128 else
1129 ccb->ccb_direction = MFI_DATA_NONE;
1130
1131 if (xs->data) {
1132 ccb->ccb_data = xs->data;
1133 ccb->ccb_len = xs->datalen;
1134
1135 if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
1136 BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1137 return 1;
1138 }
1139
1140 return 0;
1141 }
1142
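/*
 * scsipi entry point.  READ/WRITE CDBs take the fast I/O path,
 * SYNCHRONIZE CACHE is translated into a cache-flush DCMD and
 * everything else is passed through to the firmware.  Polled requests
 * are completed in place via mfi_poll().
 */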
1143 static void
1144 mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1145 void *arg)
1146 {
1147 struct scsipi_periph *periph;
1148 struct scsipi_xfer *xs;
1149 struct scsipi_adapter *adapt = chan->chan_adapter;
1150 struct mfi_softc *sc = device_private(adapt->adapt_dev);
1151 struct mfi_ccb *ccb;
1152 struct scsi_rw_6 *rw;
1153 struct scsipi_rw_10 *rwb;
1154 uint32_t blockno, blockcnt;
1155 uint8_t target;
1156 uint8_t mbox[MFI_MBOX_SIZE];
1157 int s;
1158
1159 switch (req) {
1160 case ADAPTER_REQ_GROW_RESOURCES:
1161 /* Not supported. */
1162 return;
1163 case ADAPTER_REQ_SET_XFER_MODE:
1164 /* Not supported. */
1165 return;
1166 case ADAPTER_REQ_RUN_XFER:
1167 break;
1168 }
1169
1170 xs = arg;
1171
1172 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x\n",
1173 DEVNAME(sc), req, xs->cmd->opcode);
1174
1175 periph = xs->xs_periph;
1176 target = periph->periph_target;
1177
1178 s = splbio();
1179 if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
1180 periph->periph_lun != 0) {
1181 DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1182 DEVNAME(sc), target);
1183 xs->error = XS_SELTIMEOUT;
1184 scsipi_done(xs);
1185 splx(s);
1186 return;
1187 }
1188
1189 if ((ccb = mfi_get_ccb(sc)) == NULL) {
1190 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
1191 xs->error = XS_RESOURCE_SHORTAGE;
1192 scsipi_done(xs);
1193 splx(s);
1194 return;
1195 }
1196
1197 switch (xs->cmd->opcode) {
1198 /* IO path */
1199 case READ_10:
1200 case WRITE_10:
1201 rwb = (struct scsipi_rw_10 *)xs->cmd;
1202 blockno = _4btol(rwb->addr);
1203 blockcnt = _2btol(rwb->length);
1204 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1205 mfi_put_ccb(ccb);
1206 goto stuffup;
1207 }
1208 break;
1209
1210 case SCSI_READ_6_COMMAND:
1211 case SCSI_WRITE_6_COMMAND:
1212 rw = (struct scsi_rw_6 *)xs->cmd;
1213 blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
1214 blockcnt = rw->length ? rw->length : 0x100;
1215 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1216 mfi_put_ccb(ccb);
1217 goto stuffup;
1218 }
1219 break;
1220
1221 case SCSI_SYNCHRONIZE_CACHE_10:
1222 mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1223 if (mfi_mgmt(ccb, xs,
1224 MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, mbox)) {
1225 mfi_put_ccb(ccb);
1226 goto stuffup;
1227 }
1228 break;
1229
1230 	/* hand it off to the firmware and let it deal with it */
1231 case SCSI_TEST_UNIT_READY:
1232 /* save off sd? after autoconf */
1233 if (!cold) /* XXX bogus */
1234 strlcpy(sc->sc_ld[target].ld_dev, device_xname(sc->sc_dev),
1235 sizeof(sc->sc_ld[target].ld_dev));
1236 /* FALLTHROUGH */
1237
1238 default:
1239 if (mfi_scsi_ld(ccb, xs)) {
1240 mfi_put_ccb(ccb);
1241 goto stuffup;
1242 }
1243 break;
1244 }
1245
1246 DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1247
1248 if (xs->xs_control & XS_CTL_POLL) {
1249 if (mfi_poll(ccb)) {
1250 /* XXX check for sense in ccb->ccb_sense? */
1251 printf("%s: mfi_scsipi_request poll failed\n",
1252 DEVNAME(sc));
1253 memset(&xs->sense, 0, sizeof(xs->sense));
1254 xs->sense.scsi_sense.response_code =
1255 SSD_RCODE_VALID | SSD_RCODE_CURRENT;
1256 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
1257 xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
1258 xs->error = XS_SENSE;
1259 xs->status = SCSI_CHECK;
1260 } else {
1261 DNPRINTF(MFI_D_DMA,
1262 "%s: mfi_scsipi_request poll complete %d\n",
1263 DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
1264 xs->error = XS_NOERROR;
1265 xs->status = SCSI_OK;
1266 xs->resid = 0;
1267 }
1268 mfi_put_ccb(ccb);
1269 scsipi_done(xs);
1270 splx(s);
1271 return;
1272 }
1273
1274 mfi_post(sc, ccb);
1275
1276 DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
1277 ccb->ccb_dmamap->dm_nsegs);
1278
1279 splx(s);
1280 return;
1281
1282 stuffup:
1283 xs->error = XS_DRIVER_STUFFUP;
1284 scsipi_done(xs);
1285 splx(s);
1286 }
1287
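/*
 * Load the data buffer into the ccb's DMA map and translate the
 * resulting segments into the frame's scatter/gather list (32- or
 * 64-bit entries depending on sc_64bit_dma).  The SGL grows the frame,
 * possibly spilling into extra frames.
 */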
1288 static int
1289 mfi_create_sgl(struct mfi_ccb *ccb, int flags)
1290 {
1291 struct mfi_softc *sc = ccb->ccb_sc;
1292 struct mfi_frame_header *hdr;
1293 bus_dma_segment_t *sgd;
1294 union mfi_sgl *sgl;
1295 int error, i;
1296
1297 DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
1298 (u_long)ccb->ccb_data);
1299
1300 if (!ccb->ccb_data)
1301 return 1;
1302
1303 error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
1304 ccb->ccb_data, ccb->ccb_len, NULL, flags);
1305 if (error) {
1306 if (error == EFBIG)
1307 printf("more than %d dma segs\n",
1308 sc->sc_max_sgl);
1309 else
1310 printf("error %d loading dma map\n", error);
1311 return 1;
1312 }
1313
1314 hdr = &ccb->ccb_frame->mfr_header;
1315 sgl = ccb->ccb_sgl;
1316 sgd = ccb->ccb_dmamap->dm_segs;
1317 for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1318 if (sc->sc_64bit_dma) {
1319 sgl->sg64[i].addr = htole64(sgd[i].ds_addr);
1320 sgl->sg64[i].len = htole64(sgd[i].ds_len);
1321 DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#"
1322 PRIx64 "\n",
1323 DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
1324 } else {
1325 sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1326 sgl->sg32[i].len = htole32(sgd[i].ds_len);
1327 DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
1328 DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1329 }
1330 }
1331
1332 if (ccb->ccb_direction == MFI_DATA_IN) {
1333 hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1334 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1335 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1336 } else {
1337 hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1338 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1339 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1340 }
1341
1342 hdr->mfh_flags |= sc->sc_sgl_flags;
1343 hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1344 ccb->ccb_frame_size += sc->sc_sgl_size * ccb->ccb_dmamap->dm_nsegs;
1345 ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1346
1347 DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
1348 " dm_nsegs: %d extra_frames: %d\n",
1349 DEVNAME(sc),
1350 hdr->mfh_sg_count,
1351 ccb->ccb_frame_size,
1352 sc->sc_frames_size,
1353 ccb->ccb_dmamap->dm_nsegs,
1354 ccb->ccb_extra_frames);
1355
1356 return 0;
1357 }
1358
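/*
 * Issue a management (DCMD) command on the driver's own behalf.
 * During autoconfiguration (cold) the command is polled; otherwise the
 * caller sleeps until mfi_mgmt_done() marks the ccb done.
 */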
1359 static int
1360 mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t dir,
1361 uint32_t len, void *buf, uint8_t *mbox)
1362 {
1363 struct mfi_ccb *ccb;
1364 int rv = 1;
1365
1366 if ((ccb = mfi_get_ccb(sc)) == NULL)
1367 return rv;
1368 	rv = mfi_mgmt(ccb, NULL, opc, dir, len, buf, mbox);
1369 	if (rv)
1370 		goto done;	/* release the ccb on failure */
1371
1372 if (cold) {
1373 if (mfi_poll(ccb))
1374 goto done;
1375 } else {
1376 mfi_post(sc, ccb);
1377
1378 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt_internal sleeping\n",
1379 DEVNAME(sc));
1380 while (ccb->ccb_state != MFI_CCB_DONE)
1381 tsleep(ccb, PRIBIO, "mfi_mgmt", 0);
1382
1383 if (ccb->ccb_flags & MFI_CCB_F_ERR)
1384 goto done;
1385 }
1386 rv = 0;
1387
1388 done:
1389 mfi_put_ccb(ccb);
1390 return rv;
1391 }
1392
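/*
 * Fill in a DCMD frame: opcode, optional mailbox bytes and, for
 * commands that move data, a scatter/gather list over the caller's
 * buffer.
 */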
1393 static int
1394 mfi_mgmt(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
1395 uint32_t opc, uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
1396 {
1397 struct mfi_dcmd_frame *dcmd;
1398
1399 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(ccb->ccb_sc), opc);
1400
1401 dcmd = &ccb->ccb_frame->mfr_dcmd;
1402 memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1403 dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1404 dcmd->mdf_header.mfh_timeout = 0;
1405
1406 dcmd->mdf_opcode = opc;
1407 dcmd->mdf_header.mfh_data_len = 0;
1408 ccb->ccb_direction = dir;
1409 ccb->ccb_xs = xs;
1410 ccb->ccb_done = mfi_mgmt_done;
1411
1412 ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1413
1414 /* handle special opcodes */
1415 if (mbox)
1416 memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1417
1418 if (dir != MFI_DATA_NONE) {
1419 dcmd->mdf_header.mfh_data_len = len;
1420 ccb->ccb_data = buf;
1421 ccb->ccb_len = len;
1422 ccb->ccb_sgl = &dcmd->mdf_sgl;
1423
1424 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
1425 return 1;
1426 }
1427 return 0;
1428 }
1429
1430 static void
1431 mfi_mgmt_done(struct mfi_ccb *ccb)
1432 {
1433 struct scsipi_xfer *xs = ccb->ccb_xs;
1434 struct mfi_softc *sc = ccb->ccb_sc;
1435 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1436
1437 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
1438 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1439
1440 if (ccb->ccb_data != NULL) {
1441 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
1442 DEVNAME(sc));
1443 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1444 ccb->ccb_dmamap->dm_mapsize,
1445 (ccb->ccb_direction & MFI_DATA_IN) ?
1446 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1447
1448 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1449 }
1450
1451 if (hdr->mfh_cmd_status != MFI_STAT_OK)
1452 ccb->ccb_flags |= MFI_CCB_F_ERR;
1453
1454 ccb->ccb_state = MFI_CCB_DONE;
1455 if (xs) {
1456 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
1457 xs->error = XS_DRIVER_STUFFUP;
1458 } else {
1459 xs->error = XS_NOERROR;
1460 xs->status = SCSI_OK;
1461 xs->resid = 0;
1462 }
1463 mfi_put_ccb(ccb);
1464 scsipi_done(xs);
1465 } else
1466 wakeup(ccb);
1467 }
1468
1469 #if NBIO > 0
1470 int
1471 mfi_ioctl(device_t dev, u_long cmd, void *addr)
1472 {
1473 struct mfi_softc *sc = device_private(dev);
1474 int error = 0;
1475 int s;
1476
1477 KERNEL_LOCK(1, curlwp);
1478 s = splbio();
1479
1480 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1481
1482 switch (cmd) {
1483 case BIOCINQ:
1484 DNPRINTF(MFI_D_IOCTL, "inq\n");
1485 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1486 break;
1487
1488 case BIOCVOL:
1489 DNPRINTF(MFI_D_IOCTL, "vol\n");
1490 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1491 break;
1492
1493 case BIOCDISK:
1494 DNPRINTF(MFI_D_IOCTL, "disk\n");
1495 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1496 break;
1497
1498 case BIOCALARM:
1499 DNPRINTF(MFI_D_IOCTL, "alarm\n");
1500 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1501 break;
1502
1503 case BIOCBLINK:
1504 DNPRINTF(MFI_D_IOCTL, "blink\n");
1505 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1506 break;
1507
1508 case BIOCSETSTATE:
1509 DNPRINTF(MFI_D_IOCTL, "setstate\n");
1510 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1511 break;
1512
1513 default:
1514 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1515 error = EINVAL;
1516 }
1517 splx(s);
1518 KERNEL_UNLOCK_ONE(curlwp);
1519
1520 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
1521 return error;
1522 }
1523
1524 static int
1525 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1526 {
1527 struct mfi_conf *cfg;
1528 int rv = EINVAL;
1529
1530 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1531
1532 if (mfi_get_info(sc)) {
1533 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
1534 DEVNAME(sc));
1535 return EIO;
1536 }
1537
1538 /* get figures */
1539 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1540 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1541 sizeof *cfg, cfg, NULL))
1542 goto freeme;
1543
1544 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1545 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1546 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1547
1548 rv = 0;
1549 freeme:
1550 free(cfg, M_DEVBUF);
1551 return rv;
1552 }
1553
1554 static int
1555 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
1556 {
1557 int i, per, rv = EINVAL;
1558 uint8_t mbox[MFI_MBOX_SIZE];
1559
1560 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
1561 DEVNAME(sc), bv->bv_volid);
1562
1563 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1564 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1565 goto done;
1566
1567 i = bv->bv_volid;
1568 mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1569 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
1570 DEVNAME(sc), mbox[0]);
1571
1572 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
1573 sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox))
1574 goto done;
1575
1576 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
1577 /* go do hotspares */
1578 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
1579 goto done;
1580 }
1581
1582 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
1583
1584 switch(sc->sc_ld_list.mll_list[i].mll_state) {
1585 case MFI_LD_OFFLINE:
1586 bv->bv_status = BIOC_SVOFFLINE;
1587 break;
1588
1589 case MFI_LD_PART_DEGRADED:
1590 case MFI_LD_DEGRADED:
1591 bv->bv_status = BIOC_SVDEGRADED;
1592 break;
1593
1594 case MFI_LD_ONLINE:
1595 bv->bv_status = BIOC_SVONLINE;
1596 break;
1597
1598 default:
1599 bv->bv_status = BIOC_SVINVALID;
1600 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
1601 DEVNAME(sc),
1602 sc->sc_ld_list.mll_list[i].mll_state);
1603 }
1604
1605 /* additional status can modify MFI status */
1606 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
1607 case MFI_LD_PROG_CC:
1608 case MFI_LD_PROG_BGI:
1609 bv->bv_status = BIOC_SVSCRUB;
1610 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
1611 bv->bv_percent = (per * 100) / 0xffff;
1612 bv->bv_seconds =
1613 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
1614 break;
1615
1616 case MFI_LD_PROG_FGI:
1617 case MFI_LD_PROG_RECONSTRUCT:
1618 /* nothing yet */
1619 break;
1620 }
1621
1622 	/*
1623 	 * The RAID levels are determined per the SNIA DDF spec; this is only
1624 	 * a subset that is valid for the MFI controller.
1625 	 */
1626 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
1627 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
1628 MFI_DDF_SRL_SPANNED)
1629 bv->bv_level *= 10;
1630
1631 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
1632 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
1633
1634 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
1635
1636 rv = 0;
1637 done:
1638 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
1639 DEVNAME(sc), rv);
1640 return rv;
1641 }
1642
1643 static int
1644 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
1645 {
1646 struct mfi_conf *cfg;
1647 struct mfi_array *ar;
1648 struct mfi_ld_cfg *ld;
1649 struct mfi_pd_details *pd;
1650 struct scsipi_inquiry_data *inqbuf;
1651 char vend[8+16+4+1];
1652 int i, rv = EINVAL;
1653 int arr, vol, disk;
1654 uint32_t size;
1655 uint8_t mbox[MFI_MBOX_SIZE];
1656
1657 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
1658 DEVNAME(sc), bd->bd_diskid);
1659
1660 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1661
1662 /* send single element command to retrieve size for full structure */
1663 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1664 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1665 sizeof *cfg, cfg, NULL))
1666 goto freeme;
1667
1668 size = cfg->mfc_size;
1669 free(cfg, M_DEVBUF);
1670
1671 /* memory for read config */
1672 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1673 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1674 size, cfg, NULL))
1675 goto freeme;
1676
1677 ar = cfg->mfc_array;
1678
1679 /* calculate offset to ld structure */
1680 ld = (struct mfi_ld_cfg *)(
1681 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1682 cfg->mfc_array_size * cfg->mfc_no_array);
1683
1684 vol = bd->bd_volid;
1685
1686 if (vol >= cfg->mfc_no_ld) {
1687 /* do hotspares */
1688 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
1689 goto freeme;
1690 }
1691
1692 /* find corresponding array for ld */
1693 for (i = 0, arr = 0; i < vol; i++)
1694 arr += ld[i].mlc_parm.mpa_span_depth;
1695
1696 /* offset disk into pd list */
1697 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
1698
1699 /* offset array index into the next spans */
1700 arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
1701
1702 bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
1703 switch (ar[arr].pd[disk].mar_pd_state){
1704 case MFI_PD_UNCONFIG_GOOD:
1705 bd->bd_status = BIOC_SDUNUSED;
1706 break;
1707
1708 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
1709 bd->bd_status = BIOC_SDHOTSPARE;
1710 break;
1711
1712 case MFI_PD_OFFLINE:
1713 bd->bd_status = BIOC_SDOFFLINE;
1714 break;
1715
1716 case MFI_PD_FAILED:
1717 bd->bd_status = BIOC_SDFAILED;
1718 break;
1719
1720 case MFI_PD_REBUILD:
1721 bd->bd_status = BIOC_SDREBUILD;
1722 break;
1723
1724 case MFI_PD_ONLINE:
1725 bd->bd_status = BIOC_SDONLINE;
1726 break;
1727
1728 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
1729 default:
1730 bd->bd_status = BIOC_SDINVALID;
1731 break;
1732
1733 }
1734
1735 /* get the remaining fields */
1736 *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
1737 memset(pd, 0, sizeof(*pd));
1738 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1739 sizeof *pd, pd, mbox))
1740 goto freeme;
1741
1742 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
1743
1744 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
1745 bd->bd_channel = pd->mpd_enc_idx;
1746
1747 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1748 memcpy(vend, inqbuf->vendor, sizeof vend - 1);
1749 vend[sizeof vend - 1] = '\0';
1750 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
1751
1752 /* XXX find a way to retrieve serial nr from drive */
1753 /* XXX find a way to get bd_procdev */
1754
1755 rv = 0;
1756 freeme:
1757 free(pd, M_DEVBUF);
1758 free(cfg, M_DEVBUF);
1759
1760 return rv;
1761 }
1762
1763 static int
1764 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1765 {
1766 uint32_t opc, dir = MFI_DATA_NONE;
1767 int rv = 0;
1768 int8_t ret;
1769
1770 switch(ba->ba_opcode) {
1771 case BIOC_SADISABLE:
1772 opc = MR_DCMD_SPEAKER_DISABLE;
1773 break;
1774
1775 case BIOC_SAENABLE:
1776 opc = MR_DCMD_SPEAKER_ENABLE;
1777 break;
1778
1779 case BIOC_SASILENCE:
1780 opc = MR_DCMD_SPEAKER_SILENCE;
1781 break;
1782
1783 case BIOC_GASTATUS:
1784 opc = MR_DCMD_SPEAKER_GET;
1785 dir = MFI_DATA_IN;
1786 break;
1787
1788 case BIOC_SATEST:
1789 opc = MR_DCMD_SPEAKER_TEST;
1790 break;
1791
1792 default:
1793 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1794 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1795 return EINVAL;
1796 }
1797
1798 if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL))
1799 rv = EINVAL;
1800 else
1801 if (ba->ba_opcode == BIOC_GASTATUS)
1802 ba->ba_status = ret;
1803 else
1804 ba->ba_status = 0;
1805
1806 return rv;
1807 }
1808
1809 static int
1810 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1811 {
1812 int i, found, rv = EINVAL;
1813 uint8_t mbox[MFI_MBOX_SIZE];
1814 uint32_t cmd;
1815 struct mfi_pd_list *pd;
1816
1817 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1818 bb->bb_status);
1819
1820 /* channel 0 means not in an enclosure so can't be blinked */
1821 if (bb->bb_channel == 0)
1822 return EINVAL;
1823
1824 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1825
1826 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1827 MFI_PD_LIST_SIZE, pd, NULL))
1828 goto done;
1829
1830 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1831 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1832 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1833 found = 1;
1834 break;
1835 }
1836
1837 if (!found)
1838 goto done;
1839
1840 memset(mbox, 0, sizeof mbox);
1841
1842 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1843
1844 switch (bb->bb_status) {
1845 case BIOC_SBUNBLINK:
1846 cmd = MR_DCMD_PD_UNBLINK;
1847 break;
1848
1849 case BIOC_SBBLINK:
1850 cmd = MR_DCMD_PD_BLINK;
1851 break;
1852
1853 case BIOC_SBALARM:
1854 default:
1855 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1856 "opcode %x\n", DEVNAME(sc), bb->bb_status);
1857 goto done;
1858 }
1859
1860
1861 if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1862 goto done;
1863
1864 rv = 0;
1865 done:
1866 free(pd, M_DEVBUF);
1867 return rv;
1868 }
1869
1870 static int
1871 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
1872 {
1873 struct mfi_pd_list *pd;
1874 int i, found, rv = EINVAL;
1875 uint8_t mbox[MFI_MBOX_SIZE];
1876 uint32_t cmd;
1877
1878 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
1879 bs->bs_status);
1880
1881 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1882
1883 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1884 MFI_PD_LIST_SIZE, pd, NULL))
1885 goto done;
1886
1887 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1888 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
1889 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
1890 found = 1;
1891 break;
1892 }
1893
1894 if (!found)
1895 goto done;
1896
1897 memset(mbox, 0, sizeof mbox);
1898
1899 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1900
1901 switch (bs->bs_status) {
1902 case BIOC_SSONLINE:
1903 mbox[2] = MFI_PD_ONLINE;
1904 cmd = MD_DCMD_PD_SET_STATE;
1905 break;
1906
1907 case BIOC_SSOFFLINE:
1908 mbox[2] = MFI_PD_OFFLINE;
1909 cmd = MD_DCMD_PD_SET_STATE;
1910 break;
1911
1912 case BIOC_SSHOTSPARE:
1913 mbox[2] = MFI_PD_HOTSPARE;
1914 cmd = MD_DCMD_PD_SET_STATE;
1915 break;
1916 /*
1917 case BIOC_SSREBUILD:
1918 cmd = MD_DCMD_PD_REBUILD;
1919 break;
1920 */
1921 default:
1922 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
1923 "opcode %x\n", DEVNAME(sc), bs->bs_status);
1924 goto done;
1925 }
1926
1927
1928 	if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE,
1929 	    0, NULL, mbox))
1930 goto done;
1931
1932 rv = 0;
1933 done:
1934 free(pd, M_DEVBUF);
1935 return rv;
1936 }
1937
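/*
 * Report a hotspare either as a bioc_vol or as a bioc_disk.  Hotspares
 * live behind the array and logical-drive tables in the firmware
 * configuration, so the full configuration is fetched and the spare's
 * physical drive is looked up by its device id.
 */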
1938 static int
1939 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
1940 {
1941 struct mfi_conf *cfg;
1942 struct mfi_hotspare *hs;
1943 struct mfi_pd_details *pd;
1944 struct bioc_disk *sdhs;
1945 struct bioc_vol *vdhs;
1946 struct scsipi_inquiry_data *inqbuf;
1947 char vend[8+16+4+1];
1948 int i, rv = EINVAL;
1949 uint32_t size;
1950 uint8_t mbox[MFI_MBOX_SIZE];
1951
1952 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
1953
1954 if (!bio_hs)
1955 return EINVAL;
1956
1957 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1958
1959 /* send single element command to retrieve size for full structure */
1960 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1961 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1962 sizeof *cfg, cfg, NULL))
1963 goto freeme;
1964
1965 size = cfg->mfc_size;
1966 free(cfg, M_DEVBUF);
1967
1968 /* memory for read config */
1969 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1970 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1971 size, cfg, NULL))
1972 goto freeme;
1973
1974 /* calculate offset to hs structure */
1975 hs = (struct mfi_hotspare *)(
1976 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1977 cfg->mfc_array_size * cfg->mfc_no_array +
1978 cfg->mfc_ld_size * cfg->mfc_no_ld);
1979
1980 if (volid < cfg->mfc_no_ld)
1981 goto freeme; /* not a hotspare */
1982
1983 	if (volid >= (cfg->mfc_no_ld + cfg->mfc_no_hs))
1984 goto freeme; /* not a hotspare */
1985
1986 /* offset into hotspare structure */
1987 i = volid - cfg->mfc_no_ld;
1988
1989 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
1990 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
1991 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
1992
1993 /* get pd fields */
1994 memset(mbox, 0, sizeof mbox);
1995 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
1996 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1997 sizeof *pd, pd, mbox)) {
1998 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
1999 DEVNAME(sc));
2000 goto freeme;
2001 }
2002
2003 switch (type) {
2004 case MFI_MGMT_VD:
2005 vdhs = bio_hs;
2006 vdhs->bv_status = BIOC_SVONLINE;
2007 vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */
2008 vdhs->bv_level = -1; /* hotspare */
2009 vdhs->bv_nodisk = 1;
2010 break;
2011
2012 case MFI_MGMT_SD:
2013 sdhs = bio_hs;
2014 sdhs->bd_status = BIOC_SDHOTSPARE;
2015 sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */
2016 sdhs->bd_channel = pd->mpd_enc_idx;
2017 sdhs->bd_target = pd->mpd_enc_slot;
2018 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
2019 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
2020 vend[sizeof vend - 1] = '\0';
2021 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
2022 break;
2023
2024 default:
2025 goto freeme;
2026 }
2027
2028 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
2029 rv = 0;
2030 freeme:
2031 free(pd, M_DEVBUF);
2032 free(cfg, M_DEVBUF);
2033
2034 return rv;
2035 }
2036
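/* Unregister from sysmon_envsys(9) and release the per-drive sensor array. */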
2037 static int
2038 mfi_destroy_sensors(struct mfi_softc *sc)
2039 {
2040 if (sc->sc_sme == NULL)
2041 return 0;
2042 sysmon_envsys_unregister(sc->sc_sme);
2043 sc->sc_sme = NULL;
2044 free(sc->sc_sensor, M_DEVBUF);
2045 return 0;
2046 }
2047
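/*
 * Attach one ENVSYS_DRIVE sensor per logical drive so drive state changes
 * can be monitored through sysmon_envsys(9); mfi_sensor_refresh() supplies
 * the current values on demand.
 */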
2048 static int
2049 mfi_create_sensors(struct mfi_softc *sc)
2050 {
2051 int i;
2052 int nsensors = sc->sc_ld_cnt;
2053 int rv;
2054
2055 	sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
2056 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2057 	if (sc->sc_sensor == NULL) {
2058 		aprint_error("%s: can't allocate envsys_data_t\n",
2059 		    DEVNAME(sc));
2060 		return ENOMEM;
2061 	}
2062 	sc->sc_sme = sysmon_envsys_create();
2063
2064 for (i = 0; i < nsensors; i++) {
2065 sc->sc_sensor[i].units = ENVSYS_DRIVE;
2066 sc->sc_sensor[i].state = ENVSYS_SINVALID;
2067 sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY;
2068 /* Enable monitoring for drive state changes */
2069 sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
2070 /* logical drives */
2071 snprintf(sc->sc_sensor[i].desc,
2072 sizeof(sc->sc_sensor[i].desc), "%s:%d",
2073 DEVNAME(sc), i);
2074 if (sysmon_envsys_sensor_attach(sc->sc_sme,
2075 &sc->sc_sensor[i]))
2076 goto out;
2077 }
2078
2079 sc->sc_sme->sme_name = DEVNAME(sc);
2080 sc->sc_sme->sme_cookie = sc;
2081 sc->sc_sme->sme_refresh = mfi_sensor_refresh;
2082 rv = sysmon_envsys_register(sc->sc_sme);
2083 if (rv != 0) {
2084 aprint_error("%s: unable to register with sysmon (rv = %d)\n",
2085 DEVNAME(sc), rv);
2086 goto out;
2087 }
2088 return 0;
2089
2090 out:
2091 free(sc->sc_sensor, M_DEVBUF);
2092 sysmon_envsys_destroy(sc->sc_sme);
2093 sc->sc_sme = NULL;
2094 return EINVAL;
2095 }
2096
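/*
 * envsys refresh callback: query the volume status through mfi_ioctl_vol()
 * (under the kernel lock and at splbio, since it issues a management
 * command) and map the bio(4) status onto envsys drive states.
 */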
2097 static void
2098 mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
2099 {
2100 struct mfi_softc *sc = sme->sme_cookie;
2101 struct bioc_vol bv;
2102 int s;
2103 int error;
2104
2105 if (edata->sensor >= sc->sc_ld_cnt)
2106 return;
2107
2108 memset(&bv, 0, sizeof(bv));
2109 bv.bv_volid = edata->sensor;
2110 KERNEL_LOCK(1, curlwp);
2111 s = splbio();
2112 error = mfi_ioctl_vol(sc, &bv);
2113 splx(s);
2114 KERNEL_UNLOCK_ONE(curlwp);
2115 if (error)
2116 return;
2117
2118 	switch (bv.bv_status) {
2119 case BIOC_SVOFFLINE:
2120 edata->value_cur = ENVSYS_DRIVE_FAIL;
2121 edata->state = ENVSYS_SCRITICAL;
2122 break;
2123
2124 case BIOC_SVDEGRADED:
2125 edata->value_cur = ENVSYS_DRIVE_PFAIL;
2126 edata->state = ENVSYS_SCRITICAL;
2127 break;
2128
2129 case BIOC_SVSCRUB:
2130 case BIOC_SVONLINE:
2131 edata->value_cur = ENVSYS_DRIVE_ONLINE;
2132 edata->state = ENVSYS_SVALID;
2133 break;
2134
2135 case BIOC_SVINVALID:
2136 		/* FALLTHROUGH */
2137 default:
2138 edata->value_cur = 0; /* unknown */
2139 edata->state = ENVSYS_SINVALID;
2140 }
2141 }
2142
2143 #endif /* NBIO > 0 */
2144
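/*
 * Register access helpers for the xscale-style message unit: the firmware
 * state is read from MFI_OMSG0, interrupts are acknowledged by writing the
 * status back to MFI_OSTS, and commands are posted through MFI_IQP.
 */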
2145 static uint32_t
2146 mfi_xscale_fw_state(struct mfi_softc *sc)
2147 {
2148 return mfi_read(sc, MFI_OMSG0);
2149 }
2150
2151 static void
2152 mfi_xscale_intr_dis(struct mfi_softc *sc)
2153 {
2154 mfi_write(sc, MFI_OMSK, 0);
2155 }
2156
2157 static void
2158 mfi_xscale_intr_ena(struct mfi_softc *sc)
2159 {
2160 mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
2161 }
2162
2163 static int
2164 mfi_xscale_intr(struct mfi_softc *sc)
2165 {
2166 uint32_t status;
2167
2168 status = mfi_read(sc, MFI_OSTS);
2169 if (!ISSET(status, MFI_OSTS_INTR_VALID))
2170 return 0;
2171
2172 /* write status back to acknowledge interrupt */
2173 mfi_write(sc, MFI_OSTS, status);
2174 return 1;
2175 }
2176
2177 static void
2178 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2179 {
2180 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2181 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
2182 sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2183 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
2184 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
2185 MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);
2186
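	/*
	 * Post the frame address shifted right by 3 (the controller takes
	 * it in 8-byte units), with the extra-frame count in the low bits.
	 */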
2187 mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
2188 ccb->ccb_extra_frames);
2189 }
2190
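/*
 * ppc-style message unit: firmware state is read from MFI_OSP, interrupts
 * are acknowledged by writing the status to MFI_ODC, and commands are
 * posted through MFI_IQP with bit 0 set and the extra-frame count shifted
 * into the low bits of the frame address.
 */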
2191 static uint32_t
2192 mfi_ppc_fw_state(struct mfi_softc *sc)
2193 {
2194 return mfi_read(sc, MFI_OSP);
2195 }
2196
2197 static void
2198 mfi_ppc_intr_dis(struct mfi_softc *sc)
2199 {
2200 /* Taking a wild guess --dyoung */
2201 mfi_write(sc, MFI_OMSK, ~(uint32_t)0x0);
2202 mfi_write(sc, MFI_ODC, 0xffffffff);
2203 }
2204
2205 static void
2206 mfi_ppc_intr_ena(struct mfi_softc *sc)
2207 {
2208 mfi_write(sc, MFI_ODC, 0xffffffff);
2209 mfi_write(sc, MFI_OMSK, ~0x80000004);
2210 }
2211
2212 static int
2213 mfi_ppc_intr(struct mfi_softc *sc)
2214 {
2215 uint32_t status;
2216
2217 status = mfi_read(sc, MFI_OSTS);
2218 if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2219 return 0;
2220
2221 /* write status back to acknowledge interrupt */
2222 mfi_write(sc, MFI_ODC, status);
2223 return 1;
2224 }
2225
2226 static void
2227 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2228 {
2229 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2230 (ccb->ccb_extra_frames << 1));
2231 }
2232
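/*
 * gen2-style message unit: same register layout as the ppc variant except
 * for the interrupt valid/mask bit (MFI_OSTS_GEN2_INTR_VALID).
 */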
2233 u_int32_t
2234 mfi_gen2_fw_state(struct mfi_softc *sc)
2235 {
2236 return (mfi_read(sc, MFI_OSP));
2237 }
2238
2239 void
2240 mfi_gen2_intr_dis(struct mfi_softc *sc)
2241 {
2242 mfi_write(sc, MFI_OMSK, 0xffffffff);
2243 mfi_write(sc, MFI_ODC, 0xffffffff);
2244 }
2245
2246 void
2247 mfi_gen2_intr_ena(struct mfi_softc *sc)
2248 {
2249 mfi_write(sc, MFI_ODC, 0xffffffff);
2250 mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
2251 }
2252
2253 int
2254 mfi_gen2_intr(struct mfi_softc *sc)
2255 {
2256 u_int32_t status;
2257
2258 status = mfi_read(sc, MFI_OSTS);
2259 if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
2260 return (0);
2261
2262 /* write status back to acknowledge interrupt */
2263 mfi_write(sc, MFI_ODC, status);
2264
2265 return (1);
2266 }
2267
2268 void
2269 mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2270 {
2271 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2272 (ccb->ccb_extra_frames << 1));
2273 }
2274
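/*
 * skinny-style message unit: firmware state is read from MFI_OSP,
 * interrupts are acknowledged by writing the status back to MFI_OSTS, and
 * commands are posted through the 64-bit MFI_IQPL/MFI_IQPH pair, with the
 * high word written as zero.
 */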
2275 u_int32_t
2276 mfi_skinny_fw_state(struct mfi_softc *sc)
2277 {
2278 return (mfi_read(sc, MFI_OSP));
2279 }
2280
2281 void
2282 mfi_skinny_intr_dis(struct mfi_softc *sc)
2283 {
2284 mfi_write(sc, MFI_OMSK, 0);
2285 }
2286
2287 void
2288 mfi_skinny_intr_ena(struct mfi_softc *sc)
2289 {
2290 mfi_write(sc, MFI_OMSK, ~0x00000001);
2291 }
2292
2293 int
2294 mfi_skinny_intr(struct mfi_softc *sc)
2295 {
2296 u_int32_t status;
2297
2298 status = mfi_read(sc, MFI_OSTS);
2299 if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
2300 return (0);
2301
2302 /* write status back to acknowledge interrupt */
2303 mfi_write(sc, MFI_OSTS, status);
2304
2305 return (1);
2306 }
2307
2308 void
2309 mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2310 {
2311 mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
2312 (ccb->ccb_extra_frames << 1));
2313 mfi_write(sc, MFI_IQPH, 0x00000000);
2314 }
2315