     1 /* $NetBSD: mfi.c,v 1.37 2012/03/21 13:19:11 sborrill Exp $ */
2 /* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
3 /*
4 * Copyright (c) 2006 Marco Peereboom <marco (at) peereboom.us>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include <sys/cdefs.h>
20 __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.37 2012/03/21 13:19:11 sborrill Exp $");
21
22 #include "bio.h"
23
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/buf.h>
27 #include <sys/ioctl.h>
28 #include <sys/device.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/proc.h>
32
33 #include <uvm/uvm_param.h>
34
35 #include <sys/bus.h>
36
37 #include <dev/scsipi/scsipi_all.h>
38 #include <dev/scsipi/scsi_all.h>
39 #include <dev/scsipi/scsi_spc.h>
40 #include <dev/scsipi/scsipi_disk.h>
41 #include <dev/scsipi/scsi_disk.h>
42 #include <dev/scsipi/scsiconf.h>
43
44 #include <dev/ic/mfireg.h>
45 #include <dev/ic/mfivar.h>
46
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #endif /* NBIO > 0 */
50
51 #ifdef MFI_DEBUG
52 uint32_t mfi_debug = 0
53 /* | MFI_D_CMD */
54 /* | MFI_D_INTR */
55 /* | MFI_D_MISC */
56 /* | MFI_D_DMA */
57 | MFI_D_IOCTL
58 /* | MFI_D_RW */
59 /* | MFI_D_MEM */
60 /* | MFI_D_CCB */
61 ;
62 #endif
63
64 static void mfi_scsipi_request(struct scsipi_channel *,
65 scsipi_adapter_req_t, void *);
66 static void mfiminphys(struct buf *bp);
67
68 static struct mfi_ccb *mfi_get_ccb(struct mfi_softc *);
69 static void mfi_put_ccb(struct mfi_ccb *);
70 static int mfi_init_ccb(struct mfi_softc *);
71
72 static struct mfi_mem *mfi_allocmem(struct mfi_softc *, size_t);
73 static void mfi_freemem(struct mfi_softc *, struct mfi_mem **);
74
75 static int mfi_transition_firmware(struct mfi_softc *);
76 static int mfi_initialize_firmware(struct mfi_softc *);
77 static int mfi_get_info(struct mfi_softc *);
78 static uint32_t mfi_read(struct mfi_softc *, bus_size_t);
79 static void mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
80 static int mfi_poll(struct mfi_ccb *);
81 static int mfi_create_sgl(struct mfi_ccb *, int);
82
83 /* commands */
84 static int mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
85 static int mfi_scsi_io(struct mfi_ccb *, struct scsipi_xfer *,
86 uint32_t, uint32_t);
87 static void mfi_scsi_xs_done(struct mfi_ccb *);
88 static int mfi_mgmt_internal(struct mfi_softc *,
89 uint32_t, uint32_t, uint32_t, void *, uint8_t *);
    90 static int		mfi_mgmt(struct mfi_ccb *, struct scsipi_xfer *,
91 uint32_t, uint32_t, uint32_t, void *, uint8_t *);
92 static void mfi_mgmt_done(struct mfi_ccb *);
93
94 #if NBIO > 0
95 static int mfi_ioctl(device_t, u_long, void *);
96 static int mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
97 static int mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
98 static int mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
99 static int mfi_ioctl_alarm(struct mfi_softc *,
100 struct bioc_alarm *);
101 static int mfi_ioctl_blink(struct mfi_softc *sc,
102 struct bioc_blink *);
103 static int mfi_ioctl_setstate(struct mfi_softc *,
104 struct bioc_setstate *);
105 static int mfi_bio_hs(struct mfi_softc *, int, int, void *);
106 static int mfi_create_sensors(struct mfi_softc *);
107 static int mfi_destroy_sensors(struct mfi_softc *);
108 static void mfi_sensor_refresh(struct sysmon_envsys *,
109 envsys_data_t *);
110 #endif /* NBIO > 0 */
111
112 static uint32_t mfi_xscale_fw_state(struct mfi_softc *sc);
113 static void mfi_xscale_intr_ena(struct mfi_softc *sc);
114 static void mfi_xscale_intr_dis(struct mfi_softc *sc);
115 static int mfi_xscale_intr(struct mfi_softc *sc);
116 static void mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
117
118 static const struct mfi_iop_ops mfi_iop_xscale = {
119 mfi_xscale_fw_state,
120 mfi_xscale_intr_dis,
121 mfi_xscale_intr_ena,
122 mfi_xscale_intr,
123 mfi_xscale_post
124 };
125
126 static uint32_t mfi_ppc_fw_state(struct mfi_softc *sc);
127 static void mfi_ppc_intr_ena(struct mfi_softc *sc);
128 static void mfi_ppc_intr_dis(struct mfi_softc *sc);
129 static int mfi_ppc_intr(struct mfi_softc *sc);
130 static void mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
131
132 static const struct mfi_iop_ops mfi_iop_ppc = {
133 mfi_ppc_fw_state,
134 mfi_ppc_intr_dis,
135 mfi_ppc_intr_ena,
136 mfi_ppc_intr,
137 mfi_ppc_post
138 };
139
140 uint32_t mfi_gen2_fw_state(struct mfi_softc *sc);
141 void mfi_gen2_intr_ena(struct mfi_softc *sc);
142 void mfi_gen2_intr_dis(struct mfi_softc *sc);
143 int mfi_gen2_intr(struct mfi_softc *sc);
144 void mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
145
146 static const struct mfi_iop_ops mfi_iop_gen2 = {
147 mfi_gen2_fw_state,
148 mfi_gen2_intr_dis,
149 mfi_gen2_intr_ena,
150 mfi_gen2_intr,
151 mfi_gen2_post
152 };
153
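/*
 * The register-level differences between controller generations are hidden
 * behind mfi_iop_ops; the accessors below simply dispatch through
 * sc->sc_iop, which mfi_attach() points at mfi_iop_xscale, mfi_iop_ppc or
 * mfi_iop_gen2 depending on the iop argument.
 */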
154 #define mfi_fw_state(_s) ((_s)->sc_iop->mio_fw_state(_s))
155 #define mfi_intr_enable(_s) ((_s)->sc_iop->mio_intr_ena(_s))
156 #define mfi_intr_disable(_s) ((_s)->sc_iop->mio_intr_dis(_s))
157 #define mfi_my_intr(_s) ((_s)->sc_iop->mio_intr(_s))
158 #define mfi_post(_s, _c) ((_s)->sc_iop->mio_post((_s), (_c)))
159
160 static struct mfi_ccb *
161 mfi_get_ccb(struct mfi_softc *sc)
162 {
163 struct mfi_ccb *ccb;
164 int s;
165
166 s = splbio();
167 ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
168 if (ccb) {
169 TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
170 ccb->ccb_state = MFI_CCB_READY;
171 }
172 splx(s);
173
174 DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
175
176 return ccb;
177 }
178
179 static void
180 mfi_put_ccb(struct mfi_ccb *ccb)
181 {
182 struct mfi_softc *sc = ccb->ccb_sc;
183 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
184 int s;
185
186 DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
187
188 hdr->mfh_cmd_status = 0x0;
189 hdr->mfh_flags = 0x0;
190 ccb->ccb_state = MFI_CCB_FREE;
191 ccb->ccb_xs = NULL;
192 ccb->ccb_flags = 0;
193 ccb->ccb_done = NULL;
194 ccb->ccb_direction = 0;
195 ccb->ccb_frame_size = 0;
196 ccb->ccb_extra_frames = 0;
197 ccb->ccb_sgl = NULL;
198 ccb->ccb_data = NULL;
199 ccb->ccb_len = 0;
200
201 s = splbio();
202 TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
203 splx(s);
204 }
205
206 static int
207 mfi_destroy_ccb(struct mfi_softc *sc)
208 {
209 struct mfi_ccb *ccb;
210 uint32_t i;
211
212 DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
213
214
215 for (i = 0; (ccb = mfi_get_ccb(sc)) != NULL; i++) {
216 /* create a dma map for transfer */
217 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
218 }
219
220 if (i < sc->sc_max_cmds)
221 return EBUSY;
222
223 free(sc->sc_ccb, M_DEVBUF);
224
225 return 0;
226 }
227
228 static int
229 mfi_init_ccb(struct mfi_softc *sc)
230 {
231 struct mfi_ccb *ccb;
232 uint32_t i;
233 int error;
234
235 DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
236
237 sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
238 M_DEVBUF, M_WAITOK|M_ZERO);
239
240 for (i = 0; i < sc->sc_max_cmds; i++) {
241 ccb = &sc->sc_ccb[i];
242
243 ccb->ccb_sc = sc;
244
245 /* select i'th frame */
246 ccb->ccb_frame = (union mfi_frame *)
247 ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
248 ccb->ccb_pframe =
249 MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
250 ccb->ccb_frame->mfr_header.mfh_context = i;
251
252 /* select i'th sense */
253 ccb->ccb_sense = (struct mfi_sense *)
254 ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
255 ccb->ccb_psense =
256 (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
257
258 /* create a dma map for transfer */
259 error = bus_dmamap_create(sc->sc_dmat,
260 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
261 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
262 if (error) {
263 printf("%s: cannot create ccb dmamap (%d)\n",
264 DEVNAME(sc), error);
265 goto destroy;
266 }
267
268 DNPRINTF(MFI_D_CCB,
269 "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
270 ccb->ccb_frame->mfr_header.mfh_context, ccb,
271 (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
272 (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
273 (u_long)ccb->ccb_dmamap);
274
275 /* add ccb to queue */
276 mfi_put_ccb(ccb);
277 }
278
279 return 0;
280 destroy:
281 /* free dma maps and ccb memory */
282 while (i) {
283 i--;
284 ccb = &sc->sc_ccb[i];
285 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
286 }
287
288 free(sc->sc_ccb, M_DEVBUF);
289
290 return 1;
291 }
292
293 static uint32_t
294 mfi_read(struct mfi_softc *sc, bus_size_t r)
295 {
296 uint32_t rv;
297
298 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
299 BUS_SPACE_BARRIER_READ);
300 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
301
   302 	DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x%08x ", DEVNAME(sc), (u_long)r, rv);
303 return rv;
304 }
305
306 static void
307 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
308 {
309 DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);
310
311 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
312 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
313 BUS_SPACE_BARRIER_WRITE);
314 }
315
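/*
 * Allocate a single-segment, DMA-loaded buffer and hand it back wrapped in
 * an mfi_mem descriptor; the reply queue (sc_pcq), the frame pool
 * (sc_frames) and the sense pool (sc_sense) are all obtained through this
 * helper.  On failure the partially created resources are torn down in
 * reverse order via the labels at the bottom.
 */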
316 static struct mfi_mem *
317 mfi_allocmem(struct mfi_softc *sc, size_t size)
318 {
319 struct mfi_mem *mm;
320 int nsegs;
321
322 DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
323 (long)size);
324
325 mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
326 if (mm == NULL)
327 return NULL;
328
329 mm->am_size = size;
330
331 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
332 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
333 goto amfree;
334
335 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
336 &nsegs, BUS_DMA_NOWAIT) != 0)
337 goto destroy;
338
339 if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
340 BUS_DMA_NOWAIT) != 0)
341 goto free;
342
343 if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
344 BUS_DMA_NOWAIT) != 0)
345 goto unmap;
346
347 DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n",
348 mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);
349
350 memset(mm->am_kva, 0, size);
351 return mm;
352
353 unmap:
354 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
355 free:
356 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
357 destroy:
358 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
359 amfree:
360 free(mm, M_DEVBUF);
361
362 return NULL;
363 }
364
365 static void
366 mfi_freemem(struct mfi_softc *sc, struct mfi_mem **mmp)
367 {
368 struct mfi_mem *mm = *mmp;
369
370 if (mm == NULL)
371 return;
372
373 *mmp = NULL;
374
375 DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
376
377 bus_dmamap_unload(sc->sc_dmat, mm->am_map);
378 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
379 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
380 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
381 free(mm, M_DEVBUF);
382 }
383
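/*
 * Drive the firmware to MFI_STATE_READY.  Each pass reads the firmware
 * state register, issues whatever transition that state calls for (clear
 * the handshake, request ready, or simply wait), and then polls in 100ms
 * steps for up to max_wait seconds for the state to change.  A fault, an
 * unknown state, or a state that never changes is treated as fatal.
 */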
384 static int
385 mfi_transition_firmware(struct mfi_softc *sc)
386 {
387 uint32_t fw_state, cur_state;
388 int max_wait, i;
389
390 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
391
392 DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
393 fw_state);
394
395 while (fw_state != MFI_STATE_READY) {
396 DNPRINTF(MFI_D_MISC,
397 "%s: waiting for firmware to become ready\n",
398 DEVNAME(sc));
399 cur_state = fw_state;
400 switch (fw_state) {
401 case MFI_STATE_FAULT:
402 printf("%s: firmware fault\n", DEVNAME(sc));
403 return 1;
404 case MFI_STATE_WAIT_HANDSHAKE:
405 mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
406 max_wait = 2;
407 break;
408 case MFI_STATE_OPERATIONAL:
409 mfi_write(sc, MFI_IDB, MFI_INIT_READY);
410 max_wait = 10;
411 break;
412 case MFI_STATE_UNDEFINED:
413 case MFI_STATE_BB_INIT:
414 max_wait = 2;
415 break;
416 case MFI_STATE_FW_INIT:
417 case MFI_STATE_DEVICE_SCAN:
418 case MFI_STATE_FLUSH_CACHE:
419 max_wait = 20;
420 break;
421 default:
422 printf("%s: unknown firmware state %d\n",
423 DEVNAME(sc), fw_state);
424 return 1;
425 }
426 for (i = 0; i < (max_wait * 10); i++) {
427 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
428 if (fw_state == cur_state)
429 DELAY(100000);
430 else
431 break;
432 }
433 if (fw_state == cur_state) {
434 printf("%s: firmware stuck in state %#x\n",
435 DEVNAME(sc), fw_state);
436 return 1;
437 }
438 }
439
440 return 0;
441 }
442
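/*
 * Hand the reply-queue layout to the firmware with an MFI_CMD_INIT frame.
 * The mfi_init_qinfo structure placed directly behind the init frame holds
 * the bus addresses of the reply queue and of the producer/consumer indices
 * inside sc_pcq.  The command is polled because this runs during attach,
 * before interrupts have been enabled.
 */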
443 static int
444 mfi_initialize_firmware(struct mfi_softc *sc)
445 {
446 struct mfi_ccb *ccb;
447 struct mfi_init_frame *init;
448 struct mfi_init_qinfo *qinfo;
449
450 DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
451
452 if ((ccb = mfi_get_ccb(sc)) == NULL)
453 return 1;
454
455 init = &ccb->ccb_frame->mfr_init;
456 qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
457
458 memset(qinfo, 0, sizeof *qinfo);
459 qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
460 qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
461 offsetof(struct mfi_prod_cons, mpc_reply_q));
462 qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
463 offsetof(struct mfi_prod_cons, mpc_producer));
464 qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
465 offsetof(struct mfi_prod_cons, mpc_consumer));
466
467 init->mif_header.mfh_cmd = MFI_CMD_INIT;
468 init->mif_header.mfh_data_len = sizeof *qinfo;
469 init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);
470
471 DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
472 DEVNAME(sc),
473 qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
474 qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);
475
476 if (mfi_poll(ccb)) {
477 printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc));
478 return 1;
479 }
480
481 mfi_put_ccb(ccb);
482
483 return 0;
484 }
485
486 static int
487 mfi_get_info(struct mfi_softc *sc)
488 {
489 #ifdef MFI_DEBUG
490 int i;
491 #endif
492 DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
493
494 if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
495 sizeof(sc->sc_info), &sc->sc_info, NULL))
496 return 1;
497
498 #ifdef MFI_DEBUG
499
500 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
501 printf("%s: active FW %s Version %s date %s time %s\n",
502 DEVNAME(sc),
503 sc->sc_info.mci_image_component[i].mic_name,
504 sc->sc_info.mci_image_component[i].mic_version,
505 sc->sc_info.mci_image_component[i].mic_build_date,
506 sc->sc_info.mci_image_component[i].mic_build_time);
507 }
508
509 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
510 printf("%s: pending FW %s Version %s date %s time %s\n",
511 DEVNAME(sc),
512 sc->sc_info.mci_pending_image_component[i].mic_name,
513 sc->sc_info.mci_pending_image_component[i].mic_version,
514 sc->sc_info.mci_pending_image_component[i].mic_build_date,
515 sc->sc_info.mci_pending_image_component[i].mic_build_time);
516 }
517
518 printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
519 DEVNAME(sc),
520 sc->sc_info.mci_max_arms,
521 sc->sc_info.mci_max_spans,
522 sc->sc_info.mci_max_arrays,
523 sc->sc_info.mci_max_lds,
524 sc->sc_info.mci_product_name);
525
526 printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
527 DEVNAME(sc),
528 sc->sc_info.mci_serial_number,
529 sc->sc_info.mci_hw_present,
530 sc->sc_info.mci_current_fw_time,
531 sc->sc_info.mci_max_cmds,
532 sc->sc_info.mci_max_sg_elements);
533
534 printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
535 DEVNAME(sc),
536 sc->sc_info.mci_max_request_size,
537 sc->sc_info.mci_lds_present,
538 sc->sc_info.mci_lds_degraded,
539 sc->sc_info.mci_lds_offline,
540 sc->sc_info.mci_pd_present);
541
542 printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
543 DEVNAME(sc),
544 sc->sc_info.mci_pd_disks_present,
545 sc->sc_info.mci_pd_disks_pred_failure,
546 sc->sc_info.mci_pd_disks_failed);
547
548 printf("%s: nvram %d mem %d flash %d\n",
549 DEVNAME(sc),
550 sc->sc_info.mci_nvram_size,
551 sc->sc_info.mci_memory_size,
552 sc->sc_info.mci_flash_size);
553
554 printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
555 DEVNAME(sc),
556 sc->sc_info.mci_ram_correctable_errors,
557 sc->sc_info.mci_ram_uncorrectable_errors,
558 sc->sc_info.mci_cluster_allowed,
559 sc->sc_info.mci_cluster_active);
560
561 printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
562 DEVNAME(sc),
563 sc->sc_info.mci_max_strips_per_io,
564 sc->sc_info.mci_raid_levels,
565 sc->sc_info.mci_adapter_ops,
566 sc->sc_info.mci_ld_ops);
567
568 printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
569 DEVNAME(sc),
570 sc->sc_info.mci_stripe_sz_ops.min,
571 sc->sc_info.mci_stripe_sz_ops.max,
572 sc->sc_info.mci_pd_ops,
573 sc->sc_info.mci_pd_mix_support);
574
575 printf("%s: ecc_bucket %d pckg_prop %s\n",
576 DEVNAME(sc),
577 sc->sc_info.mci_ecc_bucket_count,
578 sc->sc_info.mci_package_version);
579
580 printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
581 DEVNAME(sc),
582 sc->sc_info.mci_properties.mcp_seq_num,
583 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
584 sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
585 sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
586
587 printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
588 DEVNAME(sc),
589 sc->sc_info.mci_properties.mcp_rebuild_rate,
590 sc->sc_info.mci_properties.mcp_patrol_read_rate,
591 sc->sc_info.mci_properties.mcp_bgi_rate,
592 sc->sc_info.mci_properties.mcp_cc_rate);
593
594 printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
595 DEVNAME(sc),
596 sc->sc_info.mci_properties.mcp_recon_rate,
597 sc->sc_info.mci_properties.mcp_cache_flush_interval,
598 sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
599 sc->sc_info.mci_properties.mcp_spinup_delay,
600 sc->sc_info.mci_properties.mcp_cluster_enable);
601
602 printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
603 DEVNAME(sc),
604 sc->sc_info.mci_properties.mcp_coercion_mode,
605 sc->sc_info.mci_properties.mcp_alarm_enable,
606 sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
607 sc->sc_info.mci_properties.mcp_disable_battery_warn,
608 sc->sc_info.mci_properties.mcp_ecc_bucket_size);
609
610 printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
611 DEVNAME(sc),
612 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
613 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
614 sc->sc_info.mci_properties.mcp_expose_encl_devices);
615
616 printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
617 DEVNAME(sc),
618 sc->sc_info.mci_pci.mip_vendor,
619 sc->sc_info.mci_pci.mip_device,
620 sc->sc_info.mci_pci.mip_subvendor,
621 sc->sc_info.mci_pci.mip_subdevice);
622
623 printf("%s: type %#x port_count %d port_addr ",
624 DEVNAME(sc),
625 sc->sc_info.mci_host.mih_type,
626 sc->sc_info.mci_host.mih_port_count);
627
628 for (i = 0; i < 8; i++)
629 printf("%.0lx ", sc->sc_info.mci_host.mih_port_addr[i]);
630 printf("\n");
631
632 printf("%s: type %.x port_count %d port_addr ",
633 DEVNAME(sc),
634 sc->sc_info.mci_device.mid_type,
635 sc->sc_info.mci_device.mid_port_count);
636
637 for (i = 0; i < 8; i++)
638 printf("%.0lx ", sc->sc_info.mci_device.mid_port_addr[i]);
639 printf("\n");
640 #endif /* MFI_DEBUG */
641
642 return 0;
643 }
644
645 static void
646 mfiminphys(struct buf *bp)
647 {
648 DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
649
650 /* XXX currently using MFI_MAXFER = MAXPHYS */
651 if (bp->b_bcount > MFI_MAXFER)
652 bp->b_bcount = MFI_MAXFER;
653 minphys(bp);
654 }
655
656 int
657 mfi_rescan(device_t self, const char *ifattr, const int *locators)
658 {
659 struct mfi_softc *sc = device_private(self);
660
661 if (sc->sc_child != NULL)
662 return 0;
663
664 sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
665 scsiprint, NULL);
666
667 return 0;
668 }
669
670 void
671 mfi_childdetached(device_t self, device_t child)
672 {
673 struct mfi_softc *sc = device_private(self);
674
675 KASSERT(self == sc->sc_dev);
676 KASSERT(child == sc->sc_child);
677
678 if (child == sc->sc_child)
679 sc->sc_child = NULL;
680 }
681
682 int
683 mfi_detach(struct mfi_softc *sc, int flags)
684 {
685 int error;
686
687 DNPRINTF(MFI_D_MISC, "%s: mfi_detach\n", DEVNAME(sc));
688
689 if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
690 return error;
691
692 #if NBIO > 0
693 mfi_destroy_sensors(sc);
694 bio_unregister(sc->sc_dev);
695 #endif /* NBIO > 0 */
696
697 mfi_intr_disable(sc);
698
699 /* TBD: shutdown firmware */
700
701 if ((error = mfi_destroy_ccb(sc)) != 0)
702 return error;
703
704 mfi_freemem(sc, &sc->sc_sense);
705
706 mfi_freemem(sc, &sc->sc_frames);
707
708 mfi_freemem(sc, &sc->sc_pcq);
709
710 return 0;
711 }
712
713 int
714 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
715 {
716 struct scsipi_adapter *adapt = &sc->sc_adapt;
717 struct scsipi_channel *chan = &sc->sc_chan;
718 uint32_t status, frames;
719 int i;
720
721 DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
722
723 switch (iop) {
724 case MFI_IOP_XSCALE:
725 sc->sc_iop = &mfi_iop_xscale;
726 break;
727 case MFI_IOP_PPC:
728 sc->sc_iop = &mfi_iop_ppc;
729 break;
730 case MFI_IOP_GEN2:
731 sc->sc_iop = &mfi_iop_gen2;
732 break;
733 default:
734 panic("%s: unknown iop %d", DEVNAME(sc), iop);
735 }
736
737 if (mfi_transition_firmware(sc))
738 return 1;
739
740 TAILQ_INIT(&sc->sc_ccb_freeq);
741
742 status = mfi_fw_state(sc);
743 sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
744 sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
745 DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
746 DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);
747
748 /* consumer/producer and reply queue memory */
749 sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
750 sizeof(struct mfi_prod_cons));
751 if (sc->sc_pcq == NULL) {
752 aprint_error("%s: unable to allocate reply queue memory\n",
753 DEVNAME(sc));
754 goto nopcq;
755 }
756 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
757 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
758 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
759
760 /* frame memory */
761 /* we are not doing 64 bit IO so only calculate # of 32 bit frames */
762 frames = (sizeof(struct mfi_sg32) * sc->sc_max_sgl +
763 MFI_FRAME_SIZE - 1) / MFI_FRAME_SIZE + 1;
764 sc->sc_frames_size = frames * MFI_FRAME_SIZE;
765 sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
766 if (sc->sc_frames == NULL) {
767 aprint_error("%s: unable to allocate frame memory\n",
768 DEVNAME(sc));
769 goto noframe;
770 }
771 /* XXX hack, fix this */
772 if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
773 aprint_error("%s: improper frame alignment (%#llx) FIXME\n",
774 DEVNAME(sc), (long long int)MFIMEM_DVA(sc->sc_frames));
775 goto noframe;
776 }
777
778 /* sense memory */
779 sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
780 if (sc->sc_sense == NULL) {
781 aprint_error("%s: unable to allocate sense memory\n",
782 DEVNAME(sc));
783 goto nosense;
784 }
785
786 /* now that we have all memory bits go initialize ccbs */
787 if (mfi_init_ccb(sc)) {
788 aprint_error("%s: could not init ccb list\n", DEVNAME(sc));
789 goto noinit;
790 }
791
792 /* kickstart firmware with all addresses and pointers */
793 if (mfi_initialize_firmware(sc)) {
794 aprint_error("%s: could not initialize firmware\n",
795 DEVNAME(sc));
796 goto noinit;
797 }
798
799 if (mfi_get_info(sc)) {
800 aprint_error("%s: could not retrieve controller information\n",
801 DEVNAME(sc));
802 goto noinit;
803 }
804
805 aprint_normal("%s: logical drives %d, version %s, %dMB RAM\n",
806 DEVNAME(sc),
807 sc->sc_info.mci_lds_present,
808 sc->sc_info.mci_package_version,
809 sc->sc_info.mci_memory_size);
810
811 sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
812 sc->sc_max_ld = sc->sc_ld_cnt;
813 for (i = 0; i < sc->sc_ld_cnt; i++)
814 sc->sc_ld[i].ld_present = 1;
815
816 memset(adapt, 0, sizeof(*adapt));
817 adapt->adapt_dev = sc->sc_dev;
818 adapt->adapt_nchannels = 1;
819 if (sc->sc_ld_cnt)
820 adapt->adapt_openings = sc->sc_max_cmds / sc->sc_ld_cnt;
821 else
822 adapt->adapt_openings = sc->sc_max_cmds;
823 adapt->adapt_max_periph = adapt->adapt_openings;
824 adapt->adapt_request = mfi_scsipi_request;
825 adapt->adapt_minphys = mfiminphys;
826
827 memset(chan, 0, sizeof(*chan));
828 chan->chan_adapter = adapt;
829 chan->chan_bustype = &scsi_bustype;
830 chan->chan_channel = 0;
831 chan->chan_flags = 0;
832 chan->chan_nluns = 8;
833 chan->chan_ntargets = MFI_MAX_LD;
834 chan->chan_id = MFI_MAX_LD;
835
836 mfi_rescan(sc->sc_dev, "scsi", NULL);
837
838 /* enable interrupts */
839 mfi_intr_enable(sc);
840
841 #if NBIO > 0
842 if (bio_register(sc->sc_dev, mfi_ioctl) != 0)
843 panic("%s: controller registration failed", DEVNAME(sc));
844 if (mfi_create_sensors(sc) != 0)
845 aprint_error("%s: unable to create sensors\n", DEVNAME(sc));
846 #endif /* NBIO > 0 */
847
848 return 0;
849 noinit:
850 mfi_freemem(sc, &sc->sc_sense);
851 nosense:
852 mfi_freemem(sc, &sc->sc_frames);
853 noframe:
854 mfi_freemem(sc, &sc->sc_pcq);
855 nopcq:
856 return 1;
857 }
858
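/*
 * Complete a single command by busy-waiting on its status byte instead of
 * going through the reply queue: the frame is marked
 * MFI_FRAME_DONT_POST_IN_REPLY_QUEUE and polled in 1ms steps for roughly
 * five seconds.  Used during attach and for XS_CTL_POLL transfers.
 */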
859 static int
860 mfi_poll(struct mfi_ccb *ccb)
861 {
862 struct mfi_softc *sc = ccb->ccb_sc;
863 struct mfi_frame_header *hdr;
864 int to = 0;
865
866 DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));
867
868 hdr = &ccb->ccb_frame->mfr_header;
869 hdr->mfh_cmd_status = 0xff;
870 hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
871
872 mfi_post(sc, ccb);
873 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
874 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
875 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
876
877 while (hdr->mfh_cmd_status == 0xff) {
878 delay(1000);
879 if (to++ > 5000) /* XXX 5 seconds busywait sucks */
880 break;
881 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
882 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
883 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
884 }
885 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
886 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
887 sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
888
889 if (ccb->ccb_data != NULL) {
890 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
891 DEVNAME(sc));
892 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
893 ccb->ccb_dmamap->dm_mapsize,
894 (ccb->ccb_direction & MFI_DATA_IN) ?
895 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
896
897 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
898 }
899
900 if (hdr->mfh_cmd_status == 0xff) {
901 printf("%s: timeout on ccb %d\n", DEVNAME(sc),
902 hdr->mfh_context);
903 ccb->ccb_flags |= MFI_CCB_F_ERR;
904 return 1;
905 }
906
907 return 0;
908 }
909
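/*
 * Interrupt handler.  Once the IOP-specific mio_intr hook has claimed and
 * acknowledged the interrupt, walk the producer/consumer reply queue in
 * sc_pcq, look each completed context up in sc_ccb[] and invoke its
 * ccb_done callback, then publish the new consumer index back to the
 * firmware.
 */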
910 int
911 mfi_intr(void *arg)
912 {
913 struct mfi_softc *sc = arg;
914 struct mfi_prod_cons *pcq;
915 struct mfi_ccb *ccb;
916 uint32_t producer, consumer, ctx;
917 int claimed = 0;
918
919 if (!mfi_my_intr(sc))
920 return 0;
921
922 pcq = MFIMEM_KVA(sc->sc_pcq);
923
924 DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
925 (u_long)sc, (u_long)pcq);
926
927 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
928 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
929 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
930
931 producer = pcq->mpc_producer;
932 consumer = pcq->mpc_consumer;
933
934 while (consumer != producer) {
935 DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
936 DEVNAME(sc), producer, consumer);
937
938 ctx = pcq->mpc_reply_q[consumer];
939 pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
940 if (ctx == MFI_INVALID_CTX)
941 printf("%s: invalid context, p: %d c: %d\n",
942 DEVNAME(sc), producer, consumer);
943 else {
944 /* XXX remove from queue and call scsi_done */
945 ccb = &sc->sc_ccb[ctx];
946 DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
947 DEVNAME(sc), ctx);
948 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
949 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
950 sc->sc_frames_size,
951 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
952 ccb->ccb_done(ccb);
953
954 claimed = 1;
955 }
956 consumer++;
957 if (consumer == (sc->sc_max_cmds + 1))
958 consumer = 0;
959 }
960
961 pcq->mpc_consumer = consumer;
962 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
963 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
964 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
965
966 return claimed;
967 }
968
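/*
 * Build an MFI_CMD_LD_READ/LD_WRITE frame for a block transfer.  Only a
 * 32-bit LBA is filled in (mif_lba_hi stays 0), which matches the READ and
 * WRITE 6/10 CDBs decoded in mfi_scsipi_request(); all other opcodes go
 * through the pass-through path in mfi_scsi_ld().
 */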
969 static int
970 mfi_scsi_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint32_t blockno,
971 uint32_t blockcnt)
972 {
973 struct scsipi_periph *periph = xs->xs_periph;
974 struct mfi_io_frame *io;
975
976 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
977 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
978 periph->periph_target);
979
980 if (!xs->data)
981 return 1;
982
983 io = &ccb->ccb_frame->mfr_io;
984 if (xs->xs_control & XS_CTL_DATA_IN) {
985 io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
986 ccb->ccb_direction = MFI_DATA_IN;
987 } else {
988 io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
989 ccb->ccb_direction = MFI_DATA_OUT;
990 }
991 io->mif_header.mfh_target_id = periph->periph_target;
992 io->mif_header.mfh_timeout = 0;
993 io->mif_header.mfh_flags = 0;
994 io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
   995 	io->mif_header.mfh_data_len = blockcnt;
996 io->mif_lba_hi = 0;
997 io->mif_lba_lo = blockno;
998 io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
999 io->mif_sense_addr_hi = 0;
1000
1001 ccb->ccb_done = mfi_scsi_xs_done;
1002 ccb->ccb_xs = xs;
1003 ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
1004 ccb->ccb_sgl = &io->mif_sgl;
1005 ccb->ccb_data = xs->data;
1006 ccb->ccb_len = xs->datalen;
1007
1008 if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
1009 BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1010 return 1;
1011
1012 return 0;
1013 }
1014
1015 static void
1016 mfi_scsi_xs_done(struct mfi_ccb *ccb)
1017 {
1018 struct scsipi_xfer *xs = ccb->ccb_xs;
1019 struct mfi_softc *sc = ccb->ccb_sc;
1020 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1021
1022 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
1023 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1024
1025 if (xs->data != NULL) {
1026 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
1027 DEVNAME(sc));
1028 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1029 ccb->ccb_dmamap->dm_mapsize,
1030 (xs->xs_control & XS_CTL_DATA_IN) ?
1031 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1032
1033 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1034 }
1035
1036 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
1037 xs->error = XS_DRIVER_STUFFUP;
1038 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
1039 DEVNAME(sc), hdr->mfh_cmd_status);
1040
1041 if (hdr->mfh_scsi_status != 0) {
1042 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
1043 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
1044 MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
1045 DNPRINTF(MFI_D_INTR,
1046 "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
1047 DEVNAME(sc), hdr->mfh_scsi_status,
1048 (u_long)&xs->sense, (u_long)ccb->ccb_sense);
1049 memset(&xs->sense, 0, sizeof(xs->sense));
1050 memcpy(&xs->sense, ccb->ccb_sense,
1051 sizeof(struct scsi_sense_data));
1052 xs->error = XS_SENSE;
1053 }
1054 } else {
1055 xs->error = XS_NOERROR;
1056 xs->status = SCSI_OK;
1057 xs->resid = 0;
1058 }
1059
1060 mfi_put_ccb(ccb);
1061 scsipi_done(xs);
1062 }
1063
1064 static int
1065 mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
1066 {
1067 struct mfi_pass_frame *pf;
1068 struct scsipi_periph *periph = xs->xs_periph;
1069
1070 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
1071 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
1072 periph->periph_target);
1073
1074 pf = &ccb->ccb_frame->mfr_pass;
1075 pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
1076 pf->mpf_header.mfh_target_id = periph->periph_target;
1077 pf->mpf_header.mfh_lun_id = 0;
1078 pf->mpf_header.mfh_cdb_len = xs->cmdlen;
1079 pf->mpf_header.mfh_timeout = 0;
  1080 	pf->mpf_header.mfh_data_len = xs->datalen; /* XXX */
1081 pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
1082
1083 pf->mpf_sense_addr_hi = 0;
1084 pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);
1085
1086 memset(pf->mpf_cdb, 0, 16);
1087 memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);
1088
1089 ccb->ccb_done = mfi_scsi_xs_done;
1090 ccb->ccb_xs = xs;
1091 ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
1092 ccb->ccb_sgl = &pf->mpf_sgl;
1093
1094 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
1095 ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
1096 MFI_DATA_IN : MFI_DATA_OUT;
1097 else
1098 ccb->ccb_direction = MFI_DATA_NONE;
1099
1100 if (xs->data) {
1101 ccb->ccb_data = xs->data;
1102 ccb->ccb_len = xs->datalen;
1103
1104 if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
1105 BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1106 return 1;
1107 }
1108
1109 return 0;
1110 }
1111
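/*
 * scsipi entry point: ADAPTER_REQ_RUN_XFER requests are dispatched to the
 * firmware.  READ/WRITE CDBs take the direct I/O path, SYNCHRONIZE CACHE
 * becomes a cache-flush DCMD, and everything else is passed through as an
 * LD SCSI command.  XS_CTL_POLL transfers are completed synchronously via
 * mfi_poll().
 */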
1112 static void
1113 mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1114 void *arg)
1115 {
1116 struct scsipi_periph *periph;
1117 struct scsipi_xfer *xs;
1118 struct scsipi_adapter *adapt = chan->chan_adapter;
1119 struct mfi_softc *sc = device_private(adapt->adapt_dev);
1120 struct mfi_ccb *ccb;
1121 struct scsi_rw_6 *rw;
1122 struct scsipi_rw_10 *rwb;
1123 uint32_t blockno, blockcnt;
1124 uint8_t target;
1125 uint8_t mbox[MFI_MBOX_SIZE];
1126 int s;
1127
1128 switch (req) {
1129 case ADAPTER_REQ_GROW_RESOURCES:
1130 /* Not supported. */
1131 return;
1132 case ADAPTER_REQ_SET_XFER_MODE:
1133 /* Not supported. */
1134 return;
1135 case ADAPTER_REQ_RUN_XFER:
1136 break;
1137 }
1138
1139 xs = arg;
1140
1141 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x\n",
1142 DEVNAME(sc), req, xs->cmd->opcode);
1143
1144 periph = xs->xs_periph;
1145 target = periph->periph_target;
1146
1147 s = splbio();
1148 if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
1149 periph->periph_lun != 0) {
1150 DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1151 DEVNAME(sc), target);
1152 xs->error = XS_SELTIMEOUT;
1153 scsipi_done(xs);
1154 splx(s);
1155 return;
1156 }
1157
1158 if ((ccb = mfi_get_ccb(sc)) == NULL) {
1159 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
1160 xs->error = XS_RESOURCE_SHORTAGE;
1161 scsipi_done(xs);
1162 splx(s);
1163 return;
1164 }
1165
1166 switch (xs->cmd->opcode) {
1167 /* IO path */
1168 case READ_10:
1169 case WRITE_10:
1170 rwb = (struct scsipi_rw_10 *)xs->cmd;
1171 blockno = _4btol(rwb->addr);
1172 blockcnt = _2btol(rwb->length);
1173 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1174 mfi_put_ccb(ccb);
1175 goto stuffup;
1176 }
1177 break;
1178
1179 case SCSI_READ_6_COMMAND:
1180 case SCSI_WRITE_6_COMMAND:
1181 rw = (struct scsi_rw_6 *)xs->cmd;
1182 blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
1183 blockcnt = rw->length ? rw->length : 0x100;
1184 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1185 mfi_put_ccb(ccb);
1186 goto stuffup;
1187 }
1188 break;
1189
1190 case SCSI_SYNCHRONIZE_CACHE_10:
1191 mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1192 if (mfi_mgmt(ccb, xs,
1193 MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, mbox)) {
1194 mfi_put_ccb(ccb);
1195 goto stuffup;
1196 }
1197 break;
1198
  1199 	/* hand it off to the firmware and let it deal with it */
1200 case SCSI_TEST_UNIT_READY:
1201 /* save off sd? after autoconf */
1202 if (!cold) /* XXX bogus */
1203 strlcpy(sc->sc_ld[target].ld_dev, device_xname(sc->sc_dev),
1204 sizeof(sc->sc_ld[target].ld_dev));
1205 /* FALLTHROUGH */
1206
1207 default:
1208 if (mfi_scsi_ld(ccb, xs)) {
1209 mfi_put_ccb(ccb);
1210 goto stuffup;
1211 }
1212 break;
1213 }
1214
1215 DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1216
1217 if (xs->xs_control & XS_CTL_POLL) {
1218 if (mfi_poll(ccb)) {
1219 /* XXX check for sense in ccb->ccb_sense? */
1220 printf("%s: mfi_scsipi_request poll failed\n",
1221 DEVNAME(sc));
1222 memset(&xs->sense, 0, sizeof(xs->sense));
1223 xs->sense.scsi_sense.response_code =
1224 SSD_RCODE_VALID | SSD_RCODE_CURRENT;
1225 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
1226 xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
1227 xs->error = XS_SENSE;
1228 xs->status = SCSI_CHECK;
1229 } else {
1230 DNPRINTF(MFI_D_DMA,
1231 "%s: mfi_scsipi_request poll complete %d\n",
1232 DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
1233 xs->error = XS_NOERROR;
1234 xs->status = SCSI_OK;
1235 xs->resid = 0;
1236 }
1237 mfi_put_ccb(ccb);
1238 scsipi_done(xs);
1239 splx(s);
1240 return;
1241 }
1242
1243 mfi_post(sc, ccb);
1244
1245 DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
1246 ccb->ccb_dmamap->dm_nsegs);
1247
1248 splx(s);
1249 return;
1250
1251 stuffup:
1252 xs->error = XS_DRIVER_STUFFUP;
1253 scsipi_done(xs);
1254 splx(s);
1255 }
1256
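/*
 * Load ccb_data into the ccb's DMA map and translate the resulting
 * segments into 32-bit scatter/gather entries directly behind the command
 * frame.  ccb_frame_size grows by the size of the SGL and ccb_extra_frames
 * records how many additional frames the command spills into, which the
 * post routines fold into the inbound queue port write.
 */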
1257 static int
1258 mfi_create_sgl(struct mfi_ccb *ccb, int flags)
1259 {
1260 struct mfi_softc *sc = ccb->ccb_sc;
1261 struct mfi_frame_header *hdr;
1262 bus_dma_segment_t *sgd;
1263 union mfi_sgl *sgl;
1264 int error, i;
1265
1266 DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
1267 (u_long)ccb->ccb_data);
1268
1269 if (!ccb->ccb_data)
1270 return 1;
1271
1272 error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
1273 ccb->ccb_data, ccb->ccb_len, NULL, flags);
1274 if (error) {
1275 if (error == EFBIG)
1276 printf("more than %d dma segs\n",
1277 sc->sc_max_sgl);
1278 else
1279 printf("error %d loading dma map\n", error);
1280 return 1;
1281 }
1282
1283 hdr = &ccb->ccb_frame->mfr_header;
1284 sgl = ccb->ccb_sgl;
1285 sgd = ccb->ccb_dmamap->dm_segs;
1286 for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1287 sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1288 sgl->sg32[i].len = htole32(sgd[i].ds_len);
1289 DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
1290 DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1291 }
1292
1293 if (ccb->ccb_direction == MFI_DATA_IN) {
1294 hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1295 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1296 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1297 } else {
1298 hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1299 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1300 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1301 }
1302
1303 hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
  1304 	/* for 64-bit I/O the sizeof here would have to become the actual SG entry size */
1305 ccb->ccb_frame_size += sizeof(struct mfi_sg32) *
1306 ccb->ccb_dmamap->dm_nsegs;
1307 ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1308
1309 DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
1310 " dm_nsegs: %d extra_frames: %d\n",
1311 DEVNAME(sc),
1312 hdr->mfh_sg_count,
1313 ccb->ccb_frame_size,
1314 sc->sc_frames_size,
1315 ccb->ccb_dmamap->dm_nsegs,
1316 ccb->ccb_extra_frames);
1317
1318 return 0;
1319 }
1320
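/*
 * Synchronous DCMD issued on behalf of the driver itself (no scsipi_xfer):
 * the frame is built by mfi_mgmt() and then either polled (while cold,
 * during autoconfiguration) or posted and slept on until mfi_mgmt_done()
 * marks the ccb done.  A typical caller, as in mfi_get_info() above, looks
 * like:
 *
 *	if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
 *	    sizeof(sc->sc_info), &sc->sc_info, NULL))
 *		return 1;
 */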
1321 static int
1322 mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t dir,
1323 uint32_t len, void *buf, uint8_t *mbox)
1324 {
1325 struct mfi_ccb *ccb;
1326 int rv = 1;
1327
1328 if ((ccb = mfi_get_ccb(sc)) == NULL)
1329 return rv;
1330 rv = mfi_mgmt(ccb, NULL, opc, dir, len, buf, mbox);
1331 if (rv)
1332 return rv;
1333
1334 if (cold) {
1335 if (mfi_poll(ccb))
1336 goto done;
1337 } else {
1338 mfi_post(sc, ccb);
1339
1340 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt_internal sleeping\n",
1341 DEVNAME(sc));
1342 while (ccb->ccb_state != MFI_CCB_DONE)
1343 tsleep(ccb, PRIBIO, "mfi_mgmt", 0);
1344
1345 if (ccb->ccb_flags & MFI_CCB_F_ERR)
1346 goto done;
1347 }
1348 rv = 0;
1349
1350 done:
1351 mfi_put_ccb(ccb);
1352 return rv;
1353 }
1354
1355 static int
1356 mfi_mgmt(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
1357 uint32_t opc, uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
1358 {
1359 struct mfi_dcmd_frame *dcmd;
1360
1361 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(ccb->ccb_sc), opc);
1362
1363 dcmd = &ccb->ccb_frame->mfr_dcmd;
1364 memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1365 dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1366 dcmd->mdf_header.mfh_timeout = 0;
1367
1368 dcmd->mdf_opcode = opc;
1369 dcmd->mdf_header.mfh_data_len = 0;
1370 ccb->ccb_direction = dir;
1371 ccb->ccb_xs = xs;
1372 ccb->ccb_done = mfi_mgmt_done;
1373
1374 ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1375
1376 /* handle special opcodes */
1377 if (mbox)
1378 memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1379
1380 if (dir != MFI_DATA_NONE) {
1381 dcmd->mdf_header.mfh_data_len = len;
1382 ccb->ccb_data = buf;
1383 ccb->ccb_len = len;
1384 ccb->ccb_sgl = &dcmd->mdf_sgl;
1385
1386 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
1387 return 1;
1388 }
1389 return 0;
1390 }
1391
1392 static void
1393 mfi_mgmt_done(struct mfi_ccb *ccb)
1394 {
1395 struct scsipi_xfer *xs = ccb->ccb_xs;
1396 struct mfi_softc *sc = ccb->ccb_sc;
1397 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1398
1399 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
1400 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1401
1402 if (ccb->ccb_data != NULL) {
1403 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
1404 DEVNAME(sc));
1405 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1406 ccb->ccb_dmamap->dm_mapsize,
1407 (ccb->ccb_direction & MFI_DATA_IN) ?
1408 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1409
1410 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1411 }
1412
1413 if (hdr->mfh_cmd_status != MFI_STAT_OK)
1414 ccb->ccb_flags |= MFI_CCB_F_ERR;
1415
1416 ccb->ccb_state = MFI_CCB_DONE;
1417 if (xs) {
1418 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
1419 xs->error = XS_DRIVER_STUFFUP;
1420 } else {
1421 xs->error = XS_NOERROR;
1422 xs->status = SCSI_OK;
1423 xs->resid = 0;
1424 }
1425 mfi_put_ccb(ccb);
1426 scsipi_done(xs);
1427 } else
1428 wakeup(ccb);
1429 }
1430
1431 #if NBIO > 0
1432 int
1433 mfi_ioctl(device_t dev, u_long cmd, void *addr)
1434 {
1435 struct mfi_softc *sc = device_private(dev);
1436 int error = 0;
1437 int s;
1438
1439 KERNEL_LOCK(1, curlwp);
1440 s = splbio();
1441
1442 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1443
1444 switch (cmd) {
1445 case BIOCINQ:
1446 DNPRINTF(MFI_D_IOCTL, "inq\n");
1447 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1448 break;
1449
1450 case BIOCVOL:
1451 DNPRINTF(MFI_D_IOCTL, "vol\n");
1452 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1453 break;
1454
1455 case BIOCDISK:
1456 DNPRINTF(MFI_D_IOCTL, "disk\n");
1457 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1458 break;
1459
1460 case BIOCALARM:
1461 DNPRINTF(MFI_D_IOCTL, "alarm\n");
1462 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1463 break;
1464
1465 case BIOCBLINK:
1466 DNPRINTF(MFI_D_IOCTL, "blink\n");
1467 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1468 break;
1469
1470 case BIOCSETSTATE:
1471 DNPRINTF(MFI_D_IOCTL, "setstate\n");
1472 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1473 break;
1474
1475 default:
1476 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1477 error = EINVAL;
1478 }
1479 splx(s);
1480 KERNEL_UNLOCK_ONE(curlwp);
1481
1482 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
1483 return error;
1484 }
1485
1486 static int
1487 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1488 {
1489 struct mfi_conf *cfg;
1490 int rv = EINVAL;
1491
1492 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1493
1494 if (mfi_get_info(sc)) {
1495 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
1496 DEVNAME(sc));
1497 return EIO;
1498 }
1499
1500 /* get figures */
1501 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1502 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1503 sizeof *cfg, cfg, NULL))
1504 goto freeme;
1505
1506 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1507 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1508 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1509
1510 rv = 0;
1511 freeme:
1512 free(cfg, M_DEVBUF);
1513 return rv;
1514 }
1515
1516 static int
1517 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
1518 {
1519 int i, per, rv = EINVAL;
1520 uint8_t mbox[MFI_MBOX_SIZE];
1521
1522 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
1523 DEVNAME(sc), bv->bv_volid);
1524
1525 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1526 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1527 goto done;
1528
1529 i = bv->bv_volid;
1530 mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1531 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
1532 DEVNAME(sc), mbox[0]);
1533
1534 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
1535 sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox))
1536 goto done;
1537
1538 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
1539 /* go do hotspares */
1540 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
1541 goto done;
1542 }
1543
1544 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
1545
1546 switch(sc->sc_ld_list.mll_list[i].mll_state) {
1547 case MFI_LD_OFFLINE:
1548 bv->bv_status = BIOC_SVOFFLINE;
1549 break;
1550
1551 case MFI_LD_PART_DEGRADED:
1552 case MFI_LD_DEGRADED:
1553 bv->bv_status = BIOC_SVDEGRADED;
1554 break;
1555
1556 case MFI_LD_ONLINE:
1557 bv->bv_status = BIOC_SVONLINE;
1558 break;
1559
1560 default:
1561 bv->bv_status = BIOC_SVINVALID;
1562 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
1563 DEVNAME(sc),
1564 sc->sc_ld_list.mll_list[i].mll_state);
1565 }
1566
1567 /* additional status can modify MFI status */
1568 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
1569 case MFI_LD_PROG_CC:
1570 case MFI_LD_PROG_BGI:
1571 bv->bv_status = BIOC_SVSCRUB;
1572 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
1573 bv->bv_percent = (per * 100) / 0xffff;
1574 bv->bv_seconds =
1575 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
1576 break;
1577
1578 case MFI_LD_PROG_FGI:
1579 case MFI_LD_PROG_RECONSTRUCT:
1580 /* nothing yet */
1581 break;
1582 }
1583
1584 /*
1585 * The RAID levels are determined per the SNIA DDF spec, this is only
  1586 	 * a subset that is valid for the MFI controller.
1587 */
1588 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
1589 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
1590 MFI_DDF_SRL_SPANNED)
1591 bv->bv_level *= 10;
1592
1593 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
1594 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
1595
1596 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
1597
1598 rv = 0;
1599 done:
1600 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
1601 DEVNAME(sc), rv);
1602 return rv;
1603 }
1604
1605 static int
1606 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
1607 {
1608 struct mfi_conf *cfg;
1609 struct mfi_array *ar;
1610 struct mfi_ld_cfg *ld;
1611 struct mfi_pd_details *pd;
1612 struct scsipi_inquiry_data *inqbuf;
1613 char vend[8+16+4+1];
1614 int i, rv = EINVAL;
1615 int arr, vol, disk;
1616 uint32_t size;
1617 uint8_t mbox[MFI_MBOX_SIZE];
1618
1619 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
1620 DEVNAME(sc), bd->bd_diskid);
1621
1622 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1623
1624 /* send single element command to retrieve size for full structure */
1625 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1626 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1627 sizeof *cfg, cfg, NULL))
1628 goto freeme;
1629
1630 size = cfg->mfc_size;
1631 free(cfg, M_DEVBUF);
1632
1633 /* memory for read config */
1634 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1635 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1636 size, cfg, NULL))
1637 goto freeme;
1638
1639 ar = cfg->mfc_array;
1640
1641 /* calculate offset to ld structure */
1642 ld = (struct mfi_ld_cfg *)(
1643 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1644 cfg->mfc_array_size * cfg->mfc_no_array);
1645
1646 vol = bd->bd_volid;
1647
1648 if (vol >= cfg->mfc_no_ld) {
1649 /* do hotspares */
1650 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
1651 goto freeme;
1652 }
1653
1654 /* find corresponding array for ld */
1655 for (i = 0, arr = 0; i < vol; i++)
1656 arr += ld[i].mlc_parm.mpa_span_depth;
1657
1658 /* offset disk into pd list */
1659 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
1660
1661 /* offset array index into the next spans */
1662 arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
1663
1664 bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
1665 switch (ar[arr].pd[disk].mar_pd_state){
1666 case MFI_PD_UNCONFIG_GOOD:
1667 bd->bd_status = BIOC_SDUNUSED;
1668 break;
1669
1670 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
1671 bd->bd_status = BIOC_SDHOTSPARE;
1672 break;
1673
1674 case MFI_PD_OFFLINE:
1675 bd->bd_status = BIOC_SDOFFLINE;
1676 break;
1677
1678 case MFI_PD_FAILED:
1679 bd->bd_status = BIOC_SDFAILED;
1680 break;
1681
1682 case MFI_PD_REBUILD:
1683 bd->bd_status = BIOC_SDREBUILD;
1684 break;
1685
1686 case MFI_PD_ONLINE:
1687 bd->bd_status = BIOC_SDONLINE;
1688 break;
1689
1690 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
1691 default:
1692 bd->bd_status = BIOC_SDINVALID;
1693 break;
1694
1695 }
1696
1697 /* get the remaining fields */
1698 *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
1699 memset(pd, 0, sizeof(*pd));
1700 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1701 sizeof *pd, pd, mbox))
1702 goto freeme;
1703
1704 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
1705
1706 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
1707 bd->bd_channel = pd->mpd_enc_idx;
1708
1709 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1710 memcpy(vend, inqbuf->vendor, sizeof vend - 1);
1711 vend[sizeof vend - 1] = '\0';
1712 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
1713
1714 /* XXX find a way to retrieve serial nr from drive */
1715 /* XXX find a way to get bd_procdev */
1716
1717 rv = 0;
1718 freeme:
1719 free(pd, M_DEVBUF);
1720 free(cfg, M_DEVBUF);
1721
1722 return rv;
1723 }
1724
1725 static int
1726 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1727 {
1728 uint32_t opc, dir = MFI_DATA_NONE;
1729 int rv = 0;
1730 int8_t ret;
1731
1732 switch(ba->ba_opcode) {
1733 case BIOC_SADISABLE:
1734 opc = MR_DCMD_SPEAKER_DISABLE;
1735 break;
1736
1737 case BIOC_SAENABLE:
1738 opc = MR_DCMD_SPEAKER_ENABLE;
1739 break;
1740
1741 case BIOC_SASILENCE:
1742 opc = MR_DCMD_SPEAKER_SILENCE;
1743 break;
1744
1745 case BIOC_GASTATUS:
1746 opc = MR_DCMD_SPEAKER_GET;
1747 dir = MFI_DATA_IN;
1748 break;
1749
1750 case BIOC_SATEST:
1751 opc = MR_DCMD_SPEAKER_TEST;
1752 break;
1753
1754 default:
1755 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1756 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1757 return EINVAL;
1758 }
1759
1760 if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL))
1761 rv = EINVAL;
1762 else
1763 if (ba->ba_opcode == BIOC_GASTATUS)
1764 ba->ba_status = ret;
1765 else
1766 ba->ba_status = 0;
1767
1768 return rv;
1769 }
1770
1771 static int
1772 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1773 {
1774 int i, found, rv = EINVAL;
1775 uint8_t mbox[MFI_MBOX_SIZE];
1776 uint32_t cmd;
1777 struct mfi_pd_list *pd;
1778
1779 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1780 bb->bb_status);
1781
1782 /* channel 0 means not in an enclosure so can't be blinked */
1783 if (bb->bb_channel == 0)
1784 return EINVAL;
1785
1786 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1787
1788 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1789 MFI_PD_LIST_SIZE, pd, NULL))
1790 goto done;
1791
1792 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1793 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1794 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1795 found = 1;
1796 break;
1797 }
1798
1799 if (!found)
1800 goto done;
1801
1802 memset(mbox, 0, sizeof mbox);
1803
1804 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1805
1806 switch (bb->bb_status) {
1807 case BIOC_SBUNBLINK:
1808 cmd = MR_DCMD_PD_UNBLINK;
1809 break;
1810
1811 case BIOC_SBBLINK:
1812 cmd = MR_DCMD_PD_BLINK;
1813 break;
1814
1815 case BIOC_SBALARM:
1816 default:
1817 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1818 "opcode %x\n", DEVNAME(sc), bb->bb_status);
1819 goto done;
1820 }
1821
1822
1823 if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1824 goto done;
1825
1826 rv = 0;
1827 done:
1828 free(pd, M_DEVBUF);
1829 return rv;
1830 }
1831
1832 static int
1833 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
1834 {
1835 struct mfi_pd_list *pd;
1836 int i, found, rv = EINVAL;
1837 uint8_t mbox[MFI_MBOX_SIZE];
1838 uint32_t cmd;
1839
1840 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
1841 bs->bs_status);
1842
1843 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1844
1845 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1846 MFI_PD_LIST_SIZE, pd, NULL))
1847 goto done;
1848
1849 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1850 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
1851 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
1852 found = 1;
1853 break;
1854 }
1855
1856 if (!found)
1857 goto done;
1858
1859 memset(mbox, 0, sizeof mbox);
1860
1861 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1862
1863 switch (bs->bs_status) {
1864 case BIOC_SSONLINE:
1865 mbox[2] = MFI_PD_ONLINE;
1866 cmd = MD_DCMD_PD_SET_STATE;
1867 break;
1868
1869 case BIOC_SSOFFLINE:
1870 mbox[2] = MFI_PD_OFFLINE;
1871 cmd = MD_DCMD_PD_SET_STATE;
1872 break;
1873
1874 case BIOC_SSHOTSPARE:
1875 mbox[2] = MFI_PD_HOTSPARE;
1876 cmd = MD_DCMD_PD_SET_STATE;
1877 break;
1878 /*
1879 case BIOC_SSREBUILD:
1880 cmd = MD_DCMD_PD_REBUILD;
1881 break;
1882 */
1883 default:
1884 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
1885 "opcode %x\n", DEVNAME(sc), bs->bs_status);
1886 goto done;
1887 }
1888
1889
  1890 	if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE,
  1891 	    0, NULL, mbox))
1892 goto done;
1893
1894 rv = 0;
1895 done:
1896 free(pd, M_DEVBUF);
1897 return rv;
1898 }
1899
1900 static int
1901 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
1902 {
1903 struct mfi_conf *cfg;
1904 struct mfi_hotspare *hs;
1905 struct mfi_pd_details *pd;
1906 struct bioc_disk *sdhs;
1907 struct bioc_vol *vdhs;
1908 struct scsipi_inquiry_data *inqbuf;
1909 char vend[8+16+4+1];
1910 int i, rv = EINVAL;
1911 uint32_t size;
1912 uint8_t mbox[MFI_MBOX_SIZE];
1913
1914 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
1915
1916 if (!bio_hs)
1917 return EINVAL;
1918
1919 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1920
1921 /* send single element command to retrieve size for full structure */
1922 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1923 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1924 sizeof *cfg, cfg, NULL))
1925 goto freeme;
1926
1927 size = cfg->mfc_size;
1928 free(cfg, M_DEVBUF);
1929
1930 /* memory for read config */
1931 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1932 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1933 size, cfg, NULL))
1934 goto freeme;
1935
1936 /* calculate offset to hs structure */
1937 hs = (struct mfi_hotspare *)(
1938 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1939 cfg->mfc_array_size * cfg->mfc_no_array +
1940 cfg->mfc_ld_size * cfg->mfc_no_ld);
1941
1942 if (volid < cfg->mfc_no_ld)
1943 goto freeme; /* not a hotspare */
1944
1945 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
1946 goto freeme; /* not a hotspare */
1947
1948 /* offset into hotspare structure */
1949 i = volid - cfg->mfc_no_ld;
1950
1951 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
1952 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
1953 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
1954
1955 /* get pd fields */
1956 memset(mbox, 0, sizeof mbox);
1957 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
1958 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1959 sizeof *pd, pd, mbox)) {
1960 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
1961 DEVNAME(sc));
1962 goto freeme;
1963 }
1964
1965 switch (type) {
1966 case MFI_MGMT_VD:
1967 vdhs = bio_hs;
1968 vdhs->bv_status = BIOC_SVONLINE;
1969 vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */
1970 vdhs->bv_level = -1; /* hotspare */
1971 vdhs->bv_nodisk = 1;
1972 break;
1973
1974 case MFI_MGMT_SD:
1975 sdhs = bio_hs;
1976 sdhs->bd_status = BIOC_SDHOTSPARE;
1977 sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */
1978 sdhs->bd_channel = pd->mpd_enc_idx;
1979 sdhs->bd_target = pd->mpd_enc_slot;
1980 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1981 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
1982 vend[sizeof vend - 1] = '\0';
1983 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
1984 break;
1985
1986 default:
1987 goto freeme;
1988 }
1989
1990 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
1991 rv = 0;
1992 freeme:
1993 free(pd, M_DEVBUF);
1994 free(cfg, M_DEVBUF);
1995
1996 return rv;
1997 }
1998
1999 static int
2000 mfi_destroy_sensors(struct mfi_softc *sc)
2001 {
2002 if (sc->sc_sme == NULL)
2003 return 0;
2004 sysmon_envsys_unregister(sc->sc_sme);
2005 sc->sc_sme = NULL;
2006 free(sc->sc_sensor, M_DEVBUF);
2007 return 0;
2008 }
2009
2010 static int
2011 mfi_create_sensors(struct mfi_softc *sc)
2012 {
2013 int i;
2014 int nsensors = sc->sc_ld_cnt;
2015 int rv;
2016
2017 sc->sc_sme = sysmon_envsys_create();
2018 sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
2019 M_DEVBUF, M_NOWAIT | M_ZERO);
2020 if (sc->sc_sensor == NULL) {
2021 aprint_error("%s: can't allocate envsys_data_t\n",
2022 DEVNAME(sc));
2023 return ENOMEM;
2024 }
2025
2026 for (i = 0; i < nsensors; i++) {
2027 sc->sc_sensor[i].units = ENVSYS_DRIVE;
2028 sc->sc_sensor[i].state = ENVSYS_SINVALID;
2029 sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY;
2030 /* Enable monitoring for drive state changes */
2031 sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
2032 /* logical drives */
2033 snprintf(sc->sc_sensor[i].desc,
2034 sizeof(sc->sc_sensor[i].desc), "%s:%d",
2035 DEVNAME(sc), i);
2036 if (sysmon_envsys_sensor_attach(sc->sc_sme,
2037 &sc->sc_sensor[i]))
2038 goto out;
2039 }
2040
2041 sc->sc_sme->sme_name = DEVNAME(sc);
2042 sc->sc_sme->sme_cookie = sc;
2043 sc->sc_sme->sme_refresh = mfi_sensor_refresh;
2044 rv = sysmon_envsys_register(sc->sc_sme);
2045 if (rv != 0) {
2046 aprint_error("%s: unable to register with sysmon (rv = %d)\n",
2047 DEVNAME(sc), rv);
2048 goto out;
2049 }
2050 return 0;
2051
2052 out:
2053 free(sc->sc_sensor, M_DEVBUF);
2054 sysmon_envsys_destroy(sc->sc_sme);
2055 sc->sc_sme = NULL;
2056 return EINVAL;
2057 }
2058
2059 static void
2060 mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
2061 {
2062 struct mfi_softc *sc = sme->sme_cookie;
2063 struct bioc_vol bv;
2064 int s;
2065 int error;
2066
2067 if (edata->sensor >= sc->sc_ld_cnt)
2068 return;
2069
2070 memset(&bv, 0, sizeof(bv));
2071 bv.bv_volid = edata->sensor;
2072 KERNEL_LOCK(1, curlwp);
2073 s = splbio();
2074 error = mfi_ioctl_vol(sc, &bv);
2075 splx(s);
2076 KERNEL_UNLOCK_ONE(curlwp);
2077 if (error)
2078 return;
2079
2080 switch(bv.bv_status) {
2081 case BIOC_SVOFFLINE:
2082 edata->value_cur = ENVSYS_DRIVE_FAIL;
2083 edata->state = ENVSYS_SCRITICAL;
2084 break;
2085
2086 case BIOC_SVDEGRADED:
2087 edata->value_cur = ENVSYS_DRIVE_PFAIL;
2088 edata->state = ENVSYS_SCRITICAL;
2089 break;
2090
2091 case BIOC_SVSCRUB:
2092 case BIOC_SVONLINE:
2093 edata->value_cur = ENVSYS_DRIVE_ONLINE;
2094 edata->state = ENVSYS_SVALID;
2095 break;
2096
2097 case BIOC_SVINVALID:
  2098 		/* FALLTHROUGH */
2099 default:
2100 edata->value_cur = 0; /* unknown */
2101 edata->state = ENVSYS_SINVALID;
2102 }
2103 }
2104
2105 #endif /* NBIO > 0 */
2106
2107 static uint32_t
2108 mfi_xscale_fw_state(struct mfi_softc *sc)
2109 {
2110 return mfi_read(sc, MFI_OMSG0);
2111 }
2112
2113 static void
2114 mfi_xscale_intr_dis(struct mfi_softc *sc)
2115 {
2116 mfi_write(sc, MFI_OMSK, 0);
2117 }
2118
2119 static void
2120 mfi_xscale_intr_ena(struct mfi_softc *sc)
2121 {
2122 mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
2123 }
2124
2125 static int
2126 mfi_xscale_intr(struct mfi_softc *sc)
2127 {
2128 uint32_t status;
2129
2130 status = mfi_read(sc, MFI_OSTS);
2131 if (!ISSET(status, MFI_OSTS_INTR_VALID))
2132 return 0;
2133
2134 /* write status back to acknowledge interrupt */
2135 mfi_write(sc, MFI_OSTS, status);
2136 return 1;
2137 }
2138
2139 static void
2140 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2141 {
2142 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2143 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
2144 sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2145 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
2146 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
2147 MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);
2148
2149 mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
2150 ccb->ccb_extra_frames);
2151 }
2152
2153 static uint32_t
2154 mfi_ppc_fw_state(struct mfi_softc *sc)
2155 {
2156 return mfi_read(sc, MFI_OSP);
2157 }
2158
2159 static void
2160 mfi_ppc_intr_dis(struct mfi_softc *sc)
2161 {
2162 /* Taking a wild guess --dyoung */
2163 mfi_write(sc, MFI_OMSK, ~(uint32_t)0x0);
2164 mfi_write(sc, MFI_ODC, 0xffffffff);
2165 }
2166
2167 static void
2168 mfi_ppc_intr_ena(struct mfi_softc *sc)
2169 {
2170 mfi_write(sc, MFI_ODC, 0xffffffff);
2171 mfi_write(sc, MFI_OMSK, ~0x80000004);
2172 }
2173
2174 static int
2175 mfi_ppc_intr(struct mfi_softc *sc)
2176 {
2177 uint32_t status;
2178
2179 status = mfi_read(sc, MFI_OSTS);
2180 if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2181 return 0;
2182
2183 /* write status back to acknowledge interrupt */
2184 mfi_write(sc, MFI_ODC, status);
2185 return 1;
2186 }
2187
2188 static void
2189 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2190 {
2191 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2192 (ccb->ccb_extra_frames << 1));
2193 }
2194
2195 u_int32_t
2196 mfi_gen2_fw_state(struct mfi_softc *sc)
2197 {
2198 return (mfi_read(sc, MFI_OSP));
2199 }
2200
2201 void
2202 mfi_gen2_intr_dis(struct mfi_softc *sc)
2203 {
2204 mfi_write(sc, MFI_OMSK, 0xffffffff);
2205 mfi_write(sc, MFI_ODC, 0xffffffff);
2206 }
2207
2208 void
2209 mfi_gen2_intr_ena(struct mfi_softc *sc)
2210 {
2211 mfi_write(sc, MFI_ODC, 0xffffffff);
2212 mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
2213 }
2214
2215 int
2216 mfi_gen2_intr(struct mfi_softc *sc)
2217 {
2218 u_int32_t status;
2219
2220 status = mfi_read(sc, MFI_OSTS);
2221 if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
2222 return (0);
2223
2224 /* write status back to acknowledge interrupt */
2225 mfi_write(sc, MFI_ODC, status);
2226
2227 return (1);
2228 }
2229
2230 void
2231 mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2232 {
2233 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2234 (ccb->ccb_extra_frames << 1));
2235 }
2236