    1 /*	$NetBSD: mfi.c,v 1.17 2008/04/22 22:49:49 cegger Exp $ */
2 /* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
3 /*
4 * Copyright (c) 2006 Marco Peereboom <marco (at) peereboom.us>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include <sys/cdefs.h>
20 __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.17 2008/04/22 22:49:49 cegger Exp $");
21
22 #include "bio.h"
23
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/buf.h>
27 #include <sys/ioctl.h>
28 #include <sys/device.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/proc.h>
32
33 #include <uvm/uvm_param.h>
34
35 #include <sys/bus.h>
36
37 #include <dev/scsipi/scsipi_all.h>
38 #include <dev/scsipi/scsi_all.h>
39 #include <dev/scsipi/scsi_spc.h>
40 #include <dev/scsipi/scsipi_disk.h>
41 #include <dev/scsipi/scsi_disk.h>
42 #include <dev/scsipi/scsiconf.h>
43
44 #include <dev/ic/mfireg.h>
45 #include <dev/ic/mfivar.h>
46
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #endif /* NBIO > 0 */
50
51 #ifdef MFI_DEBUG
52 uint32_t mfi_debug = 0
53 /* | MFI_D_CMD */
54 /* | MFI_D_INTR */
55 /* | MFI_D_MISC */
56 /* | MFI_D_DMA */
57 | MFI_D_IOCTL
58 /* | MFI_D_RW */
59 /* | MFI_D_MEM */
60 /* | MFI_D_CCB */
61 ;
62 #endif
63
64 static void mfi_scsipi_request(struct scsipi_channel *,
65 scsipi_adapter_req_t, void *);
66 static void mfiminphys(struct buf *bp);
67
68 static struct mfi_ccb *mfi_get_ccb(struct mfi_softc *);
69 static void mfi_put_ccb(struct mfi_ccb *);
70 static int mfi_init_ccb(struct mfi_softc *);
71
72 static struct mfi_mem *mfi_allocmem(struct mfi_softc *, size_t);
73 static void mfi_freemem(struct mfi_softc *, struct mfi_mem *);
74
75 static int mfi_transition_firmware(struct mfi_softc *);
76 static int mfi_initialize_firmware(struct mfi_softc *);
77 static int mfi_get_info(struct mfi_softc *);
78 static uint32_t mfi_read(struct mfi_softc *, bus_size_t);
79 static void mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
80 static int mfi_poll(struct mfi_ccb *);
81 static int mfi_create_sgl(struct mfi_ccb *, int);
82
83 /* commands */
84 static int mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
85 static int mfi_scsi_io(struct mfi_ccb *, struct scsipi_xfer *,
86 uint32_t, uint32_t);
87 static void mfi_scsi_xs_done(struct mfi_ccb *);
88 static int mfi_mgmt(struct mfi_softc *, uint32_t, uint32_t,
89 uint32_t, void *, uint8_t *);
90 static void mfi_mgmt_done(struct mfi_ccb *);
91
92 #if NBIO > 0
93 static int mfi_ioctl(struct device *, u_long, void *);
94 static int mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
95 static int mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
96 static int mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
97 static int mfi_ioctl_alarm(struct mfi_softc *,
98 struct bioc_alarm *);
99 static int mfi_ioctl_blink(struct mfi_softc *sc,
100 struct bioc_blink *);
101 static int mfi_ioctl_setstate(struct mfi_softc *,
102 struct bioc_setstate *);
103 static int mfi_bio_hs(struct mfi_softc *, int, int, void *);
104 static int mfi_create_sensors(struct mfi_softc *);
105 static void mfi_sensor_refresh(struct sysmon_envsys *,
106 envsys_data_t *);
107 #endif /* NBIO > 0 */
108
109 static uint32_t mfi_xscale_fw_state(struct mfi_softc *sc);
110 static void mfi_xscale_intr_ena(struct mfi_softc *sc);
111 static int mfi_xscale_intr(struct mfi_softc *sc);
112 static void mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
113
114 static const struct mfi_iop_ops mfi_iop_xscale = {
115 mfi_xscale_fw_state,
116 mfi_xscale_intr_ena,
117 mfi_xscale_intr,
118 mfi_xscale_post
119 };
120
121 static uint32_t mfi_ppc_fw_state(struct mfi_softc *sc);
122 static void mfi_ppc_intr_ena(struct mfi_softc *sc);
123 static int mfi_ppc_intr(struct mfi_softc *sc);
124 static void mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
125
126 static const struct mfi_iop_ops mfi_iop_ppc = {
127 mfi_ppc_fw_state,
128 mfi_ppc_intr_ena,
129 mfi_ppc_intr,
130 mfi_ppc_post
131 };
132
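/*
 * Register-level differences between the supported I/O processors
 * (XScale and PPC) are hidden behind the per-IOP operations vector
 * selected in mfi_attach(); the macros below dispatch through it.
 */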
133 #define mfi_fw_state(_s) ((_s)->sc_iop->mio_fw_state(_s))
134 #define mfi_intr_enable(_s) ((_s)->sc_iop->mio_intr_ena(_s))
135 #define mfi_my_intr(_s) ((_s)->sc_iop->mio_intr(_s))
136 #define mfi_post(_s, _c) ((_s)->sc_iop->mio_post((_s), (_c)))
137
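/*
 * ccb management: commands are drawn from and returned to a free list
 * protected by splbio().  mfi_get_ccb() returns NULL when the
 * controller is out of command slots.
 */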
138 static struct mfi_ccb *
139 mfi_get_ccb(struct mfi_softc *sc)
140 {
141 struct mfi_ccb *ccb;
142 int s;
143
144 s = splbio();
145 ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
146 if (ccb) {
147 TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
148 ccb->ccb_state = MFI_CCB_READY;
149 }
150 splx(s);
151
152 DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
153
154 return ccb;
155 }
156
157 static void
158 mfi_put_ccb(struct mfi_ccb *ccb)
159 {
160 struct mfi_softc *sc = ccb->ccb_sc;
161 int s;
162
163 DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
164
165 s = splbio();
166 ccb->ccb_state = MFI_CCB_FREE;
167 ccb->ccb_xs = NULL;
168 ccb->ccb_flags = 0;
169 ccb->ccb_done = NULL;
170 ccb->ccb_direction = 0;
171 ccb->ccb_frame_size = 0;
172 ccb->ccb_extra_frames = 0;
173 ccb->ccb_sgl = NULL;
174 ccb->ccb_data = NULL;
175 ccb->ccb_len = 0;
176 TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
177 splx(s);
178 }
179
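/*
 * Carve the preallocated frame and sense DMA areas into per-command
 * slices, record the command index in the frame context so completions
 * can be mapped back to their ccb, and create a data DMA map per ccb.
 */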
180 static int
181 mfi_init_ccb(struct mfi_softc *sc)
182 {
183 struct mfi_ccb *ccb;
184 uint32_t i;
185 int error;
186
187 DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
188
189 sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
190 M_DEVBUF, M_WAITOK|M_ZERO);
191
192 for (i = 0; i < sc->sc_max_cmds; i++) {
193 ccb = &sc->sc_ccb[i];
194
195 ccb->ccb_sc = sc;
196
197 /* select i'th frame */
198 ccb->ccb_frame = (union mfi_frame *)
199 ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
200 ccb->ccb_pframe =
201 MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
202 ccb->ccb_frame->mfr_header.mfh_context = i;
203
204 /* select i'th sense */
205 ccb->ccb_sense = (struct mfi_sense *)
206 ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
207 ccb->ccb_psense =
208 (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
209
210 /* create a dma map for transfer */
211 error = bus_dmamap_create(sc->sc_dmat,
212 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
213 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
214 if (error) {
215 printf("%s: cannot create ccb dmamap (%d)\n",
216 DEVNAME(sc), error);
217 goto destroy;
218 }
219
220 DNPRINTF(MFI_D_CCB,
221 "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
222 ccb->ccb_frame->mfr_header.mfh_context, ccb,
223 (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
224 (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
225 (u_long)ccb->ccb_dmamap);
226
227 /* add ccb to queue */
228 mfi_put_ccb(ccb);
229 }
230
231 return 0;
232 destroy:
233 /* free dma maps and ccb memory */
234 while (i) {
235 i--;
236 ccb = &sc->sc_ccb[i];
237 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
238 }
239
240 free(sc->sc_ccb, M_DEVBUF);
241
242 return 1;
243 }
244
245 static uint32_t
246 mfi_read(struct mfi_softc *sc, bus_size_t r)
247 {
248 uint32_t rv;
249
250 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
251 BUS_SPACE_BARRIER_READ);
252 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
253
    254 	DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x%08x ", DEVNAME(sc), (u_long)r, rv);
255 return rv;
256 }
257
258 static void
259 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
260 {
261 DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);
262
263 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
264 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
265 BUS_SPACE_BARRIER_WRITE);
266 }
267
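/*
 * Allocate, map and load a single-segment piece of DMA-safe memory;
 * used for the producer/consumer reply queue, the frame pool and the
 * sense pool.
 */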
268 static struct mfi_mem *
269 mfi_allocmem(struct mfi_softc *sc, size_t size)
270 {
271 struct mfi_mem *mm;
272 int nsegs;
273
274 DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
275 (long)size);
276
277 mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
278 if (mm == NULL)
279 return NULL;
280
281 mm->am_size = size;
282
283 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
284 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
285 goto amfree;
286
287 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
288 &nsegs, BUS_DMA_NOWAIT) != 0)
289 goto destroy;
290
291 if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
292 BUS_DMA_NOWAIT) != 0)
293 goto free;
294
295 if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
296 BUS_DMA_NOWAIT) != 0)
297 goto unmap;
298
299 DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n",
300 mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);
301
302 memset(mm->am_kva, 0, size);
303 return mm;
304
305 unmap:
306 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
307 free:
308 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
309 destroy:
310 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
311 amfree:
312 free(mm, M_DEVBUF);
313
314 return NULL;
315 }
316
317 static void
318 mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
319 {
320 DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
321
322 bus_dmamap_unload(sc->sc_dmat, mm->am_map);
323 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
324 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
325 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
326 free(mm, M_DEVBUF);
327 }
328
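/*
 * Walk the firmware towards MFI_STATE_READY, issuing the handshake or
 * ready doorbell writes where required and polling with a per-state
 * timeout.  A FAULT state or a state that never changes is fatal.
 */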
329 static int
330 mfi_transition_firmware(struct mfi_softc *sc)
331 {
332 int32_t fw_state, cur_state;
333 int max_wait, i;
334
335 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
336
337 DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
338 fw_state);
339
340 while (fw_state != MFI_STATE_READY) {
341 DNPRINTF(MFI_D_MISC,
342 "%s: waiting for firmware to become ready\n",
343 DEVNAME(sc));
344 cur_state = fw_state;
345 switch (fw_state) {
346 case MFI_STATE_FAULT:
347 printf("%s: firmware fault\n", DEVNAME(sc));
348 return 1;
349 case MFI_STATE_WAIT_HANDSHAKE:
350 mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
351 max_wait = 2;
352 break;
353 case MFI_STATE_OPERATIONAL:
354 mfi_write(sc, MFI_IDB, MFI_INIT_READY);
355 max_wait = 10;
356 break;
357 case MFI_STATE_UNDEFINED:
358 case MFI_STATE_BB_INIT:
359 max_wait = 2;
360 break;
361 case MFI_STATE_FW_INIT:
362 case MFI_STATE_DEVICE_SCAN:
363 case MFI_STATE_FLUSH_CACHE:
364 max_wait = 20;
365 break;
366 default:
367 printf("%s: unknown firmware state %d\n",
368 DEVNAME(sc), fw_state);
369 return 1;
370 }
371 for (i = 0; i < (max_wait * 10); i++) {
372 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
373 if (fw_state == cur_state)
374 DELAY(100000);
375 else
376 break;
377 }
378 if (fw_state == cur_state) {
379 printf("%s: firmware stuck in state %#x\n",
380 DEVNAME(sc), fw_state);
381 return 1;
382 }
383 }
384
385 return 0;
386 }
387
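/*
 * Hand the firmware the bus addresses of the reply queue and the
 * producer/consumer indices via a polled MFI_CMD_INIT frame.
 */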
388 static int
389 mfi_initialize_firmware(struct mfi_softc *sc)
390 {
391 struct mfi_ccb *ccb;
392 struct mfi_init_frame *init;
393 struct mfi_init_qinfo *qinfo;
394
395 DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
396
397 if ((ccb = mfi_get_ccb(sc)) == NULL)
398 return 1;
399
400 init = &ccb->ccb_frame->mfr_init;
401 qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
402
403 memset(qinfo, 0, sizeof *qinfo);
404 qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
405 qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
406 offsetof(struct mfi_prod_cons, mpc_reply_q));
407 qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
408 offsetof(struct mfi_prod_cons, mpc_producer));
409 qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
410 offsetof(struct mfi_prod_cons, mpc_consumer));
411
412 init->mif_header.mfh_cmd = MFI_CMD_INIT;
413 init->mif_header.mfh_data_len = sizeof *qinfo;
414 init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);
415
416 DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
417 DEVNAME(sc),
418 qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
419 qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);
420
421 if (mfi_poll(ccb)) {
422 printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc));
423 return 1;
424 }
425
426 mfi_put_ccb(ccb);
427
428 return 0;
429 }
430
431 static int
432 mfi_get_info(struct mfi_softc *sc)
433 {
434 #ifdef MFI_DEBUG
435 int i;
436 #endif
437 DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
438
439 if (mfi_mgmt(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
440 sizeof(sc->sc_info), &sc->sc_info, NULL))
441 return 1;
442
443 #ifdef MFI_DEBUG
444
445 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
446 printf("%s: active FW %s Version %s date %s time %s\n",
447 DEVNAME(sc),
448 sc->sc_info.mci_image_component[i].mic_name,
449 sc->sc_info.mci_image_component[i].mic_version,
450 sc->sc_info.mci_image_component[i].mic_build_date,
451 sc->sc_info.mci_image_component[i].mic_build_time);
452 }
453
454 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
455 printf("%s: pending FW %s Version %s date %s time %s\n",
456 DEVNAME(sc),
457 sc->sc_info.mci_pending_image_component[i].mic_name,
458 sc->sc_info.mci_pending_image_component[i].mic_version,
459 sc->sc_info.mci_pending_image_component[i].mic_build_date,
460 sc->sc_info.mci_pending_image_component[i].mic_build_time);
461 }
462
463 printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
464 DEVNAME(sc),
465 sc->sc_info.mci_max_arms,
466 sc->sc_info.mci_max_spans,
467 sc->sc_info.mci_max_arrays,
468 sc->sc_info.mci_max_lds,
469 sc->sc_info.mci_product_name);
470
471 printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
472 DEVNAME(sc),
473 sc->sc_info.mci_serial_number,
474 sc->sc_info.mci_hw_present,
475 sc->sc_info.mci_current_fw_time,
476 sc->sc_info.mci_max_cmds,
477 sc->sc_info.mci_max_sg_elements);
478
479 printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
480 DEVNAME(sc),
481 sc->sc_info.mci_max_request_size,
482 sc->sc_info.mci_lds_present,
483 sc->sc_info.mci_lds_degraded,
484 sc->sc_info.mci_lds_offline,
485 sc->sc_info.mci_pd_present);
486
487 printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
488 DEVNAME(sc),
489 sc->sc_info.mci_pd_disks_present,
490 sc->sc_info.mci_pd_disks_pred_failure,
491 sc->sc_info.mci_pd_disks_failed);
492
493 printf("%s: nvram %d mem %d flash %d\n",
494 DEVNAME(sc),
495 sc->sc_info.mci_nvram_size,
496 sc->sc_info.mci_memory_size,
497 sc->sc_info.mci_flash_size);
498
499 printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
500 DEVNAME(sc),
501 sc->sc_info.mci_ram_correctable_errors,
502 sc->sc_info.mci_ram_uncorrectable_errors,
503 sc->sc_info.mci_cluster_allowed,
504 sc->sc_info.mci_cluster_active);
505
506 printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
507 DEVNAME(sc),
508 sc->sc_info.mci_max_strips_per_io,
509 sc->sc_info.mci_raid_levels,
510 sc->sc_info.mci_adapter_ops,
511 sc->sc_info.mci_ld_ops);
512
513 printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
514 DEVNAME(sc),
515 sc->sc_info.mci_stripe_sz_ops.min,
516 sc->sc_info.mci_stripe_sz_ops.max,
517 sc->sc_info.mci_pd_ops,
518 sc->sc_info.mci_pd_mix_support);
519
520 printf("%s: ecc_bucket %d pckg_prop %s\n",
521 DEVNAME(sc),
522 sc->sc_info.mci_ecc_bucket_count,
523 sc->sc_info.mci_package_version);
524
525 printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
526 DEVNAME(sc),
527 sc->sc_info.mci_properties.mcp_seq_num,
528 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
529 sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
530 sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
531
532 printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
533 DEVNAME(sc),
534 sc->sc_info.mci_properties.mcp_rebuild_rate,
535 sc->sc_info.mci_properties.mcp_patrol_read_rate,
536 sc->sc_info.mci_properties.mcp_bgi_rate,
537 sc->sc_info.mci_properties.mcp_cc_rate);
538
539 printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
540 DEVNAME(sc),
541 sc->sc_info.mci_properties.mcp_recon_rate,
542 sc->sc_info.mci_properties.mcp_cache_flush_interval,
543 sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
544 sc->sc_info.mci_properties.mcp_spinup_delay,
545 sc->sc_info.mci_properties.mcp_cluster_enable);
546
547 printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
548 DEVNAME(sc),
549 sc->sc_info.mci_properties.mcp_coercion_mode,
550 sc->sc_info.mci_properties.mcp_alarm_enable,
551 sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
552 sc->sc_info.mci_properties.mcp_disable_battery_warn,
553 sc->sc_info.mci_properties.mcp_ecc_bucket_size);
554
555 printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
556 DEVNAME(sc),
557 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
558 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
559 sc->sc_info.mci_properties.mcp_expose_encl_devices);
560
561 printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
562 DEVNAME(sc),
563 sc->sc_info.mci_pci.mip_vendor,
564 sc->sc_info.mci_pci.mip_device,
565 sc->sc_info.mci_pci.mip_subvendor,
566 sc->sc_info.mci_pci.mip_subdevice);
567
568 printf("%s: type %#x port_count %d port_addr ",
569 DEVNAME(sc),
570 sc->sc_info.mci_host.mih_type,
571 sc->sc_info.mci_host.mih_port_count);
572
573 for (i = 0; i < 8; i++)
574 printf("%.0lx ", sc->sc_info.mci_host.mih_port_addr[i]);
575 printf("\n");
576
    577 	printf("%s: type %#x port_count %d port_addr ",
578 DEVNAME(sc),
579 sc->sc_info.mci_device.mid_type,
580 sc->sc_info.mci_device.mid_port_count);
581
582 for (i = 0; i < 8; i++)
583 printf("%.0lx ", sc->sc_info.mci_device.mid_port_addr[i]);
584 printf("\n");
585 #endif /* MFI_DEBUG */
586
587 return 0;
588 }
589
590 static void
591 mfiminphys(struct buf *bp)
592 {
593 DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
594
595 /* XXX currently using MFI_MAXFER = MAXPHYS */
596 if (bp->b_bcount > MFI_MAXFER)
597 bp->b_bcount = MFI_MAXFER;
598 minphys(bp);
599 }
600
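/*
 * Attach sequence: bring the firmware to the ready state, read the
 * command and SGL limits from the state register, allocate the reply
 * queue, frame and sense DMA memory, initialize the ccbs, send the
 * INIT frame, fetch the controller information and finally attach the
 * scsipi bus and enable interrupts.
 */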
601 int
602 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
603 {
604 struct scsipi_adapter *adapt = &sc->sc_adapt;
605 struct scsipi_channel *chan = &sc->sc_chan;
606 uint32_t status, frames;
607 int i;
608
609 DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
610
611 switch (iop) {
612 case MFI_IOP_XSCALE:
613 sc->sc_iop = &mfi_iop_xscale;
614 break;
615 case MFI_IOP_PPC:
616 sc->sc_iop = &mfi_iop_ppc;
617 break;
618 default:
619 panic("%s: unknown iop %d", DEVNAME(sc), iop);
620 }
621
622 if (mfi_transition_firmware(sc))
623 return 1;
624
625 TAILQ_INIT(&sc->sc_ccb_freeq);
626
627 status = mfi_fw_state(sc);
628 sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
629 sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
630 DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
631 DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);
632
633 /* consumer/producer and reply queue memory */
634 sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
635 sizeof(struct mfi_prod_cons));
636 if (sc->sc_pcq == NULL) {
637 aprint_error("%s: unable to allocate reply queue memory\n",
638 DEVNAME(sc));
639 goto nopcq;
640 }
641 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
642 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
643 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
644
645 /* frame memory */
646 /* we are not doing 64 bit IO so only calculate # of 32 bit frames */
647 frames = (sizeof(struct mfi_sg32) * sc->sc_max_sgl +
648 MFI_FRAME_SIZE - 1) / MFI_FRAME_SIZE + 1;
649 sc->sc_frames_size = frames * MFI_FRAME_SIZE;
650 sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
651 if (sc->sc_frames == NULL) {
652 aprint_error("%s: unable to allocate frame memory\n",
653 DEVNAME(sc));
654 goto noframe;
655 }
656 /* XXX hack, fix this */
657 if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
658 aprint_error("%s: improper frame alignment (%#llx) FIXME\n",
659 DEVNAME(sc), (long long int)MFIMEM_DVA(sc->sc_frames));
660 goto noframe;
661 }
662
663 /* sense memory */
664 sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
665 if (sc->sc_sense == NULL) {
666 aprint_error("%s: unable to allocate sense memory\n",
667 DEVNAME(sc));
668 goto nosense;
669 }
670
671 /* now that we have all memory bits go initialize ccbs */
672 if (mfi_init_ccb(sc)) {
673 aprint_error("%s: could not init ccb list\n", DEVNAME(sc));
674 goto noinit;
675 }
676
677 /* kickstart firmware with all addresses and pointers */
678 if (mfi_initialize_firmware(sc)) {
679 aprint_error("%s: could not initialize firmware\n",
680 DEVNAME(sc));
681 goto noinit;
682 }
683
684 if (mfi_get_info(sc)) {
685 aprint_error("%s: could not retrieve controller information\n",
686 DEVNAME(sc));
687 goto noinit;
688 }
689
690 aprint_normal("%s: logical drives %d, version %s, %dMB RAM\n",
691 DEVNAME(sc),
692 sc->sc_info.mci_lds_present,
693 sc->sc_info.mci_package_version,
694 sc->sc_info.mci_memory_size);
695
696 sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
697 sc->sc_max_ld = sc->sc_ld_cnt;
698 for (i = 0; i < sc->sc_ld_cnt; i++)
699 sc->sc_ld[i].ld_present = 1;
700
701 memset(adapt, 0, sizeof(*adapt));
702 adapt->adapt_dev = &sc->sc_dev;
703 adapt->adapt_nchannels = 1;
704 if (sc->sc_ld_cnt)
705 adapt->adapt_openings = sc->sc_max_cmds / sc->sc_ld_cnt;
706 else
707 adapt->adapt_openings = sc->sc_max_cmds;
708 adapt->adapt_max_periph = adapt->adapt_openings;
709 adapt->adapt_request = mfi_scsipi_request;
710 adapt->adapt_minphys = mfiminphys;
711
712 memset(chan, 0, sizeof(*chan));
713 chan->chan_adapter = adapt;
714 chan->chan_bustype = &scsi_bustype;
715 chan->chan_channel = 0;
716 chan->chan_flags = 0;
717 chan->chan_nluns = 8;
718 chan->chan_ntargets = MFI_MAX_LD;
719 chan->chan_id = MFI_MAX_LD;
720
721 (void)config_found(&sc->sc_dev, &sc->sc_chan, scsiprint);
722
723 /* enable interrupts */
724 mfi_intr_enable(sc);
725
726 #if NBIO > 0
727 if (bio_register(&sc->sc_dev, mfi_ioctl) != 0)
728 panic("%s: controller registration failed", DEVNAME(sc));
729 if (mfi_create_sensors(sc) != 0)
730 aprint_error("%s: unable to create sensors\n", DEVNAME(sc));
731 #endif /* NBIO > 0 */
732
733 return 0;
734 noinit:
735 mfi_freemem(sc, sc->sc_sense);
736 nosense:
737 mfi_freemem(sc, sc->sc_frames);
738 noframe:
739 mfi_freemem(sc, sc->sc_pcq);
740 nopcq:
741 return 1;
742 }
743
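/*
 * Issue a command in polled mode: the frame is posted with the
 * "don't post in reply queue" flag set and the status byte is
 * busy-waited on for up to roughly five seconds.
 */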
744 static int
745 mfi_poll(struct mfi_ccb *ccb)
746 {
747 struct mfi_softc *sc = ccb->ccb_sc;
748 struct mfi_frame_header *hdr;
749 int to = 0;
750
751 DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));
752
753 hdr = &ccb->ccb_frame->mfr_header;
754 hdr->mfh_cmd_status = 0xff;
755 hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
756
757 mfi_post(sc, ccb);
758 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
759 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
760 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
761
762 while (hdr->mfh_cmd_status == 0xff) {
763 delay(1000);
764 if (to++ > 5000) /* XXX 5 seconds busywait sucks */
765 break;
766 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
767 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
768 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
769 }
770 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
771 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
772 sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
773
774 if (ccb->ccb_data != NULL) {
775 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
776 DEVNAME(sc));
777 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
778 ccb->ccb_dmamap->dm_mapsize,
779 (ccb->ccb_direction & MFI_DATA_IN) ?
780 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
781
782 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
783 }
784
785 if (hdr->mfh_cmd_status == 0xff) {
786 printf("%s: timeout on ccb %d\n", DEVNAME(sc),
787 hdr->mfh_context);
788 ccb->ccb_flags |= MFI_CCB_F_ERR;
789 return 1;
790 }
791
792 return 0;
793 }
794
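/*
 * Interrupt handler: acknowledge the interrupt through the IOP ops,
 * then walk the reply queue from the consumer to the producer index,
 * calling the completion routine of every ccb found there.
 */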
795 int
796 mfi_intr(void *arg)
797 {
798 struct mfi_softc *sc = arg;
799 struct mfi_prod_cons *pcq;
800 struct mfi_ccb *ccb;
801 uint32_t producer, consumer, ctx;
802 int claimed = 0;
803
804 if (!mfi_my_intr(sc))
805 return 0;
806
807 pcq = MFIMEM_KVA(sc->sc_pcq);
808
809 DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
810 (u_long)sc, (u_long)pcq);
811
812 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
813 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
814 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
815
816 producer = pcq->mpc_producer;
817 consumer = pcq->mpc_consumer;
818
819 while (consumer != producer) {
820 DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
821 DEVNAME(sc), producer, consumer);
822
823 ctx = pcq->mpc_reply_q[consumer];
824 pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
825 if (ctx == MFI_INVALID_CTX)
826 printf("%s: invalid context, p: %d c: %d\n",
827 DEVNAME(sc), producer, consumer);
828 else {
829 /* XXX remove from queue and call scsi_done */
830 ccb = &sc->sc_ccb[ctx];
831 DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
832 DEVNAME(sc), ctx);
833 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
834 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
835 sc->sc_frames_size,
836 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
837 ccb->ccb_done(ccb);
838
839 claimed = 1;
840 }
841 consumer++;
842 if (consumer == (sc->sc_max_cmds + 1))
843 consumer = 0;
844 }
845
846 pcq->mpc_consumer = consumer;
847 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
848 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
849 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
850
851 return claimed;
852 }
853
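/*
 * Fast path for READ/WRITE: build an LD I/O frame with LBA, block
 * count and sense address, then attach the data scatter/gather list.
 */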
854 static int
855 mfi_scsi_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint32_t blockno,
856 uint32_t blockcnt)
857 {
858 struct scsipi_periph *periph = xs->xs_periph;
859 struct mfi_io_frame *io;
860
861 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
862 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
863 periph->periph_target);
864
865 if (!xs->data)
866 return 1;
867
868 io = &ccb->ccb_frame->mfr_io;
869 if (xs->xs_control & XS_CTL_DATA_IN) {
870 io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
871 ccb->ccb_direction = MFI_DATA_IN;
872 } else {
873 io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
874 ccb->ccb_direction = MFI_DATA_OUT;
875 }
876 io->mif_header.mfh_target_id = periph->periph_target;
877 io->mif_header.mfh_timeout = 0;
878 io->mif_header.mfh_flags = 0;
879 io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
    880 	io->mif_header.mfh_data_len = blockcnt;
881 io->mif_lba_hi = 0;
882 io->mif_lba_lo = blockno;
883 io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
884 io->mif_sense_addr_hi = 0;
885
886 ccb->ccb_done = mfi_scsi_xs_done;
887 ccb->ccb_xs = xs;
888 ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
889 ccb->ccb_sgl = &io->mif_sgl;
890 ccb->ccb_data = xs->data;
891 ccb->ccb_len = xs->datalen;
892
893 if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
894 BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
895 return 1;
896
897 return 0;
898 }
899
900 static void
901 mfi_scsi_xs_done(struct mfi_ccb *ccb)
902 {
903 struct scsipi_xfer *xs = ccb->ccb_xs;
904 struct mfi_softc *sc = ccb->ccb_sc;
905 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
906
907 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
908 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
909
910 if (xs->data != NULL) {
911 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
912 DEVNAME(sc));
913 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
914 ccb->ccb_dmamap->dm_mapsize,
915 (xs->xs_control & XS_CTL_DATA_IN) ?
916 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
917
918 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
919 }
920
921 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
922 xs->error = XS_DRIVER_STUFFUP;
923 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
924 DEVNAME(sc), hdr->mfh_cmd_status);
925
926 if (hdr->mfh_scsi_status != 0) {
927 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
928 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
929 MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
930 DNPRINTF(MFI_D_INTR,
931 "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
932 DEVNAME(sc), hdr->mfh_scsi_status,
933 (u_long)&xs->sense, (u_long)ccb->ccb_sense);
934 memset(&xs->sense, 0, sizeof(xs->sense));
935 memcpy(&xs->sense, ccb->ccb_sense,
936 sizeof(struct scsi_sense_data));
937 xs->error = XS_SENSE;
938 }
939 } else {
940 xs->error = XS_NOERROR;
941 xs->status = SCSI_OK;
942 xs->resid = 0;
943 }
944
945 mfi_put_ccb(ccb);
946 scsipi_done(xs);
947 }
948
949 static int
950 mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
951 {
952 struct mfi_pass_frame *pf;
953 struct scsipi_periph *periph = xs->xs_periph;
954
955 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
956 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
957 periph->periph_target);
958
959 pf = &ccb->ccb_frame->mfr_pass;
960 pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
961 pf->mpf_header.mfh_target_id = periph->periph_target;
962 pf->mpf_header.mfh_lun_id = 0;
963 pf->mpf_header.mfh_cdb_len = xs->cmdlen;
964 pf->mpf_header.mfh_timeout = 0;
    965 	pf->mpf_header.mfh_data_len = xs->datalen; /* XXX */
966 pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
967
968 pf->mpf_sense_addr_hi = 0;
969 pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);
970
971 memset(pf->mpf_cdb, 0, 16);
972 memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);
973
974 ccb->ccb_done = mfi_scsi_xs_done;
975 ccb->ccb_xs = xs;
976 ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
977 ccb->ccb_sgl = &pf->mpf_sgl;
978
979 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
980 ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
981 MFI_DATA_IN : MFI_DATA_OUT;
982 else
983 ccb->ccb_direction = MFI_DATA_NONE;
984
985 if (xs->data) {
986 ccb->ccb_data = xs->data;
987 ccb->ccb_len = xs->datalen;
988
989 if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
990 BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
991 return 1;
992 }
993
994 return 0;
995 }
996
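/*
 * scsipi entry point.  READ/WRITE commands take the LD I/O fast path,
 * SYNCHRONIZE CACHE is translated into a cache-flush DCMD, and
 * everything else is passed to the firmware as LD SCSI pass-through
 * via mfi_scsi_ld().
 */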
997 static void
998 mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
999 void *arg)
1000 {
1001 struct scsipi_periph *periph;
1002 struct scsipi_xfer *xs;
1003 struct scsipi_adapter *adapt = chan->chan_adapter;
1004 struct mfi_softc *sc = (void *) adapt->adapt_dev;
1005 struct mfi_ccb *ccb;
1006 struct scsi_rw_6 *rw;
1007 struct scsipi_rw_10 *rwb;
1008 uint32_t blockno, blockcnt;
1009 uint8_t target;
1010 uint8_t mbox[MFI_MBOX_SIZE];
1011 int s;
1012
1013 switch (req) {
1014 case ADAPTER_REQ_GROW_RESOURCES:
1015 /* Not supported. */
1016 return;
1017 case ADAPTER_REQ_SET_XFER_MODE:
1018 /* Not supported. */
1019 return;
1020 case ADAPTER_REQ_RUN_XFER:
1021 break;
1022 }
1023
1024 xs = arg;
1025
1026 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x\n",
1027 DEVNAME(sc), req, xs->cmd->opcode);
1028
1029 periph = xs->xs_periph;
1030 target = periph->periph_target;
1031
1032 s = splbio();
1033 if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
1034 periph->periph_lun != 0) {
1035 DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1036 DEVNAME(sc), target);
1037 xs->error = XS_SELTIMEOUT;
1038 scsipi_done(xs);
1039 splx(s);
1040 return;
1041 }
1042
1043 if ((ccb = mfi_get_ccb(sc)) == NULL) {
1044 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
1045 xs->error = XS_RESOURCE_SHORTAGE;
1046 scsipi_done(xs);
1047 splx(s);
1048 return;
1049 }
1050
1051 switch (xs->cmd->opcode) {
1052 /* IO path */
1053 case READ_10:
1054 case WRITE_10:
1055 rwb = (struct scsipi_rw_10 *)xs->cmd;
1056 blockno = _4btol(rwb->addr);
1057 blockcnt = _2btol(rwb->length);
1058 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1059 mfi_put_ccb(ccb);
1060 goto stuffup;
1061 }
1062 break;
1063
1064 case SCSI_READ_6_COMMAND:
1065 case SCSI_WRITE_6_COMMAND:
1066 rw = (struct scsi_rw_6 *)xs->cmd;
1067 blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
1068 blockcnt = rw->length ? rw->length : 0x100;
1069 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1070 mfi_put_ccb(ccb);
1071 goto stuffup;
1072 }
1073 break;
1074
1075 case SCSI_SYNCHRONIZE_CACHE_10:
1076 mfi_put_ccb(ccb); /* we don't need this */
1077
1078 mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1079 if (mfi_mgmt(sc, MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE,
1080 0, NULL, mbox))
1081 goto stuffup;
1082 xs->error = XS_NOERROR;
1083 xs->status = SCSI_OK;
1084 xs->resid = 0;
1085 scsipi_done(xs);
1086 splx(s);
1087 return;
1088 /* NOTREACHED */
1089
   1090 	/* hand it off to the firmware and let it deal with it */
1091 case SCSI_TEST_UNIT_READY:
1092 /* save off sd? after autoconf */
1093 if (!cold) /* XXX bogus */
1094 strlcpy(sc->sc_ld[target].ld_dev, device_xname(&sc->sc_dev),
1095 sizeof(sc->sc_ld[target].ld_dev));
1096 /* FALLTHROUGH */
1097
1098 default:
1099 if (mfi_scsi_ld(ccb, xs)) {
1100 mfi_put_ccb(ccb);
1101 goto stuffup;
1102 }
1103 break;
1104 }
1105
1106 DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1107
1108 if (xs->xs_control & XS_CTL_POLL) {
1109 if (mfi_poll(ccb)) {
1110 /* XXX check for sense in ccb->ccb_sense? */
1111 printf("%s: mfi_scsipi_request poll failed\n",
1112 DEVNAME(sc));
1113 mfi_put_ccb(ccb);
1114 bzero(&xs->sense, sizeof(xs->sense));
1115 xs->sense.scsi_sense.response_code =
1116 SSD_RCODE_VALID | SSD_RCODE_CURRENT;
1117 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
1118 xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
1119 xs->error = XS_SENSE;
1120 xs->status = SCSI_CHECK;
1121 } else {
1122 DNPRINTF(MFI_D_DMA,
1123 "%s: mfi_scsipi_request poll complete %d\n",
1124 DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
1125 xs->error = XS_NOERROR;
1126 xs->status = SCSI_OK;
1127 xs->resid = 0;
1128 }
1129 mfi_put_ccb(ccb);
1130 scsipi_done(xs);
1131 splx(s);
1132 return;
1133 }
1134
1135 mfi_post(sc, ccb);
1136
1137 DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
1138 ccb->ccb_dmamap->dm_nsegs);
1139
1140 splx(s);
1141 return;
1142
1143 stuffup:
1144 xs->error = XS_DRIVER_STUFFUP;
1145 scsipi_done(xs);
1146 splx(s);
1147 }
1148
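/*
 * Load the data buffer into the ccb's DMA map and convert the
 * resulting segments into 32-bit SG entries, setting the frame
 * direction flag and accounting for the extra frames the SG list
 * spills into.
 */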
1149 static int
1150 mfi_create_sgl(struct mfi_ccb *ccb, int flags)
1151 {
1152 struct mfi_softc *sc = ccb->ccb_sc;
1153 struct mfi_frame_header *hdr;
1154 bus_dma_segment_t *sgd;
1155 union mfi_sgl *sgl;
1156 int error, i;
1157
1158 DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
1159 (u_long)ccb->ccb_data);
1160
1161 if (!ccb->ccb_data)
1162 return 1;
1163
1164 error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
1165 ccb->ccb_data, ccb->ccb_len, NULL, flags);
1166 if (error) {
1167 if (error == EFBIG)
1168 printf("more than %d dma segs\n",
1169 sc->sc_max_sgl);
1170 else
1171 printf("error %d loading dma map\n", error);
1172 return 1;
1173 }
1174
1175 hdr = &ccb->ccb_frame->mfr_header;
1176 sgl = ccb->ccb_sgl;
1177 sgd = ccb->ccb_dmamap->dm_segs;
1178 for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1179 sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1180 sgl->sg32[i].len = htole32(sgd[i].ds_len);
1181 DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
1182 DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1183 }
1184
1185 if (ccb->ccb_direction == MFI_DATA_IN) {
1186 hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1187 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1188 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1189 } else {
1190 hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1191 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1192 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1193 }
1194
1195 hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
   1196 	/* for 64-bit I/O the sizeof must become a variable holding the SG entry size */
1197 ccb->ccb_frame_size += sizeof(struct mfi_sg32) *
1198 ccb->ccb_dmamap->dm_nsegs;
1199 ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1200
1201 DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
1202 " dm_nsegs: %d extra_frames: %d\n",
1203 DEVNAME(sc),
1204 hdr->mfh_sg_count,
1205 ccb->ccb_frame_size,
1206 sc->sc_frames_size,
1207 ccb->ccb_dmamap->dm_nsegs,
1208 ccb->ccb_extra_frames);
1209
1210 return 0;
1211 }
1212
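/*
 * Generic DCMD wrapper: fill in the opcode and mailbox, attach an
 * optional data buffer, then poll during autoconfiguration (cold) or
 * post the frame and sleep until mfi_mgmt_done() marks the ccb done.
 */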
1213 static int
1214 mfi_mgmt(struct mfi_softc *sc, uint32_t opc, uint32_t dir, uint32_t len,
1215 void *buf, uint8_t *mbox)
1216 {
1217 struct mfi_ccb *ccb;
1218 struct mfi_dcmd_frame *dcmd;
1219 int rv = 1;
1220
1221 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(sc), opc);
1222
1223 if ((ccb = mfi_get_ccb(sc)) == NULL)
1224 return rv;
1225
1226 dcmd = &ccb->ccb_frame->mfr_dcmd;
1227 memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1228 dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1229 dcmd->mdf_header.mfh_timeout = 0;
1230
1231 dcmd->mdf_opcode = opc;
1232 dcmd->mdf_header.mfh_data_len = 0;
1233 ccb->ccb_direction = dir;
1234 ccb->ccb_done = mfi_mgmt_done;
1235
1236 ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1237
1238 /* handle special opcodes */
1239 if (mbox)
1240 memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1241
1242 if (dir != MFI_DATA_NONE) {
1243 dcmd->mdf_header.mfh_data_len = len;
1244 ccb->ccb_data = buf;
1245 ccb->ccb_len = len;
1246 ccb->ccb_sgl = &dcmd->mdf_sgl;
1247
1248 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
1249 goto done;
1250 }
1251
1252 if (cold) {
1253 if (mfi_poll(ccb))
1254 goto done;
1255 } else {
1256 mfi_post(sc, ccb);
1257
1258 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt sleeping\n", DEVNAME(sc));
1259 while (ccb->ccb_state != MFI_CCB_DONE)
1260 tsleep(ccb, PRIBIO, "mfi_mgmt", 0);
1261
1262 if (ccb->ccb_flags & MFI_CCB_F_ERR)
1263 goto done;
1264 }
1265
1266 rv = 0;
1267
1268 done:
1269 mfi_put_ccb(ccb);
1270 return rv;
1271 }
1272
1273 static void
1274 mfi_mgmt_done(struct mfi_ccb *ccb)
1275 {
1276 struct mfi_softc *sc = ccb->ccb_sc;
1277 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1278
1279 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
1280 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1281
1282 if (ccb->ccb_data != NULL) {
1283 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
1284 DEVNAME(sc));
1285 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1286 ccb->ccb_dmamap->dm_mapsize,
1287 (ccb->ccb_direction & MFI_DATA_IN) ?
1288 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1289
1290 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1291 }
1292
1293 if (hdr->mfh_cmd_status != MFI_STAT_OK)
1294 ccb->ccb_flags |= MFI_CCB_F_ERR;
1295
1296 ccb->ccb_state = MFI_CCB_DONE;
1297
1298 wakeup(ccb);
1299 }
1300
1301 #if NBIO > 0
1302 int
1303 mfi_ioctl(struct device *dev, u_long cmd, void *addr)
1304 {
1305 struct mfi_softc *sc = (struct mfi_softc *)dev;
1306 int error = 0;
1307 int s = splbio();
1308
1309 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1310
1311 switch (cmd) {
1312 case BIOCINQ:
1313 DNPRINTF(MFI_D_IOCTL, "inq\n");
1314 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1315 break;
1316
1317 case BIOCVOL:
1318 DNPRINTF(MFI_D_IOCTL, "vol\n");
1319 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1320 break;
1321
1322 case BIOCDISK:
1323 DNPRINTF(MFI_D_IOCTL, "disk\n");
1324 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1325 break;
1326
1327 case BIOCALARM:
1328 DNPRINTF(MFI_D_IOCTL, "alarm\n");
1329 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1330 break;
1331
1332 case BIOCBLINK:
1333 DNPRINTF(MFI_D_IOCTL, "blink\n");
1334 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1335 break;
1336
1337 case BIOCSETSTATE:
1338 DNPRINTF(MFI_D_IOCTL, "setstate\n");
1339 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1340 break;
1341
1342 default:
1343 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1344 error = EINVAL;
1345 }
1346 splx(s);
1347
1348 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
1349 return error;
1350 }
1351
1352 static int
1353 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1354 {
1355 struct mfi_conf *cfg;
1356 int rv = EINVAL;
1357
1358 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1359
1360 if (mfi_get_info(sc)) {
1361 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
1362 DEVNAME(sc));
1363 return EIO;
1364 }
1365
1366 /* get figures */
1367 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1368 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1369 goto freeme;
1370
1371 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1372 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1373 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1374
1375 rv = 0;
1376 freeme:
1377 free(cfg, M_DEVBUF);
1378 return rv;
1379 }
1380
1381 static int
1382 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
1383 {
1384 int i, per, rv = EINVAL;
1385 uint8_t mbox[MFI_MBOX_SIZE];
1386
1387 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
1388 DEVNAME(sc), bv->bv_volid);
1389
1390 if (mfi_mgmt(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1391 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1392 goto done;
1393
1394 i = bv->bv_volid;
1395 mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1396 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
1397 DEVNAME(sc), mbox[0]);
1398
1399 if (mfi_mgmt(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
1400 sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox))
1401 goto done;
1402
1403 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
1404 /* go do hotspares */
1405 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
1406 goto done;
1407 }
1408
1409 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
1410
1411 switch(sc->sc_ld_list.mll_list[i].mll_state) {
1412 case MFI_LD_OFFLINE:
1413 bv->bv_status = BIOC_SVOFFLINE;
1414 break;
1415
1416 case MFI_LD_PART_DEGRADED:
1417 case MFI_LD_DEGRADED:
1418 bv->bv_status = BIOC_SVDEGRADED;
1419 break;
1420
1421 case MFI_LD_ONLINE:
1422 bv->bv_status = BIOC_SVONLINE;
1423 break;
1424
1425 default:
1426 bv->bv_status = BIOC_SVINVALID;
1427 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
1428 DEVNAME(sc),
1429 sc->sc_ld_list.mll_list[i].mll_state);
1430 }
1431
1432 /* additional status can modify MFI status */
1433 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
1434 case MFI_LD_PROG_CC:
1435 case MFI_LD_PROG_BGI:
1436 bv->bv_status = BIOC_SVSCRUB;
1437 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
1438 bv->bv_percent = (per * 100) / 0xffff;
1439 bv->bv_seconds =
1440 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
1441 break;
1442
1443 case MFI_LD_PROG_FGI:
1444 case MFI_LD_PROG_RECONSTRUCT:
1445 /* nothing yet */
1446 break;
1447 }
1448
1449 /*
   1450 	 * The RAID levels are determined per the SNIA DDF spec; this is only
   1451 	 * a subset that is valid for the MFI controller.
1452 */
1453 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
1454 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
1455 MFI_DDF_SRL_SPANNED)
1456 bv->bv_level *= 10;
1457
1458 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
1459 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
1460
1461 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
1462
1463 rv = 0;
1464 done:
1465 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
1466 DEVNAME(sc), rv);
1467 return rv;
1468 }
1469
1470 static int
1471 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
1472 {
1473 struct mfi_conf *cfg;
1474 struct mfi_array *ar;
1475 struct mfi_ld_cfg *ld;
1476 struct mfi_pd_details *pd;
1477 struct scsipi_inquiry_data *inqbuf;
1478 char vend[8+16+4+1];
1479 int i, rv = EINVAL;
1480 int arr, vol, disk;
1481 uint32_t size;
1482 uint8_t mbox[MFI_MBOX_SIZE];
1483
1484 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
1485 DEVNAME(sc), bd->bd_diskid);
1486
1487 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1488
1489 /* send single element command to retrieve size for full structure */
1490 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1491 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1492 goto freeme;
1493
1494 size = cfg->mfc_size;
1495 free(cfg, M_DEVBUF);
1496
1497 /* memory for read config */
1498 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1499 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
1500 goto freeme;
1501
1502 ar = cfg->mfc_array;
1503
1504 /* calculate offset to ld structure */
1505 ld = (struct mfi_ld_cfg *)(
1506 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1507 cfg->mfc_array_size * cfg->mfc_no_array);
1508
1509 vol = bd->bd_volid;
1510
1511 if (vol >= cfg->mfc_no_ld) {
1512 /* do hotspares */
1513 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
1514 goto freeme;
1515 }
1516
1517 /* find corresponding array for ld */
1518 for (i = 0, arr = 0; i < vol; i++)
1519 arr += ld[i].mlc_parm.mpa_span_depth;
1520
1521 /* offset disk into pd list */
1522 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
1523
1524 /* offset array index into the next spans */
1525 arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
1526
1527 bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
1528 switch (ar[arr].pd[disk].mar_pd_state){
1529 case MFI_PD_UNCONFIG_GOOD:
1530 bd->bd_status = BIOC_SDUNUSED;
1531 break;
1532
1533 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
1534 bd->bd_status = BIOC_SDHOTSPARE;
1535 break;
1536
1537 case MFI_PD_OFFLINE:
1538 bd->bd_status = BIOC_SDOFFLINE;
1539 break;
1540
1541 case MFI_PD_FAILED:
1542 bd->bd_status = BIOC_SDFAILED;
1543 break;
1544
1545 case MFI_PD_REBUILD:
1546 bd->bd_status = BIOC_SDREBUILD;
1547 break;
1548
1549 case MFI_PD_ONLINE:
1550 bd->bd_status = BIOC_SDONLINE;
1551 break;
1552
1553 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
1554 default:
1555 bd->bd_status = BIOC_SDINVALID;
1556 break;
1557
1558 }
1559
1560 /* get the remaining fields */
1561 *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
1562 memset(pd, 0, sizeof(*pd));
1563 if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1564 sizeof *pd, pd, mbox))
1565 goto freeme;
1566
1567 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
1568
1569 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
1570 bd->bd_channel = pd->mpd_enc_idx;
1571
1572 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1573 memcpy(vend, inqbuf->vendor, sizeof vend - 1);
1574 vend[sizeof vend - 1] = '\0';
1575 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
1576
1577 /* XXX find a way to retrieve serial nr from drive */
1578 /* XXX find a way to get bd_procdev */
1579
1580 rv = 0;
1581 freeme:
1582 free(pd, M_DEVBUF);
1583 free(cfg, M_DEVBUF);
1584
1585 return rv;
1586 }
1587
1588 static int
1589 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1590 {
1591 uint32_t opc, dir = MFI_DATA_NONE;
1592 int rv = 0;
1593 int8_t ret;
1594
1595 switch(ba->ba_opcode) {
1596 case BIOC_SADISABLE:
1597 opc = MR_DCMD_SPEAKER_DISABLE;
1598 break;
1599
1600 case BIOC_SAENABLE:
1601 opc = MR_DCMD_SPEAKER_ENABLE;
1602 break;
1603
1604 case BIOC_SASILENCE:
1605 opc = MR_DCMD_SPEAKER_SILENCE;
1606 break;
1607
1608 case BIOC_GASTATUS:
1609 opc = MR_DCMD_SPEAKER_GET;
1610 dir = MFI_DATA_IN;
1611 break;
1612
1613 case BIOC_SATEST:
1614 opc = MR_DCMD_SPEAKER_TEST;
1615 break;
1616
1617 default:
1618 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1619 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1620 return EINVAL;
1621 }
1622
1623 if (mfi_mgmt(sc, opc, dir, sizeof(ret), &ret, NULL))
1624 rv = EINVAL;
1625 else
1626 if (ba->ba_opcode == BIOC_GASTATUS)
1627 ba->ba_status = ret;
1628 else
1629 ba->ba_status = 0;
1630
1631 return rv;
1632 }
1633
1634 static int
1635 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1636 {
1637 int i, found, rv = EINVAL;
1638 uint8_t mbox[MFI_MBOX_SIZE];
1639 uint32_t cmd;
1640 struct mfi_pd_list *pd;
1641
1642 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1643 bb->bb_status);
1644
1645 /* channel 0 means not in an enclosure so can't be blinked */
1646 if (bb->bb_channel == 0)
1647 return EINVAL;
1648
1649 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1650
1651 if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1652 MFI_PD_LIST_SIZE, pd, NULL))
1653 goto done;
1654
1655 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1656 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1657 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1658 found = 1;
1659 break;
1660 }
1661
1662 if (!found)
1663 goto done;
1664
1665 memset(mbox, 0, sizeof mbox);
1666
   1667 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1668
1669 switch (bb->bb_status) {
1670 case BIOC_SBUNBLINK:
1671 cmd = MR_DCMD_PD_UNBLINK;
1672 break;
1673
1674 case BIOC_SBBLINK:
1675 cmd = MR_DCMD_PD_BLINK;
1676 break;
1677
1678 case BIOC_SBALARM:
1679 default:
1680 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1681 "opcode %x\n", DEVNAME(sc), bb->bb_status);
1682 goto done;
1683 }
1684
1685
1686 if (mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1687 goto done;
1688
1689 rv = 0;
1690 done:
1691 free(pd, M_DEVBUF);
1692 return rv;
1693 }
1694
1695 static int
1696 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
1697 {
1698 struct mfi_pd_list *pd;
1699 int i, found, rv = EINVAL;
1700 uint8_t mbox[MFI_MBOX_SIZE];
1701 uint32_t cmd;
1702
1703 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
1704 bs->bs_status);
1705
1706 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1707
1708 if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1709 MFI_PD_LIST_SIZE, pd, NULL))
1710 goto done;
1711
1712 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1713 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
1714 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
1715 found = 1;
1716 break;
1717 }
1718
1719 if (!found)
1720 goto done;
1721
1722 memset(mbox, 0, sizeof mbox);
1723
   1724 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1725
1726 switch (bs->bs_status) {
1727 case BIOC_SSONLINE:
1728 mbox[2] = MFI_PD_ONLINE;
1729 cmd = MD_DCMD_PD_SET_STATE;
1730 break;
1731
1732 case BIOC_SSOFFLINE:
1733 mbox[2] = MFI_PD_OFFLINE;
1734 cmd = MD_DCMD_PD_SET_STATE;
1735 break;
1736
1737 case BIOC_SSHOTSPARE:
1738 mbox[2] = MFI_PD_HOTSPARE;
1739 cmd = MD_DCMD_PD_SET_STATE;
1740 break;
1741 /*
1742 case BIOC_SSREBUILD:
1743 cmd = MD_DCMD_PD_REBUILD;
1744 break;
1745 */
1746 default:
1747 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
1748 "opcode %x\n", DEVNAME(sc), bs->bs_status);
1749 goto done;
1750 }
1751
1752
   1753 	if (mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1754 goto done;
1755
1756 rv = 0;
1757 done:
1758 free(pd, M_DEVBUF);
1759 return rv;
1760 }
1761
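/*
 * Fill in bioc_vol or bioc_disk information for a hotspare: read the
 * controller configuration, locate the hotspare entry addressed by a
 * volume id beyond the last logical drive and query the physical
 * drive details.
 */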
1762 static int
1763 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
1764 {
1765 struct mfi_conf *cfg;
1766 struct mfi_hotspare *hs;
1767 struct mfi_pd_details *pd;
1768 struct bioc_disk *sdhs;
1769 struct bioc_vol *vdhs;
1770 struct scsipi_inquiry_data *inqbuf;
1771 char vend[8+16+4+1];
1772 int i, rv = EINVAL;
1773 uint32_t size;
1774 uint8_t mbox[MFI_MBOX_SIZE];
1775
1776 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
1777
1778 if (!bio_hs)
1779 return EINVAL;
1780
1781 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1782
1783 /* send single element command to retrieve size for full structure */
1784 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1785 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1786 goto freeme;
1787
1788 size = cfg->mfc_size;
1789 free(cfg, M_DEVBUF);
1790
1791 /* memory for read config */
1792 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1793 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
1794 goto freeme;
1795
1796 /* calculate offset to hs structure */
1797 hs = (struct mfi_hotspare *)(
1798 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1799 cfg->mfc_array_size * cfg->mfc_no_array +
1800 cfg->mfc_ld_size * cfg->mfc_no_ld);
1801
1802 if (volid < cfg->mfc_no_ld)
1803 goto freeme; /* not a hotspare */
1804
1805 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
1806 goto freeme; /* not a hotspare */
1807
1808 /* offset into hotspare structure */
1809 i = volid - cfg->mfc_no_ld;
1810
1811 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
1812 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
1813 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
1814
1815 /* get pd fields */
1816 memset(mbox, 0, sizeof mbox);
1817 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
1818 if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1819 sizeof *pd, pd, mbox)) {
1820 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
1821 DEVNAME(sc));
1822 goto freeme;
1823 }
1824
1825 switch (type) {
1826 case MFI_MGMT_VD:
1827 vdhs = bio_hs;
1828 vdhs->bv_status = BIOC_SVONLINE;
1829 vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */
1830 vdhs->bv_level = -1; /* hotspare */
1831 vdhs->bv_nodisk = 1;
1832 break;
1833
1834 case MFI_MGMT_SD:
1835 sdhs = bio_hs;
1836 sdhs->bd_status = BIOC_SDHOTSPARE;
1837 sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */
1838 sdhs->bd_channel = pd->mpd_enc_idx;
1839 sdhs->bd_target = pd->mpd_enc_slot;
1840 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1841 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
1842 vend[sizeof vend - 1] = '\0';
1843 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
1844 break;
1845
1846 default:
1847 goto freeme;
1848 }
1849
1850 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
1851 rv = 0;
1852 freeme:
1853 free(pd, M_DEVBUF);
1854 free(cfg, M_DEVBUF);
1855
1856 return rv;
1857 }
1858
1859 static int
1860 mfi_create_sensors(struct mfi_softc *sc)
1861 {
1862 int i;
1863 int nsensors = sc->sc_ld_cnt;
1864
1865 sc->sc_sme = sysmon_envsys_create();
1866 sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
1867 M_DEVBUF, M_NOWAIT | M_ZERO);
1868 if (sc->sc_sensor == NULL) {
1869 aprint_error("%s: can't allocate envsys_data_t\n",
1870 DEVNAME(sc));
1871 return ENOMEM;
1872 }
1873
1874 for (i = 0; i < nsensors; i++) {
1875 sc->sc_sensor[i].units = ENVSYS_DRIVE;
1876 sc->sc_sensor[i].monitor = true;
1877 /* Enable monitoring for drive state changes */
1878 sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
1879 /* logical drives */
1880 snprintf(sc->sc_sensor[i].desc,
1881 sizeof(sc->sc_sensor[i].desc), "%s:%d",
1882 DEVNAME(sc), i);
1883 if (sysmon_envsys_sensor_attach(sc->sc_sme,
1884 &sc->sc_sensor[i]))
1885 goto out;
1886 }
1887
1888 sc->sc_sme->sme_name = DEVNAME(sc);
1889 sc->sc_sme->sme_cookie = sc;
1890 sc->sc_sme->sme_refresh = mfi_sensor_refresh;
1891 if (sysmon_envsys_register(sc->sc_sme)) {
1892 aprint_error("%s: unable to register with sysmon\n",
1893 DEVNAME(sc));
1894 goto out;
1895 }
1896 return 0;
1897
1898 out:
1899 free(sc->sc_sensor, M_DEVBUF);
1900 sysmon_envsys_destroy(sc->sc_sme);
1901 return EINVAL;
1902 }
1903
1904 static void
1905 mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
1906 {
1907 struct mfi_softc *sc = sme->sme_cookie;
1908 struct bioc_vol bv;
1909 int s;
1910
1911 if (edata->sensor >= sc->sc_ld_cnt)
1912 return;
1913
1914 bzero(&bv, sizeof(bv));
1915 bv.bv_volid = edata->sensor;
1916 s = splbio();
1917 if (mfi_ioctl_vol(sc, &bv)) {
1918 splx(s);
1919 return;
1920 }
1921 splx(s);
1922
1923 switch(bv.bv_status) {
1924 case BIOC_SVOFFLINE:
1925 edata->value_cur = ENVSYS_DRIVE_FAIL;
1926 edata->state = ENVSYS_SCRITICAL;
1927 break;
1928
1929 case BIOC_SVDEGRADED:
1930 edata->value_cur = ENVSYS_DRIVE_PFAIL;
1931 edata->state = ENVSYS_SCRITICAL;
1932 break;
1933
1934 case BIOC_SVSCRUB:
1935 case BIOC_SVONLINE:
1936 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1937 edata->state = ENVSYS_SVALID;
1938 break;
1939
1940 case BIOC_SVINVALID:
   1941 		/* FALLTHROUGH */
1942 default:
1943 edata->value_cur = 0; /* unknown */
1944 edata->state = ENVSYS_SINVALID;
1945 }
1946 }
1947
1948 #endif /* NBIO > 0 */
1949
1950 static uint32_t
1951 mfi_xscale_fw_state(struct mfi_softc *sc)
1952 {
1953 return mfi_read(sc, MFI_OMSG0);
1954 }
1955
1956 static void
1957 mfi_xscale_intr_ena(struct mfi_softc *sc)
1958 {
1959 mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
1960 }
1961
1962 static int
1963 mfi_xscale_intr(struct mfi_softc *sc)
1964 {
1965 uint32_t status;
1966
1967 status = mfi_read(sc, MFI_OSTS);
1968 if (!ISSET(status, MFI_OSTS_INTR_VALID))
1969 return 0;
1970
1971 /* write status back to acknowledge interrupt */
1972 mfi_write(sc, MFI_OSTS, status);
1973 return 1;
1974 }
1975
1976 static void
1977 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
1978 {
1979 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
1980 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
1981 sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1982 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
1983 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
1984 MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);
1985
1986 mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
1987 ccb->ccb_extra_frames);
1988 }
1989
1990 static uint32_t
1991 mfi_ppc_fw_state(struct mfi_softc *sc)
1992 {
1993 return mfi_read(sc, MFI_OSP);
1994 }
1995
1996 static void
1997 mfi_ppc_intr_ena(struct mfi_softc *sc)
1998 {
1999 mfi_write(sc, MFI_ODC, 0xffffffff);
2000 mfi_write(sc, MFI_OMSK, ~0x80000004);
2001 }
2002
2003 static int
2004 mfi_ppc_intr(struct mfi_softc *sc)
2005 {
2006 uint32_t status;
2007
2008 status = mfi_read(sc, MFI_OSTS);
2009 if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2010 return 0;
2011
2012 /* write status back to acknowledge interrupt */
2013 mfi_write(sc, MFI_ODC, status);
2014 return 1;
2015 }
2016
2017 static void
2018 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2019 {
2020 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2021 (ccb->ccb_extra_frames << 1));
2022 }
2023