mfi.c revision 1.12

     1 /* $NetBSD: mfi.c,v 1.12 2008/02/25 10:46:02 xtraeme Exp $ */
2 /* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
3 /*
4 * Copyright (c) 2006 Marco Peereboom <marco (at) peereboom.us>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include <sys/cdefs.h>
20 __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.12 2008/02/25 10:46:02 xtraeme Exp $");
21
22 #include "bio.h"
23
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/buf.h>
27 #include <sys/ioctl.h>
28 #include <sys/device.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/proc.h>
32
33 #include <uvm/uvm_param.h>
34
35 #include <sys/bus.h>
36
37 #include <dev/scsipi/scsipi_all.h>
38 #include <dev/scsipi/scsi_all.h>
39 #include <dev/scsipi/scsi_spc.h>
40 #include <dev/scsipi/scsipi_disk.h>
41 #include <dev/scsipi/scsi_disk.h>
42 #include <dev/scsipi/scsiconf.h>
43
44 #include <dev/ic/mfireg.h>
45 #include <dev/ic/mfivar.h>
46
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #endif /* NBIO > 0 */
50
51 #ifdef MFI_DEBUG
52 uint32_t mfi_debug = 0
53 /* | MFI_D_CMD */
54 /* | MFI_D_INTR */
55 /* | MFI_D_MISC */
56 /* | MFI_D_DMA */
57 | MFI_D_IOCTL
58 /* | MFI_D_RW */
59 /* | MFI_D_MEM */
60 /* | MFI_D_CCB */
61 ;
62 #endif
63
64 void mfi_scsipi_request(struct scsipi_channel *,
65 scsipi_adapter_req_t, void *);
66 int mfi_scsi_ioctl(struct scsipi_channel *, u_long, void *, int,
67 struct proc *);
68 void mfiminphys(struct buf *bp);
69
70 struct mfi_ccb *mfi_get_ccb(struct mfi_softc *);
71 void mfi_put_ccb(struct mfi_ccb *);
72 int mfi_init_ccb(struct mfi_softc *);
73
74 struct mfi_mem *mfi_allocmem(struct mfi_softc *, size_t);
75 void mfi_freemem(struct mfi_softc *, struct mfi_mem *);
76
77 int mfi_transition_firmware(struct mfi_softc *);
78 int mfi_initialize_firmware(struct mfi_softc *);
79 int mfi_get_info(struct mfi_softc *);
80 uint32_t mfi_read(struct mfi_softc *, bus_size_t);
81 void mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
82 int mfi_poll(struct mfi_ccb *);
83 int mfi_create_sgl(struct mfi_ccb *, int);
84
85 /* commands */
86 int mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
87 int mfi_scsi_io(struct mfi_ccb *, struct scsipi_xfer *, uint32_t,
88 uint32_t);
89 void mfi_scsi_xs_done(struct mfi_ccb *);
90 int mfi_mgmt(struct mfi_softc *, uint32_t, uint32_t, uint32_t,
91 void *, uint8_t *);
92 void mfi_mgmt_done(struct mfi_ccb *);
93
94 #if NBIO > 0
95 int mfi_ioctl(struct device *, u_long, void *);
96 int mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
97 int mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
98 int mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
99 int mfi_ioctl_alarm(struct mfi_softc *, struct bioc_alarm *);
100 int mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *);
101 int mfi_ioctl_setstate(struct mfi_softc *, struct bioc_setstate *);
102 int mfi_bio_hs(struct mfi_softc *, int, int, void *);
103 int mfi_create_sensors(struct mfi_softc *);
104 void mfi_sensor_refresh(struct sysmon_envsys *, envsys_data_t *);
105 #endif /* NBIO > 0 */
106
107 uint32_t mfi_xscale_fw_state(struct mfi_softc *sc);
108 void mfi_xscale_intr_ena(struct mfi_softc *sc);
109 int mfi_xscale_intr(struct mfi_softc *sc);
110 void mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
111
112 static const struct mfi_iop_ops mfi_iop_xscale = {
113 mfi_xscale_fw_state,
114 mfi_xscale_intr_ena,
115 mfi_xscale_intr,
116 mfi_xscale_post
117 };
118
119 uint32_t mfi_ppc_fw_state(struct mfi_softc *sc);
120 void mfi_ppc_intr_ena(struct mfi_softc *sc);
121 int mfi_ppc_intr(struct mfi_softc *sc);
122 void mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
123
124 static const struct mfi_iop_ops mfi_iop_ppc = {
125 mfi_ppc_fw_state,
126 mfi_ppc_intr_ena,
127 mfi_ppc_intr,
128 mfi_ppc_post
129 };
130
131 #define mfi_fw_state(_s) ((_s)->sc_iop->mio_fw_state(_s))
132 #define mfi_intr_enable(_s) ((_s)->sc_iop->mio_intr_ena(_s))
133 #define mfi_my_intr(_s) ((_s)->sc_iop->mio_intr(_s))
134 #define mfi_post(_s, _c) ((_s)->sc_iop->mio_post((_s), (_c)))
135
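/*
 * mfi_get_ccb: pull a command control block off the free queue at splbio
 * and mark it ready; returns NULL if the free queue is empty.
 */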
136 struct mfi_ccb *
137 mfi_get_ccb(struct mfi_softc *sc)
138 {
139 struct mfi_ccb *ccb;
140 int s;
141
142 s = splbio();
143 ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
144 if (ccb) {
145 TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
146 ccb->ccb_state = MFI_CCB_READY;
147 }
148 splx(s);
149
150 DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
151
152 return (ccb);
153 }
154
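/*
 * mfi_put_ccb: reset a ccb to its idle state and return it to the free
 * queue at splbio.
 */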
155 void
156 mfi_put_ccb(struct mfi_ccb *ccb)
157 {
158 struct mfi_softc *sc = ccb->ccb_sc;
159 int s;
160
161 DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
162
163 s = splbio();
164 ccb->ccb_state = MFI_CCB_FREE;
165 ccb->ccb_xs = NULL;
166 ccb->ccb_flags = 0;
167 ccb->ccb_done = NULL;
168 ccb->ccb_direction = 0;
169 ccb->ccb_frame_size = 0;
170 ccb->ccb_extra_frames = 0;
171 ccb->ccb_sgl = NULL;
172 ccb->ccb_data = NULL;
173 ccb->ccb_len = 0;
174 TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
175 splx(s);
176 }
177
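/*
 * mfi_init_ccb: carve the preallocated frame and sense DMA memory into
 * per-command ccbs, create a DMA map for each, and put them all on the
 * free queue.  On failure the DMA maps created so far are torn down again.
 */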
178 int
179 mfi_init_ccb(struct mfi_softc *sc)
180 {
181 struct mfi_ccb *ccb;
182 uint32_t i;
183 int error;
184
185 DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
186
187 sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
188 M_DEVBUF, M_WAITOK);
189 memset(sc->sc_ccb, 0, sizeof(struct mfi_ccb) * sc->sc_max_cmds);
190
191 for (i = 0; i < sc->sc_max_cmds; i++) {
192 ccb = &sc->sc_ccb[i];
193
194 ccb->ccb_sc = sc;
195
196 /* select i'th frame */
197 ccb->ccb_frame = (union mfi_frame *)
198 ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
199 ccb->ccb_pframe =
200 MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
201 ccb->ccb_frame->mfr_header.mfh_context = i;
202
203 /* select i'th sense */
204 ccb->ccb_sense = (struct mfi_sense *)
205 ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
206 ccb->ccb_psense =
207 (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
208
209 /* create a dma map for transfer */
210 error = bus_dmamap_create(sc->sc_dmat,
211 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
212 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
213 if (error) {
214 printf("%s: cannot create ccb dmamap (%d)\n",
215 DEVNAME(sc), error);
216 goto destroy;
217 }
218
219 DNPRINTF(MFI_D_CCB,
220 "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
221 ccb->ccb_frame->mfr_header.mfh_context, ccb,
222 (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
223 (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
224 (u_long)ccb->ccb_dmamap);
225
226 /* add ccb to queue */
227 mfi_put_ccb(ccb);
228 }
229
230 return (0);
231 destroy:
232 /* free dma maps and ccb memory */
   233 	while (i) {
   234 		i--;
   235 		ccb = &sc->sc_ccb[i];
   236 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
   237 	}
238
239 free(sc->sc_ccb, M_DEVBUF);
240
241 return (1);
242 }
243
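/*
 * mfi_read/mfi_write: 32-bit register accessors with the appropriate
 * bus_space barrier around each access.
 */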
244 uint32_t
245 mfi_read(struct mfi_softc *sc, bus_size_t r)
246 {
247 uint32_t rv;
248
249 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
250 BUS_SPACE_BARRIER_READ);
251 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
252
   253 	DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x%08x ", DEVNAME(sc), (u_long)r, rv);
254 return (rv);
255 }
256
257 void
258 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
259 {
260 DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);
261
262 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
263 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
264 BUS_SPACE_BARRIER_WRITE);
265 }
266
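/*
 * mfi_allocmem: allocate, map and load a physically contiguous chunk of
 * DMA-safe memory for controller data structures; mfi_freemem undoes it.
 */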
267 struct mfi_mem *
268 mfi_allocmem(struct mfi_softc *sc, size_t size)
269 {
270 struct mfi_mem *mm;
271 int nsegs;
272
273 DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
274 (long)size);
275
276 mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT);
277 if (mm == NULL)
278 return (NULL);
279
280 memset(mm, 0, sizeof(struct mfi_mem));
281 mm->am_size = size;
282
283 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
284 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
285 goto amfree;
286
287 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
288 &nsegs, BUS_DMA_NOWAIT) != 0)
289 goto destroy;
290
291 if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
292 BUS_DMA_NOWAIT) != 0)
293 goto free;
294
295 if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
296 BUS_DMA_NOWAIT) != 0)
297 goto unmap;
298
299 DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n",
300 mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);
301
302 memset(mm->am_kva, 0, size);
303 return (mm);
304
305 unmap:
306 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
307 free:
308 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
309 destroy:
310 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
311 amfree:
312 free(mm, M_DEVBUF);
313
314 return (NULL);
315 }
316
317 void
318 mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
319 {
320 DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
321
322 bus_dmamap_unload(sc->sc_dmat, mm->am_map);
323 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
324 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
325 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
326 free(mm, M_DEVBUF);
327 }
328
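/*
 * mfi_transition_firmware: step the firmware towards MFI_STATE_READY,
 * issuing the handshake/ready doorbell writes as required and polling the
 * state register with a per-state timeout.
 */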
329 int
330 mfi_transition_firmware(struct mfi_softc *sc)
331 {
332 int32_t fw_state, cur_state;
333 int max_wait, i;
334
335 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
336
337 DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
338 fw_state);
339
340 while (fw_state != MFI_STATE_READY) {
341 DNPRINTF(MFI_D_MISC,
342 "%s: waiting for firmware to become ready\n",
343 DEVNAME(sc));
344 cur_state = fw_state;
345 switch (fw_state) {
346 case MFI_STATE_FAULT:
347 printf("%s: firmware fault\n", DEVNAME(sc));
348 return (1);
349 case MFI_STATE_WAIT_HANDSHAKE:
350 mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
351 max_wait = 2;
352 break;
353 case MFI_STATE_OPERATIONAL:
354 mfi_write(sc, MFI_IDB, MFI_INIT_READY);
355 max_wait = 10;
356 break;
357 case MFI_STATE_UNDEFINED:
358 case MFI_STATE_BB_INIT:
359 max_wait = 2;
360 break;
361 case MFI_STATE_FW_INIT:
362 case MFI_STATE_DEVICE_SCAN:
363 case MFI_STATE_FLUSH_CACHE:
364 max_wait = 20;
365 break;
366 default:
367 printf("%s: unknown firmware state %d\n",
368 DEVNAME(sc), fw_state);
369 return (1);
370 }
371 for (i = 0; i < (max_wait * 10); i++) {
372 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
373 if (fw_state == cur_state)
374 DELAY(100000);
375 else
376 break;
377 }
378 if (fw_state == cur_state) {
379 printf("%s: firmware stuck in state %#x\n",
380 DEVNAME(sc), fw_state);
381 return (1);
382 }
383 }
384
385 return (0);
386 }
387
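/*
 * mfi_initialize_firmware: hand the firmware the bus addresses of the
 * reply queue and producer/consumer indices via an MFI_CMD_INIT frame,
 * issued by polling.
 */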
388 int
389 mfi_initialize_firmware(struct mfi_softc *sc)
390 {
391 struct mfi_ccb *ccb;
392 struct mfi_init_frame *init;
393 struct mfi_init_qinfo *qinfo;
394
395 DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
396
397 if ((ccb = mfi_get_ccb(sc)) == NULL)
398 return (1);
399
400 init = &ccb->ccb_frame->mfr_init;
401 qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
402
403 memset(qinfo, 0, sizeof *qinfo);
404 qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
405 qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
406 offsetof(struct mfi_prod_cons, mpc_reply_q));
407 qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
408 offsetof(struct mfi_prod_cons, mpc_producer));
409 qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
410 offsetof(struct mfi_prod_cons, mpc_consumer));
411
412 init->mif_header.mfh_cmd = MFI_CMD_INIT;
413 init->mif_header.mfh_data_len = sizeof *qinfo;
414 init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);
415
416 DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
417 DEVNAME(sc),
418 qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
419 qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);
420
421 if (mfi_poll(ccb)) {
422 printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc));
423 return (1);
424 }
425
426 mfi_put_ccb(ccb);
427
428 return (0);
429 }
430
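/*
 * mfi_get_info: fetch the controller information structure with a
 * MR_DCMD_CTRL_GET_INFO management command; with MFI_DEBUG the result is
 * dumped to the console.
 */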
431 int
432 mfi_get_info(struct mfi_softc *sc)
433 {
434 #ifdef MFI_DEBUG
435 int i;
436 #endif
437 DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
438
439 if (mfi_mgmt(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
440 sizeof(sc->sc_info), &sc->sc_info, NULL))
441 return (1);
442
443 #ifdef MFI_DEBUG
444
445 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
446 printf("%s: active FW %s Version %s date %s time %s\n",
447 DEVNAME(sc),
448 sc->sc_info.mci_image_component[i].mic_name,
449 sc->sc_info.mci_image_component[i].mic_version,
450 sc->sc_info.mci_image_component[i].mic_build_date,
451 sc->sc_info.mci_image_component[i].mic_build_time);
452 }
453
454 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
455 printf("%s: pending FW %s Version %s date %s time %s\n",
456 DEVNAME(sc),
457 sc->sc_info.mci_pending_image_component[i].mic_name,
458 sc->sc_info.mci_pending_image_component[i].mic_version,
459 sc->sc_info.mci_pending_image_component[i].mic_build_date,
460 sc->sc_info.mci_pending_image_component[i].mic_build_time);
461 }
462
463 printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
464 DEVNAME(sc),
465 sc->sc_info.mci_max_arms,
466 sc->sc_info.mci_max_spans,
467 sc->sc_info.mci_max_arrays,
468 sc->sc_info.mci_max_lds,
469 sc->sc_info.mci_product_name);
470
471 printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
472 DEVNAME(sc),
473 sc->sc_info.mci_serial_number,
474 sc->sc_info.mci_hw_present,
475 sc->sc_info.mci_current_fw_time,
476 sc->sc_info.mci_max_cmds,
477 sc->sc_info.mci_max_sg_elements);
478
479 printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
480 DEVNAME(sc),
481 sc->sc_info.mci_max_request_size,
482 sc->sc_info.mci_lds_present,
483 sc->sc_info.mci_lds_degraded,
484 sc->sc_info.mci_lds_offline,
485 sc->sc_info.mci_pd_present);
486
487 printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
488 DEVNAME(sc),
489 sc->sc_info.mci_pd_disks_present,
490 sc->sc_info.mci_pd_disks_pred_failure,
491 sc->sc_info.mci_pd_disks_failed);
492
493 printf("%s: nvram %d mem %d flash %d\n",
494 DEVNAME(sc),
495 sc->sc_info.mci_nvram_size,
496 sc->sc_info.mci_memory_size,
497 sc->sc_info.mci_flash_size);
498
499 printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
500 DEVNAME(sc),
501 sc->sc_info.mci_ram_correctable_errors,
502 sc->sc_info.mci_ram_uncorrectable_errors,
503 sc->sc_info.mci_cluster_allowed,
504 sc->sc_info.mci_cluster_active);
505
506 printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
507 DEVNAME(sc),
508 sc->sc_info.mci_max_strips_per_io,
509 sc->sc_info.mci_raid_levels,
510 sc->sc_info.mci_adapter_ops,
511 sc->sc_info.mci_ld_ops);
512
513 printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
514 DEVNAME(sc),
515 sc->sc_info.mci_stripe_sz_ops.min,
516 sc->sc_info.mci_stripe_sz_ops.max,
517 sc->sc_info.mci_pd_ops,
518 sc->sc_info.mci_pd_mix_support);
519
520 printf("%s: ecc_bucket %d pckg_prop %s\n",
521 DEVNAME(sc),
522 sc->sc_info.mci_ecc_bucket_count,
523 sc->sc_info.mci_package_version);
524
525 printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
526 DEVNAME(sc),
527 sc->sc_info.mci_properties.mcp_seq_num,
528 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
529 sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
530 sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
531
532 printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
533 DEVNAME(sc),
534 sc->sc_info.mci_properties.mcp_rebuild_rate,
535 sc->sc_info.mci_properties.mcp_patrol_read_rate,
536 sc->sc_info.mci_properties.mcp_bgi_rate,
537 sc->sc_info.mci_properties.mcp_cc_rate);
538
539 printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
540 DEVNAME(sc),
541 sc->sc_info.mci_properties.mcp_recon_rate,
542 sc->sc_info.mci_properties.mcp_cache_flush_interval,
543 sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
544 sc->sc_info.mci_properties.mcp_spinup_delay,
545 sc->sc_info.mci_properties.mcp_cluster_enable);
546
547 printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
548 DEVNAME(sc),
549 sc->sc_info.mci_properties.mcp_coercion_mode,
550 sc->sc_info.mci_properties.mcp_alarm_enable,
551 sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
552 sc->sc_info.mci_properties.mcp_disable_battery_warn,
553 sc->sc_info.mci_properties.mcp_ecc_bucket_size);
554
555 printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
556 DEVNAME(sc),
557 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
558 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
559 sc->sc_info.mci_properties.mcp_expose_encl_devices);
560
561 printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
562 DEVNAME(sc),
563 sc->sc_info.mci_pci.mip_vendor,
564 sc->sc_info.mci_pci.mip_device,
565 sc->sc_info.mci_pci.mip_subvendor,
566 sc->sc_info.mci_pci.mip_subdevice);
567
568 printf("%s: type %#x port_count %d port_addr ",
569 DEVNAME(sc),
570 sc->sc_info.mci_host.mih_type,
571 sc->sc_info.mci_host.mih_port_count);
572
573 for (i = 0; i < 8; i++)
574 printf("%.0lx ", sc->sc_info.mci_host.mih_port_addr[i]);
575 printf("\n");
576
   577 	printf("%s: type %#x port_count %d port_addr ",
578 DEVNAME(sc),
579 sc->sc_info.mci_device.mid_type,
580 sc->sc_info.mci_device.mid_port_count);
581
582 for (i = 0; i < 8; i++)
583 printf("%.0lx ", sc->sc_info.mci_device.mid_port_addr[i]);
584 printf("\n");
585 #endif /* MFI_DEBUG */
586
587 return (0);
588 }
589
590 void
591 mfiminphys(struct buf *bp)
592 {
593 DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
594
595 /* XXX currently using MFI_MAXFER = MAXPHYS */
596 if (bp->b_bcount > MFI_MAXFER)
597 bp->b_bcount = MFI_MAXFER;
598 minphys(bp);
599 }
600
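/*
 * mfi_attach: bring the controller up: transition the firmware to ready,
 * size and allocate the reply queue, frame and sense memory, set up the
 * ccbs, initialize the firmware, fetch the controller information and
 * finally attach the logical drives as a scsipi channel.
 */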
601 int
602 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
603 {
604 struct scsipi_adapter *adapt = &sc->sc_adapt;
605 struct scsipi_channel *chan = &sc->sc_chan;
606 uint32_t status, frames;
607 int i;
608
609 DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
610
611 switch (iop) {
612 case MFI_IOP_XSCALE:
613 sc->sc_iop = &mfi_iop_xscale;
614 break;
615 case MFI_IOP_PPC:
616 sc->sc_iop = &mfi_iop_ppc;
617 break;
618 default:
619 panic("%s: unknown iop %d", DEVNAME(sc), iop);
620 }
621
622 if (mfi_transition_firmware(sc))
623 return (1);
624
625 TAILQ_INIT(&sc->sc_ccb_freeq);
626
627 status = mfi_fw_state(sc);
628 sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
629 sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
630 DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
631 DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);
632
633 /* consumer/producer and reply queue memory */
634 sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
635 sizeof(struct mfi_prod_cons));
636 if (sc->sc_pcq == NULL) {
637 aprint_error("%s: unable to allocate reply queue memory\n",
638 DEVNAME(sc));
639 goto nopcq;
640 }
641 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
642 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
643 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
644
645 /* frame memory */
646 /* we are not doing 64 bit IO so only calculate # of 32 bit frames */
647 frames = (sizeof(struct mfi_sg32) * sc->sc_max_sgl +
648 MFI_FRAME_SIZE - 1) / MFI_FRAME_SIZE + 1;
649 sc->sc_frames_size = frames * MFI_FRAME_SIZE;
650 sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
651 if (sc->sc_frames == NULL) {
652 aprint_error("%s: unable to allocate frame memory\n",
653 DEVNAME(sc));
654 goto noframe;
655 }
656 /* XXX hack, fix this */
657 if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
658 aprint_error("%s: improper frame alignment (%#llx) FIXME\n",
659 DEVNAME(sc), (long long int)MFIMEM_DVA(sc->sc_frames));
660 goto noframe;
661 }
662
663 /* sense memory */
664 sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
665 if (sc->sc_sense == NULL) {
666 aprint_error("%s: unable to allocate sense memory\n",
667 DEVNAME(sc));
668 goto nosense;
669 }
670
671 /* now that we have all memory bits go initialize ccbs */
672 if (mfi_init_ccb(sc)) {
673 aprint_error("%s: could not init ccb list\n", DEVNAME(sc));
674 goto noinit;
675 }
676
677 /* kickstart firmware with all addresses and pointers */
678 if (mfi_initialize_firmware(sc)) {
679 aprint_error("%s: could not initialize firmware\n",
680 DEVNAME(sc));
681 goto noinit;
682 }
683
684 if (mfi_get_info(sc)) {
685 aprint_error("%s: could not retrieve controller information\n",
686 DEVNAME(sc));
687 goto noinit;
688 }
689
690 aprint_normal("%s: logical drives %d, version %s, %dMB RAM\n",
691 DEVNAME(sc),
692 sc->sc_info.mci_lds_present,
693 sc->sc_info.mci_package_version,
694 sc->sc_info.mci_memory_size);
695
696 sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
697 sc->sc_max_ld = sc->sc_ld_cnt;
698 for (i = 0; i < sc->sc_ld_cnt; i++)
699 sc->sc_ld[i].ld_present = 1;
700
701 memset(adapt, 0, sizeof(*adapt));
702 adapt->adapt_dev = &sc->sc_dev;
703 adapt->adapt_nchannels = 1;
704 if (sc->sc_ld_cnt)
705 adapt->adapt_openings = sc->sc_max_cmds / sc->sc_ld_cnt;
706 else
707 adapt->adapt_openings = sc->sc_max_cmds;
708 adapt->adapt_max_periph = adapt->adapt_openings;
709 adapt->adapt_request = mfi_scsipi_request;
710 adapt->adapt_minphys = mfiminphys;
711 adapt->adapt_ioctl = mfi_scsi_ioctl;
712
713 memset(chan, 0, sizeof(*chan));
714 chan->chan_adapter = adapt;
715 chan->chan_bustype = &scsi_bustype;
716 chan->chan_channel = 0;
717 chan->chan_flags = 0;
718 chan->chan_nluns = 8;
719 chan->chan_ntargets = MFI_MAX_LD;
720 chan->chan_id = MFI_MAX_LD;
721
722 (void) config_found(&sc->sc_dev, &sc->sc_chan, scsiprint);
723
724 /* enable interrupts */
725 mfi_intr_enable(sc);
726
727 #if NBIO > 0
728 if (bio_register(&sc->sc_dev, mfi_ioctl) != 0)
729 panic("%s: controller registration failed", DEVNAME(sc));
730 else
731 sc->sc_ioctl = mfi_ioctl;
732 if (mfi_create_sensors(sc) != 0)
733 aprint_error("%s: unable to create sensors\n", DEVNAME(sc));
734 #endif /* NBIO > 0 */
735
736 return (0);
737 noinit:
738 mfi_freemem(sc, sc->sc_sense);
739 nosense:
740 mfi_freemem(sc, sc->sc_frames);
741 noframe:
742 mfi_freemem(sc, sc->sc_pcq);
743 nopcq:
744 return (1);
745 }
746
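/*
 * mfi_poll: post a frame and busy-wait (up to roughly five seconds) for
 * the firmware to update its completion status, bypassing the reply queue.
 */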
747 int
748 mfi_poll(struct mfi_ccb *ccb)
749 {
750 struct mfi_softc *sc = ccb->ccb_sc;
751 struct mfi_frame_header *hdr;
752 int to = 0;
753
754 DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));
755
756 hdr = &ccb->ccb_frame->mfr_header;
757 hdr->mfh_cmd_status = 0xff;
758 hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
759
760 mfi_post(sc, ccb);
761 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
762 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
763 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
764
765 while (hdr->mfh_cmd_status == 0xff) {
766 delay(1000);
767 if (to++ > 5000) /* XXX 5 seconds busywait sucks */
768 break;
769 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
770 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
771 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
772 }
773 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
774 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
775 sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
776
777 if (ccb->ccb_data != NULL) {
778 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
779 DEVNAME(sc));
780 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
781 ccb->ccb_dmamap->dm_mapsize,
782 (ccb->ccb_direction & MFI_DATA_IN) ?
783 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
784
785 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
786 }
787
788 if (hdr->mfh_cmd_status == 0xff) {
789 printf("%s: timeout on ccb %d\n", DEVNAME(sc),
790 hdr->mfh_context);
791 ccb->ccb_flags |= MFI_CCB_F_ERR;
792 return (1);
793 }
794
795 return (0);
796 }
797
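/*
 * mfi_intr: interrupt handler; walk the reply queue from the consumer to
 * the producer index and run the completion handler of every finished ccb.
 */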
798 int
799 mfi_intr(void *arg)
800 {
801 struct mfi_softc *sc = arg;
802 struct mfi_prod_cons *pcq;
803 struct mfi_ccb *ccb;
804 uint32_t producer, consumer, ctx;
805 int claimed = 0;
806
807 if (!mfi_my_intr(sc))
808 return 0;
809
810 pcq = MFIMEM_KVA(sc->sc_pcq);
811
812 DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
813 (u_long)sc, (u_long)pcq);
814
815 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
816 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
817 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
818
819 producer = pcq->mpc_producer;
820 consumer = pcq->mpc_consumer;
821
822 while (consumer != producer) {
823 DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
824 DEVNAME(sc), producer, consumer);
825
826 ctx = pcq->mpc_reply_q[consumer];
827 pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
828 if (ctx == MFI_INVALID_CTX)
829 printf("%s: invalid context, p: %d c: %d\n",
830 DEVNAME(sc), producer, consumer);
831 else {
832 /* XXX remove from queue and call scsi_done */
833 ccb = &sc->sc_ccb[ctx];
834 DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
835 DEVNAME(sc), ctx);
836 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
837 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
838 sc->sc_frames_size,
839 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
840 ccb->ccb_done(ccb);
841
842 claimed = 1;
843 }
844 consumer++;
845 if (consumer == (sc->sc_max_cmds + 1))
846 consumer = 0;
847 }
848
849 pcq->mpc_consumer = consumer;
850 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
851 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
852 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
853
854 return (claimed);
855 }
856
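/*
 * mfi_scsi_io: build an MFI read/write frame for a logical-drive transfer
 * described by block number and block count.
 */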
857 int
858 mfi_scsi_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint32_t blockno,
859 uint32_t blockcnt)
860 {
861 struct scsipi_periph *periph = xs->xs_periph;
862 struct mfi_io_frame *io;
863
864 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
865 periph->periph_channel->chan_adapter->adapt_dev->dv_xname,
866 periph->periph_target);
867
868 if (!xs->data)
869 return (1);
870
871 io = &ccb->ccb_frame->mfr_io;
872 if (xs->xs_control & XS_CTL_DATA_IN) {
873 io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
874 ccb->ccb_direction = MFI_DATA_IN;
875 } else {
876 io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
877 ccb->ccb_direction = MFI_DATA_OUT;
878 }
879 io->mif_header.mfh_target_id = periph->periph_target;
880 io->mif_header.mfh_timeout = 0;
881 io->mif_header.mfh_flags = 0;
882 io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
   883 	io->mif_header.mfh_data_len = blockcnt;
884 io->mif_lba_hi = 0;
885 io->mif_lba_lo = blockno;
886 io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
887 io->mif_sense_addr_hi = 0;
888
889 ccb->ccb_done = mfi_scsi_xs_done;
890 ccb->ccb_xs = xs;
891 ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
892 ccb->ccb_sgl = &io->mif_sgl;
893 ccb->ccb_data = xs->data;
894 ccb->ccb_len = xs->datalen;
895
   896 	if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
   897 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
898 return (1);
899
900 return (0);
901 }
902
903 void
904 mfi_scsi_xs_done(struct mfi_ccb *ccb)
905 {
906 struct scsipi_xfer *xs = ccb->ccb_xs;
907 struct mfi_softc *sc = ccb->ccb_sc;
908 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
909
910 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
911 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
912
913 if (xs->data != NULL) {
914 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
915 DEVNAME(sc));
916 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
917 ccb->ccb_dmamap->dm_mapsize,
918 (xs->xs_control & XS_CTL_DATA_IN) ?
919 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
920
921 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
922 }
923
924 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
925 xs->error = XS_DRIVER_STUFFUP;
926 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
927 DEVNAME(sc), hdr->mfh_cmd_status);
928
929 if (hdr->mfh_scsi_status != 0) {
930 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
931 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
932 MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
933 DNPRINTF(MFI_D_INTR,
934 "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
935 DEVNAME(sc), hdr->mfh_scsi_status,
936 (u_long)&xs->sense, (u_long)ccb->ccb_sense);
937 memset(&xs->sense, 0, sizeof(xs->sense));
938 memcpy(&xs->sense, ccb->ccb_sense,
939 sizeof(struct scsi_sense_data));
940 xs->error = XS_SENSE;
941 }
942 } else {
943 xs->error = XS_NOERROR;
944 xs->status = SCSI_OK;
945 xs->resid = 0;
946 }
947
948 mfi_put_ccb(ccb);
949 scsipi_done(xs);
950 }
951
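/*
 * mfi_scsi_ld: wrap a SCSI CDB in an MFI pass-through frame addressed to
 * a logical drive.
 */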
952 int
953 mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
954 {
955 struct mfi_pass_frame *pf;
956 struct scsipi_periph *periph = xs->xs_periph;
957
958 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
959 periph->periph_channel->chan_adapter->adapt_dev->dv_xname,
960 periph->periph_target);
961
962 pf = &ccb->ccb_frame->mfr_pass;
963 pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
964 pf->mpf_header.mfh_target_id = periph->periph_target;
965 pf->mpf_header.mfh_lun_id = 0;
966 pf->mpf_header.mfh_cdb_len = xs->cmdlen;
967 pf->mpf_header.mfh_timeout = 0;
   968 	pf->mpf_header.mfh_data_len = xs->datalen; /* XXX */
969 pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
970
971 pf->mpf_sense_addr_hi = 0;
972 pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);
973
974 memset(pf->mpf_cdb, 0, 16);
975 memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);
976
977 ccb->ccb_done = mfi_scsi_xs_done;
978 ccb->ccb_xs = xs;
979 ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
980 ccb->ccb_sgl = &pf->mpf_sgl;
981
982 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
983 ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
984 MFI_DATA_IN : MFI_DATA_OUT;
985 else
986 ccb->ccb_direction = MFI_DATA_NONE;
987
988 if (xs->data) {
989 ccb->ccb_data = xs->data;
990 ccb->ccb_len = xs->datalen;
991
   992 		if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
   993 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
994 return (1);
995 }
996
997 return (0);
998 }
999
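/*
 * mfi_scsipi_request: scsipi entry point; translate READ/WRITE commands
 * into MFI I/O frames, handle SYNCHRONIZE CACHE via a management command
 * and pass everything else through to the firmware.
 */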
1000 void
1001 mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1002 void *arg)
1003 {
1004 struct scsipi_periph *periph;
1005 struct scsipi_xfer *xs;
1006 struct scsipi_adapter *adapt = chan->chan_adapter;
1007 struct mfi_softc *sc = (void *) adapt->adapt_dev;
1008 struct mfi_ccb *ccb;
1009 struct scsi_rw_6 *rw;
1010 struct scsipi_rw_10 *rwb;
1011 uint32_t blockno, blockcnt;
1012 uint8_t target;
1013 uint8_t mbox[MFI_MBOX_SIZE];
1014 int s;
1015
1016 switch (req) {
1017 case ADAPTER_REQ_GROW_RESOURCES:
1018 /* Not supported. */
1019 return;
1020 case ADAPTER_REQ_SET_XFER_MODE:
1021 /* Not supported. */
1022 return;
1023 case ADAPTER_REQ_RUN_XFER:
1024 break;
1025 }
1026
1027 xs = arg;
1028
1029 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x\n",
1030 DEVNAME(sc), req, xs->cmd->opcode);
1031
1032 periph = xs->xs_periph;
1033 target = periph->periph_target;
1034
1035 s = splbio();
1036 if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
1037 periph->periph_lun != 0) {
1038 DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1039 DEVNAME(sc), target);
1040 xs->error = XS_SELTIMEOUT;
1041 scsipi_done(xs);
1042 splx(s);
1043 return;
1044 }
1045
1046 if ((ccb = mfi_get_ccb(sc)) == NULL) {
1047 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
1048 xs->error = XS_RESOURCE_SHORTAGE;
1049 scsipi_done(xs);
1050 splx(s);
1051 return;
1052 }
1053
1054 switch (xs->cmd->opcode) {
1055 /* IO path */
1056 case READ_10:
1057 case WRITE_10:
1058 rwb = (struct scsipi_rw_10 *)xs->cmd;
1059 blockno = _4btol(rwb->addr);
1060 blockcnt = _2btol(rwb->length);
1061 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1062 mfi_put_ccb(ccb);
1063 goto stuffup;
1064 }
1065 break;
1066
1067 case SCSI_READ_6_COMMAND:
1068 case SCSI_WRITE_6_COMMAND:
1069 rw = (struct scsi_rw_6 *)xs->cmd;
1070 blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
1071 blockcnt = rw->length ? rw->length : 0x100;
1072 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1073 mfi_put_ccb(ccb);
1074 goto stuffup;
1075 }
1076 break;
1077
1078 case SCSI_SYNCHRONIZE_CACHE_10:
1079 mfi_put_ccb(ccb); /* we don't need this */
1080
		memset(mbox, 0, sizeof(mbox));
  1081 		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1082 if (mfi_mgmt(sc, MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE,
1083 0, NULL, mbox))
1084 goto stuffup;
1085 xs->error = XS_NOERROR;
1086 xs->status = SCSI_OK;
1087 xs->resid = 0;
1088 scsipi_done(xs);
1089 splx(s);
1090 return;
1091 /* NOTREACHED */
1092
  1093 	/* hand it off to the firmware and let it deal with it */
1094 case SCSI_TEST_UNIT_READY:
1095 /* save off sd? after autoconf */
1096 if (!cold) /* XXX bogus */
1097 strlcpy(sc->sc_ld[target].ld_dev, sc->sc_dev.dv_xname,
1098 sizeof(sc->sc_ld[target].ld_dev));
1099 /* FALLTHROUGH */
1100
1101 default:
1102 if (mfi_scsi_ld(ccb, xs)) {
1103 mfi_put_ccb(ccb);
1104 goto stuffup;
1105 }
1106 break;
1107 }
1108
1109 DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1110
1111 if (xs->xs_control & XS_CTL_POLL) {
1112 if (mfi_poll(ccb)) {
1113 /* XXX check for sense in ccb->ccb_sense? */
1114 printf("%s: mfi_scsipi_request poll failed\n",
1115 DEVNAME(sc));
  1117 			bzero(&xs->sense, sizeof(xs->sense));
1118 xs->sense.scsi_sense.response_code =
1119 SSD_RCODE_VALID | SSD_RCODE_CURRENT;
1120 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
1121 xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
1122 xs->error = XS_SENSE;
1123 xs->status = SCSI_CHECK;
1124 } else {
1125 DNPRINTF(MFI_D_DMA,
1126 "%s: mfi_scsipi_request poll complete %d\n",
1127 DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
1128 xs->error = XS_NOERROR;
1129 xs->status = SCSI_OK;
1130 xs->resid = 0;
1131 }
1132 mfi_put_ccb(ccb);
1133 scsipi_done(xs);
1134 splx(s);
1135 return;
1136 }
1137
1138 mfi_post(sc, ccb);
1139
1140 DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
1141 ccb->ccb_dmamap->dm_nsegs);
1142
1143 splx(s);
1144 return;
1145
1146 stuffup:
1147 xs->error = XS_DRIVER_STUFFUP;
1148 scsipi_done(xs);
1149 splx(s);
1150 }
1151
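/*
 * mfi_create_sgl: load the ccb's data buffer into its DMA map and fill in
 * the 32-bit scatter/gather list of the frame, updating the frame size
 * and extra frame count.
 */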
1152 int
1153 mfi_create_sgl(struct mfi_ccb *ccb, int flags)
1154 {
1155 struct mfi_softc *sc = ccb->ccb_sc;
1156 struct mfi_frame_header *hdr;
1157 bus_dma_segment_t *sgd;
1158 union mfi_sgl *sgl;
1159 int error, i;
1160
1161 DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
1162 (u_long)ccb->ccb_data);
1163
1164 if (!ccb->ccb_data)
1165 return (1);
1166
1167 error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
1168 ccb->ccb_data, ccb->ccb_len, NULL, flags);
1169 if (error) {
1170 if (error == EFBIG)
1171 printf("more than %d dma segs\n",
1172 sc->sc_max_sgl);
1173 else
1174 printf("error %d loading dma map\n", error);
1175 return (1);
1176 }
1177
1178 hdr = &ccb->ccb_frame->mfr_header;
1179 sgl = ccb->ccb_sgl;
1180 sgd = ccb->ccb_dmamap->dm_segs;
1181 for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1182 sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1183 sgl->sg32[i].len = htole32(sgd[i].ds_len);
1184 DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
1185 DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1186 }
1187
1188 if (ccb->ccb_direction == MFI_DATA_IN) {
1189 hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1190 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1191 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1192 } else {
1193 hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1194 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1195 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1196 }
1197
1198 hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1199 /* for 64 bit io make the sizeof a variable to hold whatever sg size */
1200 ccb->ccb_frame_size += sizeof(struct mfi_sg32) *
1201 ccb->ccb_dmamap->dm_nsegs;
1202 ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1203
1204 DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
1205 " dm_nsegs: %d extra_frames: %d\n",
1206 DEVNAME(sc),
1207 hdr->mfh_sg_count,
1208 ccb->ccb_frame_size,
1209 sc->sc_frames_size,
1210 ccb->ccb_dmamap->dm_nsegs,
1211 ccb->ccb_extra_frames);
1212
1213 return (0);
1214 }
1215
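/*
 * mfi_mgmt: issue a DCMD management command, optionally with a data
 * buffer and mailbox; polled while cold, otherwise posted and slept on
 * until the completion handler wakes us.
 */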
1216 int
1217 mfi_mgmt(struct mfi_softc *sc, uint32_t opc, uint32_t dir, uint32_t len,
1218 void *buf, uint8_t *mbox)
1219 {
1220 struct mfi_ccb *ccb;
1221 struct mfi_dcmd_frame *dcmd;
1222 int rv = 1;
1223
1224 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(sc), opc);
1225
1226 if ((ccb = mfi_get_ccb(sc)) == NULL)
1227 return (rv);
1228
1229 dcmd = &ccb->ccb_frame->mfr_dcmd;
1230 memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1231 dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1232 dcmd->mdf_header.mfh_timeout = 0;
1233
1234 dcmd->mdf_opcode = opc;
1235 dcmd->mdf_header.mfh_data_len = 0;
1236 ccb->ccb_direction = dir;
1237 ccb->ccb_done = mfi_mgmt_done;
1238
1239 ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1240
1241 /* handle special opcodes */
1242 if (mbox)
1243 memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1244
1245 if (dir != MFI_DATA_NONE) {
1246 dcmd->mdf_header.mfh_data_len = len;
1247 ccb->ccb_data = buf;
1248 ccb->ccb_len = len;
1249 ccb->ccb_sgl = &dcmd->mdf_sgl;
1250
1251 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
1252 goto done;
1253 }
1254
1255 if (cold) {
1256 if (mfi_poll(ccb))
1257 goto done;
1258 } else {
1259 mfi_post(sc, ccb);
1260
1261 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt sleeping\n", DEVNAME(sc));
1262 while (ccb->ccb_state != MFI_CCB_DONE)
1263 tsleep(ccb, PRIBIO, "mfi_mgmt", 0);
1264
1265 if (ccb->ccb_flags & MFI_CCB_F_ERR)
1266 goto done;
1267 }
1268
1269 rv = 0;
1270
1271 done:
1272 mfi_put_ccb(ccb);
1273 return (rv);
1274 }
1275
1276 void
1277 mfi_mgmt_done(struct mfi_ccb *ccb)
1278 {
1279 struct mfi_softc *sc = ccb->ccb_sc;
1280 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1281
1282 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
1283 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1284
1285 if (ccb->ccb_data != NULL) {
1286 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
1287 DEVNAME(sc));
1288 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1289 ccb->ccb_dmamap->dm_mapsize,
1290 (ccb->ccb_direction & MFI_DATA_IN) ?
1291 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1292
1293 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1294 }
1295
1296 if (hdr->mfh_cmd_status != MFI_STAT_OK)
1297 ccb->ccb_flags |= MFI_CCB_F_ERR;
1298
1299 ccb->ccb_state = MFI_CCB_DONE;
1300
1301 wakeup(ccb);
1302 }
1303
1304
1305 int
1306 mfi_scsi_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
1307 int flag, struct proc *p)
1308 {
1309 return (ENOTTY);
1310 }
1311
1312 #if NBIO > 0
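/*
 * mfi_ioctl: bio(4) ioctl dispatcher for RAID management requests
 * (inquiry, volume, disk, alarm, blink and set-state), run at splbio.
 */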
1313 int
1314 mfi_ioctl(struct device *dev, u_long cmd, void *addr)
1315 {
1316 struct mfi_softc *sc = (struct mfi_softc *)dev;
1317 int error = 0;
1318
1319 int s = splbio();
1320 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1321
1322 switch (cmd) {
1323 case BIOCINQ:
1324 DNPRINTF(MFI_D_IOCTL, "inq\n");
1325 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1326 break;
1327
1328 case BIOCVOL:
1329 DNPRINTF(MFI_D_IOCTL, "vol\n");
1330 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1331 break;
1332
1333 case BIOCDISK:
1334 DNPRINTF(MFI_D_IOCTL, "disk\n");
1335 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1336 break;
1337
1338 case BIOCALARM:
1339 DNPRINTF(MFI_D_IOCTL, "alarm\n");
1340 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1341 break;
1342
1343 case BIOCBLINK:
1344 DNPRINTF(MFI_D_IOCTL, "blink\n");
1345 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1346 break;
1347
1348 case BIOCSETSTATE:
1349 DNPRINTF(MFI_D_IOCTL, "setstate\n");
1350 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1351 break;
1352
1353 default:
1354 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1355 error = EINVAL;
1356 }
1357 splx(s);
1358 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
1359 return (error);
1360 }
1361
1362 int
1363 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1364 {
1365 struct mfi_conf *cfg;
1366 int rv = EINVAL;
1367
1368 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1369
1370 if (mfi_get_info(sc)) {
1371 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
1372 DEVNAME(sc));
1373 return (EIO);
1374 }
1375
1376 /* get figures */
1377 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1378 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1379 goto freeme;
1380
1381 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1382 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1383 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1384
1385 rv = 0;
1386 freeme:
1387 free(cfg, M_DEVBUF);
1388 return (rv);
1389 }
1390
1391 int
1392 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
1393 {
1394 int i, per, rv = EINVAL;
1395 uint8_t mbox[MFI_MBOX_SIZE];
1396
1397 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
1398 DEVNAME(sc), bv->bv_volid);
1399
1400 if (mfi_mgmt(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1401 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1402 goto done;
1403
1404 i = bv->bv_volid;
1405 mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1406 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
1407 DEVNAME(sc), mbox[0]);
1408
1409 if (mfi_mgmt(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
1410 sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox))
1411 goto done;
1412
1413 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
1414 /* go do hotspares */
1415 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
1416 goto done;
1417 }
1418
1419 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
1420
1421 switch(sc->sc_ld_list.mll_list[i].mll_state) {
1422 case MFI_LD_OFFLINE:
1423 bv->bv_status = BIOC_SVOFFLINE;
1424 break;
1425
1426 case MFI_LD_PART_DEGRADED:
1427 case MFI_LD_DEGRADED:
1428 bv->bv_status = BIOC_SVDEGRADED;
1429 break;
1430
1431 case MFI_LD_ONLINE:
1432 bv->bv_status = BIOC_SVONLINE;
1433 break;
1434
1435 default:
1436 bv->bv_status = BIOC_SVINVALID;
1437 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
1438 DEVNAME(sc),
1439 sc->sc_ld_list.mll_list[i].mll_state);
1440 }
1441
1442 /* additional status can modify MFI status */
1443 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
1444 case MFI_LD_PROG_CC:
1445 case MFI_LD_PROG_BGI:
1446 bv->bv_status = BIOC_SVSCRUB;
1447 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
1448 bv->bv_percent = (per * 100) / 0xffff;
1449 bv->bv_seconds =
1450 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
1451 break;
1452
1453 case MFI_LD_PROG_FGI:
1454 case MFI_LD_PROG_RECONSTRUCT:
1455 /* nothing yet */
1456 break;
1457 }
1458
1459 /*
1460 * The RAID levels are determined per the SNIA DDF spec, this is only
  1461 	 * a subset that is valid for the MFI controller.
1462 */
1463 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
1464 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
1465 MFI_DDF_SRL_SPANNED)
1466 bv->bv_level *= 10;
1467
1468 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
1469 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
1470
1471 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
1472
1473 rv = 0;
1474 done:
1475 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
1476 DEVNAME(sc), rv);
1477 return (rv);
1478 }
1479
1480 int
1481 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
1482 {
1483 struct mfi_conf *cfg;
1484 struct mfi_array *ar;
1485 struct mfi_ld_cfg *ld;
1486 struct mfi_pd_details *pd;
1487 struct scsipi_inquiry_data *inqbuf;
1488 char vend[8+16+4+1];
1489 int i, rv = EINVAL;
1490 int arr, vol, disk;
1491 uint32_t size;
1492 uint8_t mbox[MFI_MBOX_SIZE];
1493
1494 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
1495 DEVNAME(sc), bd->bd_diskid);
1496
1497 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1498
1499 /* send single element command to retrieve size for full structure */
1500 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1501 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1502 goto freeme;
1503
1504 size = cfg->mfc_size;
1505 free(cfg, M_DEVBUF);
1506
1507 /* memory for read config */
1508 cfg = malloc(size, M_DEVBUF, M_WAITOK);
1509 memset(cfg, 0, size);
1510 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
1511 goto freeme;
1512
1513 ar = cfg->mfc_array;
1514
1515 /* calculate offset to ld structure */
1516 ld = (struct mfi_ld_cfg *)(
1517 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1518 cfg->mfc_array_size * cfg->mfc_no_array);
1519
1520 vol = bd->bd_volid;
1521
1522 if (vol >= cfg->mfc_no_ld) {
1523 /* do hotspares */
1524 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
1525 goto freeme;
1526 }
1527
1528 /* find corresponding array for ld */
1529 for (i = 0, arr = 0; i < vol; i++)
1530 arr += ld[i].mlc_parm.mpa_span_depth;
1531
1532 /* offset disk into pd list */
1533 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
1534
1535 /* offset array index into the next spans */
1536 arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
1537
1538 bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
1539 switch (ar[arr].pd[disk].mar_pd_state){
1540 case MFI_PD_UNCONFIG_GOOD:
1541 bd->bd_status = BIOC_SDUNUSED;
1542 break;
1543
1544 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
1545 bd->bd_status = BIOC_SDHOTSPARE;
1546 break;
1547
1548 case MFI_PD_OFFLINE:
1549 bd->bd_status = BIOC_SDOFFLINE;
1550 break;
1551
1552 case MFI_PD_FAILED:
1553 bd->bd_status = BIOC_SDFAILED;
1554 break;
1555
1556 case MFI_PD_REBUILD:
1557 bd->bd_status = BIOC_SDREBUILD;
1558 break;
1559
1560 case MFI_PD_ONLINE:
1561 bd->bd_status = BIOC_SDONLINE;
1562 break;
1563
1564 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
1565 default:
1566 bd->bd_status = BIOC_SDINVALID;
1567 break;
1568
1569 }
1570
1571 /* get the remaining fields */
	memset(mbox, 0, sizeof mbox);
  1572 	*((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
1573 memset(pd, 0, sizeof(*pd));
1574 if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1575 sizeof *pd, pd, mbox))
1576 goto freeme;
1577
1578 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
1579
1580 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
1581 bd->bd_channel = pd->mpd_enc_idx;
1582
1583 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1584 memcpy(vend, inqbuf->vendor, sizeof vend - 1);
1585 vend[sizeof vend - 1] = '\0';
1586 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
1587
1588 /* XXX find a way to retrieve serial nr from drive */
1589 /* XXX find a way to get bd_procdev */
1590
1591 rv = 0;
1592 freeme:
1593 free(pd, M_DEVBUF);
1594 free(cfg, M_DEVBUF);
1595
1596 return (rv);
1597 }
1598
1599 int
1600 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1601 {
1602 uint32_t opc, dir = MFI_DATA_NONE;
1603 int rv = 0;
1604 int8_t ret;
1605
1606 switch(ba->ba_opcode) {
1607 case BIOC_SADISABLE:
1608 opc = MR_DCMD_SPEAKER_DISABLE;
1609 break;
1610
1611 case BIOC_SAENABLE:
1612 opc = MR_DCMD_SPEAKER_ENABLE;
1613 break;
1614
1615 case BIOC_SASILENCE:
1616 opc = MR_DCMD_SPEAKER_SILENCE;
1617 break;
1618
1619 case BIOC_GASTATUS:
1620 opc = MR_DCMD_SPEAKER_GET;
1621 dir = MFI_DATA_IN;
1622 break;
1623
1624 case BIOC_SATEST:
1625 opc = MR_DCMD_SPEAKER_TEST;
1626 break;
1627
1628 default:
1629 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1630 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1631 return (EINVAL);
1632 }
1633
1634 if (mfi_mgmt(sc, opc, dir, sizeof(ret), &ret, NULL))
1635 rv = EINVAL;
1636 else
1637 if (ba->ba_opcode == BIOC_GASTATUS)
1638 ba->ba_status = ret;
1639 else
1640 ba->ba_status = 0;
1641
1642 return (rv);
1643 }
1644
1645 int
1646 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1647 {
1648 int i, found, rv = EINVAL;
1649 uint8_t mbox[MFI_MBOX_SIZE];
1650 uint32_t cmd;
1651 struct mfi_pd_list *pd;
1652
1653 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1654 bb->bb_status);
1655
1656 /* channel 0 means not in an enclosure so can't be blinked */
1657 if (bb->bb_channel == 0)
1658 return (EINVAL);
1659
1660 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1661
1662 if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1663 MFI_PD_LIST_SIZE, pd, NULL))
1664 goto done;
1665
1666 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1667 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1668 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1669 found = 1;
1670 break;
1671 }
1672
1673 if (!found)
1674 goto done;
1675
1676 memset(mbox, 0, sizeof mbox);
1677
  1678 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1679
1680 switch (bb->bb_status) {
1681 case BIOC_SBUNBLINK:
1682 cmd = MR_DCMD_PD_UNBLINK;
1683 break;
1684
1685 case BIOC_SBBLINK:
1686 cmd = MR_DCMD_PD_BLINK;
1687 break;
1688
1689 case BIOC_SBALARM:
1690 default:
1691 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1692 "opcode %x\n", DEVNAME(sc), bb->bb_status);
1693 goto done;
1694 }
1695
1696
1697 if (mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1698 goto done;
1699
1700 rv = 0;
1701 done:
1702 free(pd, M_DEVBUF);
1703 return (rv);
1704 }
1705
1706 int
1707 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
1708 {
1709 struct mfi_pd_list *pd;
1710 int i, found, rv = EINVAL;
1711 uint8_t mbox[MFI_MBOX_SIZE];
1712 uint32_t cmd;
1713
1714 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
1715 bs->bs_status);
1716
1717 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1718
1719 if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1720 MFI_PD_LIST_SIZE, pd, NULL))
1721 goto done;
1722
1723 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1724 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
1725 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
1726 found = 1;
1727 break;
1728 }
1729
1730 if (!found)
1731 goto done;
1732
1733 memset(mbox, 0, sizeof mbox);
1734
  1735 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1736
1737 switch (bs->bs_status) {
1738 case BIOC_SSONLINE:
1739 mbox[2] = MFI_PD_ONLINE;
1740 cmd = MD_DCMD_PD_SET_STATE;
1741 break;
1742
1743 case BIOC_SSOFFLINE:
1744 mbox[2] = MFI_PD_OFFLINE;
1745 cmd = MD_DCMD_PD_SET_STATE;
1746 break;
1747
1748 case BIOC_SSHOTSPARE:
1749 mbox[2] = MFI_PD_HOTSPARE;
1750 cmd = MD_DCMD_PD_SET_STATE;
1751 break;
1752 /*
1753 case BIOC_SSREBUILD:
1754 cmd = MD_DCMD_PD_REBUILD;
1755 break;
1756 */
1757 default:
1758 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
1759 "opcode %x\n", DEVNAME(sc), bs->bs_status);
1760 goto done;
1761 }
1762
1763
  1764 	if (mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1765 goto done;
1766
1767 rv = 0;
1768 done:
1769 free(pd, M_DEVBUF);
1770 return (rv);
1771 }
1772
1773 int
1774 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
1775 {
1776 struct mfi_conf *cfg;
1777 struct mfi_hotspare *hs;
1778 struct mfi_pd_details *pd;
1779 struct bioc_disk *sdhs;
1780 struct bioc_vol *vdhs;
1781 struct scsipi_inquiry_data *inqbuf;
1782 char vend[8+16+4+1];
1783 int i, rv = EINVAL;
1784 uint32_t size;
1785 uint8_t mbox[MFI_MBOX_SIZE];
1786
1787 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
1788
1789 if (!bio_hs)
1790 return (EINVAL);
1791
1792 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1793
1794 /* send single element command to retrieve size for full structure */
1795 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1796 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1797 goto freeme;
1798
1799 size = cfg->mfc_size;
1800 free(cfg, M_DEVBUF);
1801
1802 /* memory for read config */
1803 cfg = malloc(size, M_DEVBUF, M_WAITOK);
1804 memset(cfg, 0, size);
1805 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
1806 goto freeme;
1807
1808 /* calculate offset to hs structure */
1809 hs = (struct mfi_hotspare *)(
1810 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1811 cfg->mfc_array_size * cfg->mfc_no_array +
1812 cfg->mfc_ld_size * cfg->mfc_no_ld);
1813
1814 if (volid < cfg->mfc_no_ld)
1815 goto freeme; /* not a hotspare */
1816
1817 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
1818 goto freeme; /* not a hotspare */
1819
1820 /* offset into hotspare structure */
1821 i = volid - cfg->mfc_no_ld;
1822
1823 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
1824 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
1825 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
1826
1827 /* get pd fields */
1828 memset(mbox, 0, sizeof mbox);
1829 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
1830 if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1831 sizeof *pd, pd, mbox)) {
1832 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
1833 DEVNAME(sc));
1834 goto freeme;
1835 }
1836
1837 switch (type) {
1838 case MFI_MGMT_VD:
1839 vdhs = bio_hs;
1840 vdhs->bv_status = BIOC_SVONLINE;
1841 vdhs->bv_size = pd->mpd_size / 2; /* XXX why? / 2 */
1842 vdhs->bv_level = -1; /* hotspare */
1843 vdhs->bv_nodisk = 1;
1844 break;
1845
1846 case MFI_MGMT_SD:
1847 sdhs = bio_hs;
1848 sdhs->bd_status = BIOC_SDHOTSPARE;
1849 sdhs->bd_size = pd->mpd_size / 2; /* XXX why? / 2 */
1850 sdhs->bd_channel = pd->mpd_enc_idx;
1851 sdhs->bd_target = pd->mpd_enc_slot;
1852 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1853 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
1854 vend[sizeof vend - 1] = '\0';
1855 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
1856 break;
1857
1858 default:
1859 goto freeme;
1860 }
1861
1862 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
1863 rv = 0;
1864 freeme:
1865 free(pd, M_DEVBUF);
1866 free(cfg, M_DEVBUF);
1867
1868 return (rv);
1869 }
1870
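/*
 * mfi_create_sensors: register one envsys drive sensor per logical drive
 * with sysmon; mfi_sensor_refresh maps the bio(4) volume status onto the
 * sensor state.
 */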
1871 int
1872 mfi_create_sensors(struct mfi_softc *sc)
1873 {
1874 int i;
1875 int nsensors = sc->sc_ld_cnt;
1876
1877 sc->sc_sme = sysmon_envsys_create();
1878 sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
1879 M_DEVBUF, M_NOWAIT | M_ZERO);
1880 if (sc->sc_sensor == NULL) {
1881 aprint_error("%s: can't allocate envsys_data_t\n",
1882 DEVNAME(sc));
1883 return(ENOMEM);
1884 }
1885
1886 for (i = 0; i < nsensors; i++) {
1887 sc->sc_sensor[i].units = ENVSYS_DRIVE;
1888 sc->sc_sensor[i].monitor = true;
1889 /* Enable monitoring for drive state changes */
1890 sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
1891 /* logical drives */
1892 snprintf(sc->sc_sensor[i].desc,
1893 sizeof(sc->sc_sensor[i].desc), "%s:%d",
1894 DEVNAME(sc), i);
1895 if (sysmon_envsys_sensor_attach(sc->sc_sme,
1896 &sc->sc_sensor[i]))
1897 goto out;
1898 }
1899
1900 sc->sc_sme->sme_name = DEVNAME(sc);
1901 sc->sc_sme->sme_cookie = sc;
1902 sc->sc_sme->sme_refresh = mfi_sensor_refresh;
1903 if (sysmon_envsys_register(sc->sc_sme)) {
1904 printf("%s: unable to register with sysmon\n", DEVNAME(sc));
1905 goto out;
1906 }
1907 return (0);
1908
1909 out:
1910 free(sc->sc_sensor, M_DEVBUF);
1911 sysmon_envsys_destroy(sc->sc_sme);
1912 return EINVAL;
1913 }
1914
1915 void
1916 mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
1917 {
1918 struct mfi_softc *sc = sme->sme_cookie;
1919 struct bioc_vol bv;
1920 int s;
1921
1922 if (edata->sensor >= sc->sc_ld_cnt)
1923 return;
1924
1925 bzero(&bv, sizeof(bv));
1926 bv.bv_volid = edata->sensor;
1927 s = splbio();
1928 if (mfi_ioctl_vol(sc, &bv)) {
1929 splx(s);
1930 return;
1931 }
1932 splx(s);
1933
1934 switch(bv.bv_status) {
1935 case BIOC_SVOFFLINE:
1936 edata->value_cur = ENVSYS_DRIVE_FAIL;
1937 edata->state = ENVSYS_SCRITICAL;
1938 break;
1939
1940 case BIOC_SVDEGRADED:
1941 edata->value_cur = ENVSYS_DRIVE_PFAIL;
1942 edata->state = ENVSYS_SCRITICAL;
1943 break;
1944
1945 case BIOC_SVSCRUB:
1946 case BIOC_SVONLINE:
1947 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1948 edata->state = ENVSYS_SVALID;
1949 break;
1950
1951 case BIOC_SVINVALID:
  1952 	/* FALLTHROUGH */
1953 default:
1954 edata->value_cur = 0; /* unknown */
1955 edata->state = ENVSYS_SINVALID;
1956 }
1957 }
1958
1959 #endif /* NBIO > 0 */
1960
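/*
 * Register-level operations that differ between the xscale and ppc I/O
 * processors: reading the firmware state, enabling and acknowledging
 * interrupts, and posting frames to the inbound queue port.
 */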
1961 uint32_t
1962 mfi_xscale_fw_state(struct mfi_softc *sc)
1963 {
1964 return mfi_read(sc, MFI_OMSG0);
1965 }
1966
1967 void
1968 mfi_xscale_intr_ena(struct mfi_softc *sc)
1969 {
1970 mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
1971 }
1972
1973 int
1974 mfi_xscale_intr(struct mfi_softc *sc)
1975 {
1976 uint32_t status;
1977
1978 status = mfi_read(sc, MFI_OSTS);
1979 if (!ISSET(status, MFI_OSTS_INTR_VALID))
1980 return 0;
1981
1982 /* write status back to acknowledge interrupt */
1983 mfi_write(sc, MFI_OSTS, status);
1984 return 1;
1985 }
1986
1987 void
1988 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
1989 {
1990 mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
1991 ccb->ccb_extra_frames);
1992 }
1993
1994 uint32_t
1995 mfi_ppc_fw_state(struct mfi_softc *sc)
1996 {
1997 return mfi_read(sc, MFI_OSP);
1998 }
1999
2000 void
2001 mfi_ppc_intr_ena(struct mfi_softc *sc)
2002 {
2003 mfi_write(sc, MFI_ODC, 0xffffffff);
2004 mfi_write(sc, MFI_OMSK, ~0x80000004);
2005 }
2006
2007 int
2008 mfi_ppc_intr(struct mfi_softc *sc)
2009 {
2010 uint32_t status;
2011
2012 status = mfi_read(sc, MFI_OSTS);
2013 if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2014 return 0;
2015
2016 /* write status back to acknowledge interrupt */
2017 mfi_write(sc, MFI_ODC, status);
2018 return 1;
2019 }
2020
2021 void
2022 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2023 {
2024 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2025 (ccb->ccb_extra_frames << 1));
2026 }
2027