1 /* $NetBSD: mfi.c,v 1.32 2010/02/08 23:54:33 msaitoh Exp $ */
2 /* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
3 /*
4 * Copyright (c) 2006 Marco Peereboom <marco (at) peereboom.us>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include <sys/cdefs.h>
20 __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.32 2010/02/08 23:54:33 msaitoh Exp $");
21
22 #include "bio.h"
23
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/buf.h>
27 #include <sys/ioctl.h>
28 #include <sys/device.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/proc.h>
32
33 #include <uvm/uvm_param.h>
34
35 #include <sys/bus.h>
36
37 #include <dev/scsipi/scsipi_all.h>
38 #include <dev/scsipi/scsi_all.h>
39 #include <dev/scsipi/scsi_spc.h>
40 #include <dev/scsipi/scsipi_disk.h>
41 #include <dev/scsipi/scsi_disk.h>
42 #include <dev/scsipi/scsiconf.h>
43
44 #include <dev/ic/mfireg.h>
45 #include <dev/ic/mfivar.h>
46
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #endif /* NBIO > 0 */
50
51 #ifdef MFI_DEBUG
52 uint32_t mfi_debug = 0
53 /* | MFI_D_CMD */
54 /* | MFI_D_INTR */
55 /* | MFI_D_MISC */
56 /* | MFI_D_DMA */
57 | MFI_D_IOCTL
58 /* | MFI_D_RW */
59 /* | MFI_D_MEM */
60 /* | MFI_D_CCB */
61 ;
62 #endif
63
64 static void mfi_scsipi_request(struct scsipi_channel *,
65 scsipi_adapter_req_t, void *);
66 static void mfiminphys(struct buf *bp);
67
68 static struct mfi_ccb *mfi_get_ccb(struct mfi_softc *);
69 static void mfi_put_ccb(struct mfi_ccb *);
70 static int mfi_init_ccb(struct mfi_softc *);
71
72 static struct mfi_mem *mfi_allocmem(struct mfi_softc *, size_t);
73 static void mfi_freemem(struct mfi_softc *, struct mfi_mem **);
74
75 static int mfi_transition_firmware(struct mfi_softc *);
76 static int mfi_initialize_firmware(struct mfi_softc *);
77 static int mfi_get_info(struct mfi_softc *);
78 static uint32_t mfi_read(struct mfi_softc *, bus_size_t);
79 static void mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
80 static int mfi_poll(struct mfi_ccb *);
81 static int mfi_create_sgl(struct mfi_ccb *, int);
82
83 /* commands */
84 static int mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
85 static int mfi_scsi_io(struct mfi_ccb *, struct scsipi_xfer *,
86 uint32_t, uint32_t);
87 static void mfi_scsi_xs_done(struct mfi_ccb *);
88 static int mfi_mgmt_internal(struct mfi_softc *,
89 uint32_t, uint32_t, uint32_t, void *, uint8_t *);
90 static int 	mfi_mgmt(struct mfi_ccb *, struct scsipi_xfer *,
91 uint32_t, uint32_t, uint32_t, void *, uint8_t *);
92 static void mfi_mgmt_done(struct mfi_ccb *);
93
94 #if NBIO > 0
95 static int mfi_ioctl(device_t, u_long, void *);
96 static int mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
97 static int mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
98 static int mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
99 static int mfi_ioctl_alarm(struct mfi_softc *,
100 struct bioc_alarm *);
101 static int mfi_ioctl_blink(struct mfi_softc *sc,
102 struct bioc_blink *);
103 static int mfi_ioctl_setstate(struct mfi_softc *,
104 struct bioc_setstate *);
105 static int mfi_bio_hs(struct mfi_softc *, int, int, void *);
106 static int mfi_create_sensors(struct mfi_softc *);
107 static int mfi_destroy_sensors(struct mfi_softc *);
108 static void mfi_sensor_refresh(struct sysmon_envsys *,
109 envsys_data_t *);
110 #endif /* NBIO > 0 */
111
112 static uint32_t mfi_xscale_fw_state(struct mfi_softc *sc);
113 static void mfi_xscale_intr_ena(struct mfi_softc *sc);
114 static void mfi_xscale_intr_dis(struct mfi_softc *sc);
115 static int mfi_xscale_intr(struct mfi_softc *sc);
116 static void mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
117
118 static const struct mfi_iop_ops mfi_iop_xscale = {
119 mfi_xscale_fw_state,
120 mfi_xscale_intr_dis,
121 mfi_xscale_intr_ena,
122 mfi_xscale_intr,
123 mfi_xscale_post
124 };
125
126 static uint32_t mfi_ppc_fw_state(struct mfi_softc *sc);
127 static void mfi_ppc_intr_ena(struct mfi_softc *sc);
128 static void mfi_ppc_intr_dis(struct mfi_softc *sc);
129 static int mfi_ppc_intr(struct mfi_softc *sc);
130 static void mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
131
132 static const struct mfi_iop_ops mfi_iop_ppc = {
133 mfi_ppc_fw_state,
134 mfi_ppc_intr_dis,
135 mfi_ppc_intr_ena,
136 mfi_ppc_intr,
137 mfi_ppc_post
138 };
139
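/*
 * Register-level access differs between the XScale and PPC I/O
 * processors; these macros dispatch through the per-IOP operations
 * table selected at attach time.
 */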
140 #define mfi_fw_state(_s) ((_s)->sc_iop->mio_fw_state(_s))
141 #define mfi_intr_enable(_s) ((_s)->sc_iop->mio_intr_ena(_s))
142 #define mfi_intr_disable(_s) ((_s)->sc_iop->mio_intr_dis(_s))
143 #define mfi_my_intr(_s) ((_s)->sc_iop->mio_intr(_s))
144 #define mfi_post(_s, _c) ((_s)->sc_iop->mio_post((_s), (_c)))
145
146 static struct mfi_ccb *
147 mfi_get_ccb(struct mfi_softc *sc)
148 {
149 struct mfi_ccb *ccb;
150 int s;
151
152 s = splbio();
153 ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
154 if (ccb) {
155 TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
156 ccb->ccb_state = MFI_CCB_READY;
157 }
158 splx(s);
159
160 DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
161
162 return ccb;
163 }
164
165 static void
166 mfi_put_ccb(struct mfi_ccb *ccb)
167 {
168 struct mfi_softc *sc = ccb->ccb_sc;
169 int s;
170
171 DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
172
173 s = splbio();
174 ccb->ccb_state = MFI_CCB_FREE;
175 ccb->ccb_xs = NULL;
176 ccb->ccb_flags = 0;
177 ccb->ccb_done = NULL;
178 ccb->ccb_direction = 0;
179 ccb->ccb_frame_size = 0;
180 ccb->ccb_extra_frames = 0;
181 ccb->ccb_sgl = NULL;
182 ccb->ccb_data = NULL;
183 ccb->ccb_len = 0;
184 TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
185 splx(s);
186 }
187
188 static int
189 mfi_destroy_ccb(struct mfi_softc *sc)
190 {
191 struct mfi_ccb *ccb;
192 uint32_t i;
193
194 	DNPRINTF(MFI_D_CCB, "%s: mfi_destroy_ccb\n", DEVNAME(sc));
195
196
197 for (i = 0; (ccb = mfi_get_ccb(sc)) != NULL; i++) {
198 /* create a dma map for transfer */
199 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
200 }
201
202 if (i < sc->sc_max_cmds)
203 return EBUSY;
204
205 free(sc->sc_ccb, M_DEVBUF);
206
207 return 0;
208 }
209
210 static int
211 mfi_init_ccb(struct mfi_softc *sc)
212 {
213 struct mfi_ccb *ccb;
214 uint32_t i;
215 int error;
216
217 DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
218
219 sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
220 M_DEVBUF, M_WAITOK|M_ZERO);
221
222 for (i = 0; i < sc->sc_max_cmds; i++) {
223 ccb = &sc->sc_ccb[i];
224
225 ccb->ccb_sc = sc;
226
227 /* select i'th frame */
228 ccb->ccb_frame = (union mfi_frame *)
229 ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
230 ccb->ccb_pframe =
231 MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
232 ccb->ccb_frame->mfr_header.mfh_context = i;
233
234 /* select i'th sense */
235 ccb->ccb_sense = (struct mfi_sense *)
236 ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
237 ccb->ccb_psense =
238 (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
239
240 /* create a dma map for transfer */
241 error = bus_dmamap_create(sc->sc_dmat,
242 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
243 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
244 if (error) {
245 printf("%s: cannot create ccb dmamap (%d)\n",
246 DEVNAME(sc), error);
247 goto destroy;
248 }
249
250 DNPRINTF(MFI_D_CCB,
251 "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
252 ccb->ccb_frame->mfr_header.mfh_context, ccb,
253 (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
254 (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
255 (u_long)ccb->ccb_dmamap);
256
257 /* add ccb to queue */
258 mfi_put_ccb(ccb);
259 }
260
261 return 0;
262 destroy:
263 /* free dma maps and ccb memory */
264 while (i) {
265 i--;
266 ccb = &sc->sc_ccb[i];
267 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
268 }
269
270 free(sc->sc_ccb, M_DEVBUF);
271
272 return 1;
273 }
274
275 static uint32_t
276 mfi_read(struct mfi_softc *sc, bus_size_t r)
277 {
278 uint32_t rv;
279
280 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
281 BUS_SPACE_BARRIER_READ);
282 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
283
284 	DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x%08x ", DEVNAME(sc), (u_long)r, rv);
285 return rv;
286 }
287
288 static void
289 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
290 {
291 DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);
292
293 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
294 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
295 BUS_SPACE_BARRIER_WRITE);
296 }
297
298 static struct mfi_mem *
299 mfi_allocmem(struct mfi_softc *sc, size_t size)
300 {
301 struct mfi_mem *mm;
302 int nsegs;
303
304 DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
305 (long)size);
306
307 mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
308 if (mm == NULL)
309 return NULL;
310
311 mm->am_size = size;
312
313 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
314 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
315 goto amfree;
316
317 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
318 &nsegs, BUS_DMA_NOWAIT) != 0)
319 goto destroy;
320
321 if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
322 BUS_DMA_NOWAIT) != 0)
323 goto free;
324
325 if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
326 BUS_DMA_NOWAIT) != 0)
327 goto unmap;
328
329 DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n",
330 mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);
331
332 memset(mm->am_kva, 0, size);
333 return mm;
334
335 unmap:
336 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
337 free:
338 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
339 destroy:
340 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
341 amfree:
342 free(mm, M_DEVBUF);
343
344 return NULL;
345 }
346
347 static void
348 mfi_freemem(struct mfi_softc *sc, struct mfi_mem **mmp)
349 {
350 struct mfi_mem *mm = *mmp;
351
352 if (mm == NULL)
353 return;
354
355 *mmp = NULL;
356
357 DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
358
359 bus_dmamap_unload(sc->sc_dmat, mm->am_map);
360 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
361 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
362 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
363 free(mm, M_DEVBUF);
364 }
365
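/*
 * Drive the controller firmware to the READY state.  Each intermediate
 * state gets its own timeout (in seconds, polled in 100ms steps); a
 * FAULT state, or a state that does not change within its timeout, is
 * treated as a fatal error.
 */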
366 static int
367 mfi_transition_firmware(struct mfi_softc *sc)
368 {
369 uint32_t fw_state, cur_state;
370 int max_wait, i;
371
372 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
373
374 DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
375 fw_state);
376
377 while (fw_state != MFI_STATE_READY) {
378 DNPRINTF(MFI_D_MISC,
379 "%s: waiting for firmware to become ready\n",
380 DEVNAME(sc));
381 cur_state = fw_state;
382 switch (fw_state) {
383 case MFI_STATE_FAULT:
384 printf("%s: firmware fault\n", DEVNAME(sc));
385 return 1;
386 case MFI_STATE_WAIT_HANDSHAKE:
387 mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
388 max_wait = 2;
389 break;
390 case MFI_STATE_OPERATIONAL:
391 mfi_write(sc, MFI_IDB, MFI_INIT_READY);
392 max_wait = 10;
393 break;
394 case MFI_STATE_UNDEFINED:
395 case MFI_STATE_BB_INIT:
396 max_wait = 2;
397 break;
398 case MFI_STATE_FW_INIT:
399 case MFI_STATE_DEVICE_SCAN:
400 case MFI_STATE_FLUSH_CACHE:
401 max_wait = 20;
402 break;
403 default:
404 printf("%s: unknown firmware state %d\n",
405 DEVNAME(sc), fw_state);
406 return 1;
407 }
408 for (i = 0; i < (max_wait * 10); i++) {
409 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
410 if (fw_state == cur_state)
411 DELAY(100000);
412 else
413 break;
414 }
415 if (fw_state == cur_state) {
416 printf("%s: firmware stuck in state %#x\n",
417 DEVNAME(sc), fw_state);
418 return 1;
419 }
420 }
421
422 return 0;
423 }
424
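/*
 * Hand the firmware the physical addresses of the reply queue and the
 * producer/consumer indices via an INIT frame, then poll for completion.
 * The queue info structure is built in the frame slot directly behind
 * the INIT frame itself.
 */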
425 static int
426 mfi_initialize_firmware(struct mfi_softc *sc)
427 {
428 struct mfi_ccb *ccb;
429 struct mfi_init_frame *init;
430 struct mfi_init_qinfo *qinfo;
431
432 DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
433
434 if ((ccb = mfi_get_ccb(sc)) == NULL)
435 return 1;
436
437 init = &ccb->ccb_frame->mfr_init;
438 qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
439
440 memset(qinfo, 0, sizeof *qinfo);
441 qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
442 qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
443 offsetof(struct mfi_prod_cons, mpc_reply_q));
444 qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
445 offsetof(struct mfi_prod_cons, mpc_producer));
446 qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
447 offsetof(struct mfi_prod_cons, mpc_consumer));
448
449 init->mif_header.mfh_cmd = MFI_CMD_INIT;
450 init->mif_header.mfh_data_len = sizeof *qinfo;
451 init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);
452
453 DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
454 DEVNAME(sc),
455 qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
456 qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);
457
458 if (mfi_poll(ccb)) {
459 printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc));
460 return 1;
461 }
462
463 mfi_put_ccb(ccb);
464
465 return 0;
466 }
467
468 static int
469 mfi_get_info(struct mfi_softc *sc)
470 {
471 #ifdef MFI_DEBUG
472 int i;
473 #endif
474 DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
475
476 if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
477 sizeof(sc->sc_info), &sc->sc_info, NULL))
478 return 1;
479
480 #ifdef MFI_DEBUG
481
482 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
483 printf("%s: active FW %s Version %s date %s time %s\n",
484 DEVNAME(sc),
485 sc->sc_info.mci_image_component[i].mic_name,
486 sc->sc_info.mci_image_component[i].mic_version,
487 sc->sc_info.mci_image_component[i].mic_build_date,
488 sc->sc_info.mci_image_component[i].mic_build_time);
489 }
490
491 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
492 printf("%s: pending FW %s Version %s date %s time %s\n",
493 DEVNAME(sc),
494 sc->sc_info.mci_pending_image_component[i].mic_name,
495 sc->sc_info.mci_pending_image_component[i].mic_version,
496 sc->sc_info.mci_pending_image_component[i].mic_build_date,
497 sc->sc_info.mci_pending_image_component[i].mic_build_time);
498 }
499
500 printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
501 DEVNAME(sc),
502 sc->sc_info.mci_max_arms,
503 sc->sc_info.mci_max_spans,
504 sc->sc_info.mci_max_arrays,
505 sc->sc_info.mci_max_lds,
506 sc->sc_info.mci_product_name);
507
508 printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
509 DEVNAME(sc),
510 sc->sc_info.mci_serial_number,
511 sc->sc_info.mci_hw_present,
512 sc->sc_info.mci_current_fw_time,
513 sc->sc_info.mci_max_cmds,
514 sc->sc_info.mci_max_sg_elements);
515
516 printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
517 DEVNAME(sc),
518 sc->sc_info.mci_max_request_size,
519 sc->sc_info.mci_lds_present,
520 sc->sc_info.mci_lds_degraded,
521 sc->sc_info.mci_lds_offline,
522 sc->sc_info.mci_pd_present);
523
524 printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
525 DEVNAME(sc),
526 sc->sc_info.mci_pd_disks_present,
527 sc->sc_info.mci_pd_disks_pred_failure,
528 sc->sc_info.mci_pd_disks_failed);
529
530 printf("%s: nvram %d mem %d flash %d\n",
531 DEVNAME(sc),
532 sc->sc_info.mci_nvram_size,
533 sc->sc_info.mci_memory_size,
534 sc->sc_info.mci_flash_size);
535
536 printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
537 DEVNAME(sc),
538 sc->sc_info.mci_ram_correctable_errors,
539 sc->sc_info.mci_ram_uncorrectable_errors,
540 sc->sc_info.mci_cluster_allowed,
541 sc->sc_info.mci_cluster_active);
542
543 printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
544 DEVNAME(sc),
545 sc->sc_info.mci_max_strips_per_io,
546 sc->sc_info.mci_raid_levels,
547 sc->sc_info.mci_adapter_ops,
548 sc->sc_info.mci_ld_ops);
549
550 printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
551 DEVNAME(sc),
552 sc->sc_info.mci_stripe_sz_ops.min,
553 sc->sc_info.mci_stripe_sz_ops.max,
554 sc->sc_info.mci_pd_ops,
555 sc->sc_info.mci_pd_mix_support);
556
557 printf("%s: ecc_bucket %d pckg_prop %s\n",
558 DEVNAME(sc),
559 sc->sc_info.mci_ecc_bucket_count,
560 sc->sc_info.mci_package_version);
561
562 printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
563 DEVNAME(sc),
564 sc->sc_info.mci_properties.mcp_seq_num,
565 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
566 sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
567 sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
568
569 printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
570 DEVNAME(sc),
571 sc->sc_info.mci_properties.mcp_rebuild_rate,
572 sc->sc_info.mci_properties.mcp_patrol_read_rate,
573 sc->sc_info.mci_properties.mcp_bgi_rate,
574 sc->sc_info.mci_properties.mcp_cc_rate);
575
576 printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
577 DEVNAME(sc),
578 sc->sc_info.mci_properties.mcp_recon_rate,
579 sc->sc_info.mci_properties.mcp_cache_flush_interval,
580 sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
581 sc->sc_info.mci_properties.mcp_spinup_delay,
582 sc->sc_info.mci_properties.mcp_cluster_enable);
583
584 printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
585 DEVNAME(sc),
586 sc->sc_info.mci_properties.mcp_coercion_mode,
587 sc->sc_info.mci_properties.mcp_alarm_enable,
588 sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
589 sc->sc_info.mci_properties.mcp_disable_battery_warn,
590 sc->sc_info.mci_properties.mcp_ecc_bucket_size);
591
592 printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
593 DEVNAME(sc),
594 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
595 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
596 sc->sc_info.mci_properties.mcp_expose_encl_devices);
597
598 printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
599 DEVNAME(sc),
600 sc->sc_info.mci_pci.mip_vendor,
601 sc->sc_info.mci_pci.mip_device,
602 sc->sc_info.mci_pci.mip_subvendor,
603 sc->sc_info.mci_pci.mip_subdevice);
604
605 printf("%s: type %#x port_count %d port_addr ",
606 DEVNAME(sc),
607 sc->sc_info.mci_host.mih_type,
608 sc->sc_info.mci_host.mih_port_count);
609
610 for (i = 0; i < 8; i++)
611 printf("%.0lx ", sc->sc_info.mci_host.mih_port_addr[i]);
612 printf("\n");
613
614 	printf("%s: type %#x port_count %d port_addr ",
615 DEVNAME(sc),
616 sc->sc_info.mci_device.mid_type,
617 sc->sc_info.mci_device.mid_port_count);
618
619 for (i = 0; i < 8; i++)
620 printf("%.0lx ", sc->sc_info.mci_device.mid_port_addr[i]);
621 printf("\n");
622 #endif /* MFI_DEBUG */
623
624 return 0;
625 }
626
627 static void
628 mfiminphys(struct buf *bp)
629 {
630 DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
631
632 /* XXX currently using MFI_MAXFER = MAXPHYS */
633 if (bp->b_bcount > MFI_MAXFER)
634 bp->b_bcount = MFI_MAXFER;
635 minphys(bp);
636 }
637
638 int
639 mfi_rescan(device_t self, const char *ifattr, const int *locators)
640 {
641 struct mfi_softc *sc = device_private(self);
642
643 if (sc->sc_child != NULL)
644 return 0;
645
646 sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
647 scsiprint, NULL);
648
649 return 0;
650 }
651
652 void
653 mfi_childdetached(device_t self, device_t child)
654 {
655 struct mfi_softc *sc = device_private(self);
656
657 KASSERT(self == sc->sc_dev);
658 KASSERT(child == sc->sc_child);
659
660 if (child == sc->sc_child)
661 sc->sc_child = NULL;
662 }
663
664 int
665 mfi_detach(struct mfi_softc *sc, int flags)
666 {
667 int error;
668
669 DNPRINTF(MFI_D_MISC, "%s: mfi_detach\n", DEVNAME(sc));
670
671 if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
672 return error;
673
674 #if NBIO > 0
675 mfi_destroy_sensors(sc);
676 bio_unregister(sc->sc_dev);
677 #endif /* NBIO > 0 */
678
679 mfi_intr_disable(sc);
680
681 /* TBD: shutdown firmware */
682
683 if ((error = mfi_destroy_ccb(sc)) != 0)
684 return error;
685
686 mfi_freemem(sc, &sc->sc_sense);
687
688 mfi_freemem(sc, &sc->sc_frames);
689
690 mfi_freemem(sc, &sc->sc_pcq);
691
692 return 0;
693 }
694
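/*
 * Common attach path: bring the firmware to READY, read the command and
 * scatter/gather limits from the firmware status register, allocate the
 * reply queue, frame and sense DMA memory, initialize the ccbs and the
 * firmware, then hook up the scsipi channel (one target per logical
 * drive) and, if configured, bioctl and envsys support.
 */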
695 int
696 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
697 {
698 struct scsipi_adapter *adapt = &sc->sc_adapt;
699 struct scsipi_channel *chan = &sc->sc_chan;
700 uint32_t status, frames;
701 int i;
702
703 DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
704
705 switch (iop) {
706 case MFI_IOP_XSCALE:
707 sc->sc_iop = &mfi_iop_xscale;
708 break;
709 case MFI_IOP_PPC:
710 sc->sc_iop = &mfi_iop_ppc;
711 break;
712 default:
713 panic("%s: unknown iop %d", DEVNAME(sc), iop);
714 }
715
716 if (mfi_transition_firmware(sc))
717 return 1;
718
719 TAILQ_INIT(&sc->sc_ccb_freeq);
720
721 status = mfi_fw_state(sc);
722 sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
723 sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
724 DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
725 DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);
726
727 /* consumer/producer and reply queue memory */
728 sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
729 sizeof(struct mfi_prod_cons));
730 if (sc->sc_pcq == NULL) {
731 aprint_error("%s: unable to allocate reply queue memory\n",
732 DEVNAME(sc));
733 goto nopcq;
734 }
735 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
736 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
737 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
738
739 /* frame memory */
740 /* we are not doing 64 bit IO so only calculate # of 32 bit frames */
741 frames = (sizeof(struct mfi_sg32) * sc->sc_max_sgl +
742 MFI_FRAME_SIZE - 1) / MFI_FRAME_SIZE + 1;
743 sc->sc_frames_size = frames * MFI_FRAME_SIZE;
744 sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
745 if (sc->sc_frames == NULL) {
746 aprint_error("%s: unable to allocate frame memory\n",
747 DEVNAME(sc));
748 goto noframe;
749 }
750 /* XXX hack, fix this */
751 if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
752 aprint_error("%s: improper frame alignment (%#llx) FIXME\n",
753 DEVNAME(sc), (long long int)MFIMEM_DVA(sc->sc_frames));
754 		goto nosense;
755 }
756
757 /* sense memory */
758 sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
759 if (sc->sc_sense == NULL) {
760 aprint_error("%s: unable to allocate sense memory\n",
761 DEVNAME(sc));
762 goto nosense;
763 }
764
765 /* now that we have all memory bits go initialize ccbs */
766 if (mfi_init_ccb(sc)) {
767 aprint_error("%s: could not init ccb list\n", DEVNAME(sc));
768 goto noinit;
769 }
770
771 /* kickstart firmware with all addresses and pointers */
772 if (mfi_initialize_firmware(sc)) {
773 aprint_error("%s: could not initialize firmware\n",
774 DEVNAME(sc));
775 goto noinit;
776 }
777
778 if (mfi_get_info(sc)) {
779 aprint_error("%s: could not retrieve controller information\n",
780 DEVNAME(sc));
781 goto noinit;
782 }
783
784 aprint_normal("%s: logical drives %d, version %s, %dMB RAM\n",
785 DEVNAME(sc),
786 sc->sc_info.mci_lds_present,
787 sc->sc_info.mci_package_version,
788 sc->sc_info.mci_memory_size);
789
790 sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
791 sc->sc_max_ld = sc->sc_ld_cnt;
792 for (i = 0; i < sc->sc_ld_cnt; i++)
793 sc->sc_ld[i].ld_present = 1;
794
795 memset(adapt, 0, sizeof(*adapt));
796 adapt->adapt_dev = sc->sc_dev;
797 adapt->adapt_nchannels = 1;
798 if (sc->sc_ld_cnt)
799 adapt->adapt_openings = sc->sc_max_cmds / sc->sc_ld_cnt;
800 else
801 adapt->adapt_openings = sc->sc_max_cmds;
802 adapt->adapt_max_periph = adapt->adapt_openings;
803 adapt->adapt_request = mfi_scsipi_request;
804 adapt->adapt_minphys = mfiminphys;
805
806 memset(chan, 0, sizeof(*chan));
807 chan->chan_adapter = adapt;
808 chan->chan_bustype = &scsi_bustype;
809 chan->chan_channel = 0;
810 chan->chan_flags = 0;
811 chan->chan_nluns = 8;
812 chan->chan_ntargets = MFI_MAX_LD;
813 chan->chan_id = MFI_MAX_LD;
814
815 mfi_rescan(sc->sc_dev, "scsi", NULL);
816
817 /* enable interrupts */
818 mfi_intr_enable(sc);
819
820 #if NBIO > 0
821 if (bio_register(sc->sc_dev, mfi_ioctl) != 0)
822 panic("%s: controller registration failed", DEVNAME(sc));
823 if (mfi_create_sensors(sc) != 0)
824 aprint_error("%s: unable to create sensors\n", DEVNAME(sc));
825 #endif /* NBIO > 0 */
826
827 return 0;
828 noinit:
829 mfi_freemem(sc, &sc->sc_sense);
830 nosense:
831 mfi_freemem(sc, &sc->sc_frames);
832 noframe:
833 mfi_freemem(sc, &sc->sc_pcq);
834 nopcq:
835 return 1;
836 }
837
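/*
 * Issue a command and busy-wait for its completion status, bypassing
 * the reply queue.  Used during autoconfiguration and for polled
 * requests; gives up after roughly five seconds.
 */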
838 static int
839 mfi_poll(struct mfi_ccb *ccb)
840 {
841 struct mfi_softc *sc = ccb->ccb_sc;
842 struct mfi_frame_header *hdr;
843 int to = 0;
844
845 DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));
846
847 hdr = &ccb->ccb_frame->mfr_header;
848 hdr->mfh_cmd_status = 0xff;
849 hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
850
851 mfi_post(sc, ccb);
852 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
853 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
854 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
855
856 while (hdr->mfh_cmd_status == 0xff) {
857 delay(1000);
858 if (to++ > 5000) /* XXX 5 seconds busywait sucks */
859 break;
860 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
861 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
862 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
863 }
864 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
865 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
866 sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
867
868 if (ccb->ccb_data != NULL) {
869 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
870 DEVNAME(sc));
871 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
872 ccb->ccb_dmamap->dm_mapsize,
873 (ccb->ccb_direction & MFI_DATA_IN) ?
874 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
875
876 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
877 }
878
879 if (hdr->mfh_cmd_status == 0xff) {
880 printf("%s: timeout on ccb %d\n", DEVNAME(sc),
881 hdr->mfh_context);
882 ccb->ccb_flags |= MFI_CCB_F_ERR;
883 return 1;
884 }
885
886 return 0;
887 }
888
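/*
 * Interrupt handler: walk the reply queue from the consumer index up to
 * the firmware's producer index, completing each context found, then
 * publish the new consumer index back to the firmware.
 */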
889 int
890 mfi_intr(void *arg)
891 {
892 struct mfi_softc *sc = arg;
893 struct mfi_prod_cons *pcq;
894 struct mfi_ccb *ccb;
895 uint32_t producer, consumer, ctx;
896 int claimed = 0;
897
898 if (!mfi_my_intr(sc))
899 return 0;
900
901 pcq = MFIMEM_KVA(sc->sc_pcq);
902
903 DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
904 (u_long)sc, (u_long)pcq);
905
906 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
907 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
908 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
909
910 producer = pcq->mpc_producer;
911 consumer = pcq->mpc_consumer;
912
913 while (consumer != producer) {
914 DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
915 DEVNAME(sc), producer, consumer);
916
917 ctx = pcq->mpc_reply_q[consumer];
918 pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
919 if (ctx == MFI_INVALID_CTX)
920 printf("%s: invalid context, p: %d c: %d\n",
921 DEVNAME(sc), producer, consumer);
922 else {
923 /* XXX remove from queue and call scsi_done */
924 ccb = &sc->sc_ccb[ctx];
925 DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
926 DEVNAME(sc), ctx);
927 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
928 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
929 sc->sc_frames_size,
930 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
931 ccb->ccb_done(ccb);
932
933 claimed = 1;
934 }
935 consumer++;
936 if (consumer == (sc->sc_max_cmds + 1))
937 consumer = 0;
938 }
939
940 pcq->mpc_consumer = consumer;
941 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
942 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
943 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
944
945 return claimed;
946 }
947
948 static int
949 mfi_scsi_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint32_t blockno,
950 uint32_t blockcnt)
951 {
952 struct scsipi_periph *periph = xs->xs_periph;
953 struct mfi_io_frame *io;
954
955 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
956 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
957 periph->periph_target);
958
959 if (!xs->data)
960 return 1;
961
962 io = &ccb->ccb_frame->mfr_io;
963 if (xs->xs_control & XS_CTL_DATA_IN) {
964 io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
965 ccb->ccb_direction = MFI_DATA_IN;
966 } else {
967 io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
968 ccb->ccb_direction = MFI_DATA_OUT;
969 }
970 io->mif_header.mfh_target_id = periph->periph_target;
971 io->mif_header.mfh_timeout = 0;
972 io->mif_header.mfh_flags = 0;
973 io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
974 	io->mif_header.mfh_data_len = blockcnt;
975 io->mif_lba_hi = 0;
976 io->mif_lba_lo = blockno;
977 io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
978 io->mif_sense_addr_hi = 0;
979
980 ccb->ccb_done = mfi_scsi_xs_done;
981 ccb->ccb_xs = xs;
982 ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
983 ccb->ccb_sgl = &io->mif_sgl;
984 ccb->ccb_data = xs->data;
985 ccb->ccb_len = xs->datalen;
986
987 if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
988 BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
989 return 1;
990
991 return 0;
992 }
993
994 static void
995 mfi_scsi_xs_done(struct mfi_ccb *ccb)
996 {
997 struct scsipi_xfer *xs = ccb->ccb_xs;
998 struct mfi_softc *sc = ccb->ccb_sc;
999 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1000
1001 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
1002 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1003
1004 if (xs->data != NULL) {
1005 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
1006 DEVNAME(sc));
1007 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1008 ccb->ccb_dmamap->dm_mapsize,
1009 (xs->xs_control & XS_CTL_DATA_IN) ?
1010 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1011
1012 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1013 }
1014
1015 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
1016 xs->error = XS_DRIVER_STUFFUP;
1017 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
1018 DEVNAME(sc), hdr->mfh_cmd_status);
1019
1020 if (hdr->mfh_scsi_status != 0) {
1021 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
1022 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
1023 MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
1024 DNPRINTF(MFI_D_INTR,
1025 "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
1026 DEVNAME(sc), hdr->mfh_scsi_status,
1027 (u_long)&xs->sense, (u_long)ccb->ccb_sense);
1028 memset(&xs->sense, 0, sizeof(xs->sense));
1029 memcpy(&xs->sense, ccb->ccb_sense,
1030 sizeof(struct scsi_sense_data));
1031 xs->error = XS_SENSE;
1032 }
1033 } else {
1034 xs->error = XS_NOERROR;
1035 xs->status = SCSI_OK;
1036 xs->resid = 0;
1037 }
1038
1039 mfi_put_ccb(ccb);
1040 scsipi_done(xs);
1041 }
1042
1043 static int
1044 mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
1045 {
1046 struct mfi_pass_frame *pf;
1047 struct scsipi_periph *periph = xs->xs_periph;
1048
1049 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
1050 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
1051 periph->periph_target);
1052
1053 pf = &ccb->ccb_frame->mfr_pass;
1054 pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
1055 pf->mpf_header.mfh_target_id = periph->periph_target;
1056 pf->mpf_header.mfh_lun_id = 0;
1057 pf->mpf_header.mfh_cdb_len = xs->cmdlen;
1058 pf->mpf_header.mfh_timeout = 0;
1059 	pf->mpf_header.mfh_data_len = xs->datalen; /* XXX */
1060 pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
1061
1062 pf->mpf_sense_addr_hi = 0;
1063 pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);
1064
1065 memset(pf->mpf_cdb, 0, 16);
1066 memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);
1067
1068 ccb->ccb_done = mfi_scsi_xs_done;
1069 ccb->ccb_xs = xs;
1070 ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
1071 ccb->ccb_sgl = &pf->mpf_sgl;
1072
1073 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
1074 ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
1075 MFI_DATA_IN : MFI_DATA_OUT;
1076 else
1077 ccb->ccb_direction = MFI_DATA_NONE;
1078
1079 if (xs->data) {
1080 ccb->ccb_data = xs->data;
1081 ccb->ccb_len = xs->datalen;
1082
1083 if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
1084 BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1085 return 1;
1086 }
1087
1088 return 0;
1089 }
1090
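/*
 * scsipi entry point: READ/WRITE commands go down the fast LD I/O path,
 * SYNCHRONIZE CACHE becomes a cache-flush DCMD, and everything else is
 * passed to the firmware as a raw SCSI pass-through frame.
 */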
1091 static void
1092 mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1093 void *arg)
1094 {
1095 struct scsipi_periph *periph;
1096 struct scsipi_xfer *xs;
1097 struct scsipi_adapter *adapt = chan->chan_adapter;
1098 struct mfi_softc *sc = device_private(adapt->adapt_dev);
1099 struct mfi_ccb *ccb;
1100 struct scsi_rw_6 *rw;
1101 struct scsipi_rw_10 *rwb;
1102 uint32_t blockno, blockcnt;
1103 uint8_t target;
1104 uint8_t mbox[MFI_MBOX_SIZE];
1105 int s;
1106
1107 switch (req) {
1108 case ADAPTER_REQ_GROW_RESOURCES:
1109 /* Not supported. */
1110 return;
1111 case ADAPTER_REQ_SET_XFER_MODE:
1112 /* Not supported. */
1113 return;
1114 case ADAPTER_REQ_RUN_XFER:
1115 break;
1116 }
1117
1118 xs = arg;
1119
1120 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x\n",
1121 DEVNAME(sc), req, xs->cmd->opcode);
1122
1123 periph = xs->xs_periph;
1124 target = periph->periph_target;
1125
1126 s = splbio();
1127 if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
1128 periph->periph_lun != 0) {
1129 DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1130 DEVNAME(sc), target);
1131 xs->error = XS_SELTIMEOUT;
1132 scsipi_done(xs);
1133 splx(s);
1134 return;
1135 }
1136
1137 if ((ccb = mfi_get_ccb(sc)) == NULL) {
1138 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
1139 xs->error = XS_RESOURCE_SHORTAGE;
1140 scsipi_done(xs);
1141 splx(s);
1142 return;
1143 }
1144
1145 switch (xs->cmd->opcode) {
1146 /* IO path */
1147 case READ_10:
1148 case WRITE_10:
1149 rwb = (struct scsipi_rw_10 *)xs->cmd;
1150 blockno = _4btol(rwb->addr);
1151 blockcnt = _2btol(rwb->length);
1152 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1153 mfi_put_ccb(ccb);
1154 goto stuffup;
1155 }
1156 break;
1157
1158 case SCSI_READ_6_COMMAND:
1159 case SCSI_WRITE_6_COMMAND:
1160 rw = (struct scsi_rw_6 *)xs->cmd;
1161 blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
1162 blockcnt = rw->length ? rw->length : 0x100;
1163 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1164 mfi_put_ccb(ccb);
1165 goto stuffup;
1166 }
1167 break;
1168
1169 case SCSI_SYNCHRONIZE_CACHE_10:
1170 mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1171 if (mfi_mgmt(ccb, xs,
1172 MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, mbox)) {
1173 mfi_put_ccb(ccb);
1174 goto stuffup;
1175 }
1176 break;
1177
1178 	/* hand it off to the firmware and let it deal with it */
1179 case SCSI_TEST_UNIT_READY:
1180 /* save off sd? after autoconf */
1181 if (!cold) /* XXX bogus */
1182 strlcpy(sc->sc_ld[target].ld_dev, device_xname(sc->sc_dev),
1183 sizeof(sc->sc_ld[target].ld_dev));
1184 /* FALLTHROUGH */
1185
1186 default:
1187 if (mfi_scsi_ld(ccb, xs)) {
1188 mfi_put_ccb(ccb);
1189 goto stuffup;
1190 }
1191 break;
1192 }
1193
1194 DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1195
1196 if (xs->xs_control & XS_CTL_POLL) {
1197 if (mfi_poll(ccb)) {
1198 /* XXX check for sense in ccb->ccb_sense? */
1199 printf("%s: mfi_scsipi_request poll failed\n",
1200 DEVNAME(sc));
1201 memset(&xs->sense, 0, sizeof(xs->sense));
1202 xs->sense.scsi_sense.response_code =
1203 SSD_RCODE_VALID | SSD_RCODE_CURRENT;
1204 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
1205 xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
1206 xs->error = XS_SENSE;
1207 xs->status = SCSI_CHECK;
1208 } else {
1209 DNPRINTF(MFI_D_DMA,
1210 "%s: mfi_scsipi_request poll complete %d\n",
1211 DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
1212 xs->error = XS_NOERROR;
1213 xs->status = SCSI_OK;
1214 xs->resid = 0;
1215 }
1216 mfi_put_ccb(ccb);
1217 scsipi_done(xs);
1218 splx(s);
1219 return;
1220 }
1221
1222 mfi_post(sc, ccb);
1223
1224 DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
1225 ccb->ccb_dmamap->dm_nsegs);
1226
1227 splx(s);
1228 return;
1229
1230 stuffup:
1231 xs->error = XS_DRIVER_STUFFUP;
1232 scsipi_done(xs);
1233 splx(s);
1234 }
1235
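/*
 * Load the data buffer into the ccb's DMA map and translate the
 * resulting segments into the frame's 32-bit scatter/gather list,
 * accounting for any extra frames the list spills into.
 */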
1236 static int
1237 mfi_create_sgl(struct mfi_ccb *ccb, int flags)
1238 {
1239 struct mfi_softc *sc = ccb->ccb_sc;
1240 struct mfi_frame_header *hdr;
1241 bus_dma_segment_t *sgd;
1242 union mfi_sgl *sgl;
1243 int error, i;
1244
1245 DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
1246 (u_long)ccb->ccb_data);
1247
1248 if (!ccb->ccb_data)
1249 return 1;
1250
1251 error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
1252 ccb->ccb_data, ccb->ccb_len, NULL, flags);
1253 if (error) {
1254 if (error == EFBIG)
1255 printf("more than %d dma segs\n",
1256 sc->sc_max_sgl);
1257 else
1258 printf("error %d loading dma map\n", error);
1259 return 1;
1260 }
1261
1262 hdr = &ccb->ccb_frame->mfr_header;
1263 sgl = ccb->ccb_sgl;
1264 sgd = ccb->ccb_dmamap->dm_segs;
1265 for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1266 sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1267 sgl->sg32[i].len = htole32(sgd[i].ds_len);
1268 DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
1269 DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1270 }
1271
1272 if (ccb->ccb_direction == MFI_DATA_IN) {
1273 hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1274 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1275 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1276 } else {
1277 hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1278 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1279 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1280 }
1281
1282 hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1283 /* for 64 bit io make the sizeof a variable to hold whatever sg size */
1284 ccb->ccb_frame_size += sizeof(struct mfi_sg32) *
1285 ccb->ccb_dmamap->dm_nsegs;
1286 ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1287
1288 DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
1289 " dm_nsegs: %d extra_frames: %d\n",
1290 DEVNAME(sc),
1291 hdr->mfh_sg_count,
1292 ccb->ccb_frame_size,
1293 sc->sc_frames_size,
1294 ccb->ccb_dmamap->dm_nsegs,
1295 ccb->ccb_extra_frames);
1296
1297 return 0;
1298 }
1299
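/*
 * Synchronous management (DCMD) request: polled while the system is
 * still cold, otherwise posted to the firmware and slept on until
 * mfi_mgmt_done() wakes us.
 */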
1300 static int
1301 mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t dir,
1302 uint32_t len, void *buf, uint8_t *mbox) {
1303 struct mfi_ccb *ccb;
1304 int rv = 1;
1305
1306 if ((ccb = mfi_get_ccb(sc)) == NULL)
1307 return rv;
1308 rv = mfi_mgmt(ccb, NULL, opc, dir, len, buf, mbox);
1309 if (rv)
1310 return rv;
1311
1312 if (cold) {
1313 if (mfi_poll(ccb))
1314 goto done;
1315 } else {
1316 mfi_post(sc, ccb);
1317
1318 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt_internal sleeping\n",
1319 DEVNAME(sc));
1320 while (ccb->ccb_state != MFI_CCB_DONE)
1321 tsleep(ccb, PRIBIO, "mfi_mgmt", 0);
1322
1323 if (ccb->ccb_flags & MFI_CCB_F_ERR)
1324 goto done;
1325 }
1326 rv = 0;
1327
1328 done:
1329 mfi_put_ccb(ccb);
1330 return rv;
1331 }
1332
1333 static int
1334 mfi_mgmt(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
1335 uint32_t opc, uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
1336 {
1337 struct mfi_dcmd_frame *dcmd;
1338
1339 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(ccb->ccb_sc), opc);
1340
1341 dcmd = &ccb->ccb_frame->mfr_dcmd;
1342 memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1343 dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1344 dcmd->mdf_header.mfh_timeout = 0;
1345
1346 dcmd->mdf_opcode = opc;
1347 dcmd->mdf_header.mfh_data_len = 0;
1348 ccb->ccb_direction = dir;
1349 ccb->ccb_xs = xs;
1350 ccb->ccb_done = mfi_mgmt_done;
1351
1352 ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1353
1354 /* handle special opcodes */
1355 if (mbox)
1356 memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1357
1358 if (dir != MFI_DATA_NONE) {
1359 dcmd->mdf_header.mfh_data_len = len;
1360 ccb->ccb_data = buf;
1361 ccb->ccb_len = len;
1362 ccb->ccb_sgl = &dcmd->mdf_sgl;
1363
1364 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
1365 return 1;
1366 }
1367 return 0;
1368 }
1369
1370 static void
1371 mfi_mgmt_done(struct mfi_ccb *ccb)
1372 {
1373 struct scsipi_xfer *xs = ccb->ccb_xs;
1374 struct mfi_softc *sc = ccb->ccb_sc;
1375 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1376
1377 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
1378 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1379
1380 if (ccb->ccb_data != NULL) {
1381 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
1382 DEVNAME(sc));
1383 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1384 ccb->ccb_dmamap->dm_mapsize,
1385 (ccb->ccb_direction & MFI_DATA_IN) ?
1386 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1387
1388 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1389 }
1390
1391 if (hdr->mfh_cmd_status != MFI_STAT_OK)
1392 ccb->ccb_flags |= MFI_CCB_F_ERR;
1393
1394 ccb->ccb_state = MFI_CCB_DONE;
1395 if (xs) {
1396 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
1397 xs->error = XS_DRIVER_STUFFUP;
1398 } else {
1399 xs->error = XS_NOERROR;
1400 xs->status = SCSI_OK;
1401 xs->resid = 0;
1402 }
1403 mfi_put_ccb(ccb);
1404 scsipi_done(xs);
1405 } else
1406 wakeup(ccb);
1407 }
1408
1409 #if NBIO > 0
1410 int
1411 mfi_ioctl(device_t dev, u_long cmd, void *addr)
1412 {
1413 struct mfi_softc *sc = device_private(dev);
1414 int error = 0;
1415 int s;
1416
1417 KERNEL_LOCK(1, curlwp);
1418 s = splbio();
1419
1420 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1421
1422 switch (cmd) {
1423 case BIOCINQ:
1424 DNPRINTF(MFI_D_IOCTL, "inq\n");
1425 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1426 break;
1427
1428 case BIOCVOL:
1429 DNPRINTF(MFI_D_IOCTL, "vol\n");
1430 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1431 break;
1432
1433 case BIOCDISK:
1434 DNPRINTF(MFI_D_IOCTL, "disk\n");
1435 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1436 break;
1437
1438 case BIOCALARM:
1439 DNPRINTF(MFI_D_IOCTL, "alarm\n");
1440 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1441 break;
1442
1443 case BIOCBLINK:
1444 DNPRINTF(MFI_D_IOCTL, "blink\n");
1445 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1446 break;
1447
1448 case BIOCSETSTATE:
1449 DNPRINTF(MFI_D_IOCTL, "setstate\n");
1450 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1451 break;
1452
1453 default:
1454 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1455 error = EINVAL;
1456 }
1457 splx(s);
1458 KERNEL_UNLOCK_ONE(curlwp);
1459
1460 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
1461 return error;
1462 }
1463
1464 static int
1465 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1466 {
1467 struct mfi_conf *cfg;
1468 int rv = EINVAL;
1469
1470 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1471
1472 if (mfi_get_info(sc)) {
1473 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
1474 DEVNAME(sc));
1475 return EIO;
1476 }
1477
1478 /* get figures */
1479 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1480 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1481 sizeof *cfg, cfg, NULL))
1482 goto freeme;
1483
1484 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1485 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1486 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1487
1488 rv = 0;
1489 freeme:
1490 free(cfg, M_DEVBUF);
1491 return rv;
1492 }
1493
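/*
 * Report a logical volume to bioctl: ids beyond the logical-drive list
 * are handed to mfi_bio_hs() as hotspares, otherwise the LD state and
 * any in-progress consistency check or background init is translated
 * into bioc terms.
 */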
1494 static int
1495 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
1496 {
1497 int i, per, rv = EINVAL;
1498 uint8_t mbox[MFI_MBOX_SIZE];
1499
1500 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
1501 DEVNAME(sc), bv->bv_volid);
1502
1503 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1504 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1505 goto done;
1506
1507 i = bv->bv_volid;
1508 mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1509 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
1510 DEVNAME(sc), mbox[0]);
1511
1512 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
1513 sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox))
1514 goto done;
1515
1516 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
1517 /* go do hotspares */
1518 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
1519 goto done;
1520 }
1521
1522 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
1523
1524 	switch (sc->sc_ld_list.mll_list[i].mll_state) {
1525 case MFI_LD_OFFLINE:
1526 bv->bv_status = BIOC_SVOFFLINE;
1527 break;
1528
1529 case MFI_LD_PART_DEGRADED:
1530 case MFI_LD_DEGRADED:
1531 bv->bv_status = BIOC_SVDEGRADED;
1532 break;
1533
1534 case MFI_LD_ONLINE:
1535 bv->bv_status = BIOC_SVONLINE;
1536 break;
1537
1538 default:
1539 bv->bv_status = BIOC_SVINVALID;
1540 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
1541 DEVNAME(sc),
1542 sc->sc_ld_list.mll_list[i].mll_state);
1543 }
1544
1545 /* additional status can modify MFI status */
1546 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
1547 case MFI_LD_PROG_CC:
1548 case MFI_LD_PROG_BGI:
1549 bv->bv_status = BIOC_SVSCRUB;
1550 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
1551 bv->bv_percent = (per * 100) / 0xffff;
1552 bv->bv_seconds =
1553 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
1554 break;
1555
1556 case MFI_LD_PROG_FGI:
1557 case MFI_LD_PROG_RECONSTRUCT:
1558 /* nothing yet */
1559 break;
1560 }
1561
1562 /*
1563 * The RAID levels are determined per the SNIA DDF spec, this is only
1564 	 * a subset that is valid for the MFI controller.
1565 */
1566 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
1567 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
1568 MFI_DDF_SRL_SPANNED)
1569 bv->bv_level *= 10;
1570
1571 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
1572 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
1573
1574 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
1575
1576 rv = 0;
1577 done:
1578 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
1579 DEVNAME(sc), rv);
1580 return rv;
1581 }
1582
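/*
 * Translate a bioctl disk reference into firmware terms: fetch the
 * controller configuration, walk the logical-drive spans to find the
 * array holding the requested disk, and query that physical drive's
 * details.
 */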
1583 static int
1584 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
1585 {
1586 struct mfi_conf *cfg;
1587 struct mfi_array *ar;
1588 struct mfi_ld_cfg *ld;
1589 struct mfi_pd_details *pd;
1590 struct scsipi_inquiry_data *inqbuf;
1591 char vend[8+16+4+1];
1592 int i, rv = EINVAL;
1593 int arr, vol, disk;
1594 uint32_t size;
1595 uint8_t mbox[MFI_MBOX_SIZE];
1596
1597 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
1598 DEVNAME(sc), bd->bd_diskid);
1599
1600 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1601
1602 /* send single element command to retrieve size for full structure */
1603 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1604 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1605 sizeof *cfg, cfg, NULL))
1606 goto freeme;
1607
1608 size = cfg->mfc_size;
1609 free(cfg, M_DEVBUF);
1610
1611 /* memory for read config */
1612 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1613 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1614 size, cfg, NULL))
1615 goto freeme;
1616
1617 ar = cfg->mfc_array;
1618
1619 /* calculate offset to ld structure */
1620 ld = (struct mfi_ld_cfg *)(
1621 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1622 cfg->mfc_array_size * cfg->mfc_no_array);
1623
1624 vol = bd->bd_volid;
1625
1626 if (vol >= cfg->mfc_no_ld) {
1627 /* do hotspares */
1628 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
1629 goto freeme;
1630 }
1631
1632 /* find corresponding array for ld */
1633 for (i = 0, arr = 0; i < vol; i++)
1634 arr += ld[i].mlc_parm.mpa_span_depth;
1635
1636 /* offset disk into pd list */
1637 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
1638
1639 /* offset array index into the next spans */
1640 arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
1641
1642 bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
1643 	switch (ar[arr].pd[disk].mar_pd_state) {
1644 case MFI_PD_UNCONFIG_GOOD:
1645 bd->bd_status = BIOC_SDUNUSED;
1646 break;
1647
1648 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
1649 bd->bd_status = BIOC_SDHOTSPARE;
1650 break;
1651
1652 case MFI_PD_OFFLINE:
1653 bd->bd_status = BIOC_SDOFFLINE;
1654 break;
1655
1656 case MFI_PD_FAILED:
1657 bd->bd_status = BIOC_SDFAILED;
1658 break;
1659
1660 case MFI_PD_REBUILD:
1661 bd->bd_status = BIOC_SDREBUILD;
1662 break;
1663
1664 case MFI_PD_ONLINE:
1665 bd->bd_status = BIOC_SDONLINE;
1666 break;
1667
1668 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
1669 default:
1670 bd->bd_status = BIOC_SDINVALID;
1671 break;
1672
1673 }
1674
1675 /* get the remaining fields */
1676 *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
1677 memset(pd, 0, sizeof(*pd));
1678 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1679 sizeof *pd, pd, mbox))
1680 goto freeme;
1681
1682 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
1683
1684 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
1685 bd->bd_channel = pd->mpd_enc_idx;
1686
1687 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1688 memcpy(vend, inqbuf->vendor, sizeof vend - 1);
1689 vend[sizeof vend - 1] = '\0';
1690 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
1691
1692 /* XXX find a way to retrieve serial nr from drive */
1693 /* XXX find a way to get bd_procdev */
1694
1695 rv = 0;
1696 freeme:
1697 free(pd, M_DEVBUF);
1698 free(cfg, M_DEVBUF);
1699
1700 return rv;
1701 }
1702
1703 static int
1704 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1705 {
1706 uint32_t opc, dir = MFI_DATA_NONE;
1707 int rv = 0;
1708 int8_t ret;
1709
1710 	switch (ba->ba_opcode) {
1711 case BIOC_SADISABLE:
1712 opc = MR_DCMD_SPEAKER_DISABLE;
1713 break;
1714
1715 case BIOC_SAENABLE:
1716 opc = MR_DCMD_SPEAKER_ENABLE;
1717 break;
1718
1719 case BIOC_SASILENCE:
1720 opc = MR_DCMD_SPEAKER_SILENCE;
1721 break;
1722
1723 case BIOC_GASTATUS:
1724 opc = MR_DCMD_SPEAKER_GET;
1725 dir = MFI_DATA_IN;
1726 break;
1727
1728 case BIOC_SATEST:
1729 opc = MR_DCMD_SPEAKER_TEST;
1730 break;
1731
1732 default:
1733 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1734 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1735 return EINVAL;
1736 }
1737
1738 if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL))
1739 rv = EINVAL;
1740 else
1741 if (ba->ba_opcode == BIOC_GASTATUS)
1742 ba->ba_status = ret;
1743 else
1744 ba->ba_status = 0;
1745
1746 return rv;
1747 }
1748
1749 static int
1750 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1751 {
1752 int i, found, rv = EINVAL;
1753 uint8_t mbox[MFI_MBOX_SIZE];
1754 uint32_t cmd;
1755 struct mfi_pd_list *pd;
1756
1757 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1758 bb->bb_status);
1759
1760 /* channel 0 means not in an enclosure so can't be blinked */
1761 if (bb->bb_channel == 0)
1762 return EINVAL;
1763
1764 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1765
1766 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1767 MFI_PD_LIST_SIZE, pd, NULL))
1768 goto done;
1769
1770 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1771 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1772 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1773 found = 1;
1774 break;
1775 }
1776
1777 if (!found)
1778 goto done;
1779
1780 memset(mbox, 0, sizeof mbox);
1781
1782 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1783
1784 switch (bb->bb_status) {
1785 case BIOC_SBUNBLINK:
1786 cmd = MR_DCMD_PD_UNBLINK;
1787 break;
1788
1789 case BIOC_SBBLINK:
1790 cmd = MR_DCMD_PD_BLINK;
1791 break;
1792
1793 case BIOC_SBALARM:
1794 default:
1795 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1796 "opcode %x\n", DEVNAME(sc), bb->bb_status);
1797 goto done;
1798 }
1799
1800
1801 if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1802 goto done;
1803
1804 rv = 0;
1805 done:
1806 free(pd, M_DEVBUF);
1807 return rv;
1808 }
1809
1810 static int
1811 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
1812 {
1813 struct mfi_pd_list *pd;
1814 int i, found, rv = EINVAL;
1815 uint8_t mbox[MFI_MBOX_SIZE];
1816 uint32_t cmd;
1817
1818 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
1819 bs->bs_status);
1820
1821 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1822
1823 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1824 MFI_PD_LIST_SIZE, pd, NULL))
1825 goto done;
1826
1827 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1828 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
1829 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
1830 found = 1;
1831 break;
1832 }
1833
1834 if (!found)
1835 goto done;
1836
1837 memset(mbox, 0, sizeof mbox);
1838
1839 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1840
1841 switch (bs->bs_status) {
1842 case BIOC_SSONLINE:
1843 mbox[2] = MFI_PD_ONLINE;
1844 cmd = MD_DCMD_PD_SET_STATE;
1845 break;
1846
1847 case BIOC_SSOFFLINE:
1848 mbox[2] = MFI_PD_OFFLINE;
1849 cmd = MD_DCMD_PD_SET_STATE;
1850 break;
1851
1852 case BIOC_SSHOTSPARE:
1853 mbox[2] = MFI_PD_HOTSPARE;
1854 cmd = MD_DCMD_PD_SET_STATE;
1855 break;
1856 /*
1857 case BIOC_SSREBUILD:
1858 cmd = MD_DCMD_PD_REBUILD;
1859 break;
1860 */
1861 default:
1862 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
1863 "opcode %x\n", DEVNAME(sc), bs->bs_status);
1864 goto done;
1865 }
1866
1867
1868 	if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE,
1869 0, NULL, mbox))
1870 goto done;
1871
1872 rv = 0;
1873 done:
1874 free(pd, M_DEVBUF);
1875 return rv;
1876 }
1877
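/*
 * Report a hotspare to bioctl.  Hotspare records sit behind the array
 * and logical-drive entries in the firmware configuration, so the
 * volume id is translated into an index into that trailing region.
 */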
1878 static int
1879 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
1880 {
1881 struct mfi_conf *cfg;
1882 struct mfi_hotspare *hs;
1883 struct mfi_pd_details *pd;
1884 struct bioc_disk *sdhs;
1885 struct bioc_vol *vdhs;
1886 struct scsipi_inquiry_data *inqbuf;
1887 char vend[8+16+4+1];
1888 int i, rv = EINVAL;
1889 uint32_t size;
1890 uint8_t mbox[MFI_MBOX_SIZE];
1891
1892 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
1893
1894 if (!bio_hs)
1895 return EINVAL;
1896
1897 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1898
1899 /* send single element command to retrieve size for full structure */
1900 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1901 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1902 sizeof *cfg, cfg, NULL))
1903 goto freeme;
1904
1905 size = cfg->mfc_size;
1906 free(cfg, M_DEVBUF);
1907
1908 /* memory for read config */
1909 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
1910 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
1911 size, cfg, NULL))
1912 goto freeme;
1913
1914 /* calculate offset to hs structure */
1915 hs = (struct mfi_hotspare *)(
1916 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1917 cfg->mfc_array_size * cfg->mfc_no_array +
1918 cfg->mfc_ld_size * cfg->mfc_no_ld);
1919
1920 if (volid < cfg->mfc_no_ld)
1921 goto freeme; /* not a hotspare */
1922
1923 	if (volid >= (cfg->mfc_no_ld + cfg->mfc_no_hs))
1924 goto freeme; /* not a hotspare */
1925
1926 /* offset into hotspare structure */
1927 i = volid - cfg->mfc_no_ld;
1928
1929 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
1930 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
1931 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
1932
1933 /* get pd fields */
1934 memset(mbox, 0, sizeof mbox);
1935 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
1936 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1937 sizeof *pd, pd, mbox)) {
1938 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
1939 DEVNAME(sc));
1940 goto freeme;
1941 }
1942
1943 switch (type) {
1944 case MFI_MGMT_VD:
1945 vdhs = bio_hs;
1946 vdhs->bv_status = BIOC_SVONLINE;
1947 vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */
1948 vdhs->bv_level = -1; /* hotspare */
1949 vdhs->bv_nodisk = 1;
1950 break;
1951
1952 case MFI_MGMT_SD:
1953 sdhs = bio_hs;
1954 sdhs->bd_status = BIOC_SDHOTSPARE;
1955 sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */
1956 sdhs->bd_channel = pd->mpd_enc_idx;
1957 sdhs->bd_target = pd->mpd_enc_slot;
1958 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1959 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
1960 vend[sizeof vend - 1] = '\0';
1961 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
1962 break;
1963
1964 default:
1965 goto freeme;
1966 }
1967
1968 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
1969 rv = 0;
1970 freeme:
1971 free(pd, M_DEVBUF);
1972 free(cfg, M_DEVBUF);
1973
1974 return rv;
1975 }
1976
1977 static int
1978 mfi_destroy_sensors(struct mfi_softc *sc)
1979 {
1980 if (sc->sc_sme == NULL)
1981 return 0;
1982 sysmon_envsys_unregister(sc->sc_sme);
1983 sc->sc_sme = NULL;
1984 free(sc->sc_sensor, M_DEVBUF);
1985 return 0;
1986 }
1987
1988 static int
1989 mfi_create_sensors(struct mfi_softc *sc)
1990 {
1991 int i;
1992 int nsensors = sc->sc_ld_cnt;
1993 int rv;
1994
1995 sc->sc_sme = sysmon_envsys_create();
1996 sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
1997 M_DEVBUF, M_NOWAIT | M_ZERO);
1998 if (sc->sc_sensor == NULL) {
1999 aprint_error("%s: can't allocate envsys_data_t\n",
2000 DEVNAME(sc));
2001 return ENOMEM;
2002 }
2003
2004 for (i = 0; i < nsensors; i++) {
2005 sc->sc_sensor[i].units = ENVSYS_DRIVE;
2006 sc->sc_sensor[i].monitor = true;
2007 /* Enable monitoring for drive state changes */
2008 sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
2009 /* logical drives */
2010 snprintf(sc->sc_sensor[i].desc,
2011 sizeof(sc->sc_sensor[i].desc), "%s:%d",
2012 DEVNAME(sc), i);
2013 if (sysmon_envsys_sensor_attach(sc->sc_sme,
2014 &sc->sc_sensor[i]))
2015 goto out;
2016 }
2017
2018 sc->sc_sme->sme_name = DEVNAME(sc);
2019 sc->sc_sme->sme_cookie = sc;
2020 sc->sc_sme->sme_refresh = mfi_sensor_refresh;
2021 rv = sysmon_envsys_register(sc->sc_sme);
2022 if (rv != 0) {
2023 aprint_error("%s: unable to register with sysmon (rv = %d)\n",
2024 DEVNAME(sc), rv);
2025 goto out;
2026 }
2027 return 0;
2028
2029 out:
2030 free(sc->sc_sensor, M_DEVBUF);
2031 sysmon_envsys_destroy(sc->sc_sme);
2032 sc->sc_sme = NULL;
2033 return EINVAL;
2034 }
2035
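/*
 * envsys callback: reuse the bioctl volume query and map its status
 * onto drive sensor states.
 */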
2036 static void
2037 mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
2038 {
2039 struct mfi_softc *sc = sme->sme_cookie;
2040 struct bioc_vol bv;
2041 int s;
2042 int error;
2043
2044 if (edata->sensor >= sc->sc_ld_cnt)
2045 return;
2046
2047 memset(&bv, 0, sizeof(bv));
2048 bv.bv_volid = edata->sensor;
2049 KERNEL_LOCK(1, curlwp);
2050 s = splbio();
2051 error = mfi_ioctl_vol(sc, &bv);
2052 splx(s);
2053 KERNEL_UNLOCK_ONE(curlwp);
2054 if (error)
2055 return;
2056
2057 	switch (bv.bv_status) {
2058 case BIOC_SVOFFLINE:
2059 edata->value_cur = ENVSYS_DRIVE_FAIL;
2060 edata->state = ENVSYS_SCRITICAL;
2061 break;
2062
2063 case BIOC_SVDEGRADED:
2064 edata->value_cur = ENVSYS_DRIVE_PFAIL;
2065 edata->state = ENVSYS_SCRITICAL;
2066 break;
2067
2068 case BIOC_SVSCRUB:
2069 case BIOC_SVONLINE:
2070 edata->value_cur = ENVSYS_DRIVE_ONLINE;
2071 edata->state = ENVSYS_SVALID;
2072 break;
2073
2074 case BIOC_SVINVALID:
2075 		/* FALLTHROUGH */
2076 default:
2077 edata->value_cur = 0; /* unknown */
2078 edata->state = ENVSYS_SINVALID;
2079 }
2080 }
2081
2082 #endif /* NBIO > 0 */
2083
2084 static uint32_t
2085 mfi_xscale_fw_state(struct mfi_softc *sc)
2086 {
2087 return mfi_read(sc, MFI_OMSG0);
2088 }
2089
2090 static void
2091 mfi_xscale_intr_dis(struct mfi_softc *sc)
2092 {
2093 mfi_write(sc, MFI_OMSK, 0);
2094 }
2095
2096 static void
2097 mfi_xscale_intr_ena(struct mfi_softc *sc)
2098 {
2099 mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
2100 }
2101
2102 static int
2103 mfi_xscale_intr(struct mfi_softc *sc)
2104 {
2105 uint32_t status;
2106
2107 status = mfi_read(sc, MFI_OSTS);
2108 if (!ISSET(status, MFI_OSTS_INTR_VALID))
2109 return 0;
2110
2111 /* write status back to acknowledge interrupt */
2112 mfi_write(sc, MFI_OSTS, status);
2113 return 1;
2114 }
2115
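/*
 * Post a command on the XScale inbound queue port: the frame's physical
 * address is shifted right by 3 and the extra frame count is carried in
 * the low bits.
 */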
2116 static void
2117 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2118 {
2119 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2120 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
2121 sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2122 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
2123 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
2124 MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);
2125
2126 mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
2127 ccb->ccb_extra_frames);
2128 }
2129
2130 static uint32_t
2131 mfi_ppc_fw_state(struct mfi_softc *sc)
2132 {
2133 return mfi_read(sc, MFI_OSP);
2134 }
2135
2136 static void
2137 mfi_ppc_intr_dis(struct mfi_softc *sc)
2138 {
2139 /* Taking a wild guess --dyoung */
2140 mfi_write(sc, MFI_OMSK, ~(uint32_t)0x0);
2141 mfi_write(sc, MFI_ODC, 0xffffffff);
2142 }
2143
2144 static void
2145 mfi_ppc_intr_ena(struct mfi_softc *sc)
2146 {
2147 mfi_write(sc, MFI_ODC, 0xffffffff);
2148 mfi_write(sc, MFI_OMSK, ~0x80000004);
2149 }
2150
2151 static int
2152 mfi_ppc_intr(struct mfi_softc *sc)
2153 {
2154 uint32_t status;
2155
2156 status = mfi_read(sc, MFI_OSTS);
2157 if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2158 return 0;
2159
2160 /* write status back to acknowledge interrupt */
2161 mfi_write(sc, MFI_ODC, status);
2162 return 1;
2163 }
2164
2165 static void
2166 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2167 {
2168 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2169 (ccb->ccb_extra_frames << 1));
2170 }
2171