1 /*	$NetBSD: mfi.c,v 1.6 2007/07/01 07:37:14 xtraeme Exp $	*/
2 /* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
3 /*
4 * Copyright (c) 2006 Marco Peereboom <marco (at) peereboom.us>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include <sys/cdefs.h>
20 __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.6 2007/07/01 07:37:14 xtraeme Exp $");
21
22 #include "bio.h"
23
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/buf.h>
27 #include <sys/ioctl.h>
28 #include <sys/device.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/proc.h>
32
33 #include <uvm/uvm_param.h>
34
35 #include <machine/bus.h>
36
37 #include <dev/scsipi/scsipi_all.h>
38 #include <dev/scsipi/scsi_all.h>
39 #include <dev/scsipi/scsi_spc.h>
40 #include <dev/scsipi/scsipi_disk.h>
41 #include <dev/scsipi/scsi_disk.h>
42 #include <dev/scsipi/scsiconf.h>
43
44 #include <dev/ic/mfireg.h>
45 #include <dev/ic/mfivar.h>
46
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #endif /* NBIO > 0 */
50
51 #ifdef MFI_DEBUG
52 uint32_t mfi_debug = 0
53 /* | MFI_D_CMD */
54 /* | MFI_D_INTR */
55 /* | MFI_D_MISC */
56 /* | MFI_D_DMA */
57 | MFI_D_IOCTL
58 /* | MFI_D_RW */
59 /* | MFI_D_MEM */
60 /* | MFI_D_CCB */
61 ;
62 #endif
63
64 void mfi_scsipi_request(struct scsipi_channel *,
65 scsipi_adapter_req_t, void *);
66 int mfi_scsi_ioctl(struct scsipi_channel *, u_long, void *, int,
67 struct proc *);
68 void mfiminphys(struct buf *bp);
69
70 struct mfi_ccb *mfi_get_ccb(struct mfi_softc *);
71 void mfi_put_ccb(struct mfi_ccb *);
72 int mfi_init_ccb(struct mfi_softc *);
73
74 struct mfi_mem *mfi_allocmem(struct mfi_softc *, size_t);
75 void mfi_freemem(struct mfi_softc *, struct mfi_mem *);
76
77 int mfi_transition_firmware(struct mfi_softc *);
78 int mfi_initialize_firmware(struct mfi_softc *);
79 int mfi_get_info(struct mfi_softc *);
80 uint32_t mfi_read(struct mfi_softc *, bus_size_t);
81 void mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
82 int mfi_poll(struct mfi_ccb *);
83 int mfi_despatch_cmd(struct mfi_ccb *);
84 int mfi_create_sgl(struct mfi_ccb *, int);
85
86 /* commands */
87 int mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
88 int mfi_scsi_io(struct mfi_ccb *, struct scsipi_xfer *, uint32_t,
89 uint32_t);
90 void mfi_scsi_xs_done(struct mfi_ccb *);
91 int mfi_mgmt(struct mfi_softc *, uint32_t, uint32_t, uint32_t,
92 void *, uint8_t *);
93 void mfi_mgmt_done(struct mfi_ccb *);
94
95 #if NBIO > 0
96 int mfi_ioctl(struct device *, u_long, void *);
97 int mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
98 int mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
99 int mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
100 int mfi_ioctl_alarm(struct mfi_softc *, struct bioc_alarm *);
101 int mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *);
102 int mfi_ioctl_setstate(struct mfi_softc *, struct bioc_setstate *);
103 int mfi_bio_hs(struct mfi_softc *, int, int, void *);
104 int mfi_create_sensors(struct mfi_softc *);
105 int mfi_sensor_gtredata(struct sysmon_envsys *, envsys_data_t *);
106 #endif /* NBIO > 0 */
107
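/*
 * mfi_get_ccb: grab a command control block off the softc's free queue
 * at splbio.  Returns NULL when no ccb is available.
 */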
108 struct mfi_ccb *
109 mfi_get_ccb(struct mfi_softc *sc)
110 {
111 struct mfi_ccb *ccb;
112 int s;
113
114 s = splbio();
115 ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
116 if (ccb) {
117 TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
118 ccb->ccb_state = MFI_CCB_READY;
119 }
120 splx(s);
121
122 DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
123
124 return (ccb);
125 }
126
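/*
 * mfi_put_ccb: reset a ccb's per-command fields and return it to the
 * free queue.
 */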
127 void
128 mfi_put_ccb(struct mfi_ccb *ccb)
129 {
130 struct mfi_softc *sc = ccb->ccb_sc;
131 int s;
132
133 DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
134
135 s = splbio();
136 ccb->ccb_state = MFI_CCB_FREE;
137 ccb->ccb_xs = NULL;
138 ccb->ccb_flags = 0;
139 ccb->ccb_done = NULL;
140 ccb->ccb_direction = 0;
141 ccb->ccb_frame_size = 0;
142 ccb->ccb_extra_frames = 0;
143 ccb->ccb_sgl = NULL;
144 ccb->ccb_data = NULL;
145 ccb->ccb_len = 0;
146 TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
147 splx(s);
148 }
149
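/*
 * mfi_init_ccb: allocate the ccb array, point each ccb at its slice of
 * the preallocated frame and sense DMA memory, and create a per-ccb DMA
 * map for data transfers.
 */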
150 int
151 mfi_init_ccb(struct mfi_softc *sc)
152 {
153 struct mfi_ccb *ccb;
154 uint32_t i;
155 int error;
156
157 DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
158
159 sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
160 M_DEVBUF, M_WAITOK);
161 memset(sc->sc_ccb, 0, sizeof(struct mfi_ccb) * sc->sc_max_cmds);
162
163 for (i = 0; i < sc->sc_max_cmds; i++) {
164 ccb = &sc->sc_ccb[i];
165
166 ccb->ccb_sc = sc;
167
168 /* select i'th frame */
169 ccb->ccb_frame = (union mfi_frame *)
170 ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
171 ccb->ccb_pframe =
172 MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
173 ccb->ccb_frame->mfr_header.mfh_context = i;
174
175 /* select i'th sense */
176 ccb->ccb_sense = (struct mfi_sense *)
177 ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
178 ccb->ccb_psense =
179 (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
180
181 /* create a dma map for transfer */
182 error = bus_dmamap_create(sc->sc_dmat,
183 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
184 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
185 if (error) {
186 printf("%s: cannot create ccb dmamap (%d)\n",
187 DEVNAME(sc), error);
188 goto destroy;
189 }
190
191 DNPRINTF(MFI_D_CCB,
192 "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
193 ccb->ccb_frame->mfr_header.mfh_context, ccb,
194 (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
195 (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
196 (u_long)ccb->ccb_dmamap);
197
198 /* add ccb to queue */
199 mfi_put_ccb(ccb);
200 }
201
202 return (0);
203 destroy:
204 /* free dma maps and ccb memory */
205 	while (i) {
206 		i--;
207 		ccb = &sc->sc_ccb[i];
208 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
209 	}
210
211 free(sc->sc_ccb, M_DEVBUF);
212
213 return (1);
214 }
215
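/*
 * mfi_read/mfi_write: 32 bit register accessors with the appropriate
 * bus space barriers.
 */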
216 uint32_t
217 mfi_read(struct mfi_softc *sc, bus_size_t r)
218 {
219 uint32_t rv;
220
221 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
222 BUS_SPACE_BARRIER_READ);
223 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
224
225 	DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x%08x ", DEVNAME(sc), (u_long)r, rv);
226 return (rv);
227 }
228
229 void
230 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
231 {
232 DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);
233
234 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
235 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
236 BUS_SPACE_BARRIER_WRITE);
237 }
238
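/*
 * mfi_allocmem: allocate, map and zero a physically contiguous chunk of
 * DMA-safe memory and load it into its own DMA map.  Returns NULL on
 * failure; mfi_freemem() undoes all of it.
 */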
239 struct mfi_mem *
240 mfi_allocmem(struct mfi_softc *sc, size_t size)
241 {
242 struct mfi_mem *mm;
243 int nsegs;
244
245 DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
246 (long)size);
247
248 mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT);
249 if (mm == NULL)
250 return (NULL);
251
252 memset(mm, 0, sizeof(struct mfi_mem));
253 mm->am_size = size;
254
255 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
256 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
257 goto amfree;
258
259 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
260 &nsegs, BUS_DMA_NOWAIT) != 0)
261 goto destroy;
262
263 if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
264 BUS_DMA_NOWAIT) != 0)
265 goto free;
266
267 if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
268 BUS_DMA_NOWAIT) != 0)
269 goto unmap;
270
271 DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n",
272 mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);
273
274 memset(mm->am_kva, 0, size);
275 return (mm);
276
277 unmap:
278 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
279 free:
280 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
281 destroy:
282 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
283 amfree:
284 free(mm, M_DEVBUF);
285
286 return (NULL);
287 }
288
289 void
290 mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
291 {
292 DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
293
294 bus_dmamap_unload(sc->sc_dmat, mm->am_map);
295 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
296 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
297 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
298 free(mm, M_DEVBUF);
299 }
300
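/*
 * mfi_transition_firmware: poll the outbound message register and poke
 * the inbound doorbell until the firmware reaches MFI_STATE_READY, or
 * give up if it stays stuck in one state for too long.
 */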
301 int
302 mfi_transition_firmware(struct mfi_softc *sc)
303 {
304 int32_t fw_state, cur_state;
305 int max_wait, i;
306
307 fw_state = mfi_read(sc, MFI_OMSG0) & MFI_STATE_MASK;
308
309 DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
310 fw_state);
311
312 while (fw_state != MFI_STATE_READY) {
313 DNPRINTF(MFI_D_MISC,
314 "%s: waiting for firmware to become ready\n",
315 DEVNAME(sc));
316 cur_state = fw_state;
317 switch (fw_state) {
318 case MFI_STATE_FAULT:
319 printf("%s: firmware fault\n", DEVNAME(sc));
320 return (1);
321 case MFI_STATE_WAIT_HANDSHAKE:
322 mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
323 max_wait = 2;
324 break;
325 case MFI_STATE_OPERATIONAL:
326 mfi_write(sc, MFI_IDB, MFI_INIT_READY);
327 max_wait = 10;
328 break;
329 case MFI_STATE_UNDEFINED:
330 case MFI_STATE_BB_INIT:
331 max_wait = 2;
332 break;
333 case MFI_STATE_FW_INIT:
334 case MFI_STATE_DEVICE_SCAN:
335 case MFI_STATE_FLUSH_CACHE:
336 max_wait = 20;
337 break;
338 default:
339 printf("%s: unknown firmware state %d\n",
340 DEVNAME(sc), fw_state);
341 return (1);
342 }
343 for (i = 0; i < (max_wait * 10); i++) {
344 fw_state = mfi_read(sc, MFI_OMSG0) & MFI_STATE_MASK;
345 if (fw_state == cur_state)
346 DELAY(100000);
347 else
348 break;
349 }
350 if (fw_state == cur_state) {
351 printf("%s: firmware stuck in state %#x\n",
352 DEVNAME(sc), fw_state);
353 return (1);
354 }
355 }
356
357 return (0);
358 }
359
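/*
 * mfi_initialize_firmware: hand the firmware the physical addresses of
 * the reply queue and the producer/consumer indices via a polled
 * MFI_CMD_INIT frame.
 */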
360 int
361 mfi_initialize_firmware(struct mfi_softc *sc)
362 {
363 struct mfi_ccb *ccb;
364 struct mfi_init_frame *init;
365 struct mfi_init_qinfo *qinfo;
366
367 DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
368
369 if ((ccb = mfi_get_ccb(sc)) == NULL)
370 return (1);
371
372 init = &ccb->ccb_frame->mfr_init;
373 qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
374
375 memset(qinfo, 0, sizeof *qinfo);
376 qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
377 qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
378 offsetof(struct mfi_prod_cons, mpc_reply_q));
379 qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
380 offsetof(struct mfi_prod_cons, mpc_producer));
381 qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
382 offsetof(struct mfi_prod_cons, mpc_consumer));
383
384 init->mif_header.mfh_cmd = MFI_CMD_INIT;
385 init->mif_header.mfh_data_len = sizeof *qinfo;
386 init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);
387
388 DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
389 DEVNAME(sc),
390 qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
391 qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);
392
393 if (mfi_poll(ccb)) {
394 printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc));
395 return (1);
396 }
397
398 mfi_put_ccb(ccb);
399
400 return (0);
401 }
402
403 int
404 mfi_get_info(struct mfi_softc *sc)
405 {
406 #ifdef MFI_DEBUG
407 int i;
408 #endif
409 DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
410
411 if (mfi_mgmt(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
412 sizeof(sc->sc_info), &sc->sc_info, NULL))
413 return (1);
414
415 #ifdef MFI_DEBUG
416
417 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
418 printf("%s: active FW %s Version %s date %s time %s\n",
419 DEVNAME(sc),
420 sc->sc_info.mci_image_component[i].mic_name,
421 sc->sc_info.mci_image_component[i].mic_version,
422 sc->sc_info.mci_image_component[i].mic_build_date,
423 sc->sc_info.mci_image_component[i].mic_build_time);
424 }
425
426 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
427 printf("%s: pending FW %s Version %s date %s time %s\n",
428 DEVNAME(sc),
429 sc->sc_info.mci_pending_image_component[i].mic_name,
430 sc->sc_info.mci_pending_image_component[i].mic_version,
431 sc->sc_info.mci_pending_image_component[i].mic_build_date,
432 sc->sc_info.mci_pending_image_component[i].mic_build_time);
433 }
434
435 printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
436 DEVNAME(sc),
437 sc->sc_info.mci_max_arms,
438 sc->sc_info.mci_max_spans,
439 sc->sc_info.mci_max_arrays,
440 sc->sc_info.mci_max_lds,
441 sc->sc_info.mci_product_name);
442
443 printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
444 DEVNAME(sc),
445 sc->sc_info.mci_serial_number,
446 sc->sc_info.mci_hw_present,
447 sc->sc_info.mci_current_fw_time,
448 sc->sc_info.mci_max_cmds,
449 sc->sc_info.mci_max_sg_elements);
450
451 printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
452 DEVNAME(sc),
453 sc->sc_info.mci_max_request_size,
454 sc->sc_info.mci_lds_present,
455 sc->sc_info.mci_lds_degraded,
456 sc->sc_info.mci_lds_offline,
457 sc->sc_info.mci_pd_present);
458
459 printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
460 DEVNAME(sc),
461 sc->sc_info.mci_pd_disks_present,
462 sc->sc_info.mci_pd_disks_pred_failure,
463 sc->sc_info.mci_pd_disks_failed);
464
465 printf("%s: nvram %d mem %d flash %d\n",
466 DEVNAME(sc),
467 sc->sc_info.mci_nvram_size,
468 sc->sc_info.mci_memory_size,
469 sc->sc_info.mci_flash_size);
470
471 printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
472 DEVNAME(sc),
473 sc->sc_info.mci_ram_correctable_errors,
474 sc->sc_info.mci_ram_uncorrectable_errors,
475 sc->sc_info.mci_cluster_allowed,
476 sc->sc_info.mci_cluster_active);
477
478 printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
479 DEVNAME(sc),
480 sc->sc_info.mci_max_strips_per_io,
481 sc->sc_info.mci_raid_levels,
482 sc->sc_info.mci_adapter_ops,
483 sc->sc_info.mci_ld_ops);
484
485 printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
486 DEVNAME(sc),
487 sc->sc_info.mci_stripe_sz_ops.min,
488 sc->sc_info.mci_stripe_sz_ops.max,
489 sc->sc_info.mci_pd_ops,
490 sc->sc_info.mci_pd_mix_support);
491
492 printf("%s: ecc_bucket %d pckg_prop %s\n",
493 DEVNAME(sc),
494 sc->sc_info.mci_ecc_bucket_count,
495 sc->sc_info.mci_package_version);
496
497 printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
498 DEVNAME(sc),
499 sc->sc_info.mci_properties.mcp_seq_num,
500 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
501 sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
502 sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
503
504 printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
505 DEVNAME(sc),
506 sc->sc_info.mci_properties.mcp_rebuild_rate,
507 sc->sc_info.mci_properties.mcp_patrol_read_rate,
508 sc->sc_info.mci_properties.mcp_bgi_rate,
509 sc->sc_info.mci_properties.mcp_cc_rate);
510
511 printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
512 DEVNAME(sc),
513 sc->sc_info.mci_properties.mcp_recon_rate,
514 sc->sc_info.mci_properties.mcp_cache_flush_interval,
515 sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
516 sc->sc_info.mci_properties.mcp_spinup_delay,
517 sc->sc_info.mci_properties.mcp_cluster_enable);
518
519 printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
520 DEVNAME(sc),
521 sc->sc_info.mci_properties.mcp_coercion_mode,
522 sc->sc_info.mci_properties.mcp_alarm_enable,
523 sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
524 sc->sc_info.mci_properties.mcp_disable_battery_warn,
525 sc->sc_info.mci_properties.mcp_ecc_bucket_size);
526
527 printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
528 DEVNAME(sc),
529 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
530 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
531 sc->sc_info.mci_properties.mcp_expose_encl_devices);
532
533 printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
534 DEVNAME(sc),
535 sc->sc_info.mci_pci.mip_vendor,
536 sc->sc_info.mci_pci.mip_device,
537 sc->sc_info.mci_pci.mip_subvendor,
538 sc->sc_info.mci_pci.mip_subdevice);
539
540 printf("%s: type %#x port_count %d port_addr ",
541 DEVNAME(sc),
542 sc->sc_info.mci_host.mih_type,
543 sc->sc_info.mci_host.mih_port_count);
544
545 for (i = 0; i < 8; i++)
546 printf("%.0lx ", sc->sc_info.mci_host.mih_port_addr[i]);
547 printf("\n");
548
549 printf("%s: type %.x port_count %d port_addr ",
550 DEVNAME(sc),
551 sc->sc_info.mci_device.mid_type,
552 sc->sc_info.mci_device.mid_port_count);
553
554 for (i = 0; i < 8; i++)
555 printf("%.0lx ", sc->sc_info.mci_device.mid_port_addr[i]);
556 printf("\n");
557 #endif /* MFI_DEBUG */
558
559 return (0);
560 }
561
562 void
563 mfiminphys(struct buf *bp)
564 {
565 DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
566
567 /* XXX currently using MFI_MAXFER = MAXPHYS */
568 if (bp->b_bcount > MFI_MAXFER)
569 bp->b_bcount = MFI_MAXFER;
570 minphys(bp);
571 }
572
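/*
 * mfi_attach: bring the controller to READY, size and allocate the
 * reply queue, frame and sense DMA memory, initialize the ccbs and the
 * firmware, then attach the logical drives as a scsipi channel.
 */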
573 int
574 mfi_attach(struct mfi_softc *sc)
575 {
576 struct scsipi_adapter *adapt = &sc->sc_adapt;
577 struct scsipi_channel *chan = &sc->sc_chan;
578 uint32_t status, frames;
579 int i;
580
581 DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
582
583 if (mfi_transition_firmware(sc))
584 return (1);
585
586 TAILQ_INIT(&sc->sc_ccb_freeq);
587
588 status = mfi_read(sc, MFI_OMSG0);
589 sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
590 sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
591 DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
592 DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);
593
594 /* consumer/producer and reply queue memory */
595 sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
596 sizeof(struct mfi_prod_cons));
597 if (sc->sc_pcq == NULL) {
598 aprint_error("%s: unable to allocate reply queue memory\n",
599 DEVNAME(sc));
600 goto nopcq;
601 }
602 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
603 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
604 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
605
606 /* frame memory */
607 /* we are not doing 64 bit IO so only calculate # of 32 bit frames */
608 frames = (sizeof(struct mfi_sg32) * sc->sc_max_sgl +
609 MFI_FRAME_SIZE - 1) / MFI_FRAME_SIZE + 1;
610 sc->sc_frames_size = frames * MFI_FRAME_SIZE;
611 sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
612 if (sc->sc_frames == NULL) {
613 aprint_error("%s: unable to allocate frame memory\n",
614 DEVNAME(sc));
615 goto noframe;
616 }
617 /* XXX hack, fix this */
618 if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
619 aprint_error("%s: improper frame alignment (%#llx) FIXME\n",
620 DEVNAME(sc), (long long int)MFIMEM_DVA(sc->sc_frames));
621 goto noframe;
622 }
623
624 /* sense memory */
625 sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
626 if (sc->sc_sense == NULL) {
627 aprint_error("%s: unable to allocate sense memory\n",
628 DEVNAME(sc));
629 goto nosense;
630 }
631
632 /* now that we have all memory bits go initialize ccbs */
633 if (mfi_init_ccb(sc)) {
634 aprint_error("%s: could not init ccb list\n", DEVNAME(sc));
635 goto noinit;
636 }
637
638 /* kickstart firmware with all addresses and pointers */
639 if (mfi_initialize_firmware(sc)) {
640 aprint_error("%s: could not initialize firmware\n",
641 DEVNAME(sc));
642 goto noinit;
643 }
644
645 if (mfi_get_info(sc)) {
646 aprint_error("%s: could not retrieve controller information\n",
647 DEVNAME(sc));
648 goto noinit;
649 }
650
651 aprint_normal("%s: logical drives %d, version %s, %dMB RAM\n",
652 DEVNAME(sc),
653 sc->sc_info.mci_lds_present,
654 sc->sc_info.mci_package_version,
655 sc->sc_info.mci_memory_size);
656
657 sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
658 sc->sc_max_ld = sc->sc_ld_cnt;
659 for (i = 0; i < sc->sc_ld_cnt; i++)
660 sc->sc_ld[i].ld_present = 1;
661
662 memset(adapt, 0, sizeof(*adapt));
663 adapt->adapt_dev = &sc->sc_dev;
664 adapt->adapt_nchannels = 1;
665 if (sc->sc_ld_cnt)
666 adapt->adapt_openings = sc->sc_max_cmds / sc->sc_ld_cnt;
667 else
668 adapt->adapt_openings = sc->sc_max_cmds;
669 adapt->adapt_max_periph = adapt->adapt_openings;
670 adapt->adapt_request = mfi_scsipi_request;
671 adapt->adapt_minphys = mfiminphys;
672 adapt->adapt_ioctl = mfi_scsi_ioctl;
673
674 memset(chan, 0, sizeof(*chan));
675 chan->chan_adapter = adapt;
676 chan->chan_bustype = &scsi_bustype;
677 chan->chan_channel = 0;
678 chan->chan_flags = 0;
679 chan->chan_nluns = 8;
680 chan->chan_ntargets = MFI_MAX_LD;
681 chan->chan_id = MFI_MAX_LD;
682
683 (void) config_found(&sc->sc_dev, &sc->sc_chan, scsiprint);
684
685 /* enable interrupts */
686 mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
687
688 #if NBIO > 0
689 if (bio_register(&sc->sc_dev, mfi_ioctl) != 0)
690 panic("%s: controller registration failed", DEVNAME(sc));
691 else
692 sc->sc_ioctl = mfi_ioctl;
693 if (mfi_create_sensors(sc) != 0)
694 aprint_error("%s: unable to create sensors\n", DEVNAME(sc));
695 #endif /* NBIO > 0 */
696
697 return (0);
698 noinit:
699 mfi_freemem(sc, sc->sc_sense);
700 nosense:
701 mfi_freemem(sc, sc->sc_frames);
702 noframe:
703 mfi_freemem(sc, sc->sc_pcq);
704 nopcq:
705 return (1);
706 }
707
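/*
 * mfi_despatch_cmd: sync the frame and sense memory and write the
 * frame's physical address (with its extra frame count folded into the
 * low bits) to the inbound queue port to start the command.
 */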
708 int
709 mfi_despatch_cmd(struct mfi_ccb *ccb)
710 {
711 struct mfi_softc *sc = ccb->ccb_sc;
712 DNPRINTF(MFI_D_CMD, "%s: mfi_despatch_cmd\n", DEVNAME(sc));
713
714 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
715 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
716 sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
717 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
718 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
719 MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);
720
721 mfi_write(ccb->ccb_sc, MFI_IQP, htole32((ccb->ccb_pframe >> 3) |
722 ccb->ccb_extra_frames));
723
724 return(0);
725 }
726
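/*
 * mfi_poll: issue a command with reply queue posting suppressed and
 * busy-wait (up to about 5 seconds) for the status byte to change.
 */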
727 int
728 mfi_poll(struct mfi_ccb *ccb)
729 {
730 struct mfi_softc *sc = ccb->ccb_sc;
731 struct mfi_frame_header *hdr;
732 int to = 0;
733
734 DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));
735
736 hdr = &ccb->ccb_frame->mfr_header;
737 hdr->mfh_cmd_status = 0xff;
738 hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
739
740 mfi_despatch_cmd(ccb);
741 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
742 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
743 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
744
745 while (hdr->mfh_cmd_status == 0xff) {
746 delay(1000);
747 if (to++ > 5000) /* XXX 5 seconds busywait sucks */
748 break;
749 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
750 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
751 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
752 }
753 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
754 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
755 sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
756
757 if (ccb->ccb_data != NULL) {
758 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
759 DEVNAME(sc));
760 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
761 ccb->ccb_dmamap->dm_mapsize,
762 (ccb->ccb_direction & MFI_DATA_IN) ?
763 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
764
765 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
766 }
767
768 if (hdr->mfh_cmd_status == 0xff) {
769 printf("%s: timeout on ccb %d\n", DEVNAME(sc),
770 hdr->mfh_context);
771 ccb->ccb_flags |= MFI_CCB_F_ERR;
772 return (1);
773 }
774
775 return (0);
776 }
777
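/*
 * mfi_intr: acknowledge the interrupt, then walk the reply queue from
 * consumer to producer and call each completed ccb's done routine.
 */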
778 int
779 mfi_intr(void *arg)
780 {
781 struct mfi_softc *sc = arg;
782 struct mfi_prod_cons *pcq;
783 struct mfi_ccb *ccb;
784 uint32_t status, producer, consumer, ctx;
785 int claimed = 0;
786
787 status = mfi_read(sc, MFI_OSTS);
788 if ((status & MFI_OSTS_INTR_VALID) == 0)
789 return (claimed);
790 /* write status back to acknowledge interrupt */
791 mfi_write(sc, MFI_OSTS, status);
792
793 pcq = MFIMEM_KVA(sc->sc_pcq);
794
795 DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
796 (u_long)sc, (u_long)pcq);
797
798 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
799 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
800 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
801
802 producer = pcq->mpc_producer;
803 consumer = pcq->mpc_consumer;
804
805 while (consumer != producer) {
806 DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
807 DEVNAME(sc), producer, consumer);
808
809 ctx = pcq->mpc_reply_q[consumer];
810 pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
811 if (ctx == MFI_INVALID_CTX)
812 printf("%s: invalid context, p: %d c: %d\n",
813 DEVNAME(sc), producer, consumer);
814 else {
815 /* XXX remove from queue and call scsi_done */
816 ccb = &sc->sc_ccb[ctx];
817 DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
818 DEVNAME(sc), ctx);
819 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
820 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
821 sc->sc_frames_size,
822 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
823 ccb->ccb_done(ccb);
824
825 claimed = 1;
826 }
827 consumer++;
828 if (consumer == (sc->sc_max_cmds + 1))
829 consumer = 0;
830 }
831
832 pcq->mpc_consumer = consumer;
833 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
834 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
835 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
836
837 return (claimed);
838 }
839
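/*
 * mfi_scsi_io: build an MFI_CMD_LD_READ/LD_WRITE frame for a block I/O
 * request against a logical drive.
 */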
840 int
841 mfi_scsi_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint32_t blockno,
842 uint32_t blockcnt)
843 {
844 struct scsipi_periph *periph = xs->xs_periph;
845 struct mfi_io_frame *io;
846
847 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
848 periph->periph_channel->chan_adapter->adapt_dev->dv_xname,
849 periph->periph_target);
850
851 if (!xs->data)
852 return (1);
853
854 io = &ccb->ccb_frame->mfr_io;
855 if (xs->xs_control & XS_CTL_DATA_IN) {
856 io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
857 ccb->ccb_direction = MFI_DATA_IN;
858 } else {
859 io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
860 ccb->ccb_direction = MFI_DATA_OUT;
861 }
862 io->mif_header.mfh_target_id = periph->periph_target;
863 io->mif_header.mfh_timeout = 0;
864 io->mif_header.mfh_flags = 0;
865 io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
866 	io->mif_header.mfh_data_len = blockcnt;
867 io->mif_lba_hi = 0;
868 io->mif_lba_lo = blockno;
869 io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
870 io->mif_sense_addr_hi = 0;
871
872 ccb->ccb_done = mfi_scsi_xs_done;
873 ccb->ccb_xs = xs;
874 ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
875 ccb->ccb_sgl = &io->mif_sgl;
876 ccb->ccb_data = xs->data;
877 ccb->ccb_len = xs->datalen;
878
879 	if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
880 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
881 return (1);
882
883 return (0);
884 }
885
886 void
887 mfi_scsi_xs_done(struct mfi_ccb *ccb)
888 {
889 struct scsipi_xfer *xs = ccb->ccb_xs;
890 struct mfi_softc *sc = ccb->ccb_sc;
891 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
892
893 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
894 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
895
896 if (xs->data != NULL) {
897 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
898 DEVNAME(sc));
899 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
900 ccb->ccb_dmamap->dm_mapsize,
901 (xs->xs_control & XS_CTL_DATA_IN) ?
902 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
903
904 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
905 }
906
907 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
908 xs->error = XS_DRIVER_STUFFUP;
909 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
910 DEVNAME(sc), hdr->mfh_cmd_status);
911
912 if (hdr->mfh_scsi_status != 0) {
913 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
914 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
915 MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
916 DNPRINTF(MFI_D_INTR,
917 "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
918 DEVNAME(sc), hdr->mfh_scsi_status,
919 (u_long)&xs->sense, (u_long)ccb->ccb_sense);
920 memset(&xs->sense, 0, sizeof(xs->sense));
921 memcpy(&xs->sense, ccb->ccb_sense,
922 sizeof(struct scsi_sense_data));
923 xs->error = XS_SENSE;
924 }
925 } else {
926 xs->error = XS_NOERROR;
927 xs->status = SCSI_OK;
928 xs->resid = 0;
929 }
930
931 mfi_put_ccb(ccb);
932 scsipi_done(xs);
933 }
934
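/*
 * mfi_scsi_ld: wrap any other SCSI CDB in an MFI_CMD_LD_SCSI_IO
 * pass-through frame and let the firmware deal with it.
 */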
935 int
936 mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
937 {
938 struct mfi_pass_frame *pf;
939 struct scsipi_periph *periph = xs->xs_periph;
940
941 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
942 periph->periph_channel->chan_adapter->adapt_dev->dv_xname,
943 periph->periph_target);
944
945 pf = &ccb->ccb_frame->mfr_pass;
946 pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
947 pf->mpf_header.mfh_target_id = periph->periph_target;
948 pf->mpf_header.mfh_lun_id = 0;
949 pf->mpf_header.mfh_cdb_len = xs->cmdlen;
950 pf->mpf_header.mfh_timeout = 0;
951 	pf->mpf_header.mfh_data_len = xs->datalen;	/* XXX */
952 pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
953
954 pf->mpf_sense_addr_hi = 0;
955 pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);
956
957 memset(pf->mpf_cdb, 0, 16);
958 memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);
959
960 ccb->ccb_done = mfi_scsi_xs_done;
961 ccb->ccb_xs = xs;
962 ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
963 ccb->ccb_sgl = &pf->mpf_sgl;
964
965 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
966 ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
967 MFI_DATA_IN : MFI_DATA_OUT;
968 else
969 ccb->ccb_direction = MFI_DATA_NONE;
970
971 if (xs->data) {
972 ccb->ccb_data = xs->data;
973 ccb->ccb_len = xs->datalen;
974
975 		if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
976 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
977 return (1);
978 }
979
980 return (0);
981 }
982
983 void
984 mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
985 void *arg)
986 {
987 struct scsipi_periph *periph;
988 struct scsipi_xfer *xs;
989 struct scsipi_adapter *adapt = chan->chan_adapter;
990 struct mfi_softc *sc = (void *) adapt->adapt_dev;
991 struct mfi_ccb *ccb;
992 struct scsi_rw_6 *rw;
993 struct scsipi_rw_10 *rwb;
994 uint32_t blockno, blockcnt;
995 uint8_t target;
996 uint8_t mbox[MFI_MBOX_SIZE];
997 int s;
998
999 switch (req) {
1000 case ADAPTER_REQ_GROW_RESOURCES:
1001 /* Not supported. */
1002 return;
1003 case ADAPTER_REQ_SET_XFER_MODE:
1004 /* Not supported. */
1005 return;
1006 case ADAPTER_REQ_RUN_XFER:
1007 break;
1008 }
1009
1010 xs = arg;
1011
1012 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x\n",
1013 DEVNAME(sc), req, xs->cmd->opcode);
1014
1015 periph = xs->xs_periph;
1016 target = periph->periph_target;
1017
1018 s = splbio();
1019 if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
1020 periph->periph_lun != 0) {
1021 DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1022 DEVNAME(sc), target);
1023 xs->error = XS_SELTIMEOUT;
1024 scsipi_done(xs);
1025 splx(s);
1026 return;
1027 }
1028
1029 if ((ccb = mfi_get_ccb(sc)) == NULL) {
1030 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
1031 xs->error = XS_RESOURCE_SHORTAGE;
1032 scsipi_done(xs);
1033 splx(s);
1034 return;
1035 }
1036
1037 switch (xs->cmd->opcode) {
1038 /* IO path */
1039 case READ_10:
1040 case WRITE_10:
1041 rwb = (struct scsipi_rw_10 *)xs->cmd;
1042 blockno = _4btol(rwb->addr);
1043 blockcnt = _2btol(rwb->length);
1044 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1045 mfi_put_ccb(ccb);
1046 goto stuffup;
1047 }
1048 break;
1049
1050 case SCSI_READ_6_COMMAND:
1051 case SCSI_WRITE_6_COMMAND:
1052 rw = (struct scsi_rw_6 *)xs->cmd;
1053 blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
1054 blockcnt = rw->length ? rw->length : 0x100;
1055 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1056 mfi_put_ccb(ccb);
1057 goto stuffup;
1058 }
1059 break;
1060
1061 case SCSI_SYNCHRONIZE_CACHE_10:
1062 mfi_put_ccb(ccb); /* we don't need this */
1063
1064 mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1065 if (mfi_mgmt(sc, MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE,
1066 0, NULL, mbox))
1067 goto stuffup;
1068 xs->error = XS_NOERROR;
1069 xs->status = SCSI_OK;
1070 xs->resid = 0;
1071 scsipi_done(xs);
1072 splx(s);
1073 return;
1074 /* NOTREACHED */
1075
1076 	/* hand it off to the firmware and let it deal with it */
1077 case SCSI_TEST_UNIT_READY:
1078 /* save off sd? after autoconf */
1079 if (!cold) /* XXX bogus */
1080 strlcpy(sc->sc_ld[target].ld_dev, sc->sc_dev.dv_xname,
1081 sizeof(sc->sc_ld[target].ld_dev));
1082 /* FALLTHROUGH */
1083
1084 default:
1085 if (mfi_scsi_ld(ccb, xs)) {
1086 mfi_put_ccb(ccb);
1087 goto stuffup;
1088 }
1089 break;
1090 }
1091
1092 DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1093
1094 if (xs->xs_control & XS_CTL_POLL) {
1095 if (mfi_poll(ccb)) {
1096 /* XXX check for sense in ccb->ccb_sense? */
1097 printf("%s: mfi_scsipi_request poll failed\n",
1098 DEVNAME(sc));
1099 mfi_put_ccb(ccb);
1100 bzero(&xs->sense, sizeof(xs->sense));
1101 xs->sense.scsi_sense.response_code =
1102 SSD_RCODE_VALID | SSD_RCODE_CURRENT;
1103 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
1104 xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
1105 xs->error = XS_SENSE;
1106 xs->status = SCSI_CHECK;
1107 } else {
1108 DNPRINTF(MFI_D_DMA,
1109 "%s: mfi_scsipi_request poll complete %d\n",
1110 DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
1111 xs->error = XS_NOERROR;
1112 xs->status = SCSI_OK;
1113 xs->resid = 0;
1114 }
1115 mfi_put_ccb(ccb);
1116 scsipi_done(xs);
1117 splx(s);
1118 return;
1119 }
1120
1121 mfi_despatch_cmd(ccb);
1122
1123 DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
1124 ccb->ccb_dmamap->dm_nsegs);
1125
1126 splx(s);
1127 return;
1128
1129 stuffup:
1130 xs->error = XS_DRIVER_STUFFUP;
1131 scsipi_done(xs);
1132 splx(s);
1133 }
1134
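/*
 * mfi_create_sgl: load the ccb's data buffer into its DMA map and fill
 * in the 32 bit scatter/gather list, adjusting the frame size and extra
 * frame count to cover the SGL.
 */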
1135 int
1136 mfi_create_sgl(struct mfi_ccb *ccb, int flags)
1137 {
1138 struct mfi_softc *sc = ccb->ccb_sc;
1139 struct mfi_frame_header *hdr;
1140 bus_dma_segment_t *sgd;
1141 union mfi_sgl *sgl;
1142 int error, i;
1143
1144 DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
1145 (u_long)ccb->ccb_data);
1146
1147 if (!ccb->ccb_data)
1148 return (1);
1149
1150 error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
1151 ccb->ccb_data, ccb->ccb_len, NULL, flags);
1152 if (error) {
1153 if (error == EFBIG)
1154 printf("more than %d dma segs\n",
1155 sc->sc_max_sgl);
1156 else
1157 printf("error %d loading dma map\n", error);
1158 return (1);
1159 }
1160
1161 hdr = &ccb->ccb_frame->mfr_header;
1162 sgl = ccb->ccb_sgl;
1163 sgd = ccb->ccb_dmamap->dm_segs;
1164 for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1165 sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1166 sgl->sg32[i].len = htole32(sgd[i].ds_len);
1167 DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
1168 DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1169 }
1170
1171 if (ccb->ccb_direction == MFI_DATA_IN) {
1172 hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1173 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1174 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1175 } else {
1176 hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1177 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1178 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1179 }
1180
1181 hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1182 /* for 64 bit io make the sizeof a variable to hold whatever sg size */
1183 ccb->ccb_frame_size += sizeof(struct mfi_sg32) *
1184 ccb->ccb_dmamap->dm_nsegs;
1185 ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1186
1187 DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
1188 " dm_nsegs: %d extra_frames: %d\n",
1189 DEVNAME(sc),
1190 hdr->mfh_sg_count,
1191 ccb->ccb_frame_size,
1192 sc->sc_frames_size,
1193 ccb->ccb_dmamap->dm_nsegs,
1194 ccb->ccb_extra_frames);
1195
1196 return (0);
1197 }
1198
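/*
 * mfi_mgmt: issue a DCMD management command and wait for it to finish,
 * polling while cold and sleeping on the ccb otherwise.  "mbox" carries
 * optional DCMD arguments, e.g. the cache flush issued from
 * mfi_scsipi_request passes its flags in mbox[0].  Returns 0 on success.
 */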
1199 int
1200 mfi_mgmt(struct mfi_softc *sc, uint32_t opc, uint32_t dir, uint32_t len,
1201 void *buf, uint8_t *mbox)
1202 {
1203 struct mfi_ccb *ccb;
1204 struct mfi_dcmd_frame *dcmd;
1205 int rv = 1;
1206
1207 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(sc), opc);
1208
1209 if ((ccb = mfi_get_ccb(sc)) == NULL)
1210 return (rv);
1211
1212 dcmd = &ccb->ccb_frame->mfr_dcmd;
1213 memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1214 dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1215 dcmd->mdf_header.mfh_timeout = 0;
1216
1217 dcmd->mdf_opcode = opc;
1218 dcmd->mdf_header.mfh_data_len = 0;
1219 ccb->ccb_direction = dir;
1220 ccb->ccb_done = mfi_mgmt_done;
1221
1222 ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1223
1224 /* handle special opcodes */
1225 if (mbox)
1226 memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1227
1228 if (dir != MFI_DATA_NONE) {
1229 dcmd->mdf_header.mfh_data_len = len;
1230 ccb->ccb_data = buf;
1231 ccb->ccb_len = len;
1232 ccb->ccb_sgl = &dcmd->mdf_sgl;
1233
1234 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
1235 goto done;
1236 }
1237
1238 if (cold) {
1239 if (mfi_poll(ccb))
1240 goto done;
1241 } else {
1242 mfi_despatch_cmd(ccb);
1243
1244 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt sleeping\n", DEVNAME(sc));
1245 while (ccb->ccb_state != MFI_CCB_DONE)
1246 tsleep(ccb, PRIBIO, "mfi_mgmt", 0);
1247
1248 if (ccb->ccb_flags & MFI_CCB_F_ERR)
1249 goto done;
1250 }
1251
1252 rv = 0;
1253
1254 done:
1255 mfi_put_ccb(ccb);
1256 return (rv);
1257 }
1258
1259 void
1260 mfi_mgmt_done(struct mfi_ccb *ccb)
1261 {
1262 struct mfi_softc *sc = ccb->ccb_sc;
1263 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1264
1265 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
1266 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
1267
1268 if (ccb->ccb_data != NULL) {
1269 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
1270 DEVNAME(sc));
1271 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1272 ccb->ccb_dmamap->dm_mapsize,
1273 (ccb->ccb_direction & MFI_DATA_IN) ?
1274 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1275
1276 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1277 }
1278
1279 if (hdr->mfh_cmd_status != MFI_STAT_OK)
1280 ccb->ccb_flags |= MFI_CCB_F_ERR;
1281
1282 ccb->ccb_state = MFI_CCB_DONE;
1283
1284 wakeup(ccb);
1285 }
1286
1287
1288 int
1289 mfi_scsi_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
1290 int flag, struct proc *p)
1291 {
1292 return (ENOTTY);
1293 }
1294
1295 #if NBIO > 0
1296 int
1297 mfi_ioctl(struct device *dev, u_long cmd, void *addr)
1298 {
1299 struct mfi_softc *sc = (struct mfi_softc *)dev;
1300 int error = 0;
1301
1302 int s = splbio();
1303 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1304
1305 switch (cmd) {
1306 case BIOCINQ:
1307 DNPRINTF(MFI_D_IOCTL, "inq\n");
1308 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1309 break;
1310
1311 case BIOCVOL:
1312 DNPRINTF(MFI_D_IOCTL, "vol\n");
1313 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1314 break;
1315
1316 case BIOCDISK:
1317 DNPRINTF(MFI_D_IOCTL, "disk\n");
1318 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1319 break;
1320
1321 case BIOCALARM:
1322 DNPRINTF(MFI_D_IOCTL, "alarm\n");
1323 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1324 break;
1325
1326 case BIOCBLINK:
1327 DNPRINTF(MFI_D_IOCTL, "blink\n");
1328 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1329 break;
1330
1331 case BIOCSETSTATE:
1332 DNPRINTF(MFI_D_IOCTL, "setstate\n");
1333 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1334 break;
1335
1336 default:
1337 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1338 error = EINVAL;
1339 }
1340 splx(s);
1341 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
1342 return (error);
1343 }
1344
1345 int
1346 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1347 {
1348 struct mfi_conf *cfg;
1349 int rv = EINVAL;
1350
1351 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1352
1353 if (mfi_get_info(sc)) {
1354 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
1355 DEVNAME(sc));
1356 return (EIO);
1357 }
1358
1359 /* get figures */
1360 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1361 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1362 goto freeme;
1363
1364 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1365 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1366 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1367
1368 rv = 0;
1369 freeme:
1370 free(cfg, M_DEVBUF);
1371 return (rv);
1372 }
1373
1374 int
1375 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
1376 {
1377 int i, per, rv = EINVAL;
1378 uint8_t mbox[MFI_MBOX_SIZE];
1379
1380 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
1381 DEVNAME(sc), bv->bv_volid);
1382
1383 if (mfi_mgmt(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1384 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1385 goto done;
1386
1387 i = bv->bv_volid;
1388 mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1389 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
1390 DEVNAME(sc), mbox[0]);
1391
1392 if (mfi_mgmt(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
1393 sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox))
1394 goto done;
1395
1396 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
1397 /* go do hotspares */
1398 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
1399 goto done;
1400 }
1401
1402 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
1403
1404 switch(sc->sc_ld_list.mll_list[i].mll_state) {
1405 case MFI_LD_OFFLINE:
1406 bv->bv_status = BIOC_SVOFFLINE;
1407 break;
1408
1409 case MFI_LD_PART_DEGRADED:
1410 case MFI_LD_DEGRADED:
1411 bv->bv_status = BIOC_SVDEGRADED;
1412 break;
1413
1414 case MFI_LD_ONLINE:
1415 bv->bv_status = BIOC_SVONLINE;
1416 break;
1417
1418 default:
1419 bv->bv_status = BIOC_SVINVALID;
1420 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
1421 DEVNAME(sc),
1422 sc->sc_ld_list.mll_list[i].mll_state);
1423 }
1424
1425 /* additional status can modify MFI status */
1426 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
1427 case MFI_LD_PROG_CC:
1428 case MFI_LD_PROG_BGI:
1429 bv->bv_status = BIOC_SVSCRUB;
1430 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
1431 bv->bv_percent = (per * 100) / 0xffff;
1432 bv->bv_seconds =
1433 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
1434 break;
1435
1436 case MFI_LD_PROG_FGI:
1437 case MFI_LD_PROG_RECONSTRUCT:
1438 /* nothing yet */
1439 break;
1440 }
1441
1442 /*
1443 	 * The RAID levels are determined per the SNIA DDF spec; this is only
1444 	 * a subset that is valid for the MFI controller.
1445 */
1446 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
1447 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
1448 MFI_DDF_SRL_SPANNED)
1449 bv->bv_level *= 10;
1450
1451 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
1452 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
1453
1454 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
1455
1456 rv = 0;
1457 done:
1458 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
1459 DEVNAME(sc), rv);
1460 return (rv);
1461 }
1462
1463 int
1464 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
1465 {
1466 struct mfi_conf *cfg;
1467 struct mfi_array *ar;
1468 struct mfi_ld_cfg *ld;
1469 struct mfi_pd_details *pd;
1470 struct scsipi_inquiry_data *inqbuf;
1471 char vend[8+16+4+1];
1472 int i, rv = EINVAL;
1473 int arr, vol, disk;
1474 uint32_t size;
1475 uint8_t mbox[MFI_MBOX_SIZE];
1476
1477 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
1478 DEVNAME(sc), bd->bd_diskid);
1479
1480 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1481
1482 /* send single element command to retrieve size for full structure */
1483 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1484 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1485 goto freeme;
1486
1487 size = cfg->mfc_size;
1488 free(cfg, M_DEVBUF);
1489
1490 /* memory for read config */
1491 cfg = malloc(size, M_DEVBUF, M_WAITOK);
1492 memset(cfg, 0, size);
1493 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
1494 goto freeme;
1495
1496 ar = cfg->mfc_array;
1497
1498 /* calculate offset to ld structure */
1499 ld = (struct mfi_ld_cfg *)(
1500 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1501 cfg->mfc_array_size * cfg->mfc_no_array);
1502
1503 vol = bd->bd_volid;
1504
1505 if (vol >= cfg->mfc_no_ld) {
1506 /* do hotspares */
1507 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
1508 goto freeme;
1509 }
1510
1511 /* find corresponding array for ld */
1512 for (i = 0, arr = 0; i < vol; i++)
1513 arr += ld[i].mlc_parm.mpa_span_depth;
1514
1515 /* offset disk into pd list */
1516 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
1517
1518 /* offset array index into the next spans */
1519 arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
1520
1521 bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
1522 switch (ar[arr].pd[disk].mar_pd_state){
1523 case MFI_PD_UNCONFIG_GOOD:
1524 bd->bd_status = BIOC_SDUNUSED;
1525 break;
1526
1527 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
1528 bd->bd_status = BIOC_SDHOTSPARE;
1529 break;
1530
1531 case MFI_PD_OFFLINE:
1532 bd->bd_status = BIOC_SDOFFLINE;
1533 break;
1534
1535 case MFI_PD_FAILED:
1536 bd->bd_status = BIOC_SDFAILED;
1537 break;
1538
1539 case MFI_PD_REBUILD:
1540 bd->bd_status = BIOC_SDREBUILD;
1541 break;
1542
1543 case MFI_PD_ONLINE:
1544 bd->bd_status = BIOC_SDONLINE;
1545 break;
1546
1547 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
1548 default:
1549 bd->bd_status = BIOC_SDINVALID;
1550 break;
1551
1552 }
1553
1554 /* get the remaining fields */
1555 *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
1556 memset(pd, 0, sizeof(*pd));
1557 if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1558 sizeof *pd, pd, mbox))
1559 goto freeme;
1560
1561 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
1562
1563 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
1564 bd->bd_channel = pd->mpd_enc_idx;
1565
1566 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1567 memcpy(vend, inqbuf->vendor, sizeof vend - 1);
1568 vend[sizeof vend - 1] = '\0';
1569 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
1570
1571 /* XXX find a way to retrieve serial nr from drive */
1572 /* XXX find a way to get bd_procdev */
1573
1574 rv = 0;
1575 freeme:
1576 free(pd, M_DEVBUF);
1577 free(cfg, M_DEVBUF);
1578
1579 return (rv);
1580 }
1581
1582 int
1583 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1584 {
1585 uint32_t opc, dir = MFI_DATA_NONE;
1586 int rv = 0;
1587 int8_t ret;
1588
1589 switch(ba->ba_opcode) {
1590 case BIOC_SADISABLE:
1591 opc = MR_DCMD_SPEAKER_DISABLE;
1592 break;
1593
1594 case BIOC_SAENABLE:
1595 opc = MR_DCMD_SPEAKER_ENABLE;
1596 break;
1597
1598 case BIOC_SASILENCE:
1599 opc = MR_DCMD_SPEAKER_SILENCE;
1600 break;
1601
1602 case BIOC_GASTATUS:
1603 opc = MR_DCMD_SPEAKER_GET;
1604 dir = MFI_DATA_IN;
1605 break;
1606
1607 case BIOC_SATEST:
1608 opc = MR_DCMD_SPEAKER_TEST;
1609 break;
1610
1611 default:
1612 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1613 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1614 return (EINVAL);
1615 }
1616
1617 if (mfi_mgmt(sc, opc, dir, sizeof(ret), &ret, NULL))
1618 rv = EINVAL;
1619 else
1620 if (ba->ba_opcode == BIOC_GASTATUS)
1621 ba->ba_status = ret;
1622 else
1623 ba->ba_status = 0;
1624
1625 return (rv);
1626 }
1627
1628 int
1629 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1630 {
1631 int i, found, rv = EINVAL;
1632 uint8_t mbox[MFI_MBOX_SIZE];
1633 uint32_t cmd;
1634 struct mfi_pd_list *pd;
1635
1636 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1637 bb->bb_status);
1638
1639 /* channel 0 means not in an enclosure so can't be blinked */
1640 if (bb->bb_channel == 0)
1641 return (EINVAL);
1642
1643 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1644
1645 if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1646 MFI_PD_LIST_SIZE, pd, NULL))
1647 goto done;
1648
1649 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1650 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1651 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1652 found = 1;
1653 break;
1654 }
1655
1656 if (!found)
1657 goto done;
1658
1659 memset(mbox, 0, sizeof mbox);
1660
1661 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1662
1663 switch (bb->bb_status) {
1664 case BIOC_SBUNBLINK:
1665 cmd = MR_DCMD_PD_UNBLINK;
1666 break;
1667
1668 case BIOC_SBBLINK:
1669 cmd = MR_DCMD_PD_BLINK;
1670 break;
1671
1672 case BIOC_SBALARM:
1673 default:
1674 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1675 "opcode %x\n", DEVNAME(sc), bb->bb_status);
1676 goto done;
1677 }
1678
1679
1680 if (mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1681 goto done;
1682
1683 rv = 0;
1684 done:
1685 free(pd, M_DEVBUF);
1686 return (rv);
1687 }
1688
1689 int
1690 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
1691 {
1692 struct mfi_pd_list *pd;
1693 int i, found, rv = EINVAL;
1694 uint8_t mbox[MFI_MBOX_SIZE];
1695 uint32_t cmd;
1696
1697 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
1698 bs->bs_status);
1699
1700 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1701
1702 if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1703 MFI_PD_LIST_SIZE, pd, NULL))
1704 goto done;
1705
1706 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1707 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
1708 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
1709 found = 1;
1710 break;
1711 }
1712
1713 if (!found)
1714 goto done;
1715
1716 memset(mbox, 0, sizeof mbox);
1717
1718 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1719
1720 switch (bs->bs_status) {
1721 case BIOC_SSONLINE:
1722 mbox[2] = MFI_PD_ONLINE;
1723 cmd = MD_DCMD_PD_SET_STATE;
1724 break;
1725
1726 case BIOC_SSOFFLINE:
1727 mbox[2] = MFI_PD_OFFLINE;
1728 cmd = MD_DCMD_PD_SET_STATE;
1729 break;
1730
1731 case BIOC_SSHOTSPARE:
1732 mbox[2] = MFI_PD_HOTSPARE;
1733 cmd = MD_DCMD_PD_SET_STATE;
1734 break;
1735 /*
1736 case BIOC_SSREBUILD:
1737 cmd = MD_DCMD_PD_REBUILD;
1738 break;
1739 */
1740 default:
1741 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
1742 "opcode %x\n", DEVNAME(sc), bs->bs_status);
1743 goto done;
1744 }
1745
1746
1747 	if (mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1748 goto done;
1749
1750 rv = 0;
1751 done:
1752 free(pd, M_DEVBUF);
1753 return (rv);
1754 }
1755
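/*
 * mfi_bio_hs: translate a volume id beyond the last logical drive into
 * the corresponding hotspare and fill in the bioc_vol or bioc_disk
 * (selected by "type") handed in through bio_hs.
 */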
1756 int
1757 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
1758 {
1759 struct mfi_conf *cfg;
1760 struct mfi_hotspare *hs;
1761 struct mfi_pd_details *pd;
1762 struct bioc_disk *sdhs;
1763 struct bioc_vol *vdhs;
1764 struct scsipi_inquiry_data *inqbuf;
1765 char vend[8+16+4+1];
1766 int i, rv = EINVAL;
1767 uint32_t size;
1768 uint8_t mbox[MFI_MBOX_SIZE];
1769
1770 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
1771
1772 if (!bio_hs)
1773 return (EINVAL);
1774
1775 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
1776
1777 /* send single element command to retrieve size for full structure */
1778 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1779 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1780 goto freeme;
1781
1782 size = cfg->mfc_size;
1783 free(cfg, M_DEVBUF);
1784
1785 /* memory for read config */
1786 cfg = malloc(size, M_DEVBUF, M_WAITOK);
1787 memset(cfg, 0, size);
1788 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
1789 goto freeme;
1790
1791 /* calculate offset to hs structure */
1792 hs = (struct mfi_hotspare *)(
1793 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1794 cfg->mfc_array_size * cfg->mfc_no_array +
1795 cfg->mfc_ld_size * cfg->mfc_no_ld);
1796
1797 if (volid < cfg->mfc_no_ld)
1798 goto freeme; /* not a hotspare */
1799
1800 	if (volid >= (cfg->mfc_no_ld + cfg->mfc_no_hs))
1801 goto freeme; /* not a hotspare */
1802
1803 /* offset into hotspare structure */
1804 i = volid - cfg->mfc_no_ld;
1805
1806 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
1807 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
1808 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
1809
1810 /* get pd fields */
1811 memset(mbox, 0, sizeof mbox);
1812 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
1813 if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1814 sizeof *pd, pd, mbox)) {
1815 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
1816 DEVNAME(sc));
1817 goto freeme;
1818 }
1819
1820 switch (type) {
1821 case MFI_MGMT_VD:
1822 vdhs = bio_hs;
1823 vdhs->bv_status = BIOC_SVONLINE;
1824 vdhs->bv_size = pd->mpd_size / 2; /* XXX why? / 2 */
1825 vdhs->bv_level = -1; /* hotspare */
1826 vdhs->bv_nodisk = 1;
1827 break;
1828
1829 case MFI_MGMT_SD:
1830 sdhs = bio_hs;
1831 sdhs->bd_status = BIOC_SDHOTSPARE;
1832 sdhs->bd_size = pd->mpd_size / 2; /* XXX why? / 2 */
1833 sdhs->bd_channel = pd->mpd_enc_idx;
1834 sdhs->bd_target = pd->mpd_enc_slot;
1835 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
1836 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
1837 vend[sizeof vend - 1] = '\0';
1838 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
1839 break;
1840
1841 default:
1842 goto freeme;
1843 }
1844
1845 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
1846 rv = 0;
1847 freeme:
1848 free(pd, M_DEVBUF);
1849 free(cfg, M_DEVBUF);
1850
1851 return (rv);
1852 }
1853
1854 int
1855 mfi_create_sensors(struct mfi_softc *sc)
1856 {
1857 int i;
1858 int nsensors = sc->sc_ld_cnt;
1859
1860 sc->sc_sensor_data =
1861 malloc(sizeof(struct envsys_data) * nsensors,
1862 M_DEVBUF, M_NOWAIT | M_ZERO);
1863 if (sc->sc_sensor_data == NULL) {
1864 		aprint_error("%s: can't allocate envsys_data\n",
1865 DEVNAME(sc));
1866 return(ENOMEM);
1867 }
1868
1869 for (i = 0; i < nsensors; i++) {
1870 sc->sc_sensor_data[i].sensor = i;
1871 sc->sc_sensor_data[i].units = ENVSYS_DRIVE;
1872 sc->sc_sensor_data[i].state = ENVSYS_SVALID;
1873 sc->sc_sensor_data[i].monitor = true;
1874 /* Enable monitoring for drive state changes */
1875 sc->sc_sensor_data[i].flags |= ENVSYS_FMONDRVSTATE;
1876 /* Disable userland monitoring */
1877 sc->sc_sensor_data[i].flags |= ENVSYS_FMONNOTSUPP;
1878 /* logical drives */
1879 snprintf(sc->sc_sensor_data[i].desc,
1880 sizeof(sc->sc_sensor_data[i].desc), "%s:%d",
1881 DEVNAME(sc), i);
1882 }
1883
1884 sc->sc_envsys.sme_name = DEVNAME(sc);
1885 sc->sc_envsys.sme_cookie = sc;
1886 sc->sc_envsys.sme_gtredata = mfi_sensor_gtredata;
1887 sc->sc_envsys.sme_nsensors = sc->sc_ld_cnt;
1888 if (sysmon_envsys_register(&sc->sc_envsys)) {
1889 printf("%s: unable to register with sysmon\n", DEVNAME(sc));
1890 return(1);
1891 }
1892 return (0);
1893 }
1894
1895 int
1896 mfi_sensor_gtredata(struct sysmon_envsys *sme, envsys_data_t *edata)
1897 {
1898 struct mfi_softc *sc = sme->sme_cookie;
1899 struct bioc_vol bv;
1900 int s;
1901
1902 if (edata->sensor >= sc->sc_ld_cnt)
1903 return EINVAL;
1904
1905 bzero(&bv, sizeof(bv));
1906 bv.bv_volid = edata->sensor;
1907 s = splbio();
1908 if (mfi_ioctl_vol(sc, &bv)) {
1909 splx(s);
1910 return EIO;
1911 }
1912 splx(s);
1913
1914 switch(bv.bv_status) {
1915 case BIOC_SVOFFLINE:
1916 edata->value_cur = ENVSYS_DRIVE_FAIL;
1917 edata->state = ENVSYS_SCRITICAL;
1918 break;
1919
1920 case BIOC_SVDEGRADED:
1921 edata->value_cur = ENVSYS_DRIVE_PFAIL;
1922 edata->state = ENVSYS_SCRITICAL;
1923 break;
1924
1925 case BIOC_SVSCRUB:
1926 case BIOC_SVONLINE:
1927 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1928 edata->state = ENVSYS_SVALID;
1929 break;
1930
1931 case BIOC_SVINVALID:
1932 	/* FALLTHROUGH */
1933 default:
1934 edata->value_cur = 0; /* unknown */
1935 edata->state = ENVSYS_SINVALID;
1936 }
1937
1939 edata->units = ENVSYS_DRIVE;
1940 return 0;
1941 }
1942
1943 #endif /* NBIO > 0 */
1944