1 /*	$NetBSD: mfi.c,v 1.3 2007/03/04 06:01:58 christos Exp $ */
2 /* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
3 /*
4 * Copyright (c) 2006 Marco Peereboom <marco (at) peereboom.us>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include <sys/cdefs.h>
20 __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.3 2007/03/04 06:01:58 christos Exp $");
21
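/*
 * Driver for LSI Logic MegaRAID SAS controllers speaking the MegaRAID
 * Firmware Interface (MFI).  Logical drives are exposed to the system
 * as targets on a scsipi channel.
 */
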
22 /* #include "bio.h" XXX */
23
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/buf.h>
27 #include <sys/ioctl.h>
28 #include <sys/device.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/proc.h>
32
33 #include <uvm/uvm_param.h>
34
35 #include <machine/bus.h>
36
37 #include <dev/scsipi/scsipi_all.h>
38 #include <dev/scsipi/scsi_all.h>
39 #include <dev/scsipi/scsi_spc.h>
40 #include <dev/scsipi/scsipi_disk.h>
41 #include <dev/scsipi/scsi_disk.h>
42 #include <dev/scsipi/scsiconf.h>
43
44 #include <dev/ic/mfireg.h>
45 #include <dev/ic/mfivar.h>
46
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #include <sys/sensors.h>
50 #endif /* NBIO > 0 */
51
52 #ifdef MFI_DEBUG
53 uint32_t mfi_debug = 0
54 /* | MFI_D_CMD */
55 /* | MFI_D_INTR */
56 /* | MFI_D_MISC */
57 /* | MFI_D_DMA */
58 | MFI_D_IOCTL
59 /* | MFI_D_RW */
60 /* | MFI_D_MEM */
61 /* | MFI_D_CCB */
62 ;
63 #endif
64
65 void mfi_scsipi_request(struct scsipi_channel *,
66 scsipi_adapter_req_t, void *);
67 int mfi_scsi_ioctl(struct scsipi_channel *, u_long, void *, int,
68 struct proc *);
69 void mfiminphys(struct buf *bp);
70
71 struct mfi_ccb *mfi_get_ccb(struct mfi_softc *);
72 void mfi_put_ccb(struct mfi_ccb *);
73 int mfi_init_ccb(struct mfi_softc *);
74
75 struct mfi_mem *mfi_allocmem(struct mfi_softc *, size_t);
76 void mfi_freemem(struct mfi_softc *, struct mfi_mem *);
77
78 int mfi_transition_firmware(struct mfi_softc *);
79 int mfi_initialize_firmware(struct mfi_softc *);
80 int mfi_get_info(struct mfi_softc *);
81 uint32_t mfi_read(struct mfi_softc *, bus_size_t);
82 void mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
83 int mfi_poll(struct mfi_ccb *);
84 int mfi_despatch_cmd(struct mfi_ccb *);
85 int mfi_create_sgl(struct mfi_ccb *, int);
86
87 /* commands */
88 int mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
89 int mfi_scsi_io(struct mfi_ccb *, struct scsipi_xfer *, uint32_t,
90 uint32_t);
91 void mfi_scsi_xs_done(struct mfi_ccb *);
92 int mfi_mgmt(struct mfi_softc *, uint32_t, uint32_t, uint32_t,
93 void *, uint8_t *);
94 void mfi_mgmt_done(struct mfi_ccb *);
95
96 #if NBIO > 0
97 int mfi_ioctl(struct device *, u_long, void *);
98 int mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
99 int mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
100 int mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
101 int mfi_ioctl_alarm(struct mfi_softc *, struct bioc_alarm *);
102 int mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *);
103 int mfi_ioctl_setstate(struct mfi_softc *, struct bioc_setstate *);
104 int mfi_bio_hs(struct mfi_softc *, int, int, void *);
105 int mfi_create_sensors(struct mfi_softc *);
106 void mfi_refresh_sensors(void *);
107 #endif /* NBIO > 0 */
108
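/*
 * CCB pool management: mfi_get_ccb() pulls a free command control block
 * off sc_ccb_freeq at splbio; mfi_put_ccb() clears the per-command state
 * and returns the ccb to the free list.
 */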
109 struct mfi_ccb *
110 mfi_get_ccb(struct mfi_softc *sc)
111 {
112 struct mfi_ccb *ccb;
113 int s;
114
115 s = splbio();
116 ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
117 if (ccb) {
118 TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
119 ccb->ccb_state = MFI_CCB_READY;
120 }
121 splx(s);
122
123 DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
124
125 return (ccb);
126 }
127
128 void
129 mfi_put_ccb(struct mfi_ccb *ccb)
130 {
131 struct mfi_softc *sc = ccb->ccb_sc;
132 int s;
133
134 DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
135
136 s = splbio();
137 ccb->ccb_state = MFI_CCB_FREE;
138 ccb->ccb_xs = NULL;
139 ccb->ccb_flags = 0;
140 ccb->ccb_done = NULL;
141 ccb->ccb_direction = 0;
142 ccb->ccb_frame_size = 0;
143 ccb->ccb_extra_frames = 0;
144 ccb->ccb_sgl = NULL;
145 ccb->ccb_data = NULL;
146 ccb->ccb_len = 0;
147 TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
148 splx(s);
149 }
150
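/*
 * Allocate the ccb array, wire each ccb to its slice of the preallocated
 * frame and sense DMA memory, create a per-ccb DMA map for data
 * transfers, and place every ccb on the free list.
 */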
151 int
152 mfi_init_ccb(struct mfi_softc *sc)
153 {
154 struct mfi_ccb *ccb;
155 uint32_t i;
156 int error;
157
158 DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
159
160 sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
161 M_DEVBUF, M_WAITOK);
162 memset(sc->sc_ccb, 0, sizeof(struct mfi_ccb) * sc->sc_max_cmds);
163
164 for (i = 0; i < sc->sc_max_cmds; i++) {
165 ccb = &sc->sc_ccb[i];
166
167 ccb->ccb_sc = sc;
168
169 /* select i'th frame */
170 ccb->ccb_frame = (union mfi_frame *)
171 ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
172 ccb->ccb_pframe =
173 MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
174 ccb->ccb_frame->mfr_header.mfh_context = i;
175
176 /* select i'th sense */
177 ccb->ccb_sense = (struct mfi_sense *)
178 ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
179 ccb->ccb_psense =
180 (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
181
182 /* create a dma map for transfer */
183 error = bus_dmamap_create(sc->sc_dmat,
184 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
185 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
186 if (error) {
187 printf("%s: cannot create ccb dmamap (%d)\n",
188 DEVNAME(sc), error);
189 goto destroy;
190 }
191
192 DNPRINTF(MFI_D_CCB,
193 "ccb(%d): %p frame: %#x (%#x) sense: %#x (%#x) map: %#x\n",
194 ccb->ccb_frame->mfr_header.mfh_context, ccb,
195 ccb->ccb_frame, ccb->ccb_pframe,
196 ccb->ccb_sense, ccb->ccb_psense,
197 ccb->ccb_dmamap);
198
199 /* add ccb to queue */
200 mfi_put_ccb(ccb);
201 }
202
203 return (0);
204 destroy:
205 /* free dma maps and ccb memory */
206 	while (i > 0) {
207 		i--;
208 		ccb = &sc->sc_ccb[i];
209 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
210 }
211
212 free(sc->sc_ccb, M_DEVBUF);
213
214 return (1);
215 }
216
217 uint32_t
218 mfi_read(struct mfi_softc *sc, bus_size_t r)
219 {
220 uint32_t rv;
221
222 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
223 BUS_SPACE_BARRIER_READ);
224 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
225
226 	DNPRINTF(MFI_D_RW, "%s: mr 0x%x 0x%08x ", DEVNAME(sc), r, rv);
227 return (rv);
228 }
229
230 void
231 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
232 {
233 DNPRINTF(MFI_D_RW, "%s: mw 0x%x 0x%08x", DEVNAME(sc), r, v);
234
235 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
236 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
237 BUS_SPACE_BARRIER_WRITE);
238 }
239
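/*
 * Allocate a single-segment, DMA-safe chunk of memory and map it into
 * kernel virtual space: create the map, allocate the segment, map it,
 * then load the map.  The error path unwinds the steps in reverse order.
 */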
240 struct mfi_mem *
241 mfi_allocmem(struct mfi_softc *sc, size_t size)
242 {
243 struct mfi_mem *mm;
244 int nsegs;
245
246 DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %d\n", DEVNAME(sc),
247 size);
248
249 mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT);
250 if (mm == NULL)
251 return (NULL);
252
253 memset(mm, 0, sizeof(struct mfi_mem));
254 mm->am_size = size;
255
256 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
257 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
258 goto amfree;
259
260 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
261 &nsegs, BUS_DMA_NOWAIT) != 0)
262 goto destroy;
263
264 if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
265 BUS_DMA_NOWAIT) != 0)
266 goto free;
267
268 if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
269 BUS_DMA_NOWAIT) != 0)
270 goto unmap;
271
272 DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n",
273 mm->am_kva, mm->am_map->dm_segs[0].ds_addr, mm->am_map);
274
275 memset(mm->am_kva, 0, size);
276 return (mm);
277
278 unmap:
279 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
280 free:
281 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
282 destroy:
283 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
284 amfree:
285 free(mm, M_DEVBUF);
286
287 return (NULL);
288 }
289
290 void
291 mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
292 {
293 DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
294
295 bus_dmamap_unload(sc->sc_dmat, mm->am_map);
296 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
297 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
298 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
299 free(mm, M_DEVBUF);
300 }
301
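/*
 * Step the firmware towards MFI_STATE_READY.  Each intermediate state
 * gets its own timeout in seconds, polled in 100ms increments; a faulted
 * or unknown state aborts the attach.
 */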
302 int
303 mfi_transition_firmware(struct mfi_softc *sc)
304 {
305 int32_t fw_state, cur_state;
306 int max_wait, i;
307
308 fw_state = mfi_read(sc, MFI_OMSG0) & MFI_STATE_MASK;
309
310 DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
311 fw_state);
312
313 while (fw_state != MFI_STATE_READY) {
314 DNPRINTF(MFI_D_MISC,
315 "%s: waiting for firmware to become ready\n",
316 DEVNAME(sc));
317 cur_state = fw_state;
318 switch (fw_state) {
319 case MFI_STATE_FAULT:
320 printf("%s: firmware fault\n", DEVNAME(sc));
321 return (1);
322 case MFI_STATE_WAIT_HANDSHAKE:
323 mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
324 max_wait = 2;
325 break;
326 case MFI_STATE_OPERATIONAL:
327 mfi_write(sc, MFI_IDB, MFI_INIT_READY);
328 max_wait = 10;
329 break;
330 case MFI_STATE_UNDEFINED:
331 case MFI_STATE_BB_INIT:
332 max_wait = 2;
333 break;
334 case MFI_STATE_FW_INIT:
335 case MFI_STATE_DEVICE_SCAN:
336 case MFI_STATE_FLUSH_CACHE:
337 max_wait = 20;
338 break;
339 default:
340 printf("%s: unknown firmware state %d\n",
341 DEVNAME(sc), fw_state);
342 return (1);
343 }
344 for (i = 0; i < (max_wait * 10); i++) {
345 fw_state = mfi_read(sc, MFI_OMSG0) & MFI_STATE_MASK;
346 if (fw_state == cur_state)
347 DELAY(100000);
348 else
349 break;
350 }
351 if (fw_state == cur_state) {
352 printf("%s: firmware stuck in state %#x\n",
353 DEVNAME(sc), fw_state);
354 return (1);
355 }
356 }
357
358 return (0);
359 }
360
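/*
 * Send an MFI_CMD_INIT frame describing the reply queue: the number of
 * entries and the bus addresses of the reply queue, producer index and
 * consumer index inside the sc_pcq DMA area.  The qinfo structure is
 * placed in the frame slot immediately following the init frame itself.
 */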
361 int
362 mfi_initialize_firmware(struct mfi_softc *sc)
363 {
364 struct mfi_ccb *ccb;
365 struct mfi_init_frame *init;
366 struct mfi_init_qinfo *qinfo;
367
368 DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
369
370 if ((ccb = mfi_get_ccb(sc)) == NULL)
371 return (1);
372
373 init = &ccb->ccb_frame->mfr_init;
374 qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
375
376 memset(qinfo, 0, sizeof *qinfo);
377 qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
378 qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
379 offsetof(struct mfi_prod_cons, mpc_reply_q));
380 qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
381 offsetof(struct mfi_prod_cons, mpc_producer));
382 qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
383 offsetof(struct mfi_prod_cons, mpc_consumer));
384
385 init->mif_header.mfh_cmd = MFI_CMD_INIT;
386 init->mif_header.mfh_data_len = sizeof *qinfo;
387 init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);
388
389 DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
390 DEVNAME(sc),
391 qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
392 qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);
393
394 if (mfi_poll(ccb)) {
395 printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc));
396 return (1);
397 }
398
399 mfi_put_ccb(ccb);
400
401 return (0);
402 }
403
404 int
405 mfi_get_info(struct mfi_softc *sc)
406 {
407 #ifdef MFI_DEBUG
408 int i;
409 #endif
410 DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
411
412 if (mfi_mgmt(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
413 sizeof(sc->sc_info), &sc->sc_info, NULL))
414 return (1);
415
416 #ifdef MFI_DEBUG
417
418 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
419 printf("%s: active FW %s Version %s date %s time %s\n",
420 DEVNAME(sc),
421 sc->sc_info.mci_image_component[i].mic_name,
422 sc->sc_info.mci_image_component[i].mic_version,
423 sc->sc_info.mci_image_component[i].mic_build_date,
424 sc->sc_info.mci_image_component[i].mic_build_time);
425 }
426
427 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
428 printf("%s: pending FW %s Version %s date %s time %s\n",
429 DEVNAME(sc),
430 sc->sc_info.mci_pending_image_component[i].mic_name,
431 sc->sc_info.mci_pending_image_component[i].mic_version,
432 sc->sc_info.mci_pending_image_component[i].mic_build_date,
433 sc->sc_info.mci_pending_image_component[i].mic_build_time);
434 }
435
436 printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
437 DEVNAME(sc),
438 sc->sc_info.mci_max_arms,
439 sc->sc_info.mci_max_spans,
440 sc->sc_info.mci_max_arrays,
441 sc->sc_info.mci_max_lds,
442 sc->sc_info.mci_product_name);
443
444 printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
445 DEVNAME(sc),
446 sc->sc_info.mci_serial_number,
447 sc->sc_info.mci_hw_present,
448 sc->sc_info.mci_current_fw_time,
449 sc->sc_info.mci_max_cmds,
450 sc->sc_info.mci_max_sg_elements);
451
452 printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
453 DEVNAME(sc),
454 sc->sc_info.mci_max_request_size,
455 sc->sc_info.mci_lds_present,
456 sc->sc_info.mci_lds_degraded,
457 sc->sc_info.mci_lds_offline,
458 sc->sc_info.mci_pd_present);
459
460 printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
461 DEVNAME(sc),
462 sc->sc_info.mci_pd_disks_present,
463 sc->sc_info.mci_pd_disks_pred_failure,
464 sc->sc_info.mci_pd_disks_failed);
465
466 printf("%s: nvram %d mem %d flash %d\n",
467 DEVNAME(sc),
468 sc->sc_info.mci_nvram_size,
469 sc->sc_info.mci_memory_size,
470 sc->sc_info.mci_flash_size);
471
472 printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
473 DEVNAME(sc),
474 sc->sc_info.mci_ram_correctable_errors,
475 sc->sc_info.mci_ram_uncorrectable_errors,
476 sc->sc_info.mci_cluster_allowed,
477 sc->sc_info.mci_cluster_active);
478
479 printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
480 DEVNAME(sc),
481 sc->sc_info.mci_max_strips_per_io,
482 sc->sc_info.mci_raid_levels,
483 sc->sc_info.mci_adapter_ops,
484 sc->sc_info.mci_ld_ops);
485
486 printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
487 DEVNAME(sc),
488 sc->sc_info.mci_stripe_sz_ops.min,
489 sc->sc_info.mci_stripe_sz_ops.max,
490 sc->sc_info.mci_pd_ops,
491 sc->sc_info.mci_pd_mix_support);
492
493 printf("%s: ecc_bucket %d pckg_prop %s\n",
494 DEVNAME(sc),
495 sc->sc_info.mci_ecc_bucket_count,
496 sc->sc_info.mci_package_version);
497
498 printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
499 DEVNAME(sc),
500 sc->sc_info.mci_properties.mcp_seq_num,
501 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
502 sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
503 sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
504
505 printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
506 DEVNAME(sc),
507 sc->sc_info.mci_properties.mcp_rebuild_rate,
508 sc->sc_info.mci_properties.mcp_patrol_read_rate,
509 sc->sc_info.mci_properties.mcp_bgi_rate,
510 sc->sc_info.mci_properties.mcp_cc_rate);
511
512 printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
513 DEVNAME(sc),
514 sc->sc_info.mci_properties.mcp_recon_rate,
515 sc->sc_info.mci_properties.mcp_cache_flush_interval,
516 sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
517 sc->sc_info.mci_properties.mcp_spinup_delay,
518 sc->sc_info.mci_properties.mcp_cluster_enable);
519
520 printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
521 DEVNAME(sc),
522 sc->sc_info.mci_properties.mcp_coercion_mode,
523 sc->sc_info.mci_properties.mcp_alarm_enable,
524 sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
525 sc->sc_info.mci_properties.mcp_disable_battery_warn,
526 sc->sc_info.mci_properties.mcp_ecc_bucket_size);
527
528 printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
529 DEVNAME(sc),
530 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
531 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
532 sc->sc_info.mci_properties.mcp_expose_encl_devices);
533
534 printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
535 DEVNAME(sc),
536 sc->sc_info.mci_pci.mip_vendor,
537 sc->sc_info.mci_pci.mip_device,
538 sc->sc_info.mci_pci.mip_subvendor,
539 sc->sc_info.mci_pci.mip_subdevice);
540
541 printf("%s: type %#x port_count %d port_addr ",
542 DEVNAME(sc),
543 sc->sc_info.mci_host.mih_type,
544 sc->sc_info.mci_host.mih_port_count);
545
546 for (i = 0; i < 8; i++)
547 printf("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
548 printf("\n");
549
550 	printf("%s: type %#x port_count %d port_addr ",
551 DEVNAME(sc),
552 sc->sc_info.mci_device.mid_type,
553 sc->sc_info.mci_device.mid_port_count);
554
555 for (i = 0; i < 8; i++)
556 printf("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
557 printf("\n");
558 #endif /* MFI_DEBUG */
559
560 return (0);
561 }
562
563 void
564 mfiminphys(struct buf *bp)
565 {
566 DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
567
568 /* XXX currently using MFI_MAXFER = MAXPHYS */
569 if (bp->b_bcount > MFI_MAXFER)
570 bp->b_bcount = MFI_MAXFER;
571 minphys(bp);
572 }
573
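/*
 * Controller attach: wait for the firmware to become ready, read the
 * command and scatter/gather limits from the outbound message register,
 * allocate the producer/consumer, frame and sense DMA areas, initialize
 * the ccbs, hand the queue addresses to the firmware, fetch the
 * controller info and finally attach the scsipi channel that exposes
 * the logical drives.
 */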
574 int
575 mfi_attach(struct mfi_softc *sc)
576 {
577 struct scsipi_adapter *adapt = &sc->sc_adapt;
578 struct scsipi_channel *chan = &sc->sc_chan;
579 uint32_t status, frames;
580 int i;
581
582 DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
583
584 if (mfi_transition_firmware(sc))
585 return (1);
586
587 TAILQ_INIT(&sc->sc_ccb_freeq);
588
589 /* rw_init(&sc->sc_lock, "mfi_lock"); XXX */
590
591 status = mfi_read(sc, MFI_OMSG0);
592 sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
593 sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
594 DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
595 DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);
596
597 /* consumer/producer and reply queue memory */
598 sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
599 sizeof(struct mfi_prod_cons));
600 if (sc->sc_pcq == NULL) {
601 aprint_error("%s: unable to allocate reply queue memory\n",
602 DEVNAME(sc));
603 goto nopcq;
604 }
605 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
606 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
607 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
608
609 /* frame memory */
610 /* we are not doing 64 bit IO so only calculate # of 32 bit frames */
611 frames = (sizeof(struct mfi_sg32) * sc->sc_max_sgl +
612 MFI_FRAME_SIZE - 1) / MFI_FRAME_SIZE + 1;
613 sc->sc_frames_size = frames * MFI_FRAME_SIZE;
614 sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
615 if (sc->sc_frames == NULL) {
616 aprint_error("%s: unable to allocate frame memory\n",
617 DEVNAME(sc));
618 goto noframe;
619 }
620 /* XXX hack, fix this */
621 if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
622 aprint_error("%s: improper frame alignment (%#llx) FIXME\n",
623 DEVNAME(sc), (long long int)MFIMEM_DVA(sc->sc_frames));
624 goto noframe;
625 }
626
627 /* sense memory */
628 sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
629 if (sc->sc_sense == NULL) {
630 aprint_error("%s: unable to allocate sense memory\n",
631 DEVNAME(sc));
632 goto nosense;
633 }
634
635 /* now that we have all memory bits go initialize ccbs */
636 if (mfi_init_ccb(sc)) {
637 aprint_error("%s: could not init ccb list\n", DEVNAME(sc));
638 goto noinit;
639 }
640
641 /* kickstart firmware with all addresses and pointers */
642 if (mfi_initialize_firmware(sc)) {
643 aprint_error("%s: could not initialize firmware\n",
644 DEVNAME(sc));
645 goto noinit;
646 }
647
648 if (mfi_get_info(sc)) {
649 aprint_error("%s: could not retrieve controller information\n",
650 DEVNAME(sc));
651 goto noinit;
652 }
653
654 aprint_normal("%s: logical drives %d, version %s, %dMB RAM\n",
655 DEVNAME(sc),
656 sc->sc_info.mci_lds_present,
657 sc->sc_info.mci_package_version,
658 sc->sc_info.mci_memory_size);
659
660 sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
661 sc->sc_max_ld = sc->sc_ld_cnt;
662 for (i = 0; i < sc->sc_ld_cnt; i++)
663 sc->sc_ld[i].ld_present = 1;
664
665 memset(adapt, 0, sizeof(*adapt));
666 adapt->adapt_dev = &sc->sc_dev;
667 adapt->adapt_nchannels = 1;
668 if (sc->sc_ld_cnt)
669 adapt->adapt_openings = sc->sc_max_cmds / sc->sc_ld_cnt;
670 else
671 adapt->adapt_openings = sc->sc_max_cmds;
672 adapt->adapt_max_periph = adapt->adapt_openings;
673 adapt->adapt_request = mfi_scsipi_request;
674 adapt->adapt_minphys = mfiminphys;
675 adapt->adapt_ioctl = mfi_scsi_ioctl;
676
677 memset(chan, 0, sizeof(*chan));
678 chan->chan_adapter = adapt;
679 chan->chan_bustype = &scsi_bustype;
680 chan->chan_channel = 0;
681 chan->chan_flags = 0;
682 chan->chan_nluns = 8;
683 chan->chan_ntargets = MFI_MAX_LD;
684 chan->chan_id = MFI_MAX_LD;
685
686 (void) config_found(&sc->sc_dev, &sc->sc_chan, scsiprint);
687
688 /* enable interrupts */
689 mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
690
691 #if NBIO > 0
692 if (bio_register(&sc->sc_dev, mfi_ioctl) != 0)
693 panic("%s: controller registration failed", DEVNAME(sc));
694 else
695 sc->sc_ioctl = mfi_ioctl;
696
697 if (mfi_create_sensors(sc) != 0)
698 aprint_error("%s: unable to create sensors\n", DEVNAME(sc));
699 #endif /* NBIO > 0 */
700
701 return (0);
702 noinit:
703 mfi_freemem(sc, sc->sc_sense);
704 nosense:
705 mfi_freemem(sc, sc->sc_frames);
706 noframe:
707 mfi_freemem(sc, sc->sc_pcq);
708 nopcq:
709 return (1);
710 }
711
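/*
 * Post a command to the firmware: sync the frame and sense memory, then
 * write the frame's bus address, shifted right by 3 so the low bits can
 * carry the extra frame count, into the inbound queue port.
 */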
712 int
713 mfi_despatch_cmd(struct mfi_ccb *ccb)
714 {
715 struct mfi_softc *sc = ccb->ccb_sc;
716 DNPRINTF(MFI_D_CMD, "%s: mfi_despatch_cmd\n", DEVNAME(sc));
717
718 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
719 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
720 sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
721 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
722 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
723 MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);
724
725 mfi_write(ccb->ccb_sc, MFI_IQP, htole32((ccb->ccb_pframe >> 3) |
726 ccb->ccb_extra_frames));
727
728 return(0);
729 }
730
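/*
 * Polled command completion: mark the status byte busy (0xff), post the
 * frame and busy-wait (up to roughly 5 seconds) for the firmware to
 * overwrite it, re-syncing the frame memory on every iteration.
 */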
731 int
732 mfi_poll(struct mfi_ccb *ccb)
733 {
734 struct mfi_softc *sc = ccb->ccb_sc;
735 struct mfi_frame_header *hdr;
736 int to = 0;
737
738 DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));
739
740 hdr = &ccb->ccb_frame->mfr_header;
741 hdr->mfh_cmd_status = 0xff;
742 hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
743
744 mfi_despatch_cmd(ccb);
745 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
746 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
747 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
748
749 while (hdr->mfh_cmd_status == 0xff) {
750 delay(1000);
751 if (to++ > 5000) /* XXX 5 seconds busywait sucks */
752 break;
753 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
754 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
755 sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
756 }
757 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
758 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
759 sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
760
761 if (ccb->ccb_data != NULL) {
762 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
763 DEVNAME(sc));
764 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
765 ccb->ccb_dmamap->dm_mapsize,
766 (ccb->ccb_direction & MFI_DATA_IN) ?
767 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
768
769 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
770 }
771
772 if (hdr->mfh_cmd_status == 0xff) {
773 printf("%s: timeout on ccb %d\n", DEVNAME(sc),
774 hdr->mfh_context);
775 ccb->ccb_flags |= MFI_CCB_F_ERR;
776 return (1);
777 }
778
779 return (0);
780 }
781
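/*
 * Interrupt handler: acknowledge the controller, then walk the reply
 * queue from the consumer index to the producer index, invalidating each
 * context as it is consumed and calling the ccb's completion routine.
 */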
782 int
783 mfi_intr(void *arg)
784 {
785 struct mfi_softc *sc = arg;
786 struct mfi_prod_cons *pcq;
787 struct mfi_ccb *ccb;
788 uint32_t status, producer, consumer, ctx;
789 int claimed = 0;
790
791 status = mfi_read(sc, MFI_OSTS);
792 if ((status & MFI_OSTS_INTR_VALID) == 0)
793 return (claimed);
794 /* write status back to acknowledge interrupt */
795 mfi_write(sc, MFI_OSTS, status);
796
797 	pcq = MFIMEM_KVA(sc->sc_pcq);
798 	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %p %p\n", DEVNAME(sc), sc, pcq);
799 
800 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
801 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
802 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
803
804 producer = pcq->mpc_producer;
805 consumer = pcq->mpc_consumer;
806
807 while (consumer != producer) {
808 DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
809 DEVNAME(sc), producer, consumer);
810
811 ctx = pcq->mpc_reply_q[consumer];
812 pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
813 if (ctx == MFI_INVALID_CTX)
814 printf("%s: invalid context, p: %d c: %d\n",
815 DEVNAME(sc), producer, consumer);
816 else {
817 /* XXX remove from queue and call scsi_done */
818 ccb = &sc->sc_ccb[ctx];
819 DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
820 DEVNAME(sc), ctx);
821 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
822 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
823 sc->sc_frames_size,
824 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
825 ccb->ccb_done(ccb);
826
827 claimed = 1;
828 }
829 consumer++;
830 if (consumer == (sc->sc_max_cmds + 1))
831 consumer = 0;
832 }
833
834 pcq->mpc_consumer = consumer;
835 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
836 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
837 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
838
839 return (claimed);
840 }
841
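/*
 * Build an MFI_CMD_LD_READ/WRITE frame for a block I/O request on a
 * logical drive and attach the scatter/gather list for the data buffer.
 */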
842 int
843 mfi_scsi_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint32_t blockno,
844 uint32_t blockcnt)
845 {
846 struct scsipi_periph *periph = xs->xs_periph;
847 struct mfi_io_frame *io;
848
849 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
850 periph->periph_channel->chan_adapter->adapt_dev->dv_xname,
851 periph->periph_target);
852
853 if (!xs->data)
854 return (1);
855
856 io = &ccb->ccb_frame->mfr_io;
857 if (xs->xs_control & XS_CTL_DATA_IN) {
858 io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
859 ccb->ccb_direction = MFI_DATA_IN;
860 } else {
861 io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
862 ccb->ccb_direction = MFI_DATA_OUT;
863 }
864 io->mif_header.mfh_target_id = periph->periph_target;
865 io->mif_header.mfh_timeout = 0;
866 io->mif_header.mfh_flags = 0;
867 io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
868 	io->mif_header.mfh_data_len = blockcnt;
869 io->mif_lba_hi = 0;
870 io->mif_lba_lo = blockno;
871 io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
872 io->mif_sense_addr_hi = 0;
873
874 ccb->ccb_done = mfi_scsi_xs_done;
875 ccb->ccb_xs = xs;
876 ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
877 ccb->ccb_sgl = &io->mif_sgl;
878 ccb->ccb_data = xs->data;
879 ccb->ccb_len = xs->datalen;
880
881 	if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
882 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
883 return (1);
884
885 return (0);
886 }
887
888 void
889 mfi_scsi_xs_done(struct mfi_ccb *ccb)
890 {
891 struct scsipi_xfer *xs = ccb->ccb_xs;
892 struct mfi_softc *sc = ccb->ccb_sc;
893 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
894
895 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#x %#x\n",
896 DEVNAME(sc), ccb, ccb->ccb_frame);
897
898 if (xs->data != NULL) {
899 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
900 DEVNAME(sc));
901 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
902 ccb->ccb_dmamap->dm_mapsize,
903 (xs->xs_control & XS_CTL_DATA_IN) ?
904 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
905
906 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
907 }
908
909 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
910 xs->error = XS_DRIVER_STUFFUP;
911 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
912 DEVNAME(sc), hdr->mfh_cmd_status);
913
914 if (hdr->mfh_scsi_status != 0) {
915 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
916 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
917 MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
918 DNPRINTF(MFI_D_INTR,
919 "%s: mfi_scsi_xs_done sense %#x %x %x\n",
920 DEVNAME(sc), hdr->mfh_scsi_status,
921 &xs->sense, ccb->ccb_sense);
922 memset(&xs->sense, 0, sizeof(xs->sense));
923 memcpy(&xs->sense, ccb->ccb_sense,
924 sizeof(struct scsi_sense_data));
925 xs->error = XS_SENSE;
926 }
927 } else {
928 xs->error = XS_NOERROR;
929 xs->status = SCSI_OK;
930 xs->resid = 0;
931 }
932
933 mfi_put_ccb(ccb);
934 scsipi_done(xs);
935 }
936
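/*
 * Build an MFI_CMD_LD_SCSI_IO pass-through frame for CDBs that are not
 * plain block reads or writes; the CDB is copied verbatim and the data
 * direction is taken from the scsipi control flags.
 */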
937 int
938 mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
939 {
940 struct mfi_pass_frame *pf;
941 struct scsipi_periph *periph = xs->xs_periph;
942
943 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
944 periph->periph_channel->chan_adapter->adapt_dev->dv_xname,
945 periph->periph_target);
946
947 pf = &ccb->ccb_frame->mfr_pass;
948 pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
949 pf->mpf_header.mfh_target_id = periph->periph_target;
950 pf->mpf_header.mfh_lun_id = 0;
951 pf->mpf_header.mfh_cdb_len = xs->cmdlen;
952 pf->mpf_header.mfh_timeout = 0;
953 	pf->mpf_header.mfh_data_len = xs->datalen; /* XXX */
954 pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
955
956 pf->mpf_sense_addr_hi = 0;
957 pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);
958
959 memset(pf->mpf_cdb, 0, 16);
960 memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);
961
962 ccb->ccb_done = mfi_scsi_xs_done;
963 ccb->ccb_xs = xs;
964 ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
965 ccb->ccb_sgl = &pf->mpf_sgl;
966
967 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
968 ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
969 MFI_DATA_IN : MFI_DATA_OUT;
970 else
971 ccb->ccb_direction = MFI_DATA_NONE;
972
973 if (xs->data) {
974 ccb->ccb_data = xs->data;
975 ccb->ccb_len = xs->datalen;
976
977 		if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
978 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
979 return (1);
980 }
981
982 return (0);
983 }
984
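/*
 * scsipi entry point.  READ/WRITE CDBs go through the fast I/O frame,
 * SYNCHRONIZE CACHE is translated into a cache flush DCMD, and
 * everything else is passed to the firmware as a raw SCSI command.
 * Polled transfers are completed inline via mfi_poll().
 */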
985 void
986 mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
987 void *arg)
988 {
989 struct scsipi_periph *periph;
990 struct scsipi_xfer *xs;
991 struct scsipi_adapter *adapt = chan->chan_adapter;
992 struct mfi_softc *sc = (void *) adapt->adapt_dev;
993 struct mfi_ccb *ccb;
994 struct scsi_rw_6 *rw;
995 struct scsipi_rw_10 *rwb;
996 uint32_t blockno, blockcnt;
997 uint8_t target;
998 uint8_t mbox[MFI_MBOX_SIZE];
999 int s;
1000
1001 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d\n",
1002 	    DEVNAME(sc), req);
1003
1004 switch (req) {
1005 case ADAPTER_REQ_GROW_RESOURCES:
1006 /* Not supported. */
1007 return;
1008 case ADAPTER_REQ_SET_XFER_MODE:
1009 /* Not supported. */
1010 return;
1011 case ADAPTER_REQ_RUN_XFER:
1012 break;
1013 }
1014
1015 xs = arg;
1016 periph = xs->xs_periph;
1017 target = periph->periph_target;
1018
1019 s = splbio();
1020 if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
1021 periph->periph_lun != 0) {
1022 DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1023 DEVNAME(sc), target);
1024 xs->error = XS_SELTIMEOUT;
1025 scsipi_done(xs);
1026 splx(s);
1027 return;
1028 }
1029
1030 if ((ccb = mfi_get_ccb(sc)) == NULL) {
1031 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
1032 xs->error = XS_RESOURCE_SHORTAGE;
1033 scsipi_done(xs);
1034 splx(s);
1035 return;
1036 }
1037
1038 switch (xs->cmd->opcode) {
1039 /* IO path */
1040 case READ_10:
1041 case WRITE_10:
1042 rwb = (struct scsipi_rw_10 *)xs->cmd;
1043 blockno = _4btol(rwb->addr);
1044 blockcnt = _2btol(rwb->length);
1045 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1046 mfi_put_ccb(ccb);
1047 goto stuffup;
1048 }
1049 break;
1050
1051 case SCSI_READ_6_COMMAND:
1052 case SCSI_WRITE_6_COMMAND:
1053 rw = (struct scsi_rw_6 *)xs->cmd;
1054 blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
1055 blockcnt = rw->length ? rw->length : 0x100;
1056 if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
1057 mfi_put_ccb(ccb);
1058 goto stuffup;
1059 }
1060 break;
1061
1062 case SCSI_SYNCHRONIZE_CACHE_10:
1063 mfi_put_ccb(ccb); /* we don't need this */
1064
1065 mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1066 if (mfi_mgmt(sc, MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE,
1067 0, NULL, mbox))
1068 goto stuffup;
1069 xs->error = XS_NOERROR;
1070 xs->status = SCSI_OK;
1071 xs->resid = 0;
1072 scsipi_done(xs);
1073 splx(s);
1074 return;
1075 /* NOTREACHED */
1076
1077 	/* hand it off to the firmware and let it deal with it */
1078 case SCSI_TEST_UNIT_READY:
1079 /* save off sd? after autoconf */
1080 if (!cold) /* XXX bogus */
1081 strlcpy(sc->sc_ld[target].ld_dev, sc->sc_dev.dv_xname,
1082 sizeof(sc->sc_ld[target].ld_dev));
1083 /* FALLTHROUGH */
1084
1085 default:
1086 if (mfi_scsi_ld(ccb, xs)) {
1087 mfi_put_ccb(ccb);
1088 goto stuffup;
1089 }
1090 break;
1091 }
1092
1093 DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1094
1095 if (xs->xs_control & XS_CTL_POLL) {
1096 if (mfi_poll(ccb)) {
1097 /* XXX check for sense in ccb->ccb_sense? */
1098 printf("%s: mfi_scsipi_request poll failed\n",
1099 DEVNAME(sc));
1101 bzero(&xs->sense, sizeof(xs->sense));
1102 xs->sense.scsi_sense.response_code =
1103 SSD_RCODE_VALID | SSD_RCODE_CURRENT;
1104 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
1105 xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
1106 xs->error = XS_SENSE;
1107 xs->status = SCSI_CHECK;
1108 } else {
1109 DNPRINTF(MFI_D_DMA,
1110 "%s: mfi_scsipi_request poll complete %d\n",
1111 DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
1112 xs->error = XS_NOERROR;
1113 xs->status = SCSI_OK;
1114 xs->resid = 0;
1115 }
1116 mfi_put_ccb(ccb);
1117 scsipi_done(xs);
1118 splx(s);
1119 return;
1120 }
1121
1122 mfi_despatch_cmd(ccb);
1123
1124 DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
1125 ccb->ccb_dmamap->dm_nsegs);
1126
1127 splx(s);
1128 return;
1129
1130 stuffup:
1131 xs->error = XS_DRIVER_STUFFUP;
1132 scsipi_done(xs);
1133 splx(s);
1134 }
1135
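/*
 * Load the ccb's DMA map for the data buffer and translate the resulting
 * segments into 32-bit scatter/gather entries in the frame, updating the
 * frame size and the number of extra frames the command spills into.
 */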
1136 int
1137 mfi_create_sgl(struct mfi_ccb *ccb, int flags)
1138 {
1139 struct mfi_softc *sc = ccb->ccb_sc;
1140 struct mfi_frame_header *hdr;
1141 bus_dma_segment_t *sgd;
1142 union mfi_sgl *sgl;
1143 int error, i;
1144
1145 DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#x\n", DEVNAME(sc),
1146 ccb->ccb_data);
1147
1148 if (!ccb->ccb_data)
1149 return (1);
1150
1151 error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
1152 ccb->ccb_data, ccb->ccb_len, NULL, flags);
1153 if (error) {
1154 if (error == EFBIG)
1155 printf("more than %d dma segs\n",
1156 sc->sc_max_sgl);
1157 else
1158 printf("error %d loading dma map\n", error);
1159 return (1);
1160 }
1161
1162 hdr = &ccb->ccb_frame->mfr_header;
1163 sgl = ccb->ccb_sgl;
1164 sgd = ccb->ccb_dmamap->dm_segs;
1165 for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1166 sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1167 sgl->sg32[i].len = htole32(sgd[i].ds_len);
1168 DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
1169 DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1170 }
1171
1172 if (ccb->ccb_direction == MFI_DATA_IN) {
1173 hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1174 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1175 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1176 } else {
1177 hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1178 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1179 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1180 }
1181
1182 hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1183 /* for 64 bit io make the sizeof a variable to hold whatever sg size */
1184 ccb->ccb_frame_size += sizeof(struct mfi_sg32) *
1185 ccb->ccb_dmamap->dm_nsegs;
1186 ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1187
1188 DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
1189 " dm_nsegs: %d extra_frames: %d\n",
1190 DEVNAME(sc),
1191 hdr->mfh_sg_count,
1192 ccb->ccb_frame_size,
1193 sc->sc_frames_size,
1194 ccb->ccb_dmamap->dm_nsegs,
1195 ccb->ccb_extra_frames);
1196
1197 return (0);
1198 }
1199
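/*
 * Issue a management (DCMD) command.  The optional mailbox is copied
 * into the frame and a scatter/gather list is built when a data buffer
 * is supplied.  Commands are polled while cold; otherwise the caller
 * sleeps until mfi_mgmt_done() marks the ccb done.
 */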
1200 int
1201 mfi_mgmt(struct mfi_softc *sc, uint32_t opc, uint32_t dir, uint32_t len,
1202 void *buf, uint8_t *mbox)
1203 {
1204 struct mfi_ccb *ccb;
1205 struct mfi_dcmd_frame *dcmd;
1206 int rv = 1;
1207
1208 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(sc), opc);
1209
1210 if ((ccb = mfi_get_ccb(sc)) == NULL)
1211 return (rv);
1212
1213 dcmd = &ccb->ccb_frame->mfr_dcmd;
1214 memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1215 dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1216 dcmd->mdf_header.mfh_timeout = 0;
1217
1218 dcmd->mdf_opcode = opc;
1219 dcmd->mdf_header.mfh_data_len = 0;
1220 ccb->ccb_direction = dir;
1221 ccb->ccb_done = mfi_mgmt_done;
1222
1223 ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1224
1225 /* handle special opcodes */
1226 if (mbox)
1227 memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1228
1229 if (dir != MFI_DATA_NONE) {
1230 dcmd->mdf_header.mfh_data_len = len;
1231 ccb->ccb_data = buf;
1232 ccb->ccb_len = len;
1233 ccb->ccb_sgl = &dcmd->mdf_sgl;
1234
1235 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
1236 goto done;
1237 }
1238
1239 if (cold) {
1240 if (mfi_poll(ccb))
1241 goto done;
1242 } else {
1243 mfi_despatch_cmd(ccb);
1244
1245 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt sleeping\n", DEVNAME(sc));
1246 while (ccb->ccb_state != MFI_CCB_DONE)
1247 tsleep(ccb, PRIBIO, "mfi_mgmt", 0);
1248
1249 if (ccb->ccb_flags & MFI_CCB_F_ERR)
1250 goto done;
1251 }
1252
1253 rv = 0;
1254
1255 done:
1256 mfi_put_ccb(ccb);
1257 return (rv);
1258 }
1259
1260 void
1261 mfi_mgmt_done(struct mfi_ccb *ccb)
1262 {
1263 struct mfi_softc *sc = ccb->ccb_sc;
1264 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1265
1266 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#x %#x\n",
1267 DEVNAME(sc), ccb, ccb->ccb_frame);
1268
1269 if (ccb->ccb_data != NULL) {
1270 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
1271 DEVNAME(sc));
1272 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1273 ccb->ccb_dmamap->dm_mapsize,
1274 (ccb->ccb_direction & MFI_DATA_IN) ?
1275 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1276
1277 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1278 }
1279
1280 if (hdr->mfh_cmd_status != MFI_STAT_OK)
1281 ccb->ccb_flags |= MFI_CCB_F_ERR;
1282
1283 ccb->ccb_state = MFI_CCB_DONE;
1284
1285 wakeup(ccb);
1286 }
1287
1288
1289 int
1290 mfi_scsi_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
1291 int flag, struct proc *p)
1292 {
1293 return (ENOTTY);
1294 }
1295
1296 #if NBIO > 0
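/*
 * bio(4) ioctl support: translate BIOCINQ/BIOCVOL/BIOCDISK and friends
 * into the corresponding firmware DCMDs so userland RAID management
 * tools can inspect and control the controller.
 */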
1297 int
1298 mfi_ioctl(struct device *dev, u_long cmd, void *addr)
1299 {
1300 struct mfi_softc *sc = (struct mfi_softc *)dev;
1301 int error = 0;
1302
1303 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1304
1305 rw_enter_write(&sc->sc_lock);
1306
1307 switch (cmd) {
1308 case BIOCINQ:
1309 DNPRINTF(MFI_D_IOCTL, "inq\n");
1310 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1311 break;
1312
1313 case BIOCVOL:
1314 DNPRINTF(MFI_D_IOCTL, "vol\n");
1315 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1316 break;
1317
1318 case BIOCDISK:
1319 DNPRINTF(MFI_D_IOCTL, "disk\n");
1320 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1321 break;
1322
1323 case BIOCALARM:
1324 DNPRINTF(MFI_D_IOCTL, "alarm\n");
1325 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1326 break;
1327
1328 case BIOCBLINK:
1329 DNPRINTF(MFI_D_IOCTL, "blink\n");
1330 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1331 break;
1332
1333 case BIOCSETSTATE:
1334 DNPRINTF(MFI_D_IOCTL, "setstate\n");
1335 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1336 break;
1337
1338 default:
1339 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1340 error = EINVAL;
1341 }
1342
1343 rw_exit_write(&sc->sc_lock);
1344
1345 return (error);
1346 }
1347
1348 int
1349 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1350 {
1351 struct mfi_conf *cfg;
1352 int rv = EINVAL;
1353
1354 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1355
1356 if (mfi_get_info(sc)) {
1357 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
1358 DEVNAME(sc));
1359 return (EIO);
1360 }
1361
1362 /* get figures */
1363 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1364 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1365 goto freeme;
1366
1367 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1368 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1369 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1370
1371 rv = 0;
1372 freeme:
1373 free(cfg, M_DEVBUF);
1374 return (rv);
1375 }
1376
1377 int
1378 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
1379 {
1380 int i, per, rv = EINVAL;
1381 uint8_t mbox[MFI_MBOX_SIZE];
1382
1383 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
1384 DEVNAME(sc), bv->bv_volid);
1385
1386 if (mfi_mgmt(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1387 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1388 goto done;
1389
1390 i = bv->bv_volid;
1391 mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1392 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
1393 DEVNAME(sc), mbox[0]);
1394
1395 if (mfi_mgmt(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
1396 sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox))
1397 goto done;
1398
1399 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
1400 /* go do hotspares */
1401 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
1402 goto done;
1403 }
1404
1405 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
1406
1407 switch(sc->sc_ld_list.mll_list[i].mll_state) {
1408 case MFI_LD_OFFLINE:
1409 bv->bv_status = BIOC_SVOFFLINE;
1410 break;
1411
1412 case MFI_LD_PART_DEGRADED:
1413 case MFI_LD_DEGRADED:
1414 bv->bv_status = BIOC_SVDEGRADED;
1415 break;
1416
1417 case MFI_LD_ONLINE:
1418 bv->bv_status = BIOC_SVONLINE;
1419 break;
1420
1421 default:
1422 bv->bv_status = BIOC_SVINVALID;
1423 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
1424 DEVNAME(sc),
1425 sc->sc_ld_list.mll_list[i].mll_state);
1426 }
1427
1428 /* additional status can modify MFI status */
1429 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
1430 case MFI_LD_PROG_CC:
1431 case MFI_LD_PROG_BGI:
1432 bv->bv_status = BIOC_SVSCRUB;
1433 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
1434 bv->bv_percent = (per * 100) / 0xffff;
1435 bv->bv_seconds =
1436 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
1437 break;
1438
1439 case MFI_LD_PROG_FGI:
1440 case MFI_LD_PROG_RECONSTRUCT:
1441 /* nothing yet */
1442 break;
1443 }
1444
1445 /*
1446 	 * The RAID levels are determined per the SNIA DDF spec; this is only
1447 	 * a subset that is valid for the MFI controller.
1448 */
1449 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
1450 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
1451 MFI_DDF_SRL_SPANNED)
1452 bv->bv_level *= 10;
1453
1454 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
1455 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
1456
1457 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
1458
1459 rv = 0;
1460 done:
1461 return (rv);
1462 }
1463
1464 int
1465 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
1466 {
1467 struct mfi_conf *cfg;
1468 struct mfi_array *ar;
1469 struct mfi_ld_cfg *ld;
1470 struct mfi_pd_details *pd;
1471 struct scsi_inquiry_data *inqbuf;
1472 char vend[8+16+4+1];
1473 int i, rv = EINVAL;
1474 int arr, vol, disk;
1475 uint32_t size;
1476 uint8_t mbox[MFI_MBOX_SIZE];
1477
1478 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
1479 DEVNAME(sc), bd->bd_diskid);
1480
1481 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
1482
1483 /* send single element command to retrieve size for full structure */
1484 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1485 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1486 goto freeme;
1487
1488 size = cfg->mfc_size;
1489 free(cfg, M_DEVBUF);
1490
1491 /* memory for read config */
1492 cfg = malloc(size, M_DEVBUF, M_WAITOK);
1493 memset(cfg, 0, size);
1494 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
1495 goto freeme;
1496
1497 ar = cfg->mfc_array;
1498
1499 /* calculate offset to ld structure */
1500 ld = (struct mfi_ld_cfg *)(
1501 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1502 cfg->mfc_array_size * cfg->mfc_no_array);
1503
1504 vol = bd->bd_volid;
1505
1506 if (vol >= cfg->mfc_no_ld) {
1507 /* do hotspares */
1508 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
1509 goto freeme;
1510 }
1511
1512 /* find corresponding array for ld */
1513 for (i = 0, arr = 0; i < vol; i++)
1514 arr += ld[i].mlc_parm.mpa_span_depth;
1515
1516 /* offset disk into pd list */
1517 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
1518
1519 /* offset array index into the next spans */
1520 arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
1521
1522 bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
1523 switch (ar[arr].pd[disk].mar_pd_state){
1524 case MFI_PD_UNCONFIG_GOOD:
1525 bd->bd_status = BIOC_SDUNUSED;
1526 break;
1527
1528 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
1529 bd->bd_status = BIOC_SDHOTSPARE;
1530 break;
1531
1532 case MFI_PD_OFFLINE:
1533 bd->bd_status = BIOC_SDOFFLINE;
1534 break;
1535
1536 case MFI_PD_FAILED:
1537 bd->bd_status = BIOC_SDFAILED;
1538 break;
1539
1540 case MFI_PD_REBUILD:
1541 bd->bd_status = BIOC_SDREBUILD;
1542 break;
1543
1544 case MFI_PD_ONLINE:
1545 bd->bd_status = BIOC_SDONLINE;
1546 break;
1547
1548 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
1549 default:
1550 bd->bd_status = BIOC_SDINVALID;
1551 break;
1552
1553 }
1554
1555 /* get the remaining fields */
1556 *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
1557 if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1558 sizeof *pd, pd, mbox))
1559 goto freeme;
1560
1561 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
1562
1563 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
1564 bd->bd_channel = pd->mpd_enc_idx;
1565
1566 inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
1567 memcpy(vend, inqbuf->vendor, sizeof vend - 1);
1568 vend[sizeof vend - 1] = '\0';
1569 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
1570
1571 /* XXX find a way to retrieve serial nr from drive */
1572 /* XXX find a way to get bd_procdev */
1573
1574 rv = 0;
1575 freeme:
1576 free(pd, M_DEVBUF);
1577 free(cfg, M_DEVBUF);
1578
1579 return (rv);
1580 }
1581
1582 int
1583 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1584 {
1585 uint32_t opc, dir = MFI_DATA_NONE;
1586 int rv = 0;
1587 int8_t ret;
1588
1589 switch(ba->ba_opcode) {
1590 case BIOC_SADISABLE:
1591 opc = MR_DCMD_SPEAKER_DISABLE;
1592 break;
1593
1594 case BIOC_SAENABLE:
1595 opc = MR_DCMD_SPEAKER_ENABLE;
1596 break;
1597
1598 case BIOC_SASILENCE:
1599 opc = MR_DCMD_SPEAKER_SILENCE;
1600 break;
1601
1602 case BIOC_GASTATUS:
1603 opc = MR_DCMD_SPEAKER_GET;
1604 dir = MFI_DATA_IN;
1605 break;
1606
1607 case BIOC_SATEST:
1608 opc = MR_DCMD_SPEAKER_TEST;
1609 break;
1610
1611 default:
1612 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1613 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1614 return (EINVAL);
1615 }
1616
1617 if (mfi_mgmt(sc, opc, dir, sizeof(ret), &ret, NULL))
1618 rv = EINVAL;
1619 else
1620 if (ba->ba_opcode == BIOC_GASTATUS)
1621 ba->ba_status = ret;
1622 else
1623 ba->ba_status = 0;
1624
1625 return (rv);
1626 }
1627
1628 int
1629 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1630 {
1631 int i, found, rv = EINVAL;
1632 uint8_t mbox[MFI_MBOX_SIZE];
1633 uint32_t cmd;
1634 struct mfi_pd_list *pd;
1635
1636 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1637 bb->bb_status);
1638
1639 /* channel 0 means not in an enclosure so can't be blinked */
1640 if (bb->bb_channel == 0)
1641 return (EINVAL);
1642
1643 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1644
1645 if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1646 MFI_PD_LIST_SIZE, pd, NULL))
1647 goto done;
1648
1649 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1650 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1651 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1652 found = 1;
1653 break;
1654 }
1655
1656 if (!found)
1657 goto done;
1658
1659 memset(mbox, 0, sizeof mbox);
1660
1661 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1662
1663 switch (bb->bb_status) {
1664 case BIOC_SBUNBLINK:
1665 cmd = MR_DCMD_PD_UNBLINK;
1666 break;
1667
1668 case BIOC_SBBLINK:
1669 cmd = MR_DCMD_PD_BLINK;
1670 break;
1671
1672 case BIOC_SBALARM:
1673 default:
1674 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1675 "opcode %x\n", DEVNAME(sc), bb->bb_status);
1676 goto done;
1677 }
1678
1679
1680 if (mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1681 goto done;
1682
1683 rv = 0;
1684 done:
1685 free(pd, M_DEVBUF);
1686 return (rv);
1687 }
1688
1689 int
1690 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
1691 {
1692 struct mfi_pd_list *pd;
1693 int i, found, rv = EINVAL;
1694 uint8_t mbox[MFI_MBOX_SIZE];
1695 uint32_t cmd;
1696
1697 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
1698 bs->bs_status);
1699
1700 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
1701
1702 if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1703 MFI_PD_LIST_SIZE, pd, NULL))
1704 goto done;
1705
1706 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1707 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
1708 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
1709 found = 1;
1710 break;
1711 }
1712
1713 if (!found)
1714 goto done;
1715
1716 memset(mbox, 0, sizeof mbox);
1717
1718 	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
1719
1720 switch (bs->bs_status) {
1721 case BIOC_SSONLINE:
1722 mbox[2] = MFI_PD_ONLINE;
1723 cmd = MD_DCMD_PD_SET_STATE;
1724 break;
1725
1726 case BIOC_SSOFFLINE:
1727 mbox[2] = MFI_PD_OFFLINE;
1728 cmd = MD_DCMD_PD_SET_STATE;
1729 break;
1730
1731 case BIOC_SSHOTSPARE:
1732 mbox[2] = MFI_PD_HOTSPARE;
1733 cmd = MD_DCMD_PD_SET_STATE;
1734 break;
1735 /*
1736 case BIOC_SSREBUILD:
1737 cmd = MD_DCMD_PD_REBUILD;
1738 break;
1739 */
1740 default:
1741 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
1742 "opcode %x\n", DEVNAME(sc), bs->bs_status);
1743 goto done;
1744 }
1745
1746
1747 	if (mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
1748 goto done;
1749
1750 rv = 0;
1751 done:
1752 free(pd, M_DEVBUF);
1753 return (rv);
1754 }
1755
1756 int
1757 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
1758 {
1759 struct mfi_conf *cfg;
1760 struct mfi_hotspare *hs;
1761 struct mfi_pd_details *pd;
1762 struct bioc_disk *sdhs;
1763 struct bioc_vol *vdhs;
1764 struct scsi_inquiry_data *inqbuf;
1765 char vend[8+16+4+1];
1766 int i, rv = EINVAL;
1767 uint32_t size;
1768 uint8_t mbox[MFI_MBOX_SIZE];
1769
1770 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
1771
1772 if (!bio_hs)
1773 return (EINVAL);
1774
1775 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
1776
1777 /* send single element command to retrieve size for full structure */
1778 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
1779 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
1780 goto freeme;
1781
1782 size = cfg->mfc_size;
1783 free(cfg, M_DEVBUF);
1784
1785 /* memory for read config */
1786 cfg = malloc(size, M_DEVBUF, M_WAITOK);
1787 memset(cfg, 0, size);
1788 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
1789 goto freeme;
1790
1791 /* calculate offset to hs structure */
1792 hs = (struct mfi_hotspare *)(
1793 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1794 cfg->mfc_array_size * cfg->mfc_no_array +
1795 cfg->mfc_ld_size * cfg->mfc_no_ld);
1796
1797 if (volid < cfg->mfc_no_ld)
1798 goto freeme; /* not a hotspare */
1799
1800 	if (volid >= (cfg->mfc_no_ld + cfg->mfc_no_hs))
1801 goto freeme; /* not a hotspare */
1802
1803 /* offset into hotspare structure */
1804 i = volid - cfg->mfc_no_ld;
1805
1806 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
1807 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
1808 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
1809
1810 /* get pd fields */
1811 memset(mbox, 0, sizeof mbox);
1812 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
1813 if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1814 sizeof *pd, pd, mbox)) {
1815 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
1816 DEVNAME(sc));
1817 goto freeme;
1818 }
1819
1820 switch (type) {
1821 case MFI_MGMT_VD:
1822 vdhs = bio_hs;
1823 vdhs->bv_status = BIOC_SVONLINE;
1824 vdhs->bv_size = pd->mpd_size / 2; /* XXX why? / 2 */
1825 vdhs->bv_level = -1; /* hotspare */
1826 vdhs->bv_nodisk = 1;
1827 break;
1828
1829 case MFI_MGMT_SD:
1830 sdhs = bio_hs;
1831 sdhs->bd_status = BIOC_SDHOTSPARE;
1832 sdhs->bd_size = pd->mpd_size / 2; /* XXX why? / 2 */
1833 sdhs->bd_channel = pd->mpd_enc_idx;
1834 sdhs->bd_target = pd->mpd_enc_slot;
1835 inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
1836 memcpy(vend, inqbuf->vendor, sizeof vend - 1);
1837 vend[sizeof vend - 1] = '\0';
1838 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
1839 break;
1840
1841 default:
1842 goto freeme;
1843 }
1844
1845 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
1846 rv = 0;
1847 freeme:
1848 free(pd, M_DEVBUF);
1849 free(cfg, M_DEVBUF);
1850
1851 return (rv);
1852 }
1853
1854 int
1855 mfi_create_sensors(struct mfi_softc *sc)
1856 {
1857 struct device *dev;
1858 struct scsibus_softc *ssc;
1859 int i;
1860
1861 TAILQ_FOREACH(dev, &alldevs, dv_list) {
1862 if (dev->dv_parent != &sc->sc_dev)
1863 continue;
1864
1865 /* check if this is the scsibus for the logical disks */
1866 ssc = (struct scsibus_softc *)dev;
1867 if (ssc->adapter_link == &sc->sc_link)
1868 break;
1869 }
1870
1871 	if (dev == NULL)
1872 return (1);
1873
1874 sc->sc_sensors = malloc(sizeof(struct sensor) * sc->sc_ld_cnt,
1875 M_DEVBUF, M_WAITOK);
1876 if (sc->sc_sensors == NULL)
1877 return (1);
1878 bzero(sc->sc_sensors, sizeof(struct sensor) * sc->sc_ld_cnt);
1879
1880 for (i = 0; i < sc->sc_ld_cnt; i++) {
1881 if (ssc->sc_link[i][0] == NULL)
1882 goto bad;
1883
1884 dev = ssc->sc_link[i][0]->device_softc;
1885
1886 sc->sc_sensors[i].type = SENSOR_DRIVE;
1887 sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
1888
1889 strlcpy(sc->sc_sensors[i].device, DEVNAME(sc),
1890 sizeof(sc->sc_sensors[i].device));
1891 strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
1892 sizeof(sc->sc_sensors[i].desc));
1893
1894 sensor_add(&sc->sc_sensors[i]);
1895 }
1896
1897 if (sensor_task_register(sc, mfi_refresh_sensors, 10) != 0)
1898 goto bad;
1899
1900 return (0);
1901
1902 bad:
1903 while (--i >= 0)
1904 sensor_del(&sc->sc_sensors[i]);
1905 free(sc->sc_sensors, M_DEVBUF);
1906
1907 return (1);
1908 }
1909
1910 void
1911 mfi_refresh_sensors(void *arg)
1912 {
1913 struct mfi_softc *sc = arg;
1914 int i;
1915 struct bioc_vol bv;
1916
1917
1918 for (i = 0; i < sc->sc_ld_cnt; i++) {
1919 bzero(&bv, sizeof(bv));
1920 bv.bv_volid = i;
1921 if (mfi_ioctl_vol(sc, &bv))
1922 return;
1923
1924 switch(bv.bv_status) {
1925 case BIOC_SVOFFLINE:
1926 sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
1927 sc->sc_sensors[i].status = SENSOR_S_CRIT;
1928 break;
1929
1930 case BIOC_SVDEGRADED:
1931 sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
1932 sc->sc_sensors[i].status = SENSOR_S_WARN;
1933 break;
1934
1935 case BIOC_SVSCRUB:
1936 case BIOC_SVONLINE:
1937 sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
1938 sc->sc_sensors[i].status = SENSOR_S_OK;
1939 break;
1940
1941 case BIOC_SVINVALID:
1942 			/* FALLTHROUGH */
1943 default:
1944 sc->sc_sensors[i].value = 0; /* unknown */
1945 sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
1946 }
1947
1948 }
1949 }
1950 #endif /* NBIO > 0 */
1951