/*	$NetBSD: mfii.c,v 1.30 2023/09/23 13:01:16 christos Exp $ */
/*	$OpenBSD: mfii.c,v 1.58 2018/08/14 05:22:21 jmatthew Exp $ */

/*
 * Copyright (c) 2018 Manuel Bouyer <Manuel.Bouyer@lip6.fr>
 * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mfii.c,v 1.30 2023/09/23 13:01:16 christos Exp $");

#include "bio.h"

#include <sys/atomic.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/workqueue.h>
#include <sys/malloc.h>

#include <uvm/uvm_param.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcivar.h>

#include <sys/bus.h>

#include <dev/sysmon/sysmonvar.h>
#include <sys/envsys.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsiconf.h>

#if NBIO > 0
#include <dev/biovar.h>
#endif /* NBIO > 0 */

#include <dev/ic/mfireg.h>
#include <dev/pci/mpiireg.h>

#define	MFII_BAR		0x14
#define	MFII_BAR_35		0x10
#define	MFII_PCI_MEMSIZE	0x2000 /* 8k */

#define	MFII_OSTS_INTR_VALID	0x00000009
#define	MFII_RPI		0x6c /* reply post host index */
#define	MFII_OSP2		0xb4 /* outbound scratch pad 2 */
#define	MFII_OSP3		0xb8 /* outbound scratch pad 3 */

#define	MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
#define	MFII_REQ_TYPE_LDIO	(0x7 << 1)
#define	MFII_REQ_TYPE_MFA	(0x1 << 1)
#define	MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
#define	MFII_REQ_TYPE_HI_PRI	(0x6 << 1)

#define	MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)

#define	MFII_FUNCTION_PASSTHRU_IO	(0xf0)
#define	MFII_FUNCTION_LDIO_REQUEST	(0xf1)

#define	MFII_MAX_CHAIN_UNIT	0x00400000
#define	MFII_MAX_CHAIN_MASK	0x000003E0
#define	MFII_MAX_CHAIN_SHIFT	5

#define	MFII_256K_IO		128
#define	MFII_1MB_IO		(MFII_256K_IO * 4)

#define	MFII_CHAIN_FRAME_MIN	1024

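/*
 * Request descriptor written to the inbound queue port(s) to post a
 * command: "flags" selects the descriptor type (MFA, SCSI, LDIO, ...)
 * and "smid" names the request frame the descriptor refers to.
 */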
struct mfii_request_descr {
	u_int8_t	flags;
	u_int8_t	msix_index;
	u_int16_t	smid;

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;

#define	MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)
#define	MFII_RAID_CTX_TYPE_CUDA		(0x2 << 4)

struct mfii_raid_context {
	u_int8_t	type_nseg;
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;
#define	MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define	MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define	MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define	MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

#define	MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define	MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;
	u_int8_t	status;

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;

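/*
 * Scatter-gather element in the MPII IEEE format; sg_flags carries the
 * MFII_SGE_* address type and the chain/end-of-list markers below.
 */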
struct mfii_sge {
	u_int64_t	sg_addr;
	u_int32_t	sg_len;
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;
} __packed;

#define	MFII_SGE_ADDR_MASK		(0x03)
#define	MFII_SGE_ADDR_SYSTEM		(0x00)
#define	MFII_SGE_ADDR_IOCDDR		(0x01)
#define	MFII_SGE_ADDR_IOCPLB		(0x02)
#define	MFII_SGE_ADDR_IOCPLBNTA		(0x03)
#define	MFII_SGE_END_OF_LIST		(0x40)
#define	MFII_SGE_CHAIN_ELEMENT		(0x80)

#define	MFII_REQUEST_SIZE	256

#define	MR_DCMD_LD_MAP_GET_INFO	0x0300e101

#define	MFII_MAX_ROW		32
#define	MFII_MAX_ARRAY		128

struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;

struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;

struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;

struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define	MFII_TASK_MGMT_FLAGS_LD		(1 << 0)
#define	MFII_TASK_MGMT_FLAGS_PD		(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);

/* We currently don't know the full details of the following struct */
struct mfii_foreign_scan_cfg {
	char data[24];
} __packed;

struct mfii_foreign_scan_info {
	uint32_t count; /* Number of foreign configs found */
	struct mfii_foreign_scan_cfg cfgs[8];
} __packed;

#define	MFII_MAX_LD_EXT		256

struct mfii_ld_list_ext {
	uint32_t		mll_no_ld;
	uint32_t		mll_res;
	struct {
		struct mfi_ld	mll_ld;
		uint8_t		mll_state; /* states are the same as MFI_ */
		uint8_t		mll_res2;
		uint8_t		mll_res3;
		uint8_t		mll_res4;
		uint64_t	mll_size;
	} mll_list[MFII_MAX_LD_EXT];
} __packed;

struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	void			*mdm_kva;
};
#define	MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define	MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define	MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define	MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)

struct mfii_softc;

typedef enum mfii_direction {
	MFII_DATA_NONE = 0,
	MFII_DATA_IN,
	MFII_DATA_OUT
} mfii_direction_t;

struct mfii_ccb {
	struct mfii_softc	*ccb_sc;
	void			*ccb_request;
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	void			*ccb_mfi;
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;

	bus_dmamap_t		ccb_dmamap64;
	bus_dmamap_t		ccb_dmamap32;
	bool			ccb_dma64;

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	mfii_direction_t	ccb_direction;

	void			*ccb_cookie;
	kmutex_t		ccb_mtx;
	kcondvar_t		ccb_cv;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define	MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);

struct mfii_iop {
	int bar;
	int num_sge_loc;
#define	MFII_IOP_NUM_SGE_LOC_ORIG	0
#define	MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;
	u_int8_t sge_flag_eol;
	u_int8_t iop_flag;
#define	MFII_IOP_QUIRK_REGREAD		0x01
#define	MFII_IOP_HAS_32BITDESC_BIT	0x02
};

struct mfii_softc {
	device_t		sc_dev;
	struct scsipi_channel	sc_chan;
	struct scsipi_adapter	sc_adapt;

	const struct mfii_iop	*sc_iop;
	u_int			sc_iop_flag;
#define	MFII_IOP_DESC_32BIT		0x01

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;
	bus_dma_tag_t		sc_dmat64;
	bool			sc_64bit_dma;

	void			*sc_ih;

	kmutex_t		sc_ccb_mtx;
	kmutex_t		sc_post_mtx;

	u_int			sc_max_fw_cmds;
	u_int			sc_max_cmds;
	u_int			sc_max_sgl;

	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	kmutex_t		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_mfi;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;
	struct mfii_ccb_list	sc_ccb_freeq;

	struct mfii_ccb		*sc_aen_ccb;
	struct workqueue	*sc_aen_wq;
	struct work		sc_aen_work;

	kmutex_t		sc_abort_mtx;
	struct mfii_ccb_list	sc_abort_list;
	struct workqueue	*sc_abort_wq;
	struct work		sc_abort_work;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		bool		ld_present;
		char		ld_dev[16];	/* device name sd? */
		int		ld_target_id;
	} sc_ld[MFII_MAX_LD_EXT];
	int			sc_target_lds[MFII_MAX_LD_EXT];
	bool			sc_max256vd;

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfii_ld_list_ext	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details; /* array of all logical disks */
	int			sc_no_pd; /* used physical disks */
	int			sc_ld_sz; /* sizeof sc_ld_details */

	/* mgmt lock */
	kmutex_t		sc_lock;
	bool			sc_running;

	/* sensors */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
	bool			sc_bbuok;

	device_t		sc_child;
};

// #define MFII_DEBUG
#ifdef MFII_DEBUG
#define	DPRINTF(x...)		do { if (mfii_debug) printf(x); } while(0)
#define	DNPRINTF(n,x...)	do { if (mfii_debug & n) printf(x); } while(0)
#define	MFII_D_CMD		0x0001
#define	MFII_D_INTR		0x0002
#define	MFII_D_MISC		0x0004
#define	MFII_D_DMA		0x0008
#define	MFII_D_IOCTL		0x0010
#define	MFII_D_RW		0x0020
#define	MFII_D_MEM		0x0040
#define	MFII_D_CCB		0x0080
uint32_t	mfii_debug = 0
/*		| MFII_D_CMD */
/*		| MFII_D_INTR */
		| MFII_D_MISC
/*		| MFII_D_DMA */
/*		| MFII_D_IOCTL */
/*		| MFII_D_RW */
/*		| MFII_D_MEM */
/*		| MFII_D_CCB */
		;
#else
#define	DPRINTF(x...)
#define	DNPRINTF(n,x...)
#endif

static int	mfii_match(device_t, cfdata_t, void *);
static void	mfii_attach(device_t, device_t, void *);
static int	mfii_detach(device_t, int);
static int	mfii_rescan(device_t, const char *, const int *);
static void	mfii_childdetached(device_t, device_t);
static bool	mfii_suspend(device_t, const pmf_qual_t *);
static bool	mfii_resume(device_t, const pmf_qual_t *);
static bool	mfii_shutdown(device_t, int);


CFATTACH_DECL3_NEW(mfii, sizeof(struct mfii_softc),
    mfii_match, mfii_attach, mfii_detach, NULL, mfii_rescan,
    mfii_childdetached, DVF_DETACH_SHUTDOWN);

static void	mfii_scsipi_request(struct scsipi_channel *,
			scsipi_adapter_req_t, void *);
static void	mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);

#define	DEVNAME(_sc)		(device_xname((_sc)->sc_dev))

static u_int32_t	mfii_read(struct mfii_softc *, bus_size_t);
static void		mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);

static struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
static void		mfii_dmamem_free(struct mfii_softc *,
			    struct mfii_dmamem *);

static struct mfii_ccb *	mfii_get_ccb(struct mfii_softc *);
static void		mfii_put_ccb(struct mfii_softc *, struct mfii_ccb *);
static int		mfii_init_ccb(struct mfii_softc *);
static void		mfii_scrub_ccb(struct mfii_ccb *);

static int		mfii_transition_firmware(struct mfii_softc *);
static int		mfii_initialise_firmware(struct mfii_softc *);
static int		mfii_get_info(struct mfii_softc *);

static void		mfii_start(struct mfii_softc *, struct mfii_ccb *);
static void		mfii_start64(struct mfii_softc *, struct mfii_ccb *);
static void		mfii_start_common(struct mfii_softc *,
			    struct mfii_ccb *, bool);
static void		mfii_done(struct mfii_softc *, struct mfii_ccb *);
static int		mfii_poll(struct mfii_softc *, struct mfii_ccb *);
static void		mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
static int		mfii_exec(struct mfii_softc *, struct mfii_ccb *);
static void		mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
static int		mfii_my_intr(struct mfii_softc *);
static int		mfii_intr(void *);
static void		mfii_postq(struct mfii_softc *);

static int		mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
			    void *, int);
static int		mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
			    void *, int);

static int		mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);

static int		mfii_mgmt(struct mfii_softc *, uint32_t,
			    const union mfi_mbox *, void *, size_t,
			    mfii_direction_t, bool);
static int		mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
			    uint32_t, const union mfi_mbox *, void *, size_t,
			    mfii_direction_t, bool);
static void		mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);

static int		mfii_scsi_cmd_io(struct mfii_softc *,
			    struct mfii_ccb *, struct scsipi_xfer *);
static int		mfii_scsi_cmd_cdb(struct mfii_softc *,
			    struct mfii_ccb *, struct scsipi_xfer *);
static void		mfii_scsi_cmd_tmo(void *);

static void		mfii_abort_task(struct work *, void *);
static void		mfii_abort(struct mfii_softc *, struct mfii_ccb *,
			    uint16_t, uint16_t, uint8_t, uint32_t);
static void		mfii_scsi_cmd_abort_done(struct mfii_softc *,
			    struct mfii_ccb *);

static int		mfii_aen_register(struct mfii_softc *);
static void		mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
			    struct mfii_dmamem *, uint32_t);
static void		mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
static void		mfii_aen(struct work *, void *);
static void		mfii_aen_unregister(struct mfii_softc *);

static void		mfii_aen_pd_insert(struct mfii_softc *,
			    const struct mfi_evtarg_pd_address *);
static void		mfii_aen_pd_remove(struct mfii_softc *,
			    const struct mfi_evtarg_pd_address *);
static void		mfii_aen_pd_state_change(struct mfii_softc *,
			    const struct mfi_evtarg_pd_state *);
static void		mfii_aen_ld_update(struct mfii_softc *);

#if NBIO > 0
static int	mfii_ioctl(device_t, u_long, void *);
static int	mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
static int	mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
static int	mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
static int	mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
static int	mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
static int	mfii_ioctl_setstate(struct mfii_softc *,
		    struct bioc_setstate *);
static int	mfii_bio_hs(struct mfii_softc *, int, int, void *);
static int	mfii_bio_getitall(struct mfii_softc *);
#endif /* NBIO > 0 */

#if 0
static const char *mfi_bbu_indicators[] = {
	"pack missing",
	"voltage low",
	"temp high",
	"charge active",
	"discharge active",
	"learn cycle req'd",
	"learn cycle active",
	"learn cycle failed",
	"learn cycle timeout",
	"I2C errors",
	"replace pack",
	"low capacity",
	"periodic learn req'd"
};
#endif

#define MFI_BBU_SENSORS 4

static void	mfii_init_ld_sensor(struct mfii_softc *, envsys_data_t *, int);
static void	mfii_refresh_ld_sensor(struct mfii_softc *, envsys_data_t *);
static void	mfii_attach_sensor(struct mfii_softc *, envsys_data_t *);
static int	mfii_create_sensors(struct mfii_softc *);
static int	mfii_destroy_sensors(struct mfii_softc *);
static void	mfii_refresh_sensor(struct sysmon_envsys *, envsys_data_t *);
static void	mfii_bbu(struct mfii_softc *, envsys_data_t *);

/*
 * mfii boards support asynchronous (and non-polled) completion of
 * dcmds by proxying them through a passthru mpii command that points
 * at a dcmd frame.  since the passthru command is submitted like
 * the scsi commands using an SMID in the request descriptor,
 * ccb_request memory must contain the passthru command because
 * that is what the SMID refers to.  this means ccb_request cannot
 * contain the dcmd.  rather than allocating separate dma memory to
 * hold the dcmd, we reuse the sense memory buffer for it.
 */

static void	mfii_dcmd_start(struct mfii_softc *, struct mfii_ccb *);

static inline void
mfii_dcmd_scrub(struct mfii_ccb *ccb)
{
	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
}

static inline struct mfi_dcmd_frame *
mfii_dcmd_frame(struct mfii_ccb *ccb)
{
	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
}

static inline void
mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
}

#define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)

static const struct mfii_iop mfii_iop_thunderbolt = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	0,
	MFII_REQ_TYPE_LDIO,
	0,
	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
	0,
	0
};

/*
 * a lot of these values depend on us not implementing fastpath yet.
 */
static const struct mfii_iop mfii_iop_25 = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST,
	0
};

static const struct mfii_iop mfii_iop_35 = {
	MFII_BAR_35,
	MFII_IOP_NUM_SGE_LOC_35,
	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST,
	0
};

static const struct mfii_iop mfii_iop_aero = {
	MFII_BAR_35,
	MFII_IOP_NUM_SGE_LOC_35,
	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST,
	MFII_IOP_QUIRK_REGREAD | MFII_IOP_HAS_32BITDESC_BIT
};

struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};

static const struct mfii_device mfii_devices[] = {
	/* Fusion */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	/* Fury */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	/* Invader */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	/* Intruder */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3316,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3324,
	    &mfii_iop_25 },
	/* Cutlass */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_32XX_1,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_32XX_2,
	    &mfii_iop_25 },
	/* Crusader */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	/* Ventura */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 },
	/* Tomcat */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	/* Harpoon */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	/* Aero */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_39XX_2,
	    &mfii_iop_aero },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_39XX_3,
	    &mfii_iop_aero },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_38XX_2,
	    &mfii_iop_aero },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_38XX_3,
	    &mfii_iop_aero }
};

static const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);

static const struct mfii_iop *
mfii_find_iop(struct pci_attach_args *pa)
{
	const struct mfii_device *mpd;
	int i;

	for (i = 0; i < __arraycount(mfii_devices); i++) {
		mpd = &mfii_devices[i];

		if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
		    mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
			return (mpd->mpd_iop);
	}

	return (NULL);
}

static int
mfii_match(device_t parent, cfdata_t match, void *aux)
{
	return ((mfii_find_iop(aux) != NULL) ? 2 : 0);
}

static void
mfii_attach(device_t parent, device_t self, void *aux)
{
	struct mfii_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pcireg_t memtype;
	pci_intr_handle_t *ihp;
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr;
	u_int32_t status, scpad2, scpad3;
	int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
	struct scsipi_adapter *adapt = &sc->sc_adapt;
	struct scsipi_channel *chan = &sc->sc_chan;
	union mfi_mbox mbox;

	/* init sc */
	sc->sc_dev = self;
	sc->sc_iop = mfii_find_iop(aux);
	sc->sc_dmat = pa->pa_dmat;
	if (pci_dma64_available(pa)) {
		sc->sc_dmat64 = pa->pa_dmat64;
		sc->sc_64bit_dma = 1;
	} else {
		sc->sc_dmat64 = pa->pa_dmat;
		sc->sc_64bit_dma = 0;
	}
	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
	mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
	mutex_init(&sc->sc_post_mtx, MUTEX_DEFAULT, IPL_BIO);
	mutex_init(&sc->sc_reply_postq_mtx, MUTEX_DEFAULT, IPL_BIO);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	sc->sc_aen_ccb = NULL;
	snprintf(intrbuf, sizeof(intrbuf) - 1, "%saen", device_xname(self));
	workqueue_create(&sc->sc_aen_wq, intrbuf, mfii_aen, sc,
	    PRI_BIO, IPL_BIO, WQ_MPSAFE);

	snprintf(intrbuf, sizeof(intrbuf) - 1, "%sabrt", device_xname(self));
	workqueue_create(&sc->sc_abort_wq, intrbuf, mfii_abort_task,
	    sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);

	mutex_init(&sc->sc_abort_mtx, MUTEX_DEFAULT, IPL_BIO);
	SIMPLEQ_INIT(&sc->sc_abort_list);

	/* wire up the bus shizz */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
	memtype |= PCI_MAPREG_MEM_TYPE_32BIT;
	if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)) {
		aprint_error(": unable to map registers\n");
		return;
	}

	/* disable interrupts */
	mfii_write(sc, MFI_OMSK, 0xffffffff);

	if (pci_intr_alloc(pa, &ihp, NULL, 0)) {
		aprint_error(": unable to map interrupt\n");
		goto pci_unmap;
	}
	intrstr = pci_intr_string(pa->pa_pc, ihp[0], intrbuf, sizeof(intrbuf));
	pci_intr_setattr(pa->pa_pc, &ihp[0], PCI_INTR_MPSAFE, true);

	/* let's get started */
	if (mfii_transition_firmware(sc))
		goto pci_unmap;
	sc->sc_running = true;

	/* determine max_cmds (refer to the Linux megaraid_sas driver) */
	scpad3 = mfii_read(sc, MFII_OSP3);
	status = mfii_fw_state(sc);
	sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
	if (sc->sc_max_fw_cmds == 0)
		sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
	/*
	 * reduce max_cmds by 1 to ensure that the reply queue depth does not
	 * exceed FW supplied max_fw_cmds.
	 */
	sc->sc_max_cmds = uimin(sc->sc_max_fw_cmds, 1024) - 1;

	/* determine max_sgl (refer to the Linux megaraid_sas driver) */
	scpad2 = mfii_read(sc, MFII_OSP2);
	chain_frame_sz =
	    ((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
	    ((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
	if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
		chain_frame_sz = MFII_CHAIN_FRAME_MIN;

	nsge_in_io = (MFII_REQUEST_SIZE -
	    sizeof(struct mpii_msg_scsi_io) -
	    sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
	nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);

	/* round down to nearest power of two */
	sc->sc_max_sgl = 1;
	while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
		sc->sc_max_sgl <<= 1;

	/* Check for atomic(32bit) descriptor */
	if (((sc->sc_iop->iop_flag & MFII_IOP_HAS_32BITDESC_BIT) != 0) &&
	    ((scpad2 & MFI_STATE_ATOMIC_DESCRIPTOR) != 0))
		sc->sc_iop_flag |= MFII_IOP_DESC_32BIT;

	DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
	    DEVNAME(sc), status, scpad2, scpad3);
	DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
	    DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
	DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
	    "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
	    sc->sc_max_sgl);

	/* sense memory */
	CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
	if (sc->sc_sense == NULL) {
		aprint_error(": unable to allocate sense memory\n");
		goto pci_unmap;
	}

	/* reply post queue */
	sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);

	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
	if (sc->sc_reply_postq == NULL)
		goto free_sense;

	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
	    MFII_DMA_LEN(sc->sc_reply_postq));

	/* MPII request frame array */
	sc->sc_requests = mfii_dmamem_alloc(sc,
	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
	if (sc->sc_requests == NULL)
		goto free_reply_postq;

	/* MFI command frame array */
	sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
	if (sc->sc_mfi == NULL)
		goto free_requests;

	/* MPII SGL array */
	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
	if (sc->sc_sgl == NULL)
		goto free_mfi;

	if (mfii_init_ccb(sc) != 0) {
		aprint_error(": could not init ccb list\n");
		goto free_sgl;
	}

	/* kickstart firmware with all addresses and pointers */
	if (mfii_initialise_firmware(sc) != 0) {
		aprint_error(": could not initialize firmware\n");
		goto free_sgl;
	}

	mutex_enter(&sc->sc_lock);
	if (mfii_get_info(sc) != 0) {
		mutex_exit(&sc->sc_lock);
		aprint_error(": could not retrieve controller information\n");
		goto free_sgl;
	}
	mutex_exit(&sc->sc_lock);

	aprint_normal(": \"%s\", firmware %s",
	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
	if (le16toh(sc->sc_info.mci_memory_size) > 0) {
		aprint_normal(", %uMB cache",
		    le16toh(sc->sc_info.mci_memory_size));
	}
	aprint_normal("\n");
	aprint_naive("\n");

	sc->sc_ih = pci_intr_establish_xname(sc->sc_pc, ihp[0], IPL_BIO,
	    mfii_intr, sc, DEVNAME(sc));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto free_sgl;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	for (i = 0; i < sc->sc_info.mci_lds_present; i++)
		sc->sc_ld[i].ld_present = 1;

	sc->sc_max256vd =
	    (sc->sc_info.mci_adapter_ops3 & MFI_INFO_AOPS3_SUPP_MAX_EXT_LDS) ?
	    true : false;

	if (sc->sc_max256vd)
		aprint_verbose_dev(self, "Max 256 VD support\n");

	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* keep a few commands for management */
	if (sc->sc_max_cmds > 4)
		adapt->adapt_openings = sc->sc_max_cmds - 4;
	else
		adapt->adapt_openings = sc->sc_max_cmds;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = mfii_scsipi_request;
	adapt->adapt_minphys = minphys;
	adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_sas_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = sc->sc_info.mci_max_lds;
	chan->chan_id = sc->sc_info.mci_max_lds;

	mfii_rescan(sc->sc_dev, NULL, NULL);

	if (mfii_aen_register(sc) != 0) {
		/* error printed by mfii_aen_register */
		goto intr_disestablish;
	}

	memset(&mbox, 0, sizeof(mbox));
	if (sc->sc_max256vd)
		mbox.b[0] = 1;
	mutex_enter(&sc->sc_lock);
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, &mbox, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, true) != 0) {
		mutex_exit(&sc->sc_lock);
		aprint_error_dev(self,
		    "getting list of logical disks failed\n");
		goto intr_disestablish;
	}
	mutex_exit(&sc->sc_lock);
	memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		sc->sc_target_lds[target] = i;
		sc->sc_ld[i].ld_target_id = target;
	}

	/* enable interrupts */
	mfii_write(sc, MFI_OSTS, 0xffffffff);
	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);

#if NBIO > 0
	if (bio_register(sc->sc_dev, mfii_ioctl) != 0)
		panic("%s: controller registration failed", DEVNAME(sc));
#endif /* NBIO > 0 */

	if (mfii_create_sensors(sc) != 0)
		aprint_error_dev(self, "unable to create sensors\n");

	if (!pmf_device_register1(sc->sc_dev, mfii_suspend, mfii_resume,
	    mfii_shutdown))
		aprint_error_dev(self, "couldn't establish power handler\n");
	return;
intr_disestablish:
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
free_sgl:
	mfii_dmamem_free(sc, sc->sc_sgl);
free_mfi:
	mfii_dmamem_free(sc, sc->sc_mfi);
free_requests:
	mfii_dmamem_free(sc, sc->sc_requests);
free_reply_postq:
	mfii_dmamem_free(sc, sc->sc_reply_postq);
free_sense:
	mfii_dmamem_free(sc, sc->sc_sense);
pci_unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
}

#if 0
struct srp_gc mfii_dev_handles_gc =
    SRP_GC_INITIALIZER(mfii_dev_handles_dtor, NULL);

static inline uint16_t
mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
{
	struct srp_ref sr;
	uint16_t *map, handle;

	map = srp_enter(&sr, &sc->sc_pd->pd_dev_handles);
	handle = map[target];
	srp_leave(&sr);

	return (handle);
}

static int
mfii_dev_handles_update(struct mfii_softc *sc)
{
	struct mfii_ld_map *lm;
	uint16_t *dev_handles = NULL;
	int i;
	int rv = 0;

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);

	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
	    MFII_DATA_IN, false);

	if (rv != 0) {
		rv = EIO;
		goto free_lm;
	}

	dev_handles = mallocarray(MFI_MAX_PD, sizeof(*dev_handles),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MFI_MAX_PD; i++)
		dev_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;

	/* commit the updated info */
	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
	srp_update_locked(&mfii_dev_handles_gc,
	    &sc->sc_pd->pd_dev_handles, dev_handles);

free_lm:
	free(lm, M_TEMP, sizeof(*lm));

	return (rv);
}

static void
mfii_dev_handles_dtor(void *null, void *v)
{
	uint16_t *dev_handles = v;

	free(dev_handles, M_DEVBUF, sizeof(*dev_handles) * MFI_MAX_PD);
}
#endif /* 0 */

static int
mfii_detach(device_t self, int flags)
{
	struct mfii_softc *sc = device_private(self);
	int error;

	if (sc->sc_ih == NULL)
		return (0);

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

	mfii_destroy_sensors(sc);
#if NBIO > 0
	bio_unregister(sc->sc_dev);
#endif
	mfii_shutdown(sc->sc_dev, 0);
	mfii_write(sc, MFI_OMSK, 0xffffffff);

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}

static int
mfii_rescan(device_t self, const char *ifattr, const int *locators)
{
	struct mfii_softc *sc = device_private(self);

	if (sc->sc_child != NULL)
		return 0;

	sc->sc_child = config_found(self, &sc->sc_chan, scsiprint,
	    CFARGS_NONE);
	return 0;
}

static void
mfii_childdetached(device_t self, device_t child)
{
	struct mfii_softc *sc = device_private(self);

	KASSERT(self == sc->sc_dev);
	KASSERT(child == sc->sc_child);

	if (child == sc->sc_child)
		sc->sc_child = NULL;
}

static bool
mfii_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}

static bool
mfii_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}

static bool
mfii_shutdown(device_t dev, int how)
{
	struct mfii_softc *sc = device_private(dev);
	struct mfii_ccb *ccb;
	union mfi_mbox mbox;
	bool rv = true;

	memset(&mbox, 0, sizeof(mbox));

	mutex_enter(&sc->sc_lock);
	DNPRINTF(MFII_D_MISC, "%s: mfii_shutdown\n", DEVNAME(sc));
	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		mutex_exit(&sc->sc_lock);
		return false;
	}
	mutex_enter(&sc->sc_ccb_mtx);
	if (sc->sc_running) {
		sc->sc_running = 0; /* prevent new commands */
		mutex_exit(&sc->sc_ccb_mtx);
#if 0 /* XXX why does this hang ? */
		mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
		mfii_scrub_ccb(ccb);
		if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
		    NULL, 0, MFII_DATA_NONE, true)) {
			aprint_error_dev(dev,
			    "shutdown: cache flush failed\n");
			rv = false;
			goto fail;
		}
		printf("ok1\n");
#endif
		mbox.b[0] = 0;
		mfii_scrub_ccb(ccb);
		if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, &mbox,
		    NULL, 0, MFII_DATA_NONE, true)) {
			aprint_error_dev(dev, "shutdown: "
			    "firmware shutdown failed\n");
			rv = false;
			goto fail;
		}
	} else {
		mutex_exit(&sc->sc_ccb_mtx);
	}
fail:
	mfii_put_ccb(sc, ccb);
	mutex_exit(&sc->sc_lock);
	return rv;
}

/* Register read function without retry */
static inline u_int32_t
mfii_read_wor(struct mfii_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}

static u_int32_t
mfii_read(struct mfii_softc *sc, bus_size_t r)
{
	uint32_t rv;
	int i = 0;

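	/* Aero quirk: register reads may return 0; retry up to 3 times. */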
	if ((sc->sc_iop->iop_flag & MFII_IOP_QUIRK_REGREAD) != 0) {
		do {
			rv = mfii_read_wor(sc, r);
			i++;
		} while ((rv == 0) && (i < 3));
	} else
		rv = mfii_read_wor(sc, r);

	return rv;
}

static void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

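/*
 * Allocate a single-segment, zeroed DMA buffer and map it into kernel
 * space; on failure, tear down in the reverse order of setup.
 */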
static struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_WAITOK | M_ZERO);
	m->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(m->mdm_kva, 0, size);
	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF);

	return (NULL);
}

static void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF);
}

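/*
 * Wrap the dcmd frame (stashed in the ccb's sense buffer, see the
 * comment above mfii_dcmd_scrub()) in an MPII passthru command and
 * post it like a regular SCSI I/O.
 */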
static void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	sge->sg_addr = htole64(ccb->ccb_sense_dva);
	sge->sg_len = htole32(sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	mfii_start(sc, ccb);
}

static int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), MFII_DATA_IN, true);
	if (rv != 0) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to get event info\n");
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate event data\n");
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, le32toh(mel.mel_boot_seq_num));

	return (0);
}

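/*
 * (Re)arm asynchronous event notification: build an
 * MR_DCMD_CTRL_EVENT_WAIT dcmd pointing at the event detail buffer
 * and post it; completion runs mfii_aen_done().
 */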
static void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	hdr->mfh_data_len = htole32(MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mdf_mbox.w[0] = htole32(seq);
	dcmd->mdf_mbox.w[1] = htole32(mec.mec_word);
	sgl->sg64[0].addr = htole64(MFII_DMA_DVA(mdm));
	sgl->sg64[0].len = htole32(MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}

static void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/*
	 * Defer to a thread holding KERNEL_LOCK so we can run autoconf.
	 * We shouldn't have more than one AEN command pending at a time,
	 * so no locking is needed.
	 */
	if (sc->sc_running)
		workqueue_enqueue(sc->sc_aen_wq, &sc->sc_aen_work, NULL);
}

static void
mfii_aen(struct work *wk, void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);

	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	DNPRINTF(MFII_D_MISC, "%s: %u %08x %02x %s\n", DEVNAME(sc),
	    le32toh(med->med_seq_num), le32toh(med->med_code),
	    med->med_arg_type, med->med_description);

	switch (le32toh(med->med_code)) {
	case MR_EVT_PD_INSERTED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
	case MR_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MR_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MR_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MR_EVT_LD_CREATED:
	case MR_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	mfii_aen_start(sc, ccb, mdm, le32toh(med->med_seq_num) + 1);
}

static void
mfii_aen_pd_insert(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
	printf("%s: physical disk inserted id %d enclosure %d\n", DEVNAME(sc),
	    le16toh(pd->device_id), le16toh(pd->encl_id));
}

static void
mfii_aen_pd_remove(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
	printf("%s: physical disk removed id %d enclosure %d\n", DEVNAME(sc),
	    le16toh(pd->device_id), le16toh(pd->encl_id));
}

static void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
	return;
}

static void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	union mfi_mbox mbox;
	int i, j, target, old, nld;
	int newlds[MFII_MAX_LD_EXT];

	memset(&mbox, 0, sizeof(mbox));
	if (sc->sc_max256vd)
		mbox.b[0] = 1;
	mutex_enter(&sc->sc_lock);
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, &mbox, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false) != 0) {
		mutex_exit(&sc->sc_lock);
		DNPRINTF(MFII_D_MISC,
		    "%s: getting list of logical disks failed\n", DEVNAME(sc));
		return;
	}
	mutex_exit(&sc->sc_lock);

	memset(newlds, -1, sizeof(newlds));

	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, sc->sc_ld_list.mll_list[i].mll_state);
		newlds[target] = i;
		sc->sc_ld[i].ld_target_id = target;
	}

	for (i = 0; i < MFII_MAX_LD_EXT; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			printf("%s: logical drive %d added (target %d)\n",
			    DEVNAME(sc), i, nld);
			sc->sc_ld[i].ld_present = 1;

			// XXX scsi_probe_target(sc->sc_scsibus, i);

			j = i + MFI_BBU_SENSORS;
			mfii_init_ld_sensor(sc, &sc->sc_sensors[j], i);
			mfii_attach_sensor(sc, &sc->sc_sensors[j]);
		} else if (nld == -1 && old != -1) {
			printf("%s: logical drive %d removed (target %d)\n",
			    DEVNAME(sc), i, old);
			sc->sc_ld[i].ld_present = 0;

			scsipi_target_detach(&sc->sc_chan, i, 0, DETACH_FORCE);
			sysmon_envsys_sensor_detach(sc->sc_sme,
			    &sc->sc_sensors[i + MFI_BBU_SENSORS]);
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}

static void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}

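/*
 * Step the firmware state machine until it reaches READY, issuing the
 * handshake/ready doorbell writes some states require and polling each
 * state for up to max_wait seconds.
 */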
static int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t fw_state, cur_state;
	int max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}

static int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), MFII_DATA_IN, true);

	if (rv != 0)
		return (rv);

	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ",
		    sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ",
		    sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}

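/*
 * Poll an MFI frame to completion by busy-waiting on mfh_cmd_status
 * (up to ~5 seconds); currently called only for MFI_CMD_INIT.
 */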
static int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header *hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	/*
	 * Even if the Aero card supports the 32-bit descriptor, 64-bit
	 * descriptor access is required for MFI_CMD_INIT.
	 * Currently, mfii_mfa_poll() is called for MFI_CMD_INIT only.
	 */
	mfii_start64(sc, ccb);

	for (;;) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	return (rv);
}

static int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}

static void
mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	int *rv = ccb->ccb_cookie;

	*rv = 0;
}

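/*
 * Issue a ccb and sleep on its condvar until the completion handler
 * (mfii_exec_done) clears ccb_cookie.
 */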
static int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = ccb;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	return (0);
}

static void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&ccb->ccb_mtx);
	ccb->ccb_cookie = NULL;
	cv_signal(&ccb->ccb_cv);
	mutex_exit(&ccb->ccb_mtx);
}

static int
mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
    void *buf, size_t len, mfii_direction_t dir, bool poll)
{
	struct mfii_ccb *ccb;
	int rv;

	KASSERT(mutex_owned(&sc->sc_lock));
	if (!sc->sc_running)
		return EAGAIN;

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL)
		return (ENOMEM);

	mfii_scrub_ccb(ccb);
	rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, dir, poll);
	mfii_put_ccb(sc, ccb);

	return (rv);
}

static int
mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
    const union mfi_mbox *mbox, void *buf, size_t len, mfii_direction_t dir,
    bool poll)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	int rv = EIO;

	if (cold)
		poll = true;

	ccb->ccb_data = buf;
	ccb->ccb_len = len;
	ccb->ccb_direction = dir;
	switch (dir) {
	case MFII_DATA_IN:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case MFII_DATA_OUT:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		break;
	case MFII_DATA_NONE:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
		break;
	}

	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl, poll) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap32->dm_nsegs;
	KASSERT(!ccb->ccb_dma64);

	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

1896 io->function = MFII_FUNCTION_PASSTHRU_IO;
1897 io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
1898 io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;
1899
1900 sge->sg_addr = htole64(ccb->ccb_mfi_dva);
1901 sge->sg_len = htole32(MFI_FRAME_SIZE);
1902 sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;
1903
1904 ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
1905 ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
1906
1907 if (poll) {
1908 ccb->ccb_done = mfii_empty_done;
1909 mfii_poll(sc, ccb);
1910 } else
1911 mfii_exec(sc, ccb);
1912
1913 if (hdr->mfh_cmd_status == MFI_STAT_OK) {
1914 rv = 0;
1915 }
1916
1917 done:
1918 return (rv);
1919 }
1920
1921 static void
1922 mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1923 {
1924 return;
1925 }
1926
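/*
 * Load the data buffer into the ccb's 32-bit dmamap and fill in the
 * MFI frame's 32-bit scatter/gather list.
 */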
1927 static int
1928 mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
1929 void *sglp, int nosleep)
1930 {
1931 union mfi_sgl *sgl = sglp;
1932 bus_dmamap_t dmap = ccb->ccb_dmamap32;
1933 int error;
1934 int i;
1935
1936 KASSERT(!ccb->ccb_dma64);
1937 if (ccb->ccb_len == 0)
1938 return (0);
1939
1940 error = bus_dmamap_load(sc->sc_dmat, dmap,
1941 ccb->ccb_data, ccb->ccb_len, NULL,
1942 nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
1943 if (error) {
1944 printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
1945 return (1);
1946 }
1947
1948 for (i = 0; i < dmap->dm_nsegs; i++) {
1949 sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
1950 sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
1951 }
1952
1953 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1954 ccb->ccb_direction == MFII_DATA_OUT ?
1955 BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
1956
1957 return (0);
1958 }
1959
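/*
 * Post a request descriptor to the controller.  The descriptor is
 * normally written as a single 64-bit value (two ordered 32-bit
 * writes on !__LP64__ machines); IOPs flagged MFII_IOP_DESC_32BIT
 * take only the low 32 bits, through the MFI_ISQP register.
 */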
1960 static void
1961 mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
1962 {
1963
1964 	mfii_start_common(sc, ccb,
1965 	    (sc->sc_iop_flag & MFII_IOP_DESC_32BIT) != 0);
1966 }
1967
1968 static void
1969 mfii_start64(struct mfii_softc *sc, struct mfii_ccb *ccb)
1970 {
1971
1972 mfii_start_common(sc, ccb, false);
1973 }
1974
1975 static void
1976 mfii_start_common(struct mfii_softc *sc, struct mfii_ccb *ccb, bool do32)
1977 {
1978 uint32_t *r = (uint32_t *)&ccb->ccb_req;
1979
1980 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
1981 ccb->ccb_request_offset, MFII_REQUEST_SIZE,
1982 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1983
1984 if (do32)
1985 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_ISQP, r[0]);
1986 else {
1987 #if defined(__LP64__)
1988 uint64_t buf;
1989
1990 buf = ((uint64_t)r[1] << 32) | r[0];
1991 bus_space_write_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, buf);
1992 #else
1993 mutex_enter(&sc->sc_post_mtx);
1994 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
1995 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
1996 bus_space_barrier(sc->sc_iot, sc->sc_ioh,
1997 MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);
1998 mutex_exit(&sc->sc_post_mtx);
1999 #endif
2000 }
2001 }
2002
2003 static void
2004 mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
2005 {
2006 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
2007 ccb->ccb_request_offset, MFII_REQUEST_SIZE,
2008 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2009
2010 if (ccb->ccb_sgl_len > 0) {
2011 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
2012 ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
2013 BUS_DMASYNC_POSTWRITE);
2014 }
2015
2016 if (ccb->ccb_dma64) {
2017 KASSERT(ccb->ccb_len > 0);
2018 bus_dmamap_sync(sc->sc_dmat64, ccb->ccb_dmamap64,
2019 0, ccb->ccb_dmamap64->dm_mapsize,
2020 (ccb->ccb_direction == MFII_DATA_IN) ?
2021 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
2022
2023 bus_dmamap_unload(sc->sc_dmat64, ccb->ccb_dmamap64);
2024 } else if (ccb->ccb_len > 0) {
2025 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
2026 0, ccb->ccb_dmamap32->dm_mapsize,
2027 (ccb->ccb_direction == MFII_DATA_IN) ?
2028 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
2029
2030 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
2031 }
2032
2033 ccb->ccb_done(sc, ccb);
2034 }
2035
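/*
 * Send the MPII IOC INIT request that tells the firmware where the
 * request frames, sense buffers and reply post queue live.  The
 * request is carried by an MFI_CMD_INIT frame and polled for
 * completion.
 */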
2036 static int
2037 mfii_initialise_firmware(struct mfii_softc *sc)
2038 {
2039 struct mpii_msg_iocinit_request *iiq;
2040 struct mfii_dmamem *m;
2041 struct mfii_ccb *ccb;
2042 struct mfi_init_frame *init;
2043 int rv;
2044
2045 m = mfii_dmamem_alloc(sc, sizeof(*iiq));
2046 if (m == NULL)
2047 return (1);
2048
2049 iiq = MFII_DMA_KVA(m);
2050 memset(iiq, 0, sizeof(*iiq));
2051
2052 iiq->function = MPII_FUNCTION_IOC_INIT;
2053 iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
2054
2055 iiq->msg_version_maj = 0x02;
2056 iiq->msg_version_min = 0x00;
2057 iiq->hdr_version_unit = 0x10;
2058 iiq->hdr_version_dev = 0x0;
2059
2060 iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
2061
2062 iiq->reply_descriptor_post_queue_depth =
2063 htole16(sc->sc_reply_postq_depth);
2064 iiq->reply_free_queue_depth = htole16(0);
2065
2066 iiq->sense_buffer_address_high = htole32(
2067 MFII_DMA_DVA(sc->sc_sense) >> 32);
2068
2069 iiq->reply_descriptor_post_queue_address_lo =
2070 htole32(MFII_DMA_DVA(sc->sc_reply_postq));
2071 iiq->reply_descriptor_post_queue_address_hi =
2072 htole32(MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
2073
2074 iiq->system_request_frame_base_address_lo =
2075 htole32(MFII_DMA_DVA(sc->sc_requests));
2076 iiq->system_request_frame_base_address_hi =
2077 htole32(MFII_DMA_DVA(sc->sc_requests) >> 32);
2078
2079 iiq->timestamp = htole64(time_uptime);
2080
2081 ccb = mfii_get_ccb(sc);
2082 if (ccb == NULL) {
2083 /* shouldn't ever run out of ccbs during attach */
2084 return (1);
2085 }
2086 mfii_scrub_ccb(ccb);
2087 init = ccb->ccb_request;
2088
2089 init->mif_header.mfh_cmd = MFI_CMD_INIT;
2090 init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
2091 init->mif_qinfo_new_addr_lo = htole32(MFII_DMA_DVA(m));
2092 init->mif_qinfo_new_addr_hi = htole32((uint64_t)MFII_DMA_DVA(m) >> 32);
2093
2094 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2095 0, MFII_DMA_LEN(sc->sc_reply_postq),
2096 BUS_DMASYNC_PREREAD);
2097
2098 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2099 0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
2100
2101 rv = mfii_mfa_poll(sc, ccb);
2102
2103 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2104 0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
2105
2106 mfii_put_ccb(sc, ccb);
2107 mfii_dmamem_free(sc, m);
2108
2109 return (rv);
2110 }
2111
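/*
 * Check whether an interrupt was raised by this controller.  A
 * status with bit 0x1 set is acknowledged by writing it back;
 * otherwise the MFII_OSTS_INTR_VALID bits decide ownership.
 */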
2112 static int
2113 mfii_my_intr(struct mfii_softc *sc)
2114 {
2115 u_int32_t status;
2116
2117 status = mfii_read(sc, MFI_OSTS);
2118
2119 DNPRINTF(MFII_D_INTR, "%s: intr status 0x%x\n", DEVNAME(sc), status);
2120 if (ISSET(status, 0x1)) {
2121 mfii_write(sc, MFI_OSTS, status);
2122 return (1);
2123 }
2124
2125 return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
2126 }
2127
2128 static int
2129 mfii_intr(void *arg)
2130 {
2131 struct mfii_softc *sc = arg;
2132
2133 if (!mfii_my_intr(sc))
2134 return (0);
2135
2136 mfii_postq(sc);
2137
2138 return (1);
2139 }
2140
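/*
 * Drain the reply post queue.  Completed ccbs are collected on a
 * local list, each consumed descriptor is reset to the 0xff "unused"
 * pattern, and the host reply-post index register is updated once;
 * the completions then run without the queue mutex held.
 */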
2141 static void
2142 mfii_postq(struct mfii_softc *sc)
2143 {
2144 struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
2145 struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
2146 struct mpii_reply_descr *rdp;
2147 struct mfii_ccb *ccb;
2148 int rpi = 0;
2149
2150 mutex_enter(&sc->sc_reply_postq_mtx);
2151
2152 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2153 0, MFII_DMA_LEN(sc->sc_reply_postq),
2154 BUS_DMASYNC_POSTREAD);
2155
2156 for (;;) {
2157 rdp = &postq[sc->sc_reply_postq_index];
2158 DNPRINTF(MFII_D_INTR,
2159 "%s: mfii_postq index %d flags 0x%x data 0x%x\n",
2160 DEVNAME(sc), sc->sc_reply_postq_index, rdp->reply_flags,
2161 		    le32toh(rdp->data));
2162 if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
2163 MPII_REPLY_DESCR_UNUSED)
2164 break;
2165 if (rdp->data == 0xffffffff) {
2166 			/*
2167 			 * The IOC is still writing this descriptor
2168 			 * (race condition) - bail and retry later.
2169 			 */
2170 break;
2171 }
2172
2173 ccb = &sc->sc_ccb[le16toh(rdp->smid) - 1];
2174 SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
2175 memset(rdp, 0xff, sizeof(*rdp));
2176
2177 sc->sc_reply_postq_index++;
2178 sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
2179 rpi = 1;
2180 }
2181
2182 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2183 0, MFII_DMA_LEN(sc->sc_reply_postq),
2184 BUS_DMASYNC_PREREAD);
2185
2186 if (rpi)
2187 mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);
2188
2189 mutex_exit(&sc->sc_reply_postq_mtx);
2190
2191 while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
2192 SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
2193 mfii_done(sc, ccb);
2194 }
2195 }
2196
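/*
 * scsipi entry point for I/O requests.  Logical-disk reads and
 * writes take the LDIO fast path, everything else is sent as a raw
 * CDB.  SYNCHRONIZE CACHE is completed immediately when a healthy
 * BBU makes the controller cache stable storage.
 */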
2197 static void
2198 mfii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
2199 void *arg)
2200 {
2201 struct scsipi_periph *periph;
2202 struct scsipi_xfer *xs;
2203 struct scsipi_adapter *adapt = chan->chan_adapter;
2204 struct mfii_softc *sc = device_private(adapt->adapt_dev);
2205 struct mfii_ccb *ccb;
2206 int timeout;
2207 int target;
2208
2209 switch (req) {
2210 case ADAPTER_REQ_GROW_RESOURCES:
2211 /* Not supported. */
2212 return;
2213 case ADAPTER_REQ_SET_XFER_MODE:
2214 {
2215 struct scsipi_xfer_mode *xm = arg;
2216 xm->xm_mode = PERIPH_CAP_TQING;
2217 xm->xm_period = 0;
2218 xm->xm_offset = 0;
2219 scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
2220 return;
2221 }
2222 case ADAPTER_REQ_RUN_XFER:
2223 break;
2224 }
2225
2226 xs = arg;
2227 periph = xs->xs_periph;
2228 target = periph->periph_target;
2229
2230 if (target >= MFII_MAX_LD_EXT || !sc->sc_ld[target].ld_present ||
2231 periph->periph_lun != 0) {
2232 xs->error = XS_SELTIMEOUT;
2233 scsipi_done(xs);
2234 return;
2235 }
2236
2237 if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
2238 xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
2239 /* the cache is stable storage, don't flush */
2240 xs->error = XS_NOERROR;
2241 xs->status = SCSI_OK;
2242 xs->resid = 0;
2243 scsipi_done(xs);
2244 return;
2245 }
2246
2247 ccb = mfii_get_ccb(sc);
2248 if (ccb == NULL) {
2249 xs->error = XS_RESOURCE_SHORTAGE;
2250 scsipi_done(xs);
2251 return;
2252 }
2253 mfii_scrub_ccb(ccb);
2254 ccb->ccb_cookie = xs;
2255 ccb->ccb_done = mfii_scsi_cmd_done;
2256 ccb->ccb_data = xs->data;
2257 ccb->ccb_len = xs->datalen;
2258
2259 timeout = mstohz(xs->timeout);
2260 if (timeout == 0)
2261 timeout = 1;
2262 callout_reset(&xs->xs_callout, timeout, mfii_scsi_cmd_tmo, ccb);
2263
2264 switch (xs->cmd->opcode) {
2265 case SCSI_READ_6_COMMAND:
2266 case READ_10:
2267 case READ_12:
2268 case READ_16:
2269 case SCSI_WRITE_6_COMMAND:
2270 case WRITE_10:
2271 case WRITE_12:
2272 case WRITE_16:
2273 if (mfii_scsi_cmd_io(sc, ccb, xs) != 0)
2274 goto stuffup;
2275 break;
2276
2277 default:
2278 if (mfii_scsi_cmd_cdb(sc, ccb, xs) != 0)
2279 goto stuffup;
2280 break;
2281 }
2282
2283 xs->error = XS_NOERROR;
2284 xs->resid = 0;
2285
2286 DNPRINTF(MFII_D_CMD, "%s: start io %d cmd %d\n", DEVNAME(sc), target,
2287 xs->cmd->opcode);
2288
2289 if (xs->xs_control & XS_CTL_POLL) {
2290 if (mfii_poll(sc, ccb) != 0)
2291 goto stuffup;
2292 return;
2293 }
2294
2295 mfii_start(sc, ccb);
2296
2297 return;
2298
2299 stuffup:
2300 xs->error = XS_DRIVER_STUFFUP;
2301 scsipi_done(xs);
2302 mfii_put_ccb(sc, ccb);
2303 }
2304
2305 static void
2306 mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
2307 {
2308 struct scsipi_xfer *xs = ccb->ccb_cookie;
2309 struct mpii_msg_scsi_io *io = ccb->ccb_request;
2310 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
2311
2312 if (callout_stop(&xs->xs_callout) != 0)
2313 return;
2314
2315 switch (ctx->status) {
2316 case MFI_STAT_OK:
2317 break;
2318
2319 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2320 xs->error = XS_SENSE;
2321 memset(&xs->sense, 0, sizeof(xs->sense));
2322 memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
2323 break;
2324
2325 case MFI_STAT_LD_OFFLINE:
2326 case MFI_STAT_DEVICE_NOT_FOUND:
2327 xs->error = XS_SELTIMEOUT;
2328 break;
2329
2330 default:
2331 xs->error = XS_DRIVER_STUFFUP;
2332 break;
2333 }
2334
2335 scsipi_done(xs);
2336 mfii_put_ccb(sc, ccb);
2337 }
2338
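/*
 * Build an LDIO request for a logical-disk read or write.  The SG
 * segment count lands in a different RAID context field depending
 * on the IOP generation (num_sge originally, span_arm on the _35
 * IOPs).
 */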
2339 static int
2340 mfii_scsi_cmd_io(struct mfii_softc *sc, struct mfii_ccb *ccb,
2341 struct scsipi_xfer *xs)
2342 {
2343 struct scsipi_periph *periph = xs->xs_periph;
2344 struct mpii_msg_scsi_io *io = ccb->ccb_request;
2345 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
2346 int segs, target;
2347
2348 target = sc->sc_ld[periph->periph_target].ld_target_id;
2349 io->dev_handle = htole16(target);
2350 io->function = MFII_FUNCTION_LDIO_REQUEST;
2351 io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
2352 io->sgl_flags = htole16(0x02); /* XXX */
2353 io->sense_buffer_length = sizeof(xs->sense);
2354 io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
2355 io->data_length = htole32(xs->datalen);
2356 io->io_flags = htole16(xs->cmdlen);
2357 switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
2358 case XS_CTL_DATA_IN:
2359 ccb->ccb_direction = MFII_DATA_IN;
2360 io->direction = MPII_SCSIIO_DIR_READ;
2361 break;
2362 case XS_CTL_DATA_OUT:
2363 ccb->ccb_direction = MFII_DATA_OUT;
2364 io->direction = MPII_SCSIIO_DIR_WRITE;
2365 break;
2366 default:
2367 ccb->ccb_direction = MFII_DATA_NONE;
2368 io->direction = MPII_SCSIIO_DIR_NONE;
2369 break;
2370 }
2371 memcpy(io->cdb, xs->cmd, xs->cmdlen);
2372
2373 ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
2374 ctx->timeout_value = htole16(0x14); /* XXX */
2375 ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
2376 ctx->virtual_disk_target_id = htole16(target);
2377
2378 if (mfii_load_ccb(sc, ccb, ctx + 1,
2379 ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
2380 return (1);
2381
2382 KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);
2383 segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
2384 switch (sc->sc_iop->num_sge_loc) {
2385 case MFII_IOP_NUM_SGE_LOC_ORIG:
2386 ctx->num_sge = segs;
2387 break;
2388 case MFII_IOP_NUM_SGE_LOC_35:
2389 		/* 12-bit field, but we're only using the lower 8 */
2390 ctx->span_arm = segs;
2391 break;
2392 }
2393
2394 ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
2395 ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
2396
2397 return (0);
2398 }
2399
2400 static int
2401 mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
2402 struct scsipi_xfer *xs)
2403 {
2404 struct scsipi_periph *periph = xs->xs_periph;
2405 struct mpii_msg_scsi_io *io = ccb->ccb_request;
2406 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
2407 int target;
2408
2409 target = sc->sc_ld[periph->periph_target].ld_target_id;
2410 io->dev_handle = htole16(target);
2411 io->function = MFII_FUNCTION_LDIO_REQUEST;
2412 io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
2413 io->sgl_flags = htole16(0x02); /* XXX */
2414 io->sense_buffer_length = sizeof(xs->sense);
2415 io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
2416 io->data_length = htole32(xs->datalen);
2417 io->io_flags = htole16(xs->cmdlen);
2418 io->lun[0] = htobe16(periph->periph_lun);
2419 switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
2420 case XS_CTL_DATA_IN:
2421 ccb->ccb_direction = MFII_DATA_IN;
2422 io->direction = MPII_SCSIIO_DIR_READ;
2423 break;
2424 case XS_CTL_DATA_OUT:
2425 ccb->ccb_direction = MFII_DATA_OUT;
2426 io->direction = MPII_SCSIIO_DIR_WRITE;
2427 break;
2428 default:
2429 ccb->ccb_direction = MFII_DATA_NONE;
2430 io->direction = MPII_SCSIIO_DIR_NONE;
2431 break;
2432 }
2433 memcpy(io->cdb, xs->cmd, xs->cmdlen);
2434
2435 ctx->virtual_disk_target_id = htole16(target);
2436
2437 if (mfii_load_ccb(sc, ccb, ctx + 1,
2438 ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
2439 return (1);
2440
2441 ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
2442 KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);
2443
2444 ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
2445 ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
2446
2447 return (0);
2448 }
2449
2450 #if 0
2451 void
2452 mfii_pd_scsi_cmd(struct scsipi_xfer *xs)
2453 {
2454 struct scsi_link *link = xs->sc_link;
2455 struct mfii_softc *sc = link->adapter_softc;
2456 struct mfii_ccb *ccb = xs->io;
2457
2458 mfii_scrub_ccb(ccb);
2459 ccb->ccb_cookie = xs;
2460 ccb->ccb_done = mfii_scsi_cmd_done;
2461 ccb->ccb_data = xs->data;
2462 ccb->ccb_len = xs->datalen;
2463
2464 // XXX timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);
2465
2466 xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
2467 if (xs->error != XS_NOERROR)
2468 goto done;
2469
2470 xs->resid = 0;
2471
2472 if (ISSET(xs->xs_control, XS_CTL_POLL)) {
2473 if (mfii_poll(sc, ccb) != 0)
2474 goto stuffup;
2475 return;
2476 }
2477
2478 // XXX timeout_add_msec(&xs->stimeout, xs->timeout);
2479 mfii_start(sc, ccb);
2480
2481 return;
2482
2483 stuffup:
2484 xs->error = XS_DRIVER_STUFFUP;
2485 done:
2486 scsi_done(xs);
2487 }
2488
2489 int
2490 mfii_pd_scsi_probe(struct scsi_link *link)
2491 {
2492 struct mfii_softc *sc = link->adapter_softc;
2493 struct mfi_pd_details mpd;
2494 union mfi_mbox mbox;
2495 int rv;
2496
2497 if (link->lun > 0)
2498 return (0);
2499
2500 memset(&mbox, 0, sizeof(mbox));
2501 mbox.s[0] = htole16(link->target);
2502
2503 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
2504 MFII_DATA_IN, true);
2505 if (rv != 0)
2506 return (EIO);
2507
2508 if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
2509 return (ENXIO);
2510
2511 return (0);
2512 }
2513
2514 int
2515 mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
2516 struct scsipi_xfer *xs)
2517 {
2518 struct scsi_link *link = xs->sc_link;
2519 struct mpii_msg_scsi_io *io = ccb->ccb_request;
2520 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
2521 uint16_t dev_handle;
2522
2523 dev_handle = mfii_dev_handle(sc, link->target);
2524 if (dev_handle == htole16(0xffff))
2525 return (XS_SELTIMEOUT);
2526
2527 io->dev_handle = dev_handle;
2528 io->function = 0;
2529 io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
2530 io->sgl_flags = htole16(0x02); /* XXX */
2531 io->sense_buffer_length = sizeof(xs->sense);
2532 io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
2533 io->data_length = htole32(xs->datalen);
2534 io->io_flags = htole16(xs->cmdlen);
2535 io->lun[0] = htobe16(link->lun);
2536 switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
2537 case XS_CTL_DATA_IN:
2538 ccb->ccb_direction = MFII_DATA_IN;
2539 io->direction = MPII_SCSIIO_DIR_READ;
2540 break;
2541 case XS_CTL_DATA_OUT:
2542 ccb->ccb_direction = MFII_DATA_OUT;
2543 io->direction = MPII_SCSIIO_DIR_WRITE;
2544 break;
2545 default:
2546 ccb->ccb_direction = MFII_DATA_NONE;
2547 io->direction = MPII_SCSIIO_DIR_NONE;
2548 break;
2549 }
2550 memcpy(io->cdb, xs->cmd, xs->cmdlen);
2551
2552 ctx->virtual_disk_target_id = htole16(link->target);
2553 ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
2554 ctx->timeout_value = sc->sc_pd->pd_timeout;
2555
2556 if (mfii_load_ccb(sc, ccb, ctx + 1,
2557 ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
2558 return (XS_DRIVER_STUFFUP);
2559
2560 ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
2561 KASSERT(ccb->ccb_dma64);
2562
2563 ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2564 ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
2565 ccb->ccb_req.dev_handle = dev_handle;
2566
2567 return (XS_NOERROR);
2568 }
2569 #endif
2570
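/*
 * Load the data buffer into the ccb's 64-bit dmamap and build the
 * IEEE scatter/gather list.  Segments that do not fit within the
 * request frame spill into the ccb's external SGL via a chain
 * element.
 */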
2571 static int
2572 mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
2573 int nosleep)
2574 {
2575 struct mpii_msg_request *req = ccb->ccb_request;
2576 struct mfii_sge *sge = NULL, *nsge = sglp;
2577 struct mfii_sge *ce = NULL;
2578 bus_dmamap_t dmap = ccb->ccb_dmamap64;
2579 u_int space;
2580 int i;
2581
2582 int error;
2583
2584 if (ccb->ccb_len == 0)
2585 return (0);
2586
2587 ccb->ccb_dma64 = true;
2588 error = bus_dmamap_load(sc->sc_dmat64, dmap,
2589 ccb->ccb_data, ccb->ccb_len, NULL,
2590 nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
2591 if (error) {
2592 printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
2593 return (1);
2594 }
2595
2596 space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
2597 sizeof(*nsge);
2598 if (dmap->dm_nsegs > space) {
2599 space--;
2600
2601 ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
2602 memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);
2603
2604 ce = nsge + space;
2605 ce->sg_addr = htole64(ccb->ccb_sgl_dva);
2606 ce->sg_len = htole32(ccb->ccb_sgl_len);
2607 ce->sg_flags = sc->sc_iop->sge_flag_chain;
2608
2609 req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
2610 }
2611
2612 for (i = 0; i < dmap->dm_nsegs; i++) {
2613 if (nsge == ce)
2614 nsge = ccb->ccb_sgl;
2615
2616 sge = nsge;
2617
2618 sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
2619 sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
2620 sge->sg_flags = MFII_SGE_ADDR_SYSTEM;
2621
2622 nsge = sge + 1;
2623 }
2624 sge->sg_flags |= sc->sc_iop->sge_flag_eol;
2625
2626 bus_dmamap_sync(sc->sc_dmat64, dmap, 0, dmap->dm_mapsize,
2627 ccb->ccb_direction == MFII_DATA_OUT ?
2628 BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2629
2630 if (ccb->ccb_sgl_len > 0) {
2631 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
2632 ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
2633 BUS_DMASYNC_PREWRITE);
2634 }
2635
2636 return (0);
2637 }
2638
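/*
 * Command timeout handling: the timed-out ccb is put on the abort
 * list, and the abort work is enqueued only when the list was
 * previously empty so that one task drains the whole batch.
 */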
2639 static void
2640 mfii_scsi_cmd_tmo(void *p)
2641 {
2642 struct mfii_ccb *ccb = p;
2643 struct mfii_softc *sc = ccb->ccb_sc;
2644 bool start_abort;
2645
2646 printf("%s: cmd timeout ccb %p\n", DEVNAME(sc), p);
2647
2648 mutex_enter(&sc->sc_abort_mtx);
2649 	start_abort = (SIMPLEQ_FIRST(&sc->sc_abort_list) == NULL);
2650 SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2651 if (start_abort)
2652 workqueue_enqueue(sc->sc_abort_wq, &sc->sc_abort_work, NULL);
2653 mutex_exit(&sc->sc_abort_mtx);
2654 }
2655
2656 static void
2657 mfii_abort_task(struct work *wk, void *scp)
2658 {
2659 struct mfii_softc *sc = scp;
2660 struct mfii_ccb *list;
2661
2662 mutex_enter(&sc->sc_abort_mtx);
2663 list = SIMPLEQ_FIRST(&sc->sc_abort_list);
2664 SIMPLEQ_INIT(&sc->sc_abort_list);
2665 mutex_exit(&sc->sc_abort_mtx);
2666
2667 while (list != NULL) {
2668 struct mfii_ccb *ccb = list;
2669 struct scsipi_xfer *xs = ccb->ccb_cookie;
2670 struct scsipi_periph *periph = xs->xs_periph;
2671 struct mfii_ccb *accb;
2672
2673 list = SIMPLEQ_NEXT(ccb, ccb_link);
2674
2675 if (!sc->sc_ld[periph->periph_target].ld_present) {
2676 /* device is gone */
2677 xs->error = XS_SELTIMEOUT;
2678 scsipi_done(xs);
2679 mfii_put_ccb(sc, ccb);
2680 continue;
2681 }
2682
2683 accb = mfii_get_ccb(sc);
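		/* XXX mfii_get_ccb() can return NULL if the pool is empty */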
2684 mfii_scrub_ccb(accb);
2685 mfii_abort(sc, accb, periph->periph_target, ccb->ccb_smid,
2686 MPII_SCSI_TASK_ABORT_TASK,
2687 htole32(MFII_TASK_MGMT_FLAGS_PD));
2688
2689 accb->ccb_cookie = ccb;
2690 accb->ccb_done = mfii_scsi_cmd_abort_done;
2691
2692 mfii_start(sc, accb);
2693 }
2694 }
2695
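/*
 * Fill in an MPII SCSI task management request that aborts the
 * command identified by smid on the given device handle.  The
 * caller sets the completion callback and posts the ccb itself.
 */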
2696 static void
2697 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2698 uint16_t smid, uint8_t type, uint32_t flags)
2699 {
2700 struct mfii_task_mgmt *msg;
2701 struct mpii_msg_scsi_task_request *req;
2702
2703 msg = accb->ccb_request;
2704 req = &msg->mpii_request;
2705 req->dev_handle = dev_handle;
2706 req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2707 req->task_type = type;
2708 	req->task_mid = htole16(smid);
2709 msg->flags = flags;
2710
2711 accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2712 accb->ccb_req.smid = le16toh(accb->ccb_smid);
2713 }
2714
2715 static void
2716 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2717 {
2718 struct mfii_ccb *ccb = accb->ccb_cookie;
2719 struct scsipi_xfer *xs = ccb->ccb_cookie;
2720
2721 /* XXX check accb completion? */
2722
2723 mfii_put_ccb(sc, accb);
2724 printf("%s: cmd aborted ccb %p\n", DEVNAME(sc), ccb);
2725
2726 xs->error = XS_TIMEOUT;
2727 scsipi_done(xs);
2728 mfii_put_ccb(sc, ccb);
2729 }
2730
2731 static struct mfii_ccb *
2732 mfii_get_ccb(struct mfii_softc *sc)
2733 {
2734 struct mfii_ccb *ccb;
2735
2736 mutex_enter(&sc->sc_ccb_mtx);
2737 if (!sc->sc_running) {
2738 ccb = NULL;
2739 } else {
2740 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2741 if (ccb != NULL)
2742 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2743 }
2744 mutex_exit(&sc->sc_ccb_mtx);
2745 return (ccb);
2746 }
2747
2748 static void
2749 mfii_scrub_ccb(struct mfii_ccb *ccb)
2750 {
2751 ccb->ccb_cookie = NULL;
2752 ccb->ccb_done = NULL;
2753 ccb->ccb_flags = 0;
2754 ccb->ccb_data = NULL;
2755 ccb->ccb_direction = MFII_DATA_NONE;
2756 ccb->ccb_dma64 = false;
2757 ccb->ccb_len = 0;
2758 ccb->ccb_sgl_len = 0;
2759 memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2760 memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2761 memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2762 }
2763
2764 static void
2765 mfii_put_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb)
2766 {
2767 mutex_enter(&sc->sc_ccb_mtx);
2768 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
2769 mutex_exit(&sc->sc_ccb_mtx);
2770 }
2771
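/*
 * Allocate the ccb array and carve the preallocated DMA regions
 * (request frames, MFI frames, sense buffers and external SGLs)
 * into per-ccb slices.  Request frame 0 is left for events, so
 * ccb i uses smid i + 1.
 */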
2772 static int
2773 mfii_init_ccb(struct mfii_softc *sc)
2774 {
2775 struct mfii_ccb *ccb;
2776 u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2777 u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2778 u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2779 u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2780 u_int i;
2781 int error;
2782
2783 sc->sc_ccb = malloc(sc->sc_max_cmds * sizeof(struct mfii_ccb),
2784 M_DEVBUF, M_WAITOK|M_ZERO);
2785
2786 for (i = 0; i < sc->sc_max_cmds; i++) {
2787 ccb = &sc->sc_ccb[i];
2788 ccb->ccb_sc = sc;
2789
2790 /* create a dma map for transfer */
2791 error = bus_dmamap_create(sc->sc_dmat,
2792 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2793 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap32);
2794 if (error) {
2795 printf("%s: cannot create ccb dmamap32 (%d)\n",
2796 DEVNAME(sc), error);
2797 goto destroy;
2798 }
2799 error = bus_dmamap_create(sc->sc_dmat64,
2800 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2801 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap64);
2802 if (error) {
2803 printf("%s: cannot create ccb dmamap64 (%d)\n",
2804 DEVNAME(sc), error);
2805 goto destroy32;
2806 }
2807
2808 		/* select the (i + 1)th request; frame 0 is reserved for events */
2809 ccb->ccb_smid = i + 1;
2810 ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2811 ccb->ccb_request = request + ccb->ccb_request_offset;
2812 ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2813 ccb->ccb_request_offset;
2814
2815 /* select i'th MFI command frame */
2816 ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2817 ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2818 ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2819 ccb->ccb_mfi_offset;
2820
2821 /* select i'th sense */
2822 ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2823 ccb->ccb_sense = (struct mfi_sense *)(sense +
2824 ccb->ccb_sense_offset);
2825 ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2826 ccb->ccb_sense_offset;
2827
2828 /* select i'th sgl */
2829 ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2830 sc->sc_max_sgl * i;
2831 ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2832 ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2833 ccb->ccb_sgl_offset;
2834
2835 mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2836 cv_init(&ccb->ccb_cv, "mfiiexec");
2837
2838 /* add ccb to queue */
2839 mfii_put_ccb(sc, ccb);
2840 }
2841
2842 return (0);
2843
2844 destroy32:
2845 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2846 destroy:
2847 /* free dma maps and ccb memory */
2848 while ((ccb = mfii_get_ccb(sc)) != NULL) {
2849 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2850 		bus_dmamap_destroy(sc->sc_dmat64, ccb->ccb_dmamap64);
2851 }
2852
2853 free(sc->sc_ccb, M_DEVBUF);
2854
2855 return (1);
2856 }
2857
2858 #if NBIO > 0
2859 static int
2860 mfii_ioctl(device_t dev, u_long cmd, void *addr)
2861 {
2862 struct mfii_softc *sc = device_private(dev);
2863 int error = 0;
2864
2865 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));
2866
2867 mutex_enter(&sc->sc_lock);
2868
2869 switch (cmd) {
2870 case BIOCINQ:
2871 DNPRINTF(MFII_D_IOCTL, "inq\n");
2872 error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
2873 break;
2874
2875 case BIOCVOL:
2876 DNPRINTF(MFII_D_IOCTL, "vol\n");
2877 error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
2878 break;
2879
2880 case BIOCDISK:
2881 DNPRINTF(MFII_D_IOCTL, "disk\n");
2882 error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
2883 break;
2884
2885 case BIOCALARM:
2886 DNPRINTF(MFII_D_IOCTL, "alarm\n");
2887 error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
2888 break;
2889
2890 case BIOCBLINK:
2891 DNPRINTF(MFII_D_IOCTL, "blink\n");
2892 error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
2893 break;
2894
2895 case BIOCSETSTATE:
2896 DNPRINTF(MFII_D_IOCTL, "setstate\n");
2897 error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
2898 break;
2899
2900 #if 0
2901 case BIOCPATROL:
2902 DNPRINTF(MFII_D_IOCTL, "patrol\n");
2903 error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
2904 break;
2905 #endif
2906
2907 default:
2908 DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
2909 error = ENOTTY;
2910 }
2911
2912 mutex_exit(&sc->sc_lock);
2913
2914 return (error);
2915 }
2916
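/*
 * Refresh the cached controller configuration, logical disk list
 * and per-LD details used by the bio(4) ioctls, and recount the
 * physical disks that are part of logical disks.
 */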
2917 static int
2918 mfii_bio_getitall(struct mfii_softc *sc)
2919 {
2920 int i, d, rv = EINVAL;
2921 size_t size;
2922 union mfi_mbox mbox;
2923 struct mfi_conf *cfg = NULL;
2924 struct mfi_ld_details *ld_det = NULL;
2925
2926 /* get info */
2927 if (mfii_get_info(sc)) {
2928 DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
2929 DEVNAME(sc));
2930 goto done;
2931 }
2932
2933 	/* send a single-element command to get the size of the full structure */
2934 cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
2935 if (cfg == NULL)
2936 goto done;
2937 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
2938 MFII_DATA_IN, false)) {
2939 free(cfg, M_DEVBUF);
2940 goto done;
2941 }
2942
2943 size = cfg->mfc_size;
2944 free(cfg, M_DEVBUF);
2945
2946 /* memory for read config */
2947 cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
2948 if (cfg == NULL)
2949 goto done;
2950 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
2951 MFII_DATA_IN, false)) {
2952 free(cfg, M_DEVBUF);
2953 goto done;
2954 }
2955
2956 /* replace current pointer with new one */
2957 if (sc->sc_cfg)
2958 free(sc->sc_cfg, M_DEVBUF);
2959 sc->sc_cfg = cfg;
2960
2961 /* get all ld info */
2962 memset(&mbox, 0, sizeof(mbox));
2963 if (sc->sc_max256vd)
2964 mbox.b[0] = 1;
2965 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, &mbox, &sc->sc_ld_list,
2966 sizeof(sc->sc_ld_list), MFII_DATA_IN, false))
2967 goto done;
2968
2969 /* get memory for all ld structures */
2970 size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
2971 if (sc->sc_ld_sz != size) {
2972 if (sc->sc_ld_details)
2973 free(sc->sc_ld_details, M_DEVBUF);
2974
2975 ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
2976 if (ld_det == NULL)
2977 goto done;
2978 sc->sc_ld_sz = size;
2979 sc->sc_ld_details = ld_det;
2980 }
2981
2982 /* find used physical disks */
2983 size = sizeof(struct mfi_ld_details);
2984 for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
2985 memset(&mbox, 0, sizeof(mbox));
2986 mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
2987 if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox,
2988 &sc->sc_ld_details[i], size, MFII_DATA_IN, false))
2989 goto done;
2990
2991 d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
2992 sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
2993 }
2994 sc->sc_no_pd = d;
2995
2996 rv = 0;
2997 done:
2998 return (rv);
2999 }
3000
3001 static int
3002 mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
3003 {
3004 int rv = EINVAL;
3005 struct mfi_conf *cfg = NULL;
3006
3007 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));
3008
3009 if (mfii_bio_getitall(sc)) {
3010 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3011 DEVNAME(sc));
3012 goto done;
3013 }
3014
3015 /* count unused disks as volumes */
3016 if (sc->sc_cfg == NULL)
3017 goto done;
3018 cfg = sc->sc_cfg;
3019
3020 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
3021 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
3022 #if notyet
3023 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
3024 (bi->bi_nodisk - sc->sc_no_pd);
3025 #endif
3026 /* tell bio who we are */
3027 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3028
3029 rv = 0;
3030 done:
3031 return (rv);
3032 }
3033
3034 static int
3035 mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
3036 {
3037 int i, per, rv = EINVAL;
3038
3039 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
3040 DEVNAME(sc), bv->bv_volid);
3041
3042 	/* we could skip this and expect that inq took care of it */
3043 if (mfii_bio_getitall(sc)) {
3044 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3045 DEVNAME(sc));
3046 goto done;
3047 }
3048
3049 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
3050 /* go do hotspares & unused disks */
3051 rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
3052 goto done;
3053 }
3054
3055 i = bv->bv_volid;
3056 strlcpy(bv->bv_dev, sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_name,
3057 sizeof(bv->bv_dev));
3058
3059 switch (sc->sc_ld_list.mll_list[i].mll_state) {
3060 case MFI_LD_OFFLINE:
3061 bv->bv_status = BIOC_SVOFFLINE;
3062 break;
3063
3064 case MFI_LD_PART_DEGRADED:
3065 case MFI_LD_DEGRADED:
3066 bv->bv_status = BIOC_SVDEGRADED;
3067 break;
3068
3069 case MFI_LD_ONLINE:
3070 bv->bv_status = BIOC_SVONLINE;
3071 break;
3072
3073 default:
3074 bv->bv_status = BIOC_SVINVALID;
3075 DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
3076 DEVNAME(sc),
3077 sc->sc_ld_list.mll_list[i].mll_state);
3078 }
3079
3080 /* additional status can modify MFI status */
3081 switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
3082 case MFI_LD_PROG_CC:
3083 bv->bv_status = BIOC_SVSCRUB;
3084 per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
3085 bv->bv_percent = (per * 100) / 0xffff;
3086 bv->bv_seconds =
3087 sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
3088 break;
3089
3090 case MFI_LD_PROG_BGI:
3091 bv->bv_status = BIOC_SVSCRUB;
3092 per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
3093 bv->bv_percent = (per * 100) / 0xffff;
3094 bv->bv_seconds =
3095 sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
3096 break;
3097
3098 case MFI_LD_PROG_FGI:
3099 case MFI_LD_PROG_RECONSTRUCT:
3100 /* nothing yet */
3101 break;
3102 }
3103
3104 #if 0
3105 if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
3106 bv->bv_cache = BIOC_CVWRITEBACK;
3107 else
3108 bv->bv_cache = BIOC_CVWRITETHROUGH;
3109 #endif
3110
3111 /*
3112 	 * The RAID levels are determined per the SNIA DDF spec; this is only
3113 	 * the subset that is valid for the MFI controller.
3114 */
3115 bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
3116 if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
3117 bv->bv_level *= 10;
3118
3119 bv->bv_nodisk =
3120 sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
3121 sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
3122
3123 bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */
3124 bv->bv_stripe_size =
3125 (512 << sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_stripe_size)
3126 / 1024; /* in KB */
3127
3128 rv = 0;
3129 done:
3130 return (rv);
3131 }
3132
3133 static int
3134 mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
3135 {
3136 struct mfi_conf *cfg;
3137 struct mfi_array *ar;
3138 struct mfi_ld_cfg *ld;
3139 struct mfi_pd_details *pd;
3140 struct mfi_pd_list *pl;
3141 struct scsipi_inquiry_data *inqbuf;
3142 char vend[8+16+4+1], *vendp;
3143 int i, rv = EINVAL;
3144 int arr, vol, disk, span;
3145 union mfi_mbox mbox;
3146
3147 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
3148 DEVNAME(sc), bd->bd_diskid);
3149
3150 	/* we could skip this and expect that inq took care of it */
3151 if (mfii_bio_getitall(sc)) {
3152 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3153 DEVNAME(sc));
3154 return (rv);
3155 }
3156 cfg = sc->sc_cfg;
3157
3158 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3159 pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);
3160
3161 ar = cfg->mfc_array;
3162 vol = bd->bd_volid;
3163 if (vol >= cfg->mfc_no_ld) {
3164 /* do hotspares */
3165 rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
3166 goto freeme;
3167 }
3168
3169 /* calculate offset to ld structure */
3170 ld = (struct mfi_ld_cfg *)(
3171 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3172 cfg->mfc_array_size * cfg->mfc_no_array);
3173
3174 /* use span 0 only when raid group is not spanned */
3175 if (ld[vol].mlc_parm.mpa_span_depth > 1)
3176 span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
3177 else
3178 span = 0;
3179 arr = ld[vol].mlc_span[span].mls_index;
3180
3181 /* offset disk into pd list */
3182 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
3183
3184 if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
3185 		/* the disk is missing, but complete the command successfully */
3186 bd->bd_status = BIOC_SDFAILED;
3187 rv = 0;
3188
3189 /* try to find an unused disk for the target to rebuild */
3190 if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
3191 MFII_DATA_IN, false))
3192 goto freeme;
3193
3194 for (i = 0; i < pl->mpl_no_pd; i++) {
3195 if (pl->mpl_address[i].mpa_scsi_type != 0)
3196 continue;
3197
3198 memset(&mbox, 0, sizeof(mbox));
3199 mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
3200 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
3201 pd, sizeof(*pd), MFII_DATA_IN, false))
3202 continue;
3203
3204 if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
3205 pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
3206 break;
3207 }
3208
3209 if (i == pl->mpl_no_pd)
3210 goto freeme;
3211 } else {
3212 memset(&mbox, 0, sizeof(mbox));
3213 mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
3214 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3215 MFII_DATA_IN, false)) {
3216 bd->bd_status = BIOC_SDINVALID;
3217 goto freeme;
3218 }
3219 }
3220
3221 /* get the remaining fields */
3222 bd->bd_channel = pd->mpd_enc_idx;
3223 bd->bd_target = pd->mpd_enc_slot;
3224
3225 /* get status */
3226 switch (pd->mpd_fw_state){
3227 case MFI_PD_UNCONFIG_GOOD:
3228 case MFI_PD_UNCONFIG_BAD:
3229 bd->bd_status = BIOC_SDUNUSED;
3230 break;
3231
3232 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
3233 bd->bd_status = BIOC_SDHOTSPARE;
3234 break;
3235
3236 case MFI_PD_OFFLINE:
3237 bd->bd_status = BIOC_SDOFFLINE;
3238 break;
3239
3240 case MFI_PD_FAILED:
3241 bd->bd_status = BIOC_SDFAILED;
3242 break;
3243
3244 case MFI_PD_REBUILD:
3245 bd->bd_status = BIOC_SDREBUILD;
3246 break;
3247
3248 case MFI_PD_ONLINE:
3249 bd->bd_status = BIOC_SDONLINE;
3250 break;
3251
3252 case MFI_PD_COPYBACK:
3253 case MFI_PD_SYSTEM:
3254 default:
3255 bd->bd_status = BIOC_SDINVALID;
3256 break;
3257 }
3258
3259 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
3260
3261 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
3262 vendp = inqbuf->vendor;
3263 memcpy(vend, vendp, sizeof vend - 1);
3264 vend[sizeof vend - 1] = '\0';
3265 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
3266
3267 /* XXX find a way to retrieve serial nr from drive */
3268 /* XXX find a way to get bd_procdev */
3269
3270 #if 0
3271 mfp = &pd->mpd_progress;
3272 if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
3273 mp = &mfp->mfp_patrol_read;
3274 bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
3275 bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
3276 }
3277 #endif
3278
3279 rv = 0;
3280 freeme:
3281 free(pd, M_DEVBUF);
3282 free(pl, M_DEVBUF);
3283
3284 return (rv);
3285 }
3286
3287 static int
3288 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3289 {
3290 uint32_t opc;
3291 int rv = 0;
3292 int8_t ret;
3293 mfii_direction_t dir = MFII_DATA_NONE;
3294
3295 switch (ba->ba_opcode) {
3296 case BIOC_SADISABLE:
3297 opc = MR_DCMD_SPEAKER_DISABLE;
3298 break;
3299
3300 case BIOC_SAENABLE:
3301 opc = MR_DCMD_SPEAKER_ENABLE;
3302 break;
3303
3304 case BIOC_SASILENCE:
3305 opc = MR_DCMD_SPEAKER_SILENCE;
3306 break;
3307
3308 case BIOC_GASTATUS:
3309 opc = MR_DCMD_SPEAKER_GET;
3310 dir = MFII_DATA_IN;
3311 break;
3312
3313 case BIOC_SATEST:
3314 opc = MR_DCMD_SPEAKER_TEST;
3315 break;
3316
3317 default:
3318 DNPRINTF(MFII_D_IOCTL,
3319 "%s: mfii_ioctl_alarm biocalarm invalid opcode %x\n",
3320 DEVNAME(sc), ba->ba_opcode);
3321 return (EINVAL);
3322 }
3323
3324 if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), dir, false))
3325 rv = EINVAL;
3326 else
3327 if (ba->ba_opcode == BIOC_GASTATUS)
3328 ba->ba_status = ret;
3329 else
3330 ba->ba_status = 0;
3331
3332 return (rv);
3333 }
3334
3335 static int
3336 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3337 {
3338 int i, found, rv = EINVAL;
3339 union mfi_mbox mbox;
3340 uint32_t cmd;
3341 struct mfi_pd_list *pd;
3342
3343 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3344 bb->bb_status);
3345
3346 	/* channel 0 means not in an enclosure, so it can't be blinked */
3347 if (bb->bb_channel == 0)
3348 return (EINVAL);
3349
3350 pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3351
3352 if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd),
3353 MFII_DATA_IN, false))
3354 goto done;
3355
3356 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3357 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3358 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3359 found = 1;
3360 break;
3361 }
3362
3363 if (!found)
3364 goto done;
3365
3366 memset(&mbox, 0, sizeof(mbox));
3367 mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3368
3369 switch (bb->bb_status) {
3370 case BIOC_SBUNBLINK:
3371 cmd = MR_DCMD_PD_UNBLINK;
3372 break;
3373
3374 case BIOC_SBBLINK:
3375 cmd = MR_DCMD_PD_BLINK;
3376 break;
3377
3378 case BIOC_SBALARM:
3379 default:
3380 DNPRINTF(MFII_D_IOCTL,
3381 "%s: mfii_ioctl_blink biocblink invalid opcode %x\n",
3382 DEVNAME(sc), bb->bb_status);
3383 goto done;
3384 }
3385
3386
3387 if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, MFII_DATA_NONE, false))
3388 goto done;
3389
3390 rv = 0;
3391 done:
3392 free(pd, M_DEVBUF);
3393 return (rv);
3394 }
3395
3396 static int
3397 mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
3398 {
3399 struct mfii_foreign_scan_info *fsi;
3400 struct mfi_pd_details *pd;
3401 union mfi_mbox mbox;
3402 int rv;
3403
3404 fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
3405 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3406
3407 memset(&mbox, 0, sizeof mbox);
3408 mbox.s[0] = pd_id;
3409 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3410 MFII_DATA_IN, false);
3411 if (rv != 0)
3412 goto done;
3413
3414 if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
3415 mbox.s[0] = pd_id;
3416 mbox.s[1] = pd->mpd_pd.mfp_seq;
3417 mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
3418 rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
3419 MFII_DATA_NONE, false);
3420 if (rv != 0)
3421 goto done;
3422 }
3423
3424 memset(&mbox, 0, sizeof mbox);
3425 mbox.s[0] = pd_id;
3426 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3427 MFII_DATA_IN, false);
3428 if (rv != 0)
3429 goto done;
3430
3431 if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
3432 rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL,
3433 fsi, sizeof(*fsi), MFII_DATA_IN, false);
3434 if (rv != 0)
3435 goto done;
3436
3437 if (fsi->count > 0) {
3438 rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL,
3439 NULL, 0, MFII_DATA_NONE, false);
3440 if (rv != 0)
3441 goto done;
3442 }
3443 }
3444
3445 memset(&mbox, 0, sizeof mbox);
3446 mbox.s[0] = pd_id;
3447 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3448 MFII_DATA_IN, false);
3449 if (rv != 0)
3450 goto done;
3451
3452 if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
3453 pd->mpd_ddf_state & MFI_DDF_FOREIGN)
3454 rv = ENXIO;
3455
3456 done:
3457 free(fsi, M_DEVBUF);
3458 free(pd, M_DEVBUF);
3459
3460 return (rv);
3461 }
3462
3463 static int
3464 mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
3465 {
3466 struct mfi_hotspare *hs;
3467 struct mfi_pd_details *pd;
3468 union mfi_mbox mbox;
3469 size_t size;
3470 int rv = EINVAL;
3471
3472 	/* we could skip this and expect that inq took care of it */
3473 if (mfii_bio_getitall(sc)) {
3474 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3475 DEVNAME(sc));
3476 return (rv);
3477 }
3478 size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;
3479
3480 hs = malloc(size, M_DEVBUF, M_WAITOK);
3481 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3482
3483 memset(&mbox, 0, sizeof mbox);
3484 mbox.s[0] = pd_id;
3485 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3486 MFII_DATA_IN, false);
3487 if (rv != 0)
3488 goto done;
3489
3490 memset(hs, 0, size);
3491 hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
3492 hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
3493 rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size,
3494 MFII_DATA_OUT, false);
3495
3496 done:
3497 free(hs, M_DEVBUF);
3498 free(pd, M_DEVBUF);
3499
3500 return (rv);
3501 }
3502
3503 static int
3504 mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
3505 {
3506 struct mfi_pd_details *pd;
3507 struct mfi_pd_list *pl;
3508 int i, found, rv = EINVAL;
3509 union mfi_mbox mbox;
3510
3511 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
3512 bs->bs_status);
3513
3514 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3515 pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);
3516
3517 if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
3518 MFII_DATA_IN, false))
3519 goto done;
3520
3521 for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
3522 if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
3523 bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
3524 found = 1;
3525 break;
3526 }
3527
3528 if (!found)
3529 goto done;
3530
3531 memset(&mbox, 0, sizeof(mbox));
3532 mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
3533
3534 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3535 MFII_DATA_IN, false))
3536 goto done;
3537
3538 mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
3539 mbox.s[1] = pd->mpd_pd.mfp_seq;
3540
3541 switch (bs->bs_status) {
3542 case BIOC_SSONLINE:
3543 mbox.b[4] = MFI_PD_ONLINE;
3544 break;
3545
3546 case BIOC_SSOFFLINE:
3547 mbox.b[4] = MFI_PD_OFFLINE;
3548 break;
3549
3550 case BIOC_SSHOTSPARE:
3551 mbox.b[4] = MFI_PD_HOTSPARE;
3552 break;
3553
3554 case BIOC_SSREBUILD:
3555 if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
3556 if ((rv = mfii_makegood(sc,
3557 pl->mpl_address[i].mpa_pd_id)))
3558 goto done;
3559
3560 if ((rv = mfii_makespare(sc,
3561 pl->mpl_address[i].mpa_pd_id)))
3562 goto done;
3563
3564 memset(&mbox, 0, sizeof(mbox));
3565 mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
3566 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
3567 pd, sizeof(*pd), MFII_DATA_IN, false);
3568 if (rv != 0)
3569 goto done;
3570
3571 /* rebuilding might be started by mfii_makespare() */
3572 if (pd->mpd_fw_state == MFI_PD_REBUILD) {
3573 rv = 0;
3574 goto done;
3575 }
3576
3577 mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
3578 mbox.s[1] = pd->mpd_pd.mfp_seq;
3579 }
3580 mbox.b[4] = MFI_PD_REBUILD;
3581 break;
3582
3583 default:
3584 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
3585 "opcode %x\n", DEVNAME(sc), bs->bs_status);
3586 goto done;
3587 }
3588
3589
3590 rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
3591 MFII_DATA_NONE, false);
3592 done:
3593 free(pd, M_DEVBUF);
3594 free(pl, M_DEVBUF);
3595 return (rv);
3596 }
3597
3598 #if 0
3599 int
3600 mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
3601 {
3602 uint32_t opc;
3603 int rv = 0;
3604 struct mfi_pr_properties prop;
3605 struct mfi_pr_status status;
3606 uint32_t time, exec_freq;
3607
3608 switch (bp->bp_opcode) {
3609 case BIOC_SPSTOP:
3610 case BIOC_SPSTART:
3611 if (bp->bp_opcode == BIOC_SPSTART)
3612 opc = MR_DCMD_PR_START;
3613 else
3614 opc = MR_DCMD_PR_STOP;
3615 if (mfii_mgmt(sc, opc, NULL, NULL, 0, MFII_DATA_IN, false))
3616 return (EINVAL);
3617 break;
3618
3619 case BIOC_SPMANUAL:
3620 case BIOC_SPDISABLE:
3621 case BIOC_SPAUTO:
3622 /* Get device's time. */
3623 opc = MR_DCMD_TIME_SECS_GET;
3624 if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
3625 MFII_DATA_IN, false))
3626 return (EINVAL);
3627
3628 opc = MR_DCMD_PR_GET_PROPERTIES;
3629 if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
3630 MFII_DATA_IN, false))
3631 return (EINVAL);
3632
3633 switch (bp->bp_opcode) {
3634 case BIOC_SPMANUAL:
3635 prop.op_mode = MFI_PR_OPMODE_MANUAL;
3636 break;
3637 case BIOC_SPDISABLE:
3638 prop.op_mode = MFI_PR_OPMODE_DISABLED;
3639 break;
3640 case BIOC_SPAUTO:
3641 if (bp->bp_autoival != 0) {
3642 if (bp->bp_autoival == -1)
3643 /* continuously */
3644 exec_freq = 0xffffffffU;
3645 else if (bp->bp_autoival > 0)
3646 exec_freq = bp->bp_autoival;
3647 else
3648 return (EINVAL);
3649 prop.exec_freq = exec_freq;
3650 }
3651 if (bp->bp_autonext != 0) {
3652 if (bp->bp_autonext < 0)
3653 return (EINVAL);
3654 else
3655 prop.next_exec =
3656 time + bp->bp_autonext;
3657 }
3658 prop.op_mode = MFI_PR_OPMODE_AUTO;
3659 break;
3660 }
3661
3662 opc = MR_DCMD_PR_SET_PROPERTIES;
3663 if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
3664 MFII_DATA_OUT, false))
3665 return (EINVAL);
3666
3667 break;
3668
3669 case BIOC_GPSTATUS:
3670 opc = MR_DCMD_PR_GET_PROPERTIES;
3671 if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
3672 MFII_DATA_IN, false))
3673 return (EINVAL);
3674
3675 opc = MR_DCMD_PR_GET_STATUS;
3676 if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status),
3677 MFII_DATA_IN, false))
3678 return (EINVAL);
3679
3680 /* Get device's time. */
3681 opc = MR_DCMD_TIME_SECS_GET;
3682 if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
3683 MFII_DATA_IN, false))
3684 return (EINVAL);
3685
3686 switch (prop.op_mode) {
3687 case MFI_PR_OPMODE_AUTO:
3688 bp->bp_mode = BIOC_SPMAUTO;
3689 bp->bp_autoival = prop.exec_freq;
3690 bp->bp_autonext = prop.next_exec;
3691 bp->bp_autonow = time;
3692 break;
3693 case MFI_PR_OPMODE_MANUAL:
3694 bp->bp_mode = BIOC_SPMMANUAL;
3695 break;
3696 case MFI_PR_OPMODE_DISABLED:
3697 bp->bp_mode = BIOC_SPMDISABLED;
3698 break;
3699 default:
3700 printf("%s: unknown patrol mode %d\n",
3701 DEVNAME(sc), prop.op_mode);
3702 break;
3703 }
3704
3705 switch (status.state) {
3706 case MFI_PR_STATE_STOPPED:
3707 bp->bp_status = BIOC_SPSSTOPPED;
3708 break;
3709 case MFI_PR_STATE_READY:
3710 bp->bp_status = BIOC_SPSREADY;
3711 break;
3712 case MFI_PR_STATE_ACTIVE:
3713 bp->bp_status = BIOC_SPSACTIVE;
3714 break;
3715 case MFI_PR_STATE_ABORTED:
3716 bp->bp_status = BIOC_SPSABORTED;
3717 break;
3718 default:
3719 printf("%s: unknown patrol state %d\n",
3720 DEVNAME(sc), status.state);
3721 break;
3722 }
3723
3724 break;
3725
3726 default:
3727 DNPRINTF(MFII_D_IOCTL,
3728 "%s: mfii_ioctl_patrol biocpatrol invalid opcode %x\n",
3729 DEVNAME(sc), bp->bp_opcode);
3730 return (EINVAL);
3731 }
3732
3733 return (rv);
3734 }
3735 #endif
3736
3737 static int
3738 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3739 {
3740 struct mfi_conf *cfg;
3741 struct mfi_hotspare *hs;
3742 struct mfi_pd_details *pd;
3743 struct bioc_disk *sdhs;
3744 struct bioc_vol *vdhs;
3745 struct scsipi_inquiry_data *inqbuf;
3746 char vend[8+16+4+1], *vendp;
3747 int i, rv = EINVAL;
3748 uint32_t size;
3749 union mfi_mbox mbox;
3750
3751 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3752
3753 if (!bio_hs)
3754 return (EINVAL);
3755
3756 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3757
3758 	/* send a single-element command to get the size of the full structure */
3759 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3760 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
3761 MFII_DATA_IN, false))
3762 goto freeme;
3763
3764 size = cfg->mfc_size;
3765 free(cfg, M_DEVBUF);
3766
3767 /* memory for read config */
3768 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3769 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
3770 MFII_DATA_IN, false))
3771 goto freeme;
3772
3773 /* calculate offset to hs structure */
3774 hs = (struct mfi_hotspare *)(
3775 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3776 cfg->mfc_array_size * cfg->mfc_no_array +
3777 cfg->mfc_ld_size * cfg->mfc_no_ld);
3778
3779 if (volid < cfg->mfc_no_ld)
3780 goto freeme; /* not a hotspare */
3781
3782 	if (volid >= (cfg->mfc_no_ld + cfg->mfc_no_hs))
3783 goto freeme; /* not a hotspare */
3784
3785 /* offset into hotspare structure */
3786 i = volid - cfg->mfc_no_ld;
3787
3788 DNPRINTF(MFII_D_IOCTL,
3789 "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3790 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3791 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3792
3793 /* get pd fields */
3794 memset(&mbox, 0, sizeof(mbox));
3795 mbox.s[0] = hs[i].mhs_pd.mfp_id;
3796 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3797 MFII_DATA_IN, false)) {
3798 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3799 DEVNAME(sc));
3800 goto freeme;
3801 }
3802
3803 switch (type) {
3804 case MFI_MGMT_VD:
3805 vdhs = bio_hs;
3806 vdhs->bv_status = BIOC_SVONLINE;
3807 vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3808 vdhs->bv_level = -1; /* hotspare */
3809 vdhs->bv_nodisk = 1;
3810 break;
3811
3812 case MFI_MGMT_SD:
3813 sdhs = bio_hs;
3814 sdhs->bd_status = BIOC_SDHOTSPARE;
3815 sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3816 sdhs->bd_channel = pd->mpd_enc_idx;
3817 sdhs->bd_target = pd->mpd_enc_slot;
3818 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
3819 vendp = inqbuf->vendor;
3820 memcpy(vend, vendp, sizeof vend - 1);
3821 vend[sizeof vend - 1] = '\0';
3822 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3823 break;
3824
3825 default:
3826 goto freeme;
3827 }
3828
3829 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3830 rv = 0;
3831 freeme:
3832 free(pd, M_DEVBUF);
3833 free(cfg, M_DEVBUF);
3834
3835 return (rv);
3836 }
3837
3838 #endif /* NBIO > 0 */
3839
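/*
 * Refresh the BBU sensors.  Sensor 0 is a good/bad indicator derived
 * from the firmware status bits (plus the state-of-health flag on
 * BBU-type packs); sensors 1-3 convert voltage, current and
 * temperature (presumably reported in mV, mA and degrees C) to
 * sysmon's micro-units and microkelvin.
 */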
3840 static void
3841 mfii_bbu(struct mfii_softc *sc, envsys_data_t *edata)
3842 {
3843 struct mfi_bbu_status bbu;
3844 u_int32_t status;
3845 u_int32_t mask;
3846 u_int32_t soh_bad;
3847 int rv;
3848
3849 mutex_enter(&sc->sc_lock);
3850 rv = mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
3851 sizeof(bbu), MFII_DATA_IN, false);
3852 mutex_exit(&sc->sc_lock);
3853 if (rv != 0) {
3854 edata->state = ENVSYS_SINVALID;
3855 edata->value_cur = 0;
3856 return;
3857 }
3858
3859 switch (bbu.battery_type) {
3860 case MFI_BBU_TYPE_IBBU:
3861 case MFI_BBU_TYPE_IBBU09:
3862 case MFI_BBU_TYPE_CVPM02:
3863 mask = MFI_BBU_STATE_BAD_IBBU;
3864 soh_bad = 0;
3865 break;
3866 case MFI_BBU_TYPE_BBU:
3867 mask = MFI_BBU_STATE_BAD_BBU;
3868 soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3869 break;
3870
3871 case MFI_BBU_TYPE_NONE:
3872 default:
3873 edata->state = ENVSYS_SCRITICAL;
3874 edata->value_cur = 0;
3875 return;
3876 }
3877
3878 status = le32toh(bbu.fw_status) & mask;
3879 switch (edata->sensor) {
3880 case 0:
3881 edata->value_cur = (status || soh_bad) ? 0 : 1;
3882 edata->state =
3883 edata->value_cur ? ENVSYS_SVALID : ENVSYS_SCRITICAL;
3884 return;
3885 case 1:
3886 edata->value_cur = le16toh(bbu.voltage) * 1000;
3887 edata->state = ENVSYS_SVALID;
3888 return;
3889 case 2:
3890 edata->value_cur = (int16_t)le16toh(bbu.current) * 1000;
3891 edata->state = ENVSYS_SVALID;
3892 return;
3893 case 3:
3894 edata->value_cur =
3895 le16toh(bbu.temperature) * 1000000 + 273150000;
3896 edata->state = ENVSYS_SVALID;
3897 return;
3898 }
3899 }
3900
3901 static void
3902 mfii_refresh_ld_sensor(struct mfii_softc *sc, envsys_data_t *edata)
3903 {
3904 struct bioc_vol bv;
3905 int error;
3906
3907 memset(&bv, 0, sizeof(bv));
3908 bv.bv_volid = edata->sensor - MFI_BBU_SENSORS;
3909 mutex_enter(&sc->sc_lock);
3910 error = mfii_ioctl_vol(sc, &bv);
3911 mutex_exit(&sc->sc_lock);
3912 if (error)
3913 bv.bv_status = BIOC_SVINVALID;
3914 bio_vol_to_envsys(edata, &bv);
3915 }
3916
3917 static void
3918 mfii_init_ld_sensor(struct mfii_softc *sc, envsys_data_t *sensor, int i)
3919 {
3920 sensor->units = ENVSYS_DRIVE;
3921 sensor->state = ENVSYS_SINVALID;
3922 sensor->value_cur = ENVSYS_DRIVE_EMPTY;
3923 /* Enable monitoring for drive state changes */
3924 sensor->flags |= ENVSYS_FMONSTCHANGED;
3925 snprintf(sensor->desc, sizeof(sensor->desc), "%s:%d", DEVNAME(sc), i);
3926 }
3927
3928 static void
3929 mfii_attach_sensor(struct mfii_softc *sc, envsys_data_t *s)
3930 {
3931 if (sysmon_envsys_sensor_attach(sc->sc_sme, s))
3932 aprint_error_dev(sc->sc_dev,
3933 "failed to attach sensor %s\n", s->desc);
3934 }
3935
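/*
 * Register the sysmon envsys sensors: four BBU sensors, attached
 * only when the controller reports battery hardware, followed by
 * one drive sensor per logical disk.
 */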
3936 static int
3937 mfii_create_sensors(struct mfii_softc *sc)
3938 {
3939 int i, j, rv;
3940 const int nsensors = MFI_BBU_SENSORS + MFII_MAX_LD_EXT;
3941
3942 sc->sc_sme = sysmon_envsys_create();
3943 sc->sc_sensors = malloc(sizeof(envsys_data_t) * nsensors,
3944 M_DEVBUF, M_WAITOK | M_ZERO);
3945
3946 /* BBU */
3947 sc->sc_sensors[0].units = ENVSYS_INDICATOR;
3948 sc->sc_sensors[0].state = ENVSYS_SINVALID;
3949 sc->sc_sensors[0].value_cur = 0;
3950 sc->sc_sensors[1].units = ENVSYS_SVOLTS_DC;
3951 sc->sc_sensors[1].state = ENVSYS_SINVALID;
3952 sc->sc_sensors[1].value_cur = 0;
3953 sc->sc_sensors[2].units = ENVSYS_SAMPS;
3954 sc->sc_sensors[2].state = ENVSYS_SINVALID;
3955 sc->sc_sensors[2].value_cur = 0;
3956 sc->sc_sensors[3].units = ENVSYS_STEMP;
3957 sc->sc_sensors[3].state = ENVSYS_SINVALID;
3958 sc->sc_sensors[3].value_cur = 0;
3959
3960 if (ISSET(le32toh(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3961 sc->sc_bbuok = true;
3962 sc->sc_sensors[0].flags |= ENVSYS_FMONCRITICAL;
3963 snprintf(sc->sc_sensors[0].desc, sizeof(sc->sc_sensors[0].desc),
3964 "%s BBU state", DEVNAME(sc));
3965 snprintf(sc->sc_sensors[1].desc, sizeof(sc->sc_sensors[1].desc),
3966 "%s BBU voltage", DEVNAME(sc));
3967 snprintf(sc->sc_sensors[2].desc, sizeof(sc->sc_sensors[2].desc),
3968 "%s BBU current", DEVNAME(sc));
3969 snprintf(sc->sc_sensors[3].desc, sizeof(sc->sc_sensors[3].desc),
3970 "%s BBU temperature", DEVNAME(sc));
3971 for (i = 0; i < MFI_BBU_SENSORS; i++) {
3972 mfii_attach_sensor(sc, &sc->sc_sensors[i]);
3973 }
3974 }
3975
3976 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3977 j = i + MFI_BBU_SENSORS;
3978 mfii_init_ld_sensor(sc, &sc->sc_sensors[j], i);
3979 mfii_attach_sensor(sc, &sc->sc_sensors[j]);
3980 }
3981
3982 sc->sc_sme->sme_name = DEVNAME(sc);
3983 sc->sc_sme->sme_cookie = sc;
3984 sc->sc_sme->sme_refresh = mfii_refresh_sensor;
3985 rv = sysmon_envsys_register(sc->sc_sme);
3986 if (rv) {
3987 aprint_error_dev(sc->sc_dev,
3988 "unable to register with sysmon (rv = %d)\n", rv);
3989 sysmon_envsys_destroy(sc->sc_sme);
3990 sc->sc_sme = NULL;
3991 }
3992 return rv;
3993
3994 }
3995
3996 static int
3997 mfii_destroy_sensors(struct mfii_softc *sc)
3998 {
3999 if (sc->sc_sme == NULL)
4000 return 0;
4001 sysmon_envsys_unregister(sc->sc_sme);
4002 sc->sc_sme = NULL;
4003 free(sc->sc_sensors, M_DEVBUF);
4004 return 0;
4005 }
4006
4007 static void
4008 mfii_refresh_sensor(struct sysmon_envsys *sme, envsys_data_t *edata)
4009 {
4010 struct mfii_softc *sc = sme->sme_cookie;
4011
4012 if (edata->sensor >= MFI_BBU_SENSORS + MFII_MAX_LD_EXT)
4013 return;
4014
4015 if (edata->sensor < MFI_BBU_SENSORS) {
4016 if (sc->sc_bbuok)
4017 mfii_bbu(sc, edata);
4018 } else {
4019 mfii_refresh_ld_sensor(sc, edata);
4020 }
4021 }
4022