mfii.c revision 1.24 1 /* $NetBSD: mfii.c,v 1.24 2022/07/16 06:52:41 msaitoh Exp $ */
2 /* $OpenBSD: mfii.c,v 1.58 2018/08/14 05:22:21 jmatthew Exp $ */
3
4 /*
5 * Copyright (c) 2018 Manuel Bouyer <Manuel.Bouyer (at) lip6.fr>
6 * Copyright (c) 2012 David Gwynne <dlg (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include <sys/cdefs.h>
22 __KERNEL_RCSID(0, "$NetBSD: mfii.c,v 1.24 2022/07/16 06:52:41 msaitoh Exp $");
23
24 #include "bio.h"
25
26 #include <sys/atomic.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/ioctl.h>
31 #include <sys/device.h>
32 #include <sys/kernel.h>
33 #include <sys/proc.h>
34 #include <sys/cpu.h>
35 #include <sys/conf.h>
36 #include <sys/kauth.h>
37 #include <sys/workqueue.h>
38 #include <sys/malloc.h>
39
40 #include <uvm/uvm_param.h>
41
42 #include <dev/pci/pcidevs.h>
43 #include <dev/pci/pcivar.h>
44
45 #include <sys/bus.h>
46
47 #include <dev/sysmon/sysmonvar.h>
48 #include <sys/envsys.h>
49
50 #include <dev/scsipi/scsipi_all.h>
51 #include <dev/scsipi/scsi_all.h>
52 #include <dev/scsipi/scsi_spc.h>
53 #include <dev/scsipi/scsipi_disk.h>
54 #include <dev/scsipi/scsi_disk.h>
55 #include <dev/scsipi/scsiconf.h>
56
57 #if NBIO > 0
58 #include <dev/biovar.h>
59 #endif /* NBIO > 0 */
60
61 #include <dev/ic/mfireg.h>
62 #include <dev/pci/mpiireg.h>
63
64 #define MFII_BAR 0x14
65 #define MFII_BAR_35 0x10
66 #define MFII_PCI_MEMSIZE 0x2000 /* 8k */
67
68 #define MFII_OSTS_INTR_VALID 0x00000009
69 #define MFII_RPI 0x6c /* reply post host index */
70 #define MFII_OSP2 0xb4 /* outbound scratch pad 2 */
71 #define MFII_OSP3 0xb8 /* outbound scratch pad 3 */
72
73 #define MFII_REQ_TYPE_SCSI MPII_REQ_DESCR_SCSI_IO
74 #define MFII_REQ_TYPE_LDIO (0x7 << 1)
75 #define MFII_REQ_TYPE_MFA (0x1 << 1)
76 #define MFII_REQ_TYPE_NO_LOCK (0x2 << 1)
77 #define MFII_REQ_TYPE_HI_PRI (0x6 << 1)
78
79 #define MFII_REQ_MFA(_a) htole64((_a) | MFII_REQ_TYPE_MFA)
80
81 #define MFII_FUNCTION_PASSTHRU_IO (0xf0)
82 #define MFII_FUNCTION_LDIO_REQUEST (0xf1)
83
84 #define MFII_MAX_CHAIN_UNIT 0x00400000
85 #define MFII_MAX_CHAIN_MASK 0x000003E0
86 #define MFII_MAX_CHAIN_SHIFT 5
87
88 #define MFII_256K_IO 128
89 #define MFII_1MB_IO (MFII_256K_IO * 4)
90
91 #define MFII_CHAIN_FRAME_MIN 1024
92
/*
 * Request descriptor written to the controller's inbound queue to kick
 * off a command.  Layout is fixed by the hardware interface (see the
 * MPI2 request descriptor format in mpiireg.h); do not reorder fields.
 */
struct mfii_request_descr {
	u_int8_t	flags;		/* MFII_REQ_TYPE_* in bits 3:1 */
	u_int8_t	msix_index;
	u_int16_t	smid;		/* system message id: selects the
					 * MPII request frame slot */

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
101
102 #define MFII_RAID_CTX_IO_TYPE_SYSPD (0x1 << 4)
103 #define MFII_RAID_CTX_TYPE_CUDA (0x2 << 4)
104
/*
 * Per-command RAID context appended to the MPII SCSI IO request frame.
 * The firmware interprets this; layout is fixed by the hardware
 * interface.  Which of the flag encodings below applies depends on the
 * controller generation (see the mfii_iop parameter tables).
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;	/* ctx type (high nibble) / nseg */
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

/* gen-3.5 controllers reuse this field for routing flags */
#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;	/* extended status from firmware */
	u_int8_t	status;		/* MFI status from firmware */

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
135
/*
 * IEEE-style scatter/gather element as consumed by the firmware.
 * sg_flags is a combination of the MFII_SGE_* bits below; which chain
 * and end-of-list encodings are used is controller-generation specific
 * (selected via the mfii_iop tables).
 */
struct mfii_sge {
	u_int64_t	sg_addr;	/* bus address of the segment */
	u_int32_t	sg_len;		/* segment length in bytes */
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;
} __packed;

#define MFII_SGE_ADDR_MASK		(0x03)
#define MFII_SGE_ADDR_SYSTEM		(0x00)
#define MFII_SGE_ADDR_IOCDDR		(0x01)
#define MFII_SGE_ADDR_IOCPLB		(0x02)
#define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
#define MFII_SGE_END_OF_LIST		(0x40)
#define MFII_SGE_CHAIN_ELEMENT		(0x80)
151
152 #define MFII_REQUEST_SIZE 256
153
154 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
155
156 #define MFII_MAX_ROW 32
157 #define MFII_MAX_ARRAY 128
158
/*
 * Structures returned by the MR_DCMD_LD_MAP_GET_INFO dcmd: the
 * logical-disk map exposing per-array physical disk rows and the
 * current MPII device handle for each physical disk.  Layouts are
 * defined by the firmware interface.
 */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;

struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;	/* handle currently in use */
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];	/* per-path handles */
} __packed;

struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
181
/*
 * Frame used for task management (abort) requests.  The request and
 * reply views overlay fixed 128-byte regions; the MPII task request
 * and reply structures alias the raw byte arrays.
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define MFII_TASK_MGMT_FLAGS_LD			(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD			(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
198
/*
 * Foreign (imported from another controller) configuration scan
 * results.  We currently don't know the full details of the following
 * struct, so the per-config payload is kept opaque.
 */
struct mfii_foreign_scan_cfg {
	char data[24];
} __packed;

struct mfii_foreign_scan_info {
	uint32_t count; /* Number of foreign configs found */
	struct mfii_foreign_scan_cfg cfgs[8];
} __packed;
208
209 #define MFII_MAX_LD_EXT 256
210
/*
 * Extended logical-disk list, as returned by MR_DCMD_LD_GET_LIST when
 * the controller supports up to MFII_MAX_LD_EXT (256) volumes (mbox
 * byte 0 set to 1 in mfii_attach()).
 */
struct mfii_ld_list_ext {
	uint32_t		mll_no_ld;	/* number of valid entries */
	uint32_t		mll_res;
	struct {
		struct mfi_ld	mll_ld;
		uint8_t		mll_state;	/* states are the same as MFI_ */
		uint8_t		mll_res2;
		uint8_t		mll_res3;
		uint8_t		mll_res4;
		uint64_t	mll_size;	/* volume size */
	} mll_list[MFII_MAX_LD_EXT];
} __packed;
223
/*
 * Bookkeeping for one contiguous DMA-safe allocation (map + segment +
 * kernel mapping).  Allocated/freed by mfii_dmamem_alloc()/_free();
 * the accessor macros below are used throughout the driver.
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	void *			mdm_kva;
};
#define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
234
235 struct mfii_softc;
236
/* Data transfer direction of a command, from the host's point of view. */
typedef enum mfii_direction {
	MFII_DATA_NONE = 0,	/* no data phase */
	MFII_DATA_IN,		/* device -> host */
	MFII_DATA_OUT		/* host -> device */
} mfii_direction_t;
242
/*
 * Per-command control block.  Each ccb owns one slot in each of the
 * shared DMA areas (request frames, MFI frames, sense buffers, SGL
 * arrays); the *_dva members are the bus addresses of those slots and
 * the *_offset members are their offsets within the parent allocation
 * (used for partial bus_dmamap_sync calls).
 */
struct mfii_ccb {
	struct mfii_softc	*ccb_sc;
	void			*ccb_request;		/* MPII request frame (kva) */
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	void			*ccb_mfi;		/* MFI frame (kva) */
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	struct mfi_sense	*ccb_sense;		/* sense buffer; also
							 * reused to hold dcmd
							 * frames, see below */
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;		/* chained SGL slot */
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;		/* descriptor to post */

	bus_dmamap_t		ccb_dmamap64;
	bus_dmamap_t		ccb_dmamap32;
	bool			ccb_dma64;		/* which map is loaded */

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	mfii_direction_t	ccb_direction;

	void			*ccb_cookie;		/* completion context */
	kmutex_t		ccb_mtx;
	kcondvar_t		ccb_cv;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;		/* 1-based frame slot id */
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
286
/*
 * Controller-generation parameters: which BAR holds the registers and
 * which flag/type encodings the firmware expects in requests and SGEs.
 * One static instance exists per supported IOP family (see the tables
 * below), selected at match time via the PCI id.
 */
struct mfii_iop {
	int bar;			/* register BAR: MFII_BAR or MFII_BAR_35 */
	int num_sge_loc;		/* where max-SGE count is reported */
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;		/* MFII_REQ_TYPE_* for LD I/O */
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;	/* flags marking a chain SGE */
	u_int8_t sge_flag_eol;		/* flags marking the last SGE */
	u_int8_t iop_flag;
#define MFII_IOP_QUIRK_REGREAD		0x01
#define MFII_IOP_HAS_32BITDESC_BIT	0x02
};
301
/* Per-controller software state. */
struct mfii_softc {
	device_t		sc_dev;
	struct scsipi_channel	sc_chan;
	struct scsipi_adapter	sc_adapt;

	const struct mfii_iop	*sc_iop;	/* generation parameters */
	u_int			sc_iop_flag;
#define MFII_IOP_DESC_32BIT		0x01

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;	/* 32bit dma tag */
	bus_dma_tag_t		sc_dmat64;	/* 64bit tag if available,
						 * else same as sc_dmat */
	bool			sc_64bit_dma;

	void			*sc_ih;		/* interrupt handle */

	kmutex_t		sc_ccb_mtx;	/* protects sc_ccb_freeq */
	kmutex_t		sc_post_mtx;	/* serializes request posting */

	u_int			sc_max_fw_cmds;	/* limit reported by firmware */
	u_int			sc_max_cmds;	/* what we actually use */
	u_int			sc_max_sgl;	/* max SGEs per command */

	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	kmutex_t		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* shared DMA areas, sliced up per-ccb (see struct mfii_ccb) */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_mfi;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;	/* ccb array */
	struct mfii_ccb_list	sc_ccb_freeq;

	struct mfii_ccb		*sc_aen_ccb;	/* ccb of the pending AEN */
	struct workqueue	*sc_aen_wq;
	struct work		sc_aen_work;

	kmutex_t		sc_abort_mtx;	/* protects sc_abort_list */
	struct mfii_ccb_list	sc_abort_list;
	struct workqueue	*sc_abort_wq;
	struct work		sc_abort_work;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		bool		ld_present;
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFII_MAX_LD_EXT];
	int			sc_target_lds[MFII_MAX_LD_EXT];
	bool			sc_max256vd;	/* 256-volume firmware */

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfii_ld_list_ext	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
	int			sc_no_pd; /* used physical disks */
	int			sc_ld_sz; /* sizeof sc_ld_details */

	/* mgmt lock */
	kmutex_t		sc_lock;
	bool			sc_running;

	/* sensors */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
	bool			sc_bbuok;	/* battery backup unit OK */

	device_t		sc_child;	/* attached scsibus */
};
381
382 // #define MFII_DEBUG
383 #ifdef MFII_DEBUG
384 #define DPRINTF(x...) do { if (mfii_debug) printf(x); } while(0)
385 #define DNPRINTF(n,x...) do { if (mfii_debug & n) printf(x); } while(0)
386 #define MFII_D_CMD 0x0001
387 #define MFII_D_INTR 0x0002
388 #define MFII_D_MISC 0x0004
389 #define MFII_D_DMA 0x0008
390 #define MFII_D_IOCTL 0x0010
391 #define MFII_D_RW 0x0020
392 #define MFII_D_MEM 0x0040
393 #define MFII_D_CCB 0x0080
394 uint32_t mfii_debug = 0
395 /* | MFII_D_CMD */
396 /* | MFII_D_INTR */
397 | MFII_D_MISC
398 /* | MFII_D_DMA */
399 /* | MFII_D_IOCTL */
400 /* | MFII_D_RW */
401 /* | MFII_D_MEM */
402 /* | MFII_D_CCB */
403 ;
404 #else
405 #define DPRINTF(x...)
406 #define DNPRINTF(n,x...)
407 #endif
408
409 static int mfii_match(device_t, cfdata_t, void *);
410 static void mfii_attach(device_t, device_t, void *);
411 static int mfii_detach(device_t, int);
412 static int mfii_rescan(device_t, const char *, const int *);
413 static void mfii_childdetached(device_t, device_t);
414 static bool mfii_suspend(device_t, const pmf_qual_t *);
415 static bool mfii_resume(device_t, const pmf_qual_t *);
416 static bool mfii_shutdown(device_t, int);
417
418
419 CFATTACH_DECL3_NEW(mfii, sizeof(struct mfii_softc),
420 mfii_match, mfii_attach, mfii_detach, NULL, mfii_rescan,
421 mfii_childdetached, DVF_DETACH_SHUTDOWN);
422
423 static void mfii_scsipi_request(struct scsipi_channel *,
424 scsipi_adapter_req_t, void *);
425 static void mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
426
427 #define DEVNAME(_sc) (device_xname((_sc)->sc_dev))
428
429 static u_int32_t mfii_read(struct mfii_softc *, bus_size_t);
430 static void mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
431
432 static struct mfii_dmamem * mfii_dmamem_alloc(struct mfii_softc *, size_t);
433 static void mfii_dmamem_free(struct mfii_softc *,
434 struct mfii_dmamem *);
435
436 static struct mfii_ccb * mfii_get_ccb(struct mfii_softc *);
437 static void mfii_put_ccb(struct mfii_softc *, struct mfii_ccb *);
438 static int mfii_init_ccb(struct mfii_softc *);
439 static void mfii_scrub_ccb(struct mfii_ccb *);
440
441 static int mfii_transition_firmware(struct mfii_softc *);
442 static int mfii_initialise_firmware(struct mfii_softc *);
443 static int mfii_get_info(struct mfii_softc *);
444
445 static void mfii_start(struct mfii_softc *, struct mfii_ccb *);
446 static void mfii_start64(struct mfii_softc *, struct mfii_ccb *);
447 static void mfii_start_common(struct mfii_softc *,
448 struct mfii_ccb *, bool);
449 static void mfii_done(struct mfii_softc *, struct mfii_ccb *);
450 static int mfii_poll(struct mfii_softc *, struct mfii_ccb *);
451 static void mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
452 static int mfii_exec(struct mfii_softc *, struct mfii_ccb *);
453 static void mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
454 static int mfii_my_intr(struct mfii_softc *);
455 static int mfii_intr(void *);
456 static void mfii_postq(struct mfii_softc *);
457
458 static int mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
459 void *, int);
460 static int mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
461 void *, int);
462
463 static int mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
464
465 static int mfii_mgmt(struct mfii_softc *, uint32_t,
466 const union mfi_mbox *, void *, size_t,
467 mfii_direction_t, bool);
468 static int mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
469 uint32_t, const union mfi_mbox *, void *, size_t,
470 mfii_direction_t, bool);
471 static void mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
472
473 static int mfii_scsi_cmd_io(struct mfii_softc *,
474 struct mfii_ccb *, struct scsipi_xfer *);
475 static int mfii_scsi_cmd_cdb(struct mfii_softc *,
476 struct mfii_ccb *, struct scsipi_xfer *);
477 static void mfii_scsi_cmd_tmo(void *);
478
479 static void mfii_abort_task(struct work *, void *);
480 static void mfii_abort(struct mfii_softc *, struct mfii_ccb *,
481 uint16_t, uint16_t, uint8_t, uint32_t);
482 static void mfii_scsi_cmd_abort_done(struct mfii_softc *,
483 struct mfii_ccb *);
484
485 static int mfii_aen_register(struct mfii_softc *);
486 static void mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
487 struct mfii_dmamem *, uint32_t);
488 static void mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
489 static void mfii_aen(struct work *, void *);
490 static void mfii_aen_unregister(struct mfii_softc *);
491
492 static void mfii_aen_pd_insert(struct mfii_softc *,
493 const struct mfi_evtarg_pd_address *);
494 static void mfii_aen_pd_remove(struct mfii_softc *,
495 const struct mfi_evtarg_pd_address *);
496 static void mfii_aen_pd_state_change(struct mfii_softc *,
497 const struct mfi_evtarg_pd_state *);
498 static void mfii_aen_ld_update(struct mfii_softc *);
499
500 #if NBIO > 0
501 static int mfii_ioctl(device_t, u_long, void *);
502 static int mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
503 static int mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
504 static int mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
505 static int mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
506 static int mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
507 static int mfii_ioctl_setstate(struct mfii_softc *,
508 struct bioc_setstate *);
509 static int mfii_bio_hs(struct mfii_softc *, int, int, void *);
510 static int mfii_bio_getitall(struct mfii_softc *);
511 #endif /* NBIO > 0 */
512
513 #if 0
514 static const char *mfi_bbu_indicators[] = {
515 "pack missing",
516 "voltage low",
517 "temp high",
518 "charge active",
519 "discharge active",
520 "learn cycle req'd",
521 "learn cycle active",
522 "learn cycle failed",
523 "learn cycle timeout",
524 "I2C errors",
525 "replace pack",
526 "low capacity",
527 "periodic learn req'd"
528 };
529 #endif
530
531 static void mfii_init_ld_sensor(struct mfii_softc *, envsys_data_t *, int);
532 static void mfii_refresh_ld_sensor(struct mfii_softc *, envsys_data_t *);
533 static void mfii_attach_sensor(struct mfii_softc *, envsys_data_t *);
534 static int mfii_create_sensors(struct mfii_softc *);
535 static int mfii_destroy_sensors(struct mfii_softc *);
536 static void mfii_refresh_sensor(struct sysmon_envsys *, envsys_data_t *);
537 static void mfii_bbu(struct mfii_softc *, envsys_data_t *);
538
539 /*
540 * mfii boards support asynchronous (and non-polled) completion of
541 * dcmds by proxying them through a passthru mpii command that points
542 * at a dcmd frame. since the passthru command is submitted like
543 * the scsi commands using an SMID in the request descriptor,
544 * ccb_request memory * must contain the passthru command because
545 * that is what the SMID refers to. this means ccb_request cannot
546 * contain the dcmd. rather than allocating separate dma memory to
547 * hold the dcmd, we reuse the sense memory buffer for it.
548 */
549
550 static void mfii_dcmd_start(struct mfii_softc *, struct mfii_ccb *);
551
552 static inline void
553 mfii_dcmd_scrub(struct mfii_ccb *ccb)
554 {
555 memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
556 }
557
558 static inline struct mfi_dcmd_frame *
559 mfii_dcmd_frame(struct mfii_ccb *ccb)
560 {
561 CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
562 return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
563 }
564
565 static inline void
566 mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
567 {
568 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
569 ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
570 }
571
572 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
573
/*
 * Per-generation parameter tables.  Field order matches struct
 * mfii_iop: bar, num_sge_loc, ldio_ctx_reg_lock_flags, ldio_req_type,
 * ldio_ctx_type_nseg, sge_flag_chain, sge_flag_eol, iop_flag.
 */
static const struct mfii_iop mfii_iop_thunderbolt = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	0,
	MFII_REQ_TYPE_LDIO,
	0,
	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
	0,
	0
};

/*
 * a lot of these values depend on us not implementing fastpath yet.
 */
static const struct mfii_iop mfii_iop_25 = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST,
	0
};

static const struct mfii_iop mfii_iop_35 = {
	MFII_BAR_35,
	MFII_IOP_NUM_SGE_LOC_35,
	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST,
	0
};

/* Aero support is not enabled yet; kept for reference. */
#if 0
static const struct mfii_iop mfii_iop_aero = {
	MFII_BAR_35,
	MFII_IOP_NUM_SGE_LOC_35,
	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST,
	MFII_IOP_QUIRK_REGREAD | MFII_IOP_HAS_32BITDESC_BIT
};
#endif
622
/* PCI id -> iop parameter table mapping for all supported controllers. */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};

static const struct mfii_device mfii_devices[] = {
	/* Fusion */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	/* Fury */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	/* Invader */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	/* Intruder */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3316,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3324,
	    &mfii_iop_25 },
	/* Cutlass */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_32XX_1,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_32XX_2,
	    &mfii_iop_25 },
	/* Crusader */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	/* Ventura */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 },
	/* Tomcat */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	/* Harpoon */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
#if 0
	/* Aero */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_39XX_2,
	    &mfii_iop_aero },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_39XX_3,
	    &mfii_iop_aero },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_38XX_2,
	    &mfii_iop_aero },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_38XX_3,
	    &mfii_iop_aero }
#endif
};
677
678 static const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
679
680 static const struct mfii_iop *
681 mfii_find_iop(struct pci_attach_args *pa)
682 {
683 const struct mfii_device *mpd;
684 int i;
685
686 for (i = 0; i < __arraycount(mfii_devices); i++) {
687 mpd = &mfii_devices[i];
688
689 if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
690 mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
691 return (mpd->mpd_iop);
692 }
693
694 return (NULL);
695 }
696
697 static int
698 mfii_match(device_t parent, cfdata_t match, void *aux)
699 {
700 return ((mfii_find_iop(aux) != NULL) ? 2 : 0);
701 }
702
703 static void
704 mfii_attach(device_t parent, device_t self, void *aux)
705 {
706 struct mfii_softc *sc = device_private(self);
707 struct pci_attach_args *pa = aux;
708 pcireg_t memtype;
709 pci_intr_handle_t ih;
710 char intrbuf[PCI_INTRSTR_LEN];
711 const char *intrstr;
712 u_int32_t status, scpad2, scpad3;
713 int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
714 struct scsipi_adapter *adapt = &sc->sc_adapt;
715 struct scsipi_channel *chan = &sc->sc_chan;
716 union mfi_mbox mbox;
717
718 /* init sc */
719 sc->sc_dev = self;
720 sc->sc_iop = mfii_find_iop(aux);
721 sc->sc_dmat = pa->pa_dmat;
722 if (pci_dma64_available(pa)) {
723 sc->sc_dmat64 = pa->pa_dmat64;
724 sc->sc_64bit_dma = 1;
725 } else {
726 sc->sc_dmat64 = pa->pa_dmat;
727 sc->sc_64bit_dma = 0;
728 }
729 SIMPLEQ_INIT(&sc->sc_ccb_freeq);
730 mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
731 mutex_init(&sc->sc_post_mtx, MUTEX_DEFAULT, IPL_BIO);
732 mutex_init(&sc->sc_reply_postq_mtx, MUTEX_DEFAULT, IPL_BIO);
733
734 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
735
736 sc->sc_aen_ccb = NULL;
737 snprintf(intrbuf, sizeof(intrbuf) - 1, "%saen", device_xname(self));
738 workqueue_create(&sc->sc_aen_wq, intrbuf, mfii_aen, sc,
739 PRI_BIO, IPL_BIO, WQ_MPSAFE);
740
741 snprintf(intrbuf, sizeof(intrbuf) - 1, "%sabrt", device_xname(self));
742 workqueue_create(&sc->sc_abort_wq, intrbuf, mfii_abort_task,
743 sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
744
745 mutex_init(&sc->sc_abort_mtx, MUTEX_DEFAULT, IPL_BIO);
746 SIMPLEQ_INIT(&sc->sc_abort_list);
747
748 /* wire up the bus shizz */
749 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
750 memtype |= PCI_MAPREG_MEM_TYPE_32BIT;
751 if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
752 &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)) {
753 aprint_error(": unable to map registers\n");
754 return;
755 }
756
757 /* disable interrupts */
758 mfii_write(sc, MFI_OMSK, 0xffffffff);
759
760 if (pci_intr_map(pa, &ih) != 0) {
761 aprint_error(": unable to map interrupt\n");
762 goto pci_unmap;
763 }
764 intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf));
765 pci_intr_setattr(pa->pa_pc, &ih, PCI_INTR_MPSAFE, true);
766
767 /* lets get started */
768 if (mfii_transition_firmware(sc))
769 goto pci_unmap;
770 sc->sc_running = true;
771
772 /* determine max_cmds (refer to the Linux megaraid_sas driver) */
773 scpad3 = mfii_read(sc, MFII_OSP3);
774 status = mfii_fw_state(sc);
775 sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
776 if (sc->sc_max_fw_cmds == 0)
777 sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
778 /*
779 * reduce max_cmds by 1 to ensure that the reply queue depth does not
780 * exceed FW supplied max_fw_cmds.
781 */
782 sc->sc_max_cmds = uimin(sc->sc_max_fw_cmds, 1024) - 1;
783
784 /* determine max_sgl (refer to the Linux megaraid_sas driver) */
785 scpad2 = mfii_read(sc, MFII_OSP2);
786 chain_frame_sz =
787 ((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
788 ((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
789 if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
790 chain_frame_sz = MFII_CHAIN_FRAME_MIN;
791
792 nsge_in_io = (MFII_REQUEST_SIZE -
793 sizeof(struct mpii_msg_scsi_io) -
794 sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
795 nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
796
797 /* round down to nearest power of two */
798 sc->sc_max_sgl = 1;
799 while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
800 sc->sc_max_sgl <<= 1;
801
802 /* Check for atomic(32bit) descriptor */
803 if (((sc->sc_iop->iop_flag & MFII_IOP_HAS_32BITDESC_BIT) != 0) &&
804 ((scpad2 & MFI_STATE_ATOMIC_DESCRIPTOR) != 0))
805 sc->sc_iop_flag |= MFII_IOP_DESC_32BIT;
806
807 DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
808 DEVNAME(sc), status, scpad2, scpad3);
809 DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
810 DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
811 DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
812 "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
813 sc->sc_max_sgl);
814
815 /* sense memory */
816 CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
817 sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
818 if (sc->sc_sense == NULL) {
819 aprint_error(": unable to allocate sense memory\n");
820 goto pci_unmap;
821 }
822
823 /* reply post queue */
824 sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
825
826 sc->sc_reply_postq = mfii_dmamem_alloc(sc,
827 sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
828 if (sc->sc_reply_postq == NULL)
829 goto free_sense;
830
831 memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
832 MFII_DMA_LEN(sc->sc_reply_postq));
833
834 /* MPII request frame array */
835 sc->sc_requests = mfii_dmamem_alloc(sc,
836 MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
837 if (sc->sc_requests == NULL)
838 goto free_reply_postq;
839
840 /* MFI command frame array */
841 sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
842 if (sc->sc_mfi == NULL)
843 goto free_requests;
844
845 /* MPII SGL array */
846 sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
847 sizeof(struct mfii_sge) * sc->sc_max_sgl);
848 if (sc->sc_sgl == NULL)
849 goto free_mfi;
850
851 if (mfii_init_ccb(sc) != 0) {
852 aprint_error(": could not init ccb list\n");
853 goto free_sgl;
854 }
855
856 /* kickstart firmware with all addresses and pointers */
857 if (mfii_initialise_firmware(sc) != 0) {
858 aprint_error(": could not initialize firmware\n");
859 goto free_sgl;
860 }
861
862 mutex_enter(&sc->sc_lock);
863 if (mfii_get_info(sc) != 0) {
864 mutex_exit(&sc->sc_lock);
865 aprint_error(": could not retrieve controller information\n");
866 goto free_sgl;
867 }
868 mutex_exit(&sc->sc_lock);
869
870 aprint_normal(": \"%s\", firmware %s",
871 sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
872 if (le16toh(sc->sc_info.mci_memory_size) > 0) {
873 aprint_normal(", %uMB cache",
874 le16toh(sc->sc_info.mci_memory_size));
875 }
876 aprint_normal("\n");
877 aprint_naive("\n");
878
879 sc->sc_ih = pci_intr_establish_xname(sc->sc_pc, ih, IPL_BIO,
880 mfii_intr, sc, DEVNAME(sc));
881 if (sc->sc_ih == NULL) {
882 aprint_error_dev(self, "can't establish interrupt");
883 if (intrstr)
884 aprint_error(" at %s", intrstr);
885 aprint_error("\n");
886 goto free_sgl;
887 }
888 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
889
890 for (i = 0; i < sc->sc_info.mci_lds_present; i++)
891 sc->sc_ld[i].ld_present = 1;
892
893 sc->sc_max256vd =
894 (sc->sc_info.mci_adapter_ops3 & MFI_INFO_AOPS3_SUPP_MAX_EXT_LDS) ?
895 true : false;
896
897 if (sc->sc_max256vd)
898 aprint_verbose_dev(self, "Max 256 VD support\n");
899
900 memset(adapt, 0, sizeof(*adapt));
901 adapt->adapt_dev = sc->sc_dev;
902 adapt->adapt_nchannels = 1;
903 /* keep a few commands for management */
904 if (sc->sc_max_cmds > 4)
905 adapt->adapt_openings = sc->sc_max_cmds - 4;
906 else
907 adapt->adapt_openings = sc->sc_max_cmds;
908 adapt->adapt_max_periph = adapt->adapt_openings;
909 adapt->adapt_request = mfii_scsipi_request;
910 adapt->adapt_minphys = minphys;
911 adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
912
913 memset(chan, 0, sizeof(*chan));
914 chan->chan_adapter = adapt;
915 chan->chan_bustype = &scsi_sas_bustype;
916 chan->chan_channel = 0;
917 chan->chan_flags = 0;
918 chan->chan_nluns = 8;
919 chan->chan_ntargets = sc->sc_info.mci_max_lds;
920 chan->chan_id = sc->sc_info.mci_max_lds;
921
922 mfii_rescan(sc->sc_dev, NULL, NULL);
923
924 if (mfii_aen_register(sc) != 0) {
925 /* error printed by mfii_aen_register */
926 goto intr_disestablish;
927 }
928
929 memset(&mbox, 0, sizeof(mbox));
930 if (sc->sc_max256vd)
931 mbox.b[0] = 1;
932 mutex_enter(&sc->sc_lock);
933 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, &mbox, &sc->sc_ld_list,
934 sizeof(sc->sc_ld_list), MFII_DATA_IN, true) != 0) {
935 mutex_exit(&sc->sc_lock);
936 aprint_error_dev(self,
937 "getting list of logical disks failed\n");
938 goto intr_disestablish;
939 }
940 mutex_exit(&sc->sc_lock);
941 memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
942 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
943 int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
944 sc->sc_target_lds[target] = i;
945 }
946
947 /* enable interrupts */
948 mfii_write(sc, MFI_OSTS, 0xffffffff);
949 mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
950
951 #if NBIO > 0
952 if (bio_register(sc->sc_dev, mfii_ioctl) != 0)
953 panic("%s: controller registration failed", DEVNAME(sc));
954 #endif /* NBIO > 0 */
955
956 if (mfii_create_sensors(sc) != 0)
957 aprint_error_dev(self, "unable to create sensors\n");
958
959 if (!pmf_device_register1(sc->sc_dev, mfii_suspend, mfii_resume,
960 mfii_shutdown))
961 aprint_error_dev(self, "couldn't establish power handler\n");
962 return;
963 intr_disestablish:
964 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
965 free_sgl:
966 mfii_dmamem_free(sc, sc->sc_sgl);
967 free_mfi:
968 mfii_dmamem_free(sc, sc->sc_mfi);
969 free_requests:
970 mfii_dmamem_free(sc, sc->sc_requests);
971 free_reply_postq:
972 mfii_dmamem_free(sc, sc->sc_reply_postq);
973 free_sense:
974 mfii_dmamem_free(sc, sc->sc_sense);
975 pci_unmap:
976 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
977 }
978
/*
 * Disabled OpenBSD code for tracking physical-disk device handles via
 * SRP (serialized read protection).  NetBSD has no SRP and no sc_pd in
 * this driver, and the free() calls use OpenBSD's three-argument
 * signature, so this cannot compile here as-is; kept for reference.
 */
#if 0
struct srp_gc mfii_dev_handles_gc =
    SRP_GC_INITIALIZER(mfii_dev_handles_dtor, NULL);

static inline uint16_t
mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
{
	struct srp_ref sr;
	uint16_t *map, handle;

	map = srp_enter(&sr, &sc->sc_pd->pd_dev_handles);
	handle = map[target];
	srp_leave(&sr);

	return (handle);
}

static int
mfii_dev_handles_update(struct mfii_softc *sc)
{
	struct mfii_ld_map *lm;
	uint16_t *dev_handles = NULL;
	int i;
	int rv = 0;

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);

	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
	    MFII_DATA_IN, false);

	if (rv != 0) {
		rv = EIO;
		goto free_lm;
	}

	dev_handles = mallocarray(MFI_MAX_PD, sizeof(*dev_handles),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MFI_MAX_PD; i++)
		dev_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;

	/* commit the updated info */
	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
	srp_update_locked(&mfii_dev_handles_gc,
	    &sc->sc_pd->pd_dev_handles, dev_handles);

free_lm:
	free(lm, M_TEMP, sizeof(*lm));

	return (rv);
}

static void
mfii_dev_handles_dtor(void *null, void *v)
{
	uint16_t *dev_handles = v;

	free(dev_handles, M_DEVBUF, sizeof(*dev_handles) * MFI_MAX_PD);
}
#endif /* 0 */
1039
/*
 * Autoconf detach: tear the controller down in roughly the reverse
 * order of attach.  Returns 0 on success or the error from detaching
 * a child device.
 */
static int
mfii_detach(device_t self, int flags)
{
	struct mfii_softc *sc = device_private(self);
	int error;

	/* No interrupt handler means attach never completed; nothing to undo. */
	if (sc->sc_ih == NULL)
		return (0);

	/* Detach the scsibus child first; bail if it refuses. */
	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

	mfii_destroy_sensors(sc);
#if NBIO > 0
	bio_unregister(sc->sc_dev);
#endif
	/* Ask the firmware to shut down before we stop servicing it. */
	mfii_shutdown(sc->sc_dev, 0);
	/* Mask all controller interrupts. */
	mfii_write(sc, MFI_OMSK, 0xffffffff);

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	/* Release DMA resources, then the register mapping. */
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
1070
1071 static int
1072 mfii_rescan(device_t self, const char *ifattr, const int *locators)
1073 {
1074 struct mfii_softc *sc = device_private(self);
1075
1076 if (sc->sc_child != NULL)
1077 return 0;
1078
1079 sc->sc_child = config_found(self, &sc->sc_chan, scsiprint,
1080 CFARGS_NONE);
1081 return 0;
1082 }
1083
1084 static void
1085 mfii_childdetached(device_t self, device_t child)
1086 {
1087 struct mfii_softc *sc = device_private(self);
1088
1089 KASSERT(self == sc->sc_dev);
1090 KASSERT(child == sc->sc_child);
1091
1092 if (child == sc->sc_child)
1093 sc->sc_child = NULL;
1094 }
1095
/*
 * pmf suspend handler.  Not implemented: returning false vetoes the
 * suspend instead of leaving the controller in an undefined state.
 */
static bool
mfii_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1102
/*
 * pmf resume handler.  Not implemented: returning false reports the
 * resume as failed (see mfii_suspend()).
 */
static bool
mfii_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1109
1110 static bool
1111 mfii_shutdown(device_t dev, int how)
1112 {
1113 struct mfii_softc *sc = device_private(dev);
1114 struct mfii_ccb *ccb;
1115 union mfi_mbox mbox;
1116 bool rv = true;
1117
1118 memset(&mbox, 0, sizeof(mbox));
1119
1120 mutex_enter(&sc->sc_lock);
1121 DNPRINTF(MFII_D_MISC, "%s: mfii_shutdown\n", DEVNAME(sc));
1122 ccb = mfii_get_ccb(sc);
1123 if (ccb == NULL)
1124 return false;
1125 mutex_enter(&sc->sc_ccb_mtx);
1126 if (sc->sc_running) {
1127 sc->sc_running = 0; /* prevent new commands */
1128 mutex_exit(&sc->sc_ccb_mtx);
1129 #if 0 /* XXX why does this hang ? */
1130 mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1131 mfii_scrub_ccb(ccb);
1132 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
1133 NULL, 0, MFII_DATA_NONE, true)) {
1134 aprint_error_dev(dev,
1135 "shutdown: cache flush failed\n");
1136 rv = false;
1137 goto fail;
1138 }
1139 printf("ok1\n");
1140 #endif
1141 mbox.b[0] = 0;
1142 mfii_scrub_ccb(ccb);
1143 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, &mbox,
1144 NULL, 0, MFII_DATA_NONE, true)) {
1145 aprint_error_dev(dev, "shutdown: "
1146 "firmware shutdown failed\n");
1147 rv = false;
1148 goto fail;
1149 }
1150 } else {
1151 mutex_exit(&sc->sc_ccb_mtx);
1152 }
1153 fail:
1154 mfii_put_ccb(sc, ccb);
1155 mutex_exit(&sc->sc_lock);
1156 return rv;
1157 }
1158
1159 /* Register read function without retry */
1160 static inline u_int32_t
1161 mfii_read_wor(struct mfii_softc *sc, bus_size_t r)
1162 {
1163 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1164 BUS_SPACE_BARRIER_READ);
1165 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
1166 }
1167
1168 static u_int32_t
1169 mfii_read(struct mfii_softc *sc, bus_size_t r)
1170 {
1171 uint32_t rv;
1172 int i = 0;
1173
1174 if ((sc->sc_iop->iop_flag & MFII_IOP_QUIRK_REGREAD) != 0) {
1175 do {
1176 rv = mfii_read_wor(sc, r);
1177 i++;
1178 } while ((rv == 0) && (i < 3));
1179 } else
1180 rv = mfii_read_wor(sc, r);
1181
1182 return rv;
1183 }
1184
/*
 * Register write followed by a write barrier so the store is pushed
 * out before any subsequent register access.
 */
static void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1192
/*
 * Allocate a contiguous, page-aligned DMA buffer of "size" bytes and
 * map it for both kernel and device access.  Returns NULL on failure;
 * the goto chain unwinds exactly the steps that succeeded.  The buffer
 * is zeroed before return.
 */
static struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_WAITOK | M_ZERO);
	m->mdm_size = size;

	/* Single segment: the firmware structures need to be contiguous. */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(m->mdm_kva, 0, size);
	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF);

	return (NULL);
}
1232
/*
 * Release a buffer from mfii_dmamem_alloc(): unload, unmap, free the
 * segment, destroy the map, then free the descriptor — the exact
 * reverse of the allocation order.
 */
static void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF);
}
1242
/*
 * Submit a DCMD built in the ccb's sense-buffer slot (presumably via
 * mfii_dcmd_frame() — not visible here): wrap it in an MPII passthru
 * request whose single chain SGE points at that frame, then post it.
 */
static void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* raid context and chain SGE live directly after the io header */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	sge->sg_addr = htole64(ccb->ccb_sense_dva);
	sge->sg_len = htole32(sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	/*
	 * NOTE(review): le16toh here looks like it should be htole16
	 * (identical on little-endian hosts) — verify on big-endian.
	 */
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1263
/*
 * Prime asynchronous event notification (AEN): query the event log to
 * learn the boot sequence number, allocate a DMA buffer for event
 * details, and post the first EVENT_WAIT command.  The ccb and the
 * DMA buffer remain in use for the life of the controller; each
 * completed event re-arms the same ccb via mfii_aen_start().
 * Returns 0 on success, ENOMEM or EIO on failure.
 */
static int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), MFII_DATA_IN, true);
	if (rv != 0) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to get event info\n");
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate event data\n");
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, le32toh(mel.mel_boot_seq_num));

	return (0);
}
1302
/*
 * (Re)arm the AEN command: build an MR_DCMD_CTRL_EVENT_WAIT DCMD with
 * a single 64-bit SGL entry pointing at the event-detail buffer and
 * post it.  "seq" is the first event sequence number of interest; the
 * command completes when the firmware logs an event at or after it.
 */
static void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* class DEBUG, all locales — TODO confirm this selects every event */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	hdr->mfh_data_len = htole32(MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	/* mbox word 0: starting sequence number; word 1: class/locale filter */
	dcmd->mdf_mbox.w[0] = htole32(seq);
	dcmd->mdf_mbox.w[1] = htole32(mec.mec_word);
	sgl->sg64[0].addr = htole64(MFII_DMA_DVA(mdm));
	sgl->sg64[0].len = htole32(MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1340
/*
 * Completion handler for the AEN command: hand the event off to the
 * workqueue thread (mfii_aen()) so it can run autoconf-related work.
 */
static void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/*
	 * defer to a thread with KERNEL_LOCK so we can run autoconf
	 * We shouldn't have more than one AEN command pending at a time,
	 * so no need to lock
	 */
	if (sc->sc_running)
		workqueue_enqueue(sc->sc_aen_wq, &sc->sc_aen_work, NULL);
}
1354
/*
 * Workqueue handler for asynchronous events: sync the event-detail
 * buffer, dispatch on the event code, then re-arm the AEN command
 * starting at the next sequence number.
 */
static void
mfii_aen(struct work *wk, void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);

	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	DNPRINTF(MFII_D_MISC, "%s: %u %08x %02x %s\n", DEVNAME(sc),
	    le32toh(med->med_seq_num), le32toh(med->med_code),
	    med->med_arg_type, med->med_description);

	switch (le32toh(med->med_code)) {
	case MR_EVT_PD_INSERTED_EXT:
		/* only act when the argument actually carries a PD address */
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
	case MR_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MR_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MR_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MR_EVT_LD_CREATED:
	case MR_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	/* re-arm for the event after this one */
	mfii_aen_start(sc, ccb, mdm, le32toh(med->med_seq_num) + 1);
}
1404
1405 static void
1406 mfii_aen_pd_insert(struct mfii_softc *sc,
1407 const struct mfi_evtarg_pd_address *pd)
1408 {
1409 printf("%s: physical disk inserted id %d enclosure %d\n", DEVNAME(sc),
1410 le16toh(pd->device_id), le16toh(pd->encl_id));
1411 }
1412
1413 static void
1414 mfii_aen_pd_remove(struct mfii_softc *sc,
1415 const struct mfi_evtarg_pd_address *pd)
1416 {
1417 printf("%s: physical disk removed id %d enclosure %d\n", DEVNAME(sc),
1418 le16toh(pd->device_id), le16toh(pd->encl_id));
1419 }
1420
/*
 * Physical-disk state-change event: currently ignored.
 */
static void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
	return;
}
1427
/*
 * Handle LD created/deleted events: re-fetch the logical-disk list
 * from the firmware, diff it against the cached target map, attach
 * sensors for new drives and detach targets/sensors for removed ones,
 * then commit the new map.
 */
static void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	union mfi_mbox mbox;
	int i, target, old, nld;
	int newlds[MFII_MAX_LD_EXT];

	memset(&mbox, 0, sizeof(mbox));
	/* mbox byte 0 selects the extended (256 VD) list format */
	if (sc->sc_max256vd)
		mbox.b[0] = 1;
	mutex_enter(&sc->sc_lock);
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, &mbox, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false) != 0) {
		mutex_exit(&sc->sc_lock);
		DNPRINTF(MFII_D_MISC,
		    "%s: getting list of logical disks failed\n", DEVNAME(sc));
		return;
	}
	mutex_exit(&sc->sc_lock);

	/* newlds[target] = index into mll_list, or -1 if absent */
	memset(newlds, -1, sizeof(newlds));

	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, sc->sc_ld_list.mll_list[i].mll_state);
		newlds[target] = i;
	}

	for (i = 0; i < MFII_MAX_LD_EXT; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		/*
		 * NOTE(review): in these messages 'i' is the target id
		 * and 'nld'/'old' are list indices; the wording looks
		 * swapped — verify against the mfi(4) convention.
		 */
		if (old == -1 && nld != -1) {
			printf("%s: logical drive %d added (target %d)\n",
			    DEVNAME(sc), i, nld);

			// XXX scsi_probe_target(sc->sc_scsibus, i);

			mfii_init_ld_sensor(sc, &sc->sc_sensors[i], i);
			mfii_attach_sensor(sc, &sc->sc_sensors[i]);
		} else if (nld == -1 && old != -1) {
			printf("%s: logical drive %d removed (target %d)\n",
			    DEVNAME(sc), i, old);

			scsipi_target_detach(&sc->sc_chan, i, 0, DETACH_FORCE);
			sysmon_envsys_sensor_detach(sc->sc_sme,
			    &sc->sc_sensors[i]);
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1481
/*
 * Tear down asynchronous event notification.  Not implemented: the
 * in-flight EVENT_WAIT command is simply abandoned (interrupts are
 * masked by the caller, mfii_detach()).
 */
static void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1487
/*
 * Drive the firmware state machine to MFI_STATE_READY.  For each
 * intermediate state, optionally nudge the IOP via the inbound
 * doorbell, then poll every 100ms for up to "max_wait" seconds for a
 * state change.  Returns 0 when READY, 1 on fault, unknown state, or
 * timeout.
 */
static int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t fw_state, cur_state;
	int max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll every 100ms, max_wait seconds total */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1541
/*
 * Fetch the controller-info structure (MR_DCMD_CTRL_GET_INFO) into
 * sc_info and, under MFII_DEBUG, dump most of its contents.  Returns
 * the mfii_mgmt() error, or 0 on success.
 */
static int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), MFII_DATA_IN, true);

	if (rv != 0)
		return (rv);

	/* everything below is debug output only */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ",
		    sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ",
		    sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}
1698
/*
 * Issue an MFI frame in "MFA" (legacy MegaRAID) mode and busy-wait for
 * completion by watching the command-status byte in the frame itself
 * (the frame is flagged so it is not posted to the reply queue).
 * Polls in 1ms steps for up to ~5 seconds.  Returns 0 on completion,
 * 1 on timeout (and marks the ccb MFI_CCB_F_ERR).
 */
static int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header *hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	/* sentinel: firmware overwrites this when the command completes */
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	/*
	 * Even if the Aero card supports 32bit descriptor, 64bit descriptor
	 * access is required for MFI_CMD_INIT.
	 * Currently, mfii_mfa_poll() is called for MFI_CMD_INIT only.
	 */
	mfii_start64(sc, ccb);

	for (;;) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	/* tear down the data mapping, if any */
	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	return (rv);
}
1759
/*
 * Run a command to completion by polling the reply post queue instead
 * of waiting for an interrupt.  Temporarily redirects the ccb's
 * completion callback; the original callback/cookie are restored and
 * the original callback invoked once the command finishes.
 */
static int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	/* mfii_poll_done() clears rv via the cookie when the ccb completes */
	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
1785
1786 static void
1787 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1788 {
1789 int *rv = ccb->ccb_cookie;
1790
1791 *rv = 0;
1792 }
1793
/*
 * Run a command and sleep until it completes.  The ccb's own cookie
 * doubles as the "in flight" marker: mfii_exec_done() NULLs it and
 * signals the ccb's condvar.  Must be called from thread context.
 */
static int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = ccb;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	return (0);
}
1814
/*
 * Completion callback for mfii_exec(): clear the in-flight marker
 * under the ccb mutex and wake the sleeping caller.
 */
static void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&ccb->ccb_mtx);
	ccb->ccb_cookie = NULL;
	cv_signal(&ccb->ccb_cv);
	mutex_exit(&ccb->ccb_mtx);
}
1823
1824 static int
1825 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1826 void *buf, size_t len, mfii_direction_t dir, bool poll)
1827 {
1828 struct mfii_ccb *ccb;
1829 int rv;
1830
1831 KASSERT(mutex_owned(&sc->sc_lock));
1832 if (!sc->sc_running)
1833 return EAGAIN;
1834
1835 ccb = mfii_get_ccb(sc);
1836 if (ccb == NULL)
1837 return (ENOMEM);
1838
1839 mfii_scrub_ccb(ccb);
1840 rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, dir, poll);
1841 mfii_put_ccb(sc, ccb);
1842
1843 return (rv);
1844 }
1845
/*
 * Build and execute an MFI DCMD management command on a caller-supplied
 * ccb: construct a legacy MFI frame in the ccb's MFI area, wrap it in
 * an MPII passthru request whose chain SGE points at that frame, then
 * run it polled or via mfii_exec().  Returns 0 on MFI_STAT_OK, ENOMEM
 * on DMA-load failure, EIO otherwise.
 */
static int
mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
    const union mfi_mbox *mbox, void *buf, size_t len, mfii_direction_t dir,
    bool poll)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	int rv = EIO;

	/* can't sleep before interrupts/scheduling are up */
	if (cold)
		poll = true;

	ccb->ccb_data = buf;
	ccb->ccb_len = len;
	ccb->ccb_direction = dir;
	switch (dir) {
	case MFII_DATA_IN:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case MFII_DATA_OUT:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		break;
	case MFII_DATA_NONE:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
		break;
	}

	/* map the data buffer into the MFI frame's 32-bit SGL */
	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl, poll) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap32->dm_nsegs;
	KASSERT(!ccb->ccb_dma64);

	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	/* MPII wrapper: one chain SGE pointing at the MFI frame */
	io->function = MFII_FUNCTION_PASSTHRU_IO;
	io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
	io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;

	sge->sg_addr = htole64(ccb->ccb_mfi_dva);
	sge->sg_len = htole32(MFI_FRAME_SIZE);
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	/* NOTE(review): le16toh vs htole16 — no-op on LE, verify on BE */
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	if (poll) {
		ccb->ccb_done = mfii_empty_done;
		mfii_poll(sc, ccb);
	} else
		mfii_exec(sc, ccb);

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;
	}

done:
	return (rv);
}
1916
/*
 * No-op completion handler for polled commands; the caller reads the
 * status straight from the MFI frame header afterwards.
 */
static void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
}
1922
/*
 * Load the ccb's data buffer into its 32-bit DMA map and fill the
 * legacy MFI 32-bit SGL at "sglp" with the resulting segments.
 * "nosleep" selects BUS_DMA_NOWAIT.  Returns 0 on success (or when
 * there is no data), 1 on load failure.
 */
static int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap32;
	int error;
	int i;

	KASSERT(!ccb->ccb_dma64);
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1955
1956 static void
1957 mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
1958 {
1959
1960 mfii_start_common(sc, ccb,
1961 ((sc->sc_iop_flag & MFII_IOP_DESC_32BIT) != 0) ? true : false);
1962 }
1963
/*
 * Post a request descriptor forcing the 64-bit register path; needed
 * for MFI_CMD_INIT even on IOPs that otherwise use 32-bit descriptors
 * (see the comment in mfii_mfa_poll()).
 */
static void
mfii_start64(struct mfii_softc *sc, struct mfii_ccb *ccb)
{

	mfii_start_common(sc, ccb, false);
}
1970
/*
 * Write the ccb's 64-bit request descriptor to the inbound queue port.
 * With do32 the low word goes to the single 32-bit port; otherwise the
 * full descriptor is written 64 bits at once on LP64, or as two 32-bit
 * writes under sc_post_mtx (the lock keeps the low/high halves of
 * concurrent posts from interleaving) on 32-bit hosts.
 */
static void
mfii_start_common(struct mfii_softc *sc, struct mfii_ccb *ccb, bool do32)
{
	uint32_t *r = (uint32_t *)&ccb->ccb_req;

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (do32)
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_ISQP, r[0]);
	else {
#if defined(__LP64__)
		uint64_t buf;

		buf = ((uint64_t)r[1] << 32) | r[0];
		bus_space_write_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, buf);
#else
		mutex_enter(&sc->sc_post_mtx);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
		bus_space_barrier(sc->sc_iot, sc->sc_ioh,
		    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);
		mutex_exit(&sc->sc_post_mtx);
#endif
	}
}
1998
/*
 * Common completion path: sync the request frame and any SGL/data DMA
 * mappings, unload the data map (64- or 32-bit, whichever was used),
 * then invoke the ccb's completion callback.
 */
static void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_dma64) {
		KASSERT(ccb->ccb_len > 0);
		bus_dmamap_sync(sc->sc_dmat64, ccb->ccb_dmamap64,
		    0, ccb->ccb_dmamap64->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat64, ccb->ccb_dmamap64);
	} else if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	ccb->ccb_done(sc, ccb);
}
2031
2032 static int
2033 mfii_initialise_firmware(struct mfii_softc *sc)
2034 {
2035 struct mpii_msg_iocinit_request *iiq;
2036 struct mfii_dmamem *m;
2037 struct mfii_ccb *ccb;
2038 struct mfi_init_frame *init;
2039 int rv;
2040
2041 m = mfii_dmamem_alloc(sc, sizeof(*iiq));
2042 if (m == NULL)
2043 return (1);
2044
2045 iiq = MFII_DMA_KVA(m);
2046 memset(iiq, 0, sizeof(*iiq));
2047
2048 iiq->function = MPII_FUNCTION_IOC_INIT;
2049 iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
2050
2051 iiq->msg_version_maj = 0x02;
2052 iiq->msg_version_min = 0x00;
2053 iiq->hdr_version_unit = 0x10;
2054 iiq->hdr_version_dev = 0x0;
2055
2056 iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
2057
2058 iiq->reply_descriptor_post_queue_depth =
2059 htole16(sc->sc_reply_postq_depth);
2060 iiq->reply_free_queue_depth = htole16(0);
2061
2062 iiq->sense_buffer_address_high = htole32(
2063 MFII_DMA_DVA(sc->sc_sense) >> 32);
2064
2065 iiq->reply_descriptor_post_queue_address_lo =
2066 htole32(MFII_DMA_DVA(sc->sc_reply_postq));
2067 iiq->reply_descriptor_post_queue_address_hi =
2068 htole32(MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
2069
2070 iiq->system_request_frame_base_address_lo =
2071 htole32(MFII_DMA_DVA(sc->sc_requests));
2072 iiq->system_request_frame_base_address_hi =
2073 htole32(MFII_DMA_DVA(sc->sc_requests) >> 32);
2074
2075 iiq->timestamp = htole64(time_uptime);
2076
2077 ccb = mfii_get_ccb(sc);
2078 if (ccb == NULL) {
2079 /* shouldn't ever run out of ccbs during attach */
2080 return (1);
2081 }
2082 mfii_scrub_ccb(ccb);
2083 init = ccb->ccb_request;
2084
2085 init->mif_header.mfh_cmd = MFI_CMD_INIT;
2086 init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
2087 init->mif_qinfo_new_addr_lo = htole32(MFII_DMA_DVA(m));
2088 init->mif_qinfo_new_addr_hi = htole32((uint64_t)MFII_DMA_DVA(m) >> 32);
2089
2090 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2091 0, MFII_DMA_LEN(sc->sc_reply_postq),
2092 BUS_DMASYNC_PREREAD);
2093
2094 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2095 0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
2096
2097 rv = mfii_mfa_poll(sc, ccb);
2098
2099 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2100 0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
2101
2102 mfii_put_ccb(sc, ccb);
2103 mfii_dmamem_free(sc, m);
2104
2105 return (rv);
2106 }
2107
/*
 * Check whether a pending interrupt belongs to this controller.
 * Reads the outbound status register; bit 0 set means a doorbell-style
 * interrupt that must be acknowledged by writing the status back.
 * Returns non-zero when the interrupt is ours.
 */
static int
mfii_my_intr(struct mfii_softc *sc)
{
	u_int32_t status;

	status = mfii_read(sc, MFI_OSTS);

	DNPRINTF(MFII_D_INTR, "%s: intr status 0x%x\n", DEVNAME(sc), status);
	if (ISSET(status, 0x1)) {
		/* ack by writing the status bits back */
		mfii_write(sc, MFI_OSTS, status);
		return (1);
	}

	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
}
2123
/*
 * PCI interrupt handler: claim the interrupt if it is ours and drain
 * the reply post queue.  Returns 1 when handled, 0 otherwise.
 */
static int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc) == 0)
		return (0);

	mfii_postq(sc);
	return (1);
}
2136
/*
 * Drain the reply post queue: collect all completed ccbs onto a local
 * list under sc_reply_postq_mtx, reset each consumed descriptor to the
 * all-ones "unused" pattern, update the reply post index register, and
 * only then run the completions with the mutex dropped.
 */
static void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mutex_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		DNPRINTF(MFII_D_INTR,
		    "%s: mfii_postq index %d flags 0x%x data 0x%x\n",
		    DEVNAME(sc), sc->sc_reply_postq_index, rdp->reply_flags,
		    rdp->data == 0xffffffff);
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1-based; 0 marks an unused descriptor */
		ccb = &sc->sc_ccb[le16toh(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* tell the firmware how far we have consumed */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mutex_exit(&sc->sc_reply_postq_mtx);

	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2192
2193 static void
2194 mfii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
2195 void *arg)
2196 {
2197 struct scsipi_periph *periph;
2198 struct scsipi_xfer *xs;
2199 struct scsipi_adapter *adapt = chan->chan_adapter;
2200 struct mfii_softc *sc = device_private(adapt->adapt_dev);
2201 struct mfii_ccb *ccb;
2202 int timeout;
2203 int target;
2204
2205 switch (req) {
2206 case ADAPTER_REQ_GROW_RESOURCES:
2207 /* Not supported. */
2208 return;
2209 case ADAPTER_REQ_SET_XFER_MODE:
2210 {
2211 struct scsipi_xfer_mode *xm = arg;
2212 xm->xm_mode = PERIPH_CAP_TQING;
2213 xm->xm_period = 0;
2214 xm->xm_offset = 0;
2215 scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
2216 return;
2217 }
2218 case ADAPTER_REQ_RUN_XFER:
2219 break;
2220 }
2221
2222 xs = arg;
2223 periph = xs->xs_periph;
2224 target = periph->periph_target;
2225
2226 if (target >= MFII_MAX_LD_EXT || !sc->sc_ld[target].ld_present ||
2227 periph->periph_lun != 0) {
2228 xs->error = XS_SELTIMEOUT;
2229 scsipi_done(xs);
2230 return;
2231 }
2232
2233 if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
2234 xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
2235 /* the cache is stable storage, don't flush */
2236 xs->error = XS_NOERROR;
2237 xs->status = SCSI_OK;
2238 xs->resid = 0;
2239 scsipi_done(xs);
2240 return;
2241 }
2242
2243 ccb = mfii_get_ccb(sc);
2244 if (ccb == NULL) {
2245 xs->error = XS_RESOURCE_SHORTAGE;
2246 scsipi_done(xs);
2247 return;
2248 }
2249 mfii_scrub_ccb(ccb);
2250 ccb->ccb_cookie = xs;
2251 ccb->ccb_done = mfii_scsi_cmd_done;
2252 ccb->ccb_data = xs->data;
2253 ccb->ccb_len = xs->datalen;
2254
2255 timeout = mstohz(xs->timeout);
2256 if (timeout == 0)
2257 timeout = 1;
2258 callout_reset(&xs->xs_callout, timeout, mfii_scsi_cmd_tmo, ccb);
2259
2260 switch (xs->cmd->opcode) {
2261 case SCSI_READ_6_COMMAND:
2262 case READ_10:
2263 case READ_12:
2264 case READ_16:
2265 case SCSI_WRITE_6_COMMAND:
2266 case WRITE_10:
2267 case WRITE_12:
2268 case WRITE_16:
2269 if (mfii_scsi_cmd_io(sc, ccb, xs) != 0)
2270 goto stuffup;
2271 break;
2272
2273 default:
2274 if (mfii_scsi_cmd_cdb(sc, ccb, xs) != 0)
2275 goto stuffup;
2276 break;
2277 }
2278
2279 xs->error = XS_NOERROR;
2280 xs->resid = 0;
2281
2282 DNPRINTF(MFII_D_CMD, "%s: start io %d cmd %d\n", DEVNAME(sc), target,
2283 xs->cmd->opcode);
2284
2285 if (xs->xs_control & XS_CTL_POLL) {
2286 if (mfii_poll(sc, ccb) != 0)
2287 goto stuffup;
2288 return;
2289 }
2290
2291 mfii_start(sc, ccb);
2292
2293 return;
2294
2295 stuffup:
2296 xs->error = XS_DRIVER_STUFFUP;
2297 scsipi_done(xs);
2298 mfii_put_ccb(sc, ccb);
2299 }
2300
/*
 * Completion callback for logical-disk SCSI commands: translate the
 * MFI RAID-context status into a scsipi error and finish the xfer.
 */
static void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context immediately follows the SCSI IO request frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	/*
	 * If the timeout callout already fired, mfii_scsi_cmd_tmo() has
	 * queued this ccb for abort; let the abort path complete it.
	 */
	if (callout_stop(&xs->xs_callout) != 0)
		return;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* command failed with sense data; copy it out for scsipi */
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	scsipi_done(xs);
	mfii_put_ccb(sc, ccb);
}
2334
/*
 * Build a fast-path LD read/write request for the given xfer.
 * Returns 0 on success, non-zero if the data buffer could not be
 * mapped for DMA.
 */
static int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context immediately follows the SCSI IO request frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL starts right after the request frame plus RAID context */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	/* map the data buffer and fill in the SGL following the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	/* where the SGE count lives depends on the controller generation */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2394
/*
 * Build a generic (non read/write) CDB request for a logical disk.
 * Returns 0 on success, non-zero if the data buffer could not be
 * mapped for DMA.
 */
static int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context immediately follows the SCSI IO request frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL starts right after the request frame plus RAID context */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(periph->periph_lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	/* map the data buffer and fill in the SGL following the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2442
#if 0
/*
 * Disabled physical-disk (pass-through/JBOD) command path inherited
 * from OpenBSD.  Not yet adapted to the NetBSD scsipi framework: it
 * still uses struct scsi_link, xs->io and scsi_done().  Kept for
 * reference until PD support is ported.
 */
void
mfii_pd_scsi_cmd(struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	// XXX timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->xs_control, XS_CTL_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	// XXX timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}

/*
 * Probe callback: only pass-through (MFI_PD_SYSTEM) disks at LUN 0
 * are exposed.
 */
int
mfii_pd_scsi_probe(struct scsi_link *link)
{
	struct mfii_softc *sc = link->adapter_softc;
	struct mfi_pd_details mpd;
	union mfi_mbox mbox;
	int rv;

	if (link->lun > 0)
		return (0);

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = htole16(link->target);

	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
	    MFII_DATA_IN, true);
	if (rv != 0)
		return (EIO);

	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
		return (ENXIO);

	return (0);
}

/*
 * Build a pass-through request addressed by firmware device handle
 * rather than LD target id.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
#endif
2563
/*
 * Load the ccb's data buffer into its 64-bit DMA map and build the
 * IEEE SGE list starting at "sglp" inside the request frame.  When
 * the transfer needs more SGEs than fit in the frame, the tail of
 * the list spills into the ccb's preallocated off-frame SGL, linked
 * in with a chain SGE.  Returns 0 on success, 1 if the dmamap load
 * failed.
 */
static int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;	/* chain element, if one is needed */
	bus_dmamap_t dmap = ccb->ccb_dmamap64;
	u_int space;
	int i;

	int error;

	/* nothing to map */
	if (ccb->ccb_len == 0)
		return (0);

	ccb->ccb_dma64 = true;
	error = bus_dmamap_load(sc->sc_dmat64, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* number of SGEs that still fit in the request frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain SGE */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		/* chain SGE points at the off-frame SGL */
		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is expressed in 16-byte units */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* switch to the off-frame SGL when we reach the chain slot */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the final SGE as end-of-list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat64, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* flush the off-frame SGL too, if we used one */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2631
2632 static void
2633 mfii_scsi_cmd_tmo(void *p)
2634 {
2635 struct mfii_ccb *ccb = p;
2636 struct mfii_softc *sc = ccb->ccb_sc;
2637 bool start_abort;
2638
2639 printf("%s: cmd timeout ccb %p\n", DEVNAME(sc), p);
2640
2641 mutex_enter(&sc->sc_abort_mtx);
2642 start_abort = (SIMPLEQ_FIRST(&sc->sc_abort_list) == 0);
2643 SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2644 if (start_abort)
2645 workqueue_enqueue(sc->sc_abort_wq, &sc->sc_abort_work, NULL);
2646 mutex_exit(&sc->sc_abort_mtx);
2647 }
2648
/*
 * Workqueue function: issue a task-management abort for every ccb that
 * mfii_scsi_cmd_tmo() queued on the abort list.
 */
static void
mfii_abort_task(struct work *wk, void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* steal the whole list so the mutex is not held while aborting */
	mutex_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mutex_exit(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsipi_xfer *xs = ccb->ccb_cookie;
		struct scsipi_periph *periph = xs->xs_periph;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		if (!sc->sc_ld[periph->periph_target].ld_present) {
			/* device is gone */
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			mfii_put_ccb(sc, ccb);
			continue;
		}

		/*
		 * NOTE(review): mfii_get_ccb() can return NULL (free list
		 * empty or adapter not running), which would be
		 * dereferenced below -- confirm whether a ccb should be
		 * reserved for the abort path.
		 */
		accb = mfii_get_ccb(sc);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, periph->periph_target, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2688
2689 static void
2690 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2691 uint16_t smid, uint8_t type, uint32_t flags)
2692 {
2693 struct mfii_task_mgmt *msg;
2694 struct mpii_msg_scsi_task_request *req;
2695
2696 msg = accb->ccb_request;
2697 req = &msg->mpii_request;
2698 req->dev_handle = dev_handle;
2699 req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2700 req->task_type = type;
2701 req->task_mid = htole16( smid);
2702 msg->flags = flags;
2703
2704 accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2705 accb->ccb_req.smid = le16toh(accb->ccb_smid);
2706 }
2707
2708 static void
2709 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2710 {
2711 struct mfii_ccb *ccb = accb->ccb_cookie;
2712 struct scsipi_xfer *xs = ccb->ccb_cookie;
2713
2714 /* XXX check accb completion? */
2715
2716 mfii_put_ccb(sc, accb);
2717 printf("%s: cmd aborted ccb %p\n", DEVNAME(sc), ccb);
2718
2719 xs->error = XS_TIMEOUT;
2720 scsipi_done(xs);
2721 mfii_put_ccb(sc, ccb);
2722 }
2723
2724 static struct mfii_ccb *
2725 mfii_get_ccb(struct mfii_softc *sc)
2726 {
2727 struct mfii_ccb *ccb;
2728
2729 mutex_enter(&sc->sc_ccb_mtx);
2730 if (!sc->sc_running) {
2731 ccb = NULL;
2732 } else {
2733 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2734 if (ccb != NULL)
2735 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2736 }
2737 mutex_exit(&sc->sc_ccb_mtx);
2738 return (ccb);
2739 }
2740
2741 static void
2742 mfii_scrub_ccb(struct mfii_ccb *ccb)
2743 {
2744 ccb->ccb_cookie = NULL;
2745 ccb->ccb_done = NULL;
2746 ccb->ccb_flags = 0;
2747 ccb->ccb_data = NULL;
2748 ccb->ccb_direction = MFII_DATA_NONE;
2749 ccb->ccb_dma64 = false;
2750 ccb->ccb_len = 0;
2751 ccb->ccb_sgl_len = 0;
2752 memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2753 memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2754 memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2755 }
2756
2757 static void
2758 mfii_put_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb)
2759 {
2760 mutex_enter(&sc->sc_ccb_mtx);
2761 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
2762 mutex_exit(&sc->sc_ccb_mtx);
2763 }
2764
2765 static int
2766 mfii_init_ccb(struct mfii_softc *sc)
2767 {
2768 struct mfii_ccb *ccb;
2769 u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2770 u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2771 u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2772 u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2773 u_int i;
2774 int error;
2775
2776 sc->sc_ccb = malloc(sc->sc_max_cmds * sizeof(struct mfii_ccb),
2777 M_DEVBUF, M_WAITOK|M_ZERO);
2778
2779 for (i = 0; i < sc->sc_max_cmds; i++) {
2780 ccb = &sc->sc_ccb[i];
2781 ccb->ccb_sc = sc;
2782
2783 /* create a dma map for transfer */
2784 error = bus_dmamap_create(sc->sc_dmat,
2785 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2786 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap32);
2787 if (error) {
2788 printf("%s: cannot create ccb dmamap32 (%d)\n",
2789 DEVNAME(sc), error);
2790 goto destroy;
2791 }
2792 error = bus_dmamap_create(sc->sc_dmat64,
2793 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2794 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap64);
2795 if (error) {
2796 printf("%s: cannot create ccb dmamap64 (%d)\n",
2797 DEVNAME(sc), error);
2798 goto destroy32;
2799 }
2800
2801 /* select i + 1'th request. 0 is reserved for events */
2802 ccb->ccb_smid = i + 1;
2803 ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2804 ccb->ccb_request = request + ccb->ccb_request_offset;
2805 ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2806 ccb->ccb_request_offset;
2807
2808 /* select i'th MFI command frame */
2809 ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2810 ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2811 ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2812 ccb->ccb_mfi_offset;
2813
2814 /* select i'th sense */
2815 ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2816 ccb->ccb_sense = (struct mfi_sense *)(sense +
2817 ccb->ccb_sense_offset);
2818 ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2819 ccb->ccb_sense_offset;
2820
2821 /* select i'th sgl */
2822 ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2823 sc->sc_max_sgl * i;
2824 ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2825 ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2826 ccb->ccb_sgl_offset;
2827
2828 mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2829 cv_init(&ccb->ccb_cv, "mfiiexec");
2830
2831 /* add ccb to queue */
2832 mfii_put_ccb(sc, ccb);
2833 }
2834
2835 return (0);
2836
2837 destroy32:
2838 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2839 destroy:
2840 /* free dma maps and ccb memory */
2841 while ((ccb = mfii_get_ccb(sc)) != NULL) {
2842 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2843 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap64);
2844 }
2845
2846 free(sc->sc_ccb, M_DEVBUF);
2847
2848 return (1);
2849 }
2850
2851 #if NBIO > 0
2852 static int
2853 mfii_ioctl(device_t dev, u_long cmd, void *addr)
2854 {
2855 struct mfii_softc *sc = device_private(dev);
2856 int error = 0;
2857
2858 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));
2859
2860 mutex_enter(&sc->sc_lock);
2861
2862 switch (cmd) {
2863 case BIOCINQ:
2864 DNPRINTF(MFII_D_IOCTL, "inq\n");
2865 error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
2866 break;
2867
2868 case BIOCVOL:
2869 DNPRINTF(MFII_D_IOCTL, "vol\n");
2870 error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
2871 break;
2872
2873 case BIOCDISK:
2874 DNPRINTF(MFII_D_IOCTL, "disk\n");
2875 error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
2876 break;
2877
2878 case BIOCALARM:
2879 DNPRINTF(MFII_D_IOCTL, "alarm\n");
2880 error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
2881 break;
2882
2883 case BIOCBLINK:
2884 DNPRINTF(MFII_D_IOCTL, "blink\n");
2885 error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
2886 break;
2887
2888 case BIOCSETSTATE:
2889 DNPRINTF(MFII_D_IOCTL, "setstate\n");
2890 error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
2891 break;
2892
2893 #if 0
2894 case BIOCPATROL:
2895 DNPRINTF(MFII_D_IOCTL, "patrol\n");
2896 error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
2897 break;
2898 #endif
2899
2900 default:
2901 DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
2902 error = ENOTTY;
2903 }
2904
2905 mutex_exit(&sc->sc_lock);
2906
2907 return (error);
2908 }
2909
/*
 * Refresh the cached controller state used by the bio(4) ioctl
 * handlers: adapter info, the full RAID configuration (sc_cfg), the
 * logical-disk list and per-LD details, and the count of configured
 * physical disks (sc_no_pd).  Called from the ioctl path, which runs
 * under sc_lock.  Returns 0 on success, EINVAL on any failure.
 */
static int
mfii_bio_getitall(struct mfii_softc *sc)
{
	int i, d, rv = EINVAL;
	size_t size;
	union mfi_mbox mbox;
	struct mfi_conf *cfg = NULL;
	struct mfi_ld_details *ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	size = cfg->mfc_size;
	free(cfg, M_DEVBUF);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF);
	sc->sc_cfg = cfg;

	/* get all ld info */
	memset(&mbox, 0, sizeof(mbox));
	/* mbox byte 0 selects the extended (256 volume) LD list format */
	if (sc->sc_max256vd)
		mbox.b[0] = 1;
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, &mbox, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		/* LD count changed since last refresh; reallocate cache */
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox,
		    &sc->sc_ld_details[i], size, MFII_DATA_IN, false))
			goto done;

		/* disks per LD = drives per span * number of spans */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
2993
2994 static int
2995 mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
2996 {
2997 int rv = EINVAL;
2998 struct mfi_conf *cfg = NULL;
2999
3000 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));
3001
3002 if (mfii_bio_getitall(sc)) {
3003 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3004 DEVNAME(sc));
3005 goto done;
3006 }
3007
3008 /* count unused disks as volumes */
3009 if (sc->sc_cfg == NULL)
3010 goto done;
3011 cfg = sc->sc_cfg;
3012
3013 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
3014 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
3015 #if notyet
3016 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
3017 (bi->bi_nodisk - sc->sc_no_pd);
3018 #endif
3019 /* tell bio who we are */
3020 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3021
3022 rv = 0;
3023 done:
3024 return (rv);
3025 }
3026
/*
 * BIOCVOL handler: report name, status, RAID level, geometry and size
 * of logical volume bv->bv_volid.  Volume ids past the LD list are
 * mapped onto hot spares / unused disks via mfii_bio_hs().
 */
static int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int i, per, rv = EINVAL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	strlcpy(bv->bv_dev, sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_name,
	    sizeof(bv->bv_dev));

	/* map the MFI logical-disk state to a bio volume status */
	switch (sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* consistency check running: report scrub plus progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* background initialisation is reported as a scrub, too */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

#if 0
	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;
#endif

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	/* spanned volumes are reported as RAID x0 (10, 50, 60) */
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk =
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */
	bv->bv_stripe_size =
	    (512 << sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_stripe_size)
	    / 1024; /* in KB */

	rv = 0;
done:
	return (rv);
}
3125
/*
 * BIOCDISK handler: report enclosure/slot, status, size and vendor of
 * the disk at (bd->bd_volid, bd->bd_diskid).  Volume ids past the LD
 * count are mapped onto hot spares via mfii_bio_hs(); a missing array
 * member is reported as failed, with a best-effort search for an
 * unconfigured disk the firmware could rebuild onto.
 */
static int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf *cfg;
	struct mfi_array *ar;
	struct mfi_ld_cfg *ld;
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	struct scsipi_inquiry_data *inqbuf;
	char vend[8+16+4+1], *vendp;
	int i, rv = EINVAL;
	int arr, vol, disk, span;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	/* 0xffff marks an empty array slot */
	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    MFII_DATA_IN, false))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false))
				continue;

			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no candidate found: report the missing disk as-is */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    MFII_DATA_IN, false)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* copy the full vendor+product+revision INQUIRY text (8+16+4) */
	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

#if 0
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}
#endif

	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);

	return (rv);
}
3279
3280 static int
3281 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3282 {
3283 uint32_t opc;
3284 int rv = 0;
3285 int8_t ret;
3286 mfii_direction_t dir = MFII_DATA_NONE;
3287
3288 switch (ba->ba_opcode) {
3289 case BIOC_SADISABLE:
3290 opc = MR_DCMD_SPEAKER_DISABLE;
3291 break;
3292
3293 case BIOC_SAENABLE:
3294 opc = MR_DCMD_SPEAKER_ENABLE;
3295 break;
3296
3297 case BIOC_SASILENCE:
3298 opc = MR_DCMD_SPEAKER_SILENCE;
3299 break;
3300
3301 case BIOC_GASTATUS:
3302 opc = MR_DCMD_SPEAKER_GET;
3303 dir = MFII_DATA_IN;
3304 break;
3305
3306 case BIOC_SATEST:
3307 opc = MR_DCMD_SPEAKER_TEST;
3308 break;
3309
3310 default:
3311 DNPRINTF(MFII_D_IOCTL,
3312 "%s: mfii_ioctl_alarm biocalarm invalid opcode %x\n",
3313 DEVNAME(sc), ba->ba_opcode);
3314 return (EINVAL);
3315 }
3316
3317 if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), dir, false))
3318 rv = EINVAL;
3319 else
3320 if (ba->ba_opcode == BIOC_GASTATUS)
3321 ba->ba_status = ret;
3322 else
3323 ba->ba_status = 0;
3324
3325 return (rv);
3326 }
3327
3328 static int
3329 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3330 {
3331 int i, found, rv = EINVAL;
3332 union mfi_mbox mbox;
3333 uint32_t cmd;
3334 struct mfi_pd_list *pd;
3335
3336 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3337 bb->bb_status);
3338
3339 /* channel 0 means not in an enclosure so can't be blinked */
3340 if (bb->bb_channel == 0)
3341 return (EINVAL);
3342
3343 pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3344
3345 if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd),
3346 MFII_DATA_IN, false))
3347 goto done;
3348
3349 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3350 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3351 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3352 found = 1;
3353 break;
3354 }
3355
3356 if (!found)
3357 goto done;
3358
3359 memset(&mbox, 0, sizeof(mbox));
3360 mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3361
3362 switch (bb->bb_status) {
3363 case BIOC_SBUNBLINK:
3364 cmd = MR_DCMD_PD_UNBLINK;
3365 break;
3366
3367 case BIOC_SBBLINK:
3368 cmd = MR_DCMD_PD_BLINK;
3369 break;
3370
3371 case BIOC_SBALARM:
3372 default:
3373 DNPRINTF(MFII_D_IOCTL,
3374 "%s: mfii_ioctl_blink biocblink invalid opcode %x\n",
3375 DEVNAME(sc), bb->bb_status);
3376 goto done;
3377 }
3378
3379
3380 if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, MFII_DATA_NONE, false))
3381 goto done;
3382
3383 rv = 0;
3384 done:
3385 free(pd, M_DEVBUF);
3386 return (rv);
3387 }
3388
/*
 * mfii_makegood: bring physical disk pd_id into the UNCONFIG_GOOD
 * state, clearing a BAD state and any foreign configuration, so it
 * can subsequently be turned into a hotspare.  The disk details are
 * re-read after each step.  Returns 0 on success, an mfii_mgmt()
 * error, or ENXIO if the disk did not reach the expected state.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details *pd;
	union mfi_mbox mbox;
	int rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* fetch the current firmware state of the disk */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	/* if the disk is marked BAD, force it back to UNCONFIG_GOOD */
	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
		    MFII_DATA_NONE, false);
		if (rv != 0)
			goto done;
	}

	/* re-read the details to pick up the state change */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	/*
	 * If the disk carries a foreign configuration, scan for foreign
	 * configs and clear them.
	 * NOTE(review): FOREIGN_CLEAR takes no disk argument here, so it
	 * appears to clear every foreign config on the controller, not
	 * just this disk's -- confirm this is intended.
	 */
	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL,
		    fsi, sizeof(*fsi), MFII_DATA_IN, false);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL,
			    NULL, 0, MFII_DATA_NONE, false);
			if (rv != 0)
				goto done;
		}
	}

	/* final check: the disk must now be UNCONFIG_GOOD and not foreign */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3455
3456 static int
3457 mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
3458 {
3459 struct mfi_hotspare *hs;
3460 struct mfi_pd_details *pd;
3461 union mfi_mbox mbox;
3462 size_t size;
3463 int rv = EINVAL;
3464
3465 /* we really could skip and expect that inq took care of it */
3466 if (mfii_bio_getitall(sc)) {
3467 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3468 DEVNAME(sc));
3469 return (rv);
3470 }
3471 size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;
3472
3473 hs = malloc(size, M_DEVBUF, M_WAITOK);
3474 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3475
3476 memset(&mbox, 0, sizeof mbox);
3477 mbox.s[0] = pd_id;
3478 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3479 MFII_DATA_IN, false);
3480 if (rv != 0)
3481 goto done;
3482
3483 memset(hs, 0, size);
3484 hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
3485 hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
3486 rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size,
3487 MFII_DATA_OUT, false);
3488
3489 done:
3490 free(hs, M_DEVBUF);
3491 free(pd, M_DEVBUF);
3492
3493 return (rv);
3494 }
3495
/*
 * mfii_ioctl_setstate: BIOCSETSTATE handler.  Change the firmware
 * state of the physical disk identified by enclosure index
 * (bs_channel) and slot (bs_target).  Returns 0 on success, EINVAL
 * if the disk cannot be found or the request is invalid, or an
 * mfii_mgmt() error.
 */
static int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	/* fetch the list of physical disks known to the controller */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
	    MFII_DATA_IN, false))
		goto done;

	/* look up the target disk by enclosure index and slot */
	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	/* the SET_STATE command needs the disk's current sequence number */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		/*
		 * A disk that is not OFFLINE must first be brought to
		 * UNCONFIG_GOOD and made a hotspare before a rebuild
		 * can be requested.
		 */
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* re-read state/sequence changed by the above */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
	    MFII_DATA_NONE, false);
done:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);
	return (rv);
}
3590
#if 0
/*
 * mfii_ioctl_patrol: BIOCPATROL handler (currently compiled out).
 * Starts or stops patrol reads, switches the patrol-read mode between
 * manual/disabled/auto, or reports the current mode and state back to
 * userland.  Returns 0 on success, EINVAL otherwise.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t opc;
	int rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status status;
	uint32_t time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		/*
		 * NOTE(review): no buffer is transferred here, yet the
		 * direction is MFII_DATA_IN with a NULL pointer --
		 * confirm whether MFII_DATA_NONE was intended.
		 */
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, MFII_DATA_IN, false))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* read-modify-write the patrol-read properties */
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			/* bp_autoival: -1 = continuous, >0 = interval */
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			/* bp_autonext: seconds from now to next run */
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					prop.next_exec =
					    time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_OUT, false))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* translate the firmware op mode to a BIOC mode */
		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		/* translate the firmware patrol state to a BIOC state */
		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL,
		    "%s: mfii_ioctl_patrol biocpatrol invalid opcode %x\n",
		    DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
#endif
3729
3730 static int
3731 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3732 {
3733 struct mfi_conf *cfg;
3734 struct mfi_hotspare *hs;
3735 struct mfi_pd_details *pd;
3736 struct bioc_disk *sdhs;
3737 struct bioc_vol *vdhs;
3738 struct scsipi_inquiry_data *inqbuf;
3739 char vend[8+16+4+1], *vendp;
3740 int i, rv = EINVAL;
3741 uint32_t size;
3742 union mfi_mbox mbox;
3743
3744 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3745
3746 if (!bio_hs)
3747 return (EINVAL);
3748
3749 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3750
3751 /* send single element command to retrieve size for full structure */
3752 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3753 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
3754 MFII_DATA_IN, false))
3755 goto freeme;
3756
3757 size = cfg->mfc_size;
3758 free(cfg, M_DEVBUF);
3759
3760 /* memory for read config */
3761 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3762 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
3763 MFII_DATA_IN, false))
3764 goto freeme;
3765
3766 /* calculate offset to hs structure */
3767 hs = (struct mfi_hotspare *)(
3768 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3769 cfg->mfc_array_size * cfg->mfc_no_array +
3770 cfg->mfc_ld_size * cfg->mfc_no_ld);
3771
3772 if (volid < cfg->mfc_no_ld)
3773 goto freeme; /* not a hotspare */
3774
3775 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3776 goto freeme; /* not a hotspare */
3777
3778 /* offset into hotspare structure */
3779 i = volid - cfg->mfc_no_ld;
3780
3781 DNPRINTF(MFII_D_IOCTL,
3782 "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3783 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3784 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3785
3786 /* get pd fields */
3787 memset(&mbox, 0, sizeof(mbox));
3788 mbox.s[0] = hs[i].mhs_pd.mfp_id;
3789 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3790 MFII_DATA_IN, false)) {
3791 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3792 DEVNAME(sc));
3793 goto freeme;
3794 }
3795
3796 switch (type) {
3797 case MFI_MGMT_VD:
3798 vdhs = bio_hs;
3799 vdhs->bv_status = BIOC_SVONLINE;
3800 vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3801 vdhs->bv_level = -1; /* hotspare */
3802 vdhs->bv_nodisk = 1;
3803 break;
3804
3805 case MFI_MGMT_SD:
3806 sdhs = bio_hs;
3807 sdhs->bd_status = BIOC_SDHOTSPARE;
3808 sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3809 sdhs->bd_channel = pd->mpd_enc_idx;
3810 sdhs->bd_target = pd->mpd_enc_slot;
3811 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
3812 vendp = inqbuf->vendor;
3813 memcpy(vend, vendp, sizeof vend - 1);
3814 vend[sizeof vend - 1] = '\0';
3815 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3816 break;
3817
3818 default:
3819 goto freeme;
3820 }
3821
3822 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3823 rv = 0;
3824 freeme:
3825 free(pd, M_DEVBUF);
3826 free(cfg, M_DEVBUF);
3827
3828 return (rv);
3829 }
3830
3831 #endif /* NBIO > 0 */
3832
3833 #define MFI_BBU_SENSORS 4
3834
3835 static void
3836 mfii_bbu(struct mfii_softc *sc, envsys_data_t *edata)
3837 {
3838 struct mfi_bbu_status bbu;
3839 u_int32_t status;
3840 u_int32_t mask;
3841 u_int32_t soh_bad;
3842 int rv;
3843
3844 mutex_enter(&sc->sc_lock);
3845 rv = mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
3846 sizeof(bbu), MFII_DATA_IN, false);
3847 mutex_exit(&sc->sc_lock);
3848 if (rv != 0) {
3849 edata->state = ENVSYS_SINVALID;
3850 edata->value_cur = 0;
3851 return;
3852 }
3853
3854 switch (bbu.battery_type) {
3855 case MFI_BBU_TYPE_IBBU:
3856 case MFI_BBU_TYPE_IBBU09:
3857 case MFI_BBU_TYPE_CVPM02:
3858 mask = MFI_BBU_STATE_BAD_IBBU;
3859 soh_bad = 0;
3860 break;
3861 case MFI_BBU_TYPE_BBU:
3862 mask = MFI_BBU_STATE_BAD_BBU;
3863 soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3864 break;
3865
3866 case MFI_BBU_TYPE_NONE:
3867 default:
3868 edata->state = ENVSYS_SCRITICAL;
3869 edata->value_cur = 0;
3870 return;
3871 }
3872
3873 status = le32toh(bbu.fw_status) & mask;
3874 switch (edata->sensor) {
3875 case 0:
3876 edata->value_cur = (status || soh_bad) ? 0 : 1;
3877 edata->state =
3878 edata->value_cur ? ENVSYS_SVALID : ENVSYS_SCRITICAL;
3879 return;
3880 case 1:
3881 edata->value_cur = le16toh(bbu.voltage) * 1000;
3882 edata->state = ENVSYS_SVALID;
3883 return;
3884 case 2:
3885 edata->value_cur = (int16_t)le16toh(bbu.current) * 1000;
3886 edata->state = ENVSYS_SVALID;
3887 return;
3888 case 3:
3889 edata->value_cur =
3890 le16toh(bbu.temperature) * 1000000 + 273150000;
3891 edata->state = ENVSYS_SVALID;
3892 return;
3893 }
3894 }
3895
3896 static void
3897 mfii_refresh_ld_sensor(struct mfii_softc *sc, envsys_data_t *edata)
3898 {
3899 struct bioc_vol bv;
3900 int error;
3901
3902 memset(&bv, 0, sizeof(bv));
3903 bv.bv_volid = edata->sensor - MFI_BBU_SENSORS;
3904 mutex_enter(&sc->sc_lock);
3905 error = mfii_ioctl_vol(sc, &bv);
3906 mutex_exit(&sc->sc_lock);
3907 if (error)
3908 bv.bv_status = BIOC_SVINVALID;
3909 bio_vol_to_envsys(edata, &bv);
3910 }
3911
3912 static void
3913 mfii_init_ld_sensor(struct mfii_softc *sc, envsys_data_t *sensor, int i)
3914 {
3915 sensor->units = ENVSYS_DRIVE;
3916 sensor->state = ENVSYS_SINVALID;
3917 sensor->value_cur = ENVSYS_DRIVE_EMPTY;
3918 /* Enable monitoring for drive state changes */
3919 sensor->flags |= ENVSYS_FMONSTCHANGED;
3920 snprintf(sensor->desc, sizeof(sensor->desc), "%s:%d", DEVNAME(sc), i);
3921 }
3922
3923 static void
3924 mfii_attach_sensor(struct mfii_softc *sc, envsys_data_t *s)
3925 {
3926 if (sysmon_envsys_sensor_attach(sc->sc_sme, s))
3927 aprint_error_dev(sc->sc_dev,
3928 "failed to attach sensor %s\n", s->desc);
3929 }
3930
3931 static int
3932 mfii_create_sensors(struct mfii_softc *sc)
3933 {
3934 int i, rv;
3935 const int nsensors = MFI_BBU_SENSORS + MFII_MAX_LD_EXT;
3936
3937 sc->sc_sme = sysmon_envsys_create();
3938 sc->sc_sensors = malloc(sizeof(envsys_data_t) * nsensors,
3939 M_DEVBUF, M_WAITOK | M_ZERO);
3940
3941 /* BBU */
3942 sc->sc_sensors[0].units = ENVSYS_INDICATOR;
3943 sc->sc_sensors[0].state = ENVSYS_SINVALID;
3944 sc->sc_sensors[0].value_cur = 0;
3945 sc->sc_sensors[1].units = ENVSYS_SVOLTS_DC;
3946 sc->sc_sensors[1].state = ENVSYS_SINVALID;
3947 sc->sc_sensors[1].value_cur = 0;
3948 sc->sc_sensors[2].units = ENVSYS_SAMPS;
3949 sc->sc_sensors[2].state = ENVSYS_SINVALID;
3950 sc->sc_sensors[2].value_cur = 0;
3951 sc->sc_sensors[3].units = ENVSYS_STEMP;
3952 sc->sc_sensors[3].state = ENVSYS_SINVALID;
3953 sc->sc_sensors[3].value_cur = 0;
3954
3955 if (ISSET(le32toh(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3956 sc->sc_bbuok = true;
3957 sc->sc_sensors[0].flags |= ENVSYS_FMONCRITICAL;
3958 snprintf(sc->sc_sensors[0].desc, sizeof(sc->sc_sensors[0].desc),
3959 "%s BBU state", DEVNAME(sc));
3960 snprintf(sc->sc_sensors[1].desc, sizeof(sc->sc_sensors[1].desc),
3961 "%s BBU voltage", DEVNAME(sc));
3962 snprintf(sc->sc_sensors[2].desc, sizeof(sc->sc_sensors[2].desc),
3963 "%s BBU current", DEVNAME(sc));
3964 snprintf(sc->sc_sensors[3].desc, sizeof(sc->sc_sensors[3].desc),
3965 "%s BBU temperature", DEVNAME(sc));
3966 for (i = 0; i < MFI_BBU_SENSORS; i++) {
3967 mfii_attach_sensor(sc, &sc->sc_sensors[i]);
3968 }
3969 }
3970
3971 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3972 mfii_init_ld_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS], i);
3973 mfii_attach_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS]);
3974 }
3975
3976 sc->sc_sme->sme_name = DEVNAME(sc);
3977 sc->sc_sme->sme_cookie = sc;
3978 sc->sc_sme->sme_refresh = mfii_refresh_sensor;
3979 rv = sysmon_envsys_register(sc->sc_sme);
3980 if (rv) {
3981 aprint_error_dev(sc->sc_dev,
3982 "unable to register with sysmon (rv = %d)\n", rv);
3983 }
3984 return rv;
3985
3986 }
3987
3988 static int
3989 mfii_destroy_sensors(struct mfii_softc *sc)
3990 {
3991 if (sc->sc_sme == NULL)
3992 return 0;
3993 sysmon_envsys_unregister(sc->sc_sme);
3994 sc->sc_sme = NULL;
3995 free(sc->sc_sensors, M_DEVBUF);
3996 return 0;
3997 }
3998
3999 static void
4000 mfii_refresh_sensor(struct sysmon_envsys *sme, envsys_data_t *edata)
4001 {
4002 struct mfii_softc *sc = sme->sme_cookie;
4003
4004 if (edata->sensor >= MFI_BBU_SENSORS + MFII_MAX_LD_EXT)
4005 return;
4006
4007 if (edata->sensor < MFI_BBU_SENSORS) {
4008 if (sc->sc_bbuok)
4009 mfii_bbu(sc, edata);
4010 } else {
4011 mfii_refresh_ld_sensor(sc, edata);
4012 }
4013 }
4014