mfii.c revision 1.4.4.3 1 /* $NetBSD: mfii.c,v 1.4.4.3 2022/09/29 18:26:42 martin Exp $ */
2 /* $OpenBSD: mfii.c,v 1.58 2018/08/14 05:22:21 jmatthew Exp $ */
3
4 /*
5 * Copyright (c) 2018 Manuel Bouyer <Manuel.Bouyer (at) lip6.fr>
6 * Copyright (c) 2012 David Gwynne <dlg (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include <sys/cdefs.h>
22 __KERNEL_RCSID(0, "$NetBSD: mfii.c,v 1.4.4.3 2022/09/29 18:26:42 martin Exp $");
23
24 #include "bio.h"
25
26 #include <sys/atomic.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/ioctl.h>
31 #include <sys/device.h>
32 #include <sys/kernel.h>
33 #include <sys/proc.h>
34 #include <sys/cpu.h>
35 #include <sys/conf.h>
36 #include <sys/kauth.h>
37 #include <sys/workqueue.h>
38 #include <sys/malloc.h>
39
40 #include <uvm/uvm_param.h>
41
42 #include <dev/pci/pcidevs.h>
43 #include <dev/pci/pcivar.h>
44
45 #include <sys/bus.h>
46
47 #include <dev/sysmon/sysmonvar.h>
48 #include <sys/envsys.h>
49
50 #include <dev/scsipi/scsipi_all.h>
51 #include <dev/scsipi/scsi_all.h>
52 #include <dev/scsipi/scsi_spc.h>
53 #include <dev/scsipi/scsipi_disk.h>
54 #include <dev/scsipi/scsi_disk.h>
55 #include <dev/scsipi/scsiconf.h>
56
57 #if NBIO > 0
58 #include <dev/biovar.h>
59 #endif /* NBIO > 0 */
60
61 #include <dev/ic/mfireg.h>
62 #include <dev/pci/mpiireg.h>
63
64 #define MFII_BAR 0x14
65 #define MFII_BAR_35 0x10
66 #define MFII_PCI_MEMSIZE 0x2000 /* 8k */
67
68 #define MFII_OSTS_INTR_VALID 0x00000009
69 #define MFII_RPI 0x6c /* reply post host index */
70 #define MFII_OSP2 0xb4 /* outbound scratch pad 2 */
71 #define MFII_OSP3 0xb8 /* outbound scratch pad 3 */
72
73 #define MFII_REQ_TYPE_SCSI MPII_REQ_DESCR_SCSI_IO
74 #define MFII_REQ_TYPE_LDIO (0x7 << 1)
75 #define MFII_REQ_TYPE_MFA (0x1 << 1)
76 #define MFII_REQ_TYPE_NO_LOCK (0x2 << 1)
77 #define MFII_REQ_TYPE_HI_PRI (0x6 << 1)
78
79 #define MFII_REQ_MFA(_a) htole64((_a) | MFII_REQ_TYPE_MFA)
80
81 #define MFII_FUNCTION_PASSTHRU_IO (0xf0)
82 #define MFII_FUNCTION_LDIO_REQUEST (0xf1)
83
84 #define MFII_MAX_CHAIN_UNIT 0x00400000
85 #define MFII_MAX_CHAIN_MASK 0x000003E0
86 #define MFII_MAX_CHAIN_SHIFT 5
87
88 #define MFII_256K_IO 128
89 #define MFII_1MB_IO (MFII_256K_IO * 4)
90
91 #define MFII_CHAIN_FRAME_MIN 1024
92
/*
 * Request descriptor written to the hardware to submit a command.
 * flags carries one of the MFII_REQ_TYPE_* values; smid selects the
 * slot in the MPII request frame array the command lives in.
 */
struct mfii_request_descr {
	u_int8_t	flags;
	u_int8_t	msix_index;
	u_int16_t	smid;

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
101
#define MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)
#define MFII_RAID_CTX_TYPE_CUDA		(0x2 << 4)

/*
 * Per-command RAID context.  It sits inside the MFII_REQUEST_SIZE
 * request frame between the mpii_msg_scsi_io header and the SGEs
 * (see the nsge_in_io computation in mfii_attach()).
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

/* SAS3.5 (iop_35) controllers interpret this field as routing flags */
#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;
	u_int8_t	status;

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
135
/*
 * IEEE scatter/gather element as consumed by the firmware.  The last
 * element of a list carries the EOL flag; a chain element points at a
 * further SGL in the per-ccb chain frame.
 */
struct mfii_sge {
	u_int64_t	sg_addr;
	u_int32_t	sg_len;
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;
} __packed;

/* sg_flags values */
#define MFII_SGE_ADDR_MASK		(0x03)
#define MFII_SGE_ADDR_SYSTEM		(0x00)
#define MFII_SGE_ADDR_IOCDDR		(0x01)
#define MFII_SGE_ADDR_IOCPLB		(0x02)
#define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
#define MFII_SGE_END_OF_LIST		(0x40)
#define MFII_SGE_CHAIN_ELEMENT		(0x80)
151
152 #define MFII_REQUEST_SIZE 256
153
154 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
155
156 #define MFII_MAX_ROW 32
157 #define MFII_MAX_ARRAY 128
158
/* one row of the array map: physical devices making up an array */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;

/* current and alternate device handles for a physical disk */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;

/*
 * Layout of the buffer returned by MR_DCMD_LD_MAP_GET_INFO: logical
 * disk to target mapping plus the dev-handle table for passthru I/O.
 */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
181
/*
 * Task management request/reply pair, overlaid on fixed 128-byte
 * buffers; used for aborting outstanding commands (see mfii_abort()).
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define MFII_TASK_MGMT_FLAGS_LD			(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD			(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);

/* We currently don't know the full details of the following struct */
struct mfii_foreign_scan_cfg {
	char data[24];
} __packed;

struct mfii_foreign_scan_info {
	uint32_t count; /* Number of foreign configs found */
	struct mfii_foreign_scan_cfg cfgs[8];
} __packed;
208
/*
 * A single-segment DMA-safe memory allocation; created by
 * mfii_dmamem_alloc() and released by mfii_dmamem_free().
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;	/* loaded map (1 segment) */
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	void			*mdm_kva;	/* kernel mapping */
};
/* accessors: map, length, device address and kernel address */
#define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
219
struct mfii_softc;

/* direction of the data phase of a command */
typedef enum mfii_direction {
	MFII_DATA_NONE = 0,
	MFII_DATA_IN,		/* device to host */
	MFII_DATA_OUT		/* host to device */
} mfii_direction_t;

/*
 * Per-command control block.  Each ccb owns a slice of the shared
 * request, mfi frame, sense and SGL DMA areas (kva pointer, device
 * address and offset within the parent allocation for syncing).
 */
struct mfii_ccb {
	struct mfii_softc	*ccb_sc;
	/* slice of sc_requests (MPII request frame) */
	void			*ccb_request;
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	/* slice of sc_mfi (MFI command frame) */
	void			*ccb_mfi;
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	/* slice of sc_sense (also reused to hold dcmd frames) */
	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	/* slice of sc_sgl (chain SGL) */
	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;

	bus_dmamap_t		ccb_dmamap64;
	bus_dmamap_t		ccb_dmamap32;
	bool			ccb_dma64;	/* which map is loaded */

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	mfii_direction_t	ccb_direction;

	void			*ccb_cookie;	/* caller context */
	kmutex_t		ccb_mtx;
	kcondvar_t		ccb_cv;
	/* completion callback invoked from interrupt/poll context */
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
271
/*
 * Per-controller-generation parameters; one static instance exists for
 * each IOP flavour (thunderbolt/25/35) and is selected by PCI id in
 * mfii_find_iop().
 */
struct mfii_iop {
	int bar;		/* which PCI BAR holds the registers */
	int num_sge_loc;	/* where max SGE count is encoded */
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;
	u_int8_t sge_flag_eol;
};
283
/*
 * Per-controller softc.
 */
struct mfii_softc {
	device_t		sc_dev;
	struct scsipi_channel	sc_chan;
	struct scsipi_adapter	sc_adapt;

	const struct mfii_iop	*sc_iop;	/* controller parameters */

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;
	bus_dma_tag_t		sc_dmat64;	/* == sc_dmat if no 64bit DMA */
	bool			sc_64bit_dma;

	void			*sc_ih;		/* interrupt handle */

	kmutex_t		sc_ccb_mtx;	/* protects sc_ccb_freeq */
	kmutex_t		sc_post_mtx;

	u_int			sc_max_fw_cmds;	/* limit reported by fw */
	u_int			sc_max_cmds;	/* what we actually use */
	u_int			sc_max_sgl;

	/* reply post queue, consumed in mfii_postq() */
	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	kmutex_t		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* shared DMA areas sliced up between the ccbs */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_mfi;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;
	struct mfii_ccb_list	sc_ccb_freeq;

	/* asynchronous event notification */
	struct mfii_ccb		*sc_aen_ccb;
	struct workqueue	*sc_aen_wq;
	struct work		sc_aen_work;

	/* command abort handling, processed by sc_abort_wq */
	kmutex_t		sc_abort_mtx;
	struct mfii_ccb_list	sc_abort_list;
	struct workqueue	*sc_abort_wq;
	struct work		sc_abort_work;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		bool		ld_present;
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFI_MAX_LD];
	int			sc_target_lds[MFI_MAX_LD];

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfi_ld_list	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
	int			sc_no_pd; /* used physical disks */
	int			sc_ld_sz; /* sizeof sc_ld_details */

	/* mgmt lock */
	kmutex_t		sc_lock;
	bool			sc_running;	/* false once shut down */

	/* sensors */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
	bool			sc_bbuok;	/* battery backup unit ok */

	device_t		sc_child;	/* attached scsibus, if any */
};
360
361 // #define MFII_DEBUG
362 #ifdef MFII_DEBUG
363 #define DPRINTF(x...) do { if (mfii_debug) printf(x); } while(0)
364 #define DNPRINTF(n,x...) do { if (mfii_debug & n) printf(x); } while(0)
365 #define MFII_D_CMD 0x0001
366 #define MFII_D_INTR 0x0002
367 #define MFII_D_MISC 0x0004
368 #define MFII_D_DMA 0x0008
369 #define MFII_D_IOCTL 0x0010
370 #define MFII_D_RW 0x0020
371 #define MFII_D_MEM 0x0040
372 #define MFII_D_CCB 0x0080
373 uint32_t mfii_debug = 0
374 /* | MFII_D_CMD */
375 /* | MFII_D_INTR */
376 | MFII_D_MISC
377 /* | MFII_D_DMA */
378 /* | MFII_D_IOCTL */
379 /* | MFII_D_RW */
380 /* | MFII_D_MEM */
381 /* | MFII_D_CCB */
382 ;
383 #else
384 #define DPRINTF(x...)
385 #define DNPRINTF(n,x...)
386 #endif
387
388 static int mfii_match(device_t, cfdata_t, void *);
389 static void mfii_attach(device_t, device_t, void *);
390 static int mfii_detach(device_t, int);
391 static int mfii_rescan(device_t, const char *, const int *);
392 static void mfii_childdetached(device_t, device_t);
393 static bool mfii_suspend(device_t, const pmf_qual_t *);
394 static bool mfii_resume(device_t, const pmf_qual_t *);
395 static bool mfii_shutdown(device_t, int);
396
397
398 CFATTACH_DECL3_NEW(mfii, sizeof(struct mfii_softc),
399 mfii_match, mfii_attach, mfii_detach, NULL, mfii_rescan,
400 mfii_childdetached, DVF_DETACH_SHUTDOWN);
401
402 static void mfii_scsipi_request(struct scsipi_channel *,
403 scsipi_adapter_req_t, void *);
404 static void mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
405
406 #define DEVNAME(_sc) (device_xname((_sc)->sc_dev))
407
408 static u_int32_t mfii_read(struct mfii_softc *, bus_size_t);
409 static void mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
410
411 static struct mfii_dmamem * mfii_dmamem_alloc(struct mfii_softc *, size_t);
412 static void mfii_dmamem_free(struct mfii_softc *,
413 struct mfii_dmamem *);
414
415 static struct mfii_ccb * mfii_get_ccb(struct mfii_softc *);
416 static void mfii_put_ccb(struct mfii_softc *, struct mfii_ccb *);
417 static int mfii_init_ccb(struct mfii_softc *);
418 static void mfii_scrub_ccb(struct mfii_ccb *);
419
420 static int mfii_transition_firmware(struct mfii_softc *);
421 static int mfii_initialise_firmware(struct mfii_softc *);
422 static int mfii_get_info(struct mfii_softc *);
423
424 static void mfii_start(struct mfii_softc *, struct mfii_ccb *);
425 static void mfii_done(struct mfii_softc *, struct mfii_ccb *);
426 static int mfii_poll(struct mfii_softc *, struct mfii_ccb *);
427 static void mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
428 static int mfii_exec(struct mfii_softc *, struct mfii_ccb *);
429 static void mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
430 static int mfii_my_intr(struct mfii_softc *);
431 static int mfii_intr(void *);
432 static void mfii_postq(struct mfii_softc *);
433
434 static int mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
435 void *, int);
436 static int mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
437 void *, int);
438
439 static int mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
440
441 static int mfii_mgmt(struct mfii_softc *, uint32_t,
442 const union mfi_mbox *, void *, size_t,
443 mfii_direction_t, bool);
444 static int mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
445 uint32_t, const union mfi_mbox *, void *, size_t,
446 mfii_direction_t, bool);
447 static void mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
448
449 static int mfii_scsi_cmd_io(struct mfii_softc *,
450 struct mfii_ccb *, struct scsipi_xfer *);
451 static int mfii_scsi_cmd_cdb(struct mfii_softc *,
452 struct mfii_ccb *, struct scsipi_xfer *);
453 static void mfii_scsi_cmd_tmo(void *);
454
455 static void mfii_abort_task(struct work *, void *);
456 static void mfii_abort(struct mfii_softc *, struct mfii_ccb *,
457 uint16_t, uint16_t, uint8_t, uint32_t);
458 static void mfii_scsi_cmd_abort_done(struct mfii_softc *,
459 struct mfii_ccb *);
460
461 static int mfii_aen_register(struct mfii_softc *);
462 static void mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
463 struct mfii_dmamem *, uint32_t);
464 static void mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
465 static void mfii_aen(struct work *, void *);
466 static void mfii_aen_unregister(struct mfii_softc *);
467
468 static void mfii_aen_pd_insert(struct mfii_softc *,
469 const struct mfi_evtarg_pd_address *);
470 static void mfii_aen_pd_remove(struct mfii_softc *,
471 const struct mfi_evtarg_pd_address *);
472 static void mfii_aen_pd_state_change(struct mfii_softc *,
473 const struct mfi_evtarg_pd_state *);
474 static void mfii_aen_ld_update(struct mfii_softc *);
475
476 #if NBIO > 0
477 static int mfii_ioctl(device_t, u_long, void *);
478 static int mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
479 static int mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
480 static int mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
481 static int mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
482 static int mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
483 static int mfii_ioctl_setstate(struct mfii_softc *,
484 struct bioc_setstate *);
485 static int mfii_bio_hs(struct mfii_softc *, int, int, void *);
486 static int mfii_bio_getitall(struct mfii_softc *);
487 #endif /* NBIO > 0 */
488
489 #if 0
490 static const char *mfi_bbu_indicators[] = {
491 "pack missing",
492 "voltage low",
493 "temp high",
494 "charge active",
495 "discharge active",
496 "learn cycle req'd",
497 "learn cycle active",
498 "learn cycle failed",
499 "learn cycle timeout",
500 "I2C errors",
501 "replace pack",
502 "low capacity",
503 "periodic learn req'd"
504 };
505 #endif
506
507 static void mfii_init_ld_sensor(struct mfii_softc *, envsys_data_t *, int);
508 static void mfii_refresh_ld_sensor(struct mfii_softc *, envsys_data_t *);
509 static void mfii_attach_sensor(struct mfii_softc *, envsys_data_t *);
510 static int mfii_create_sensors(struct mfii_softc *);
511 static int mfii_destroy_sensors(struct mfii_softc *);
512 static void mfii_refresh_sensor(struct sysmon_envsys *, envsys_data_t *);
513 static void mfii_bbu(struct mfii_softc *, envsys_data_t *);
514
515 /*
516 * mfii boards support asynchronous (and non-polled) completion of
517 * dcmds by proxying them through a passthru mpii command that points
518 * at a dcmd frame. since the passthru command is submitted like
519 * the scsi commands using an SMID in the request descriptor,
520 * ccb_request memory * must contain the passthru command because
521 * that is what the SMID refers to. this means ccb_request cannot
522 * contain the dcmd. rather than allocating separate dma memory to
523 * hold the dcmd, we reuse the sense memory buffer for it.
524 */
525
526 static void mfii_dcmd_start(struct mfii_softc *, struct mfii_ccb *);
527
528 static inline void
529 mfii_dcmd_scrub(struct mfii_ccb *ccb)
530 {
531 memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
532 }
533
534 static inline struct mfi_dcmd_frame *
535 mfii_dcmd_frame(struct mfii_ccb *ccb)
536 {
537 CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
538 return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
539 }
540
541 static inline void
542 mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
543 {
544 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
545 ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
546 }
547
548 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
549
550 static const struct mfii_iop mfii_iop_thunderbolt = {
551 MFII_BAR,
552 MFII_IOP_NUM_SGE_LOC_ORIG,
553 0,
554 MFII_REQ_TYPE_LDIO,
555 0,
556 MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
557 0
558 };
559
560 /*
561 * a lot of these values depend on us not implementing fastpath yet.
562 */
563 static const struct mfii_iop mfii_iop_25 = {
564 MFII_BAR,
565 MFII_IOP_NUM_SGE_LOC_ORIG,
566 MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
567 MFII_REQ_TYPE_NO_LOCK,
568 MFII_RAID_CTX_TYPE_CUDA | 0x1,
569 MFII_SGE_CHAIN_ELEMENT,
570 MFII_SGE_END_OF_LIST
571 };
572
573 static const struct mfii_iop mfii_iop_35 = {
574 MFII_BAR_35,
575 MFII_IOP_NUM_SGE_LOC_35,
576 MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
577 MFII_REQ_TYPE_NO_LOCK,
578 MFII_RAID_CTX_TYPE_CUDA | 0x1,
579 MFII_SGE_CHAIN_ELEMENT,
580 MFII_SGE_END_OF_LIST
581 };
582
/* PCI id to iop-parameter mapping for all supported controllers */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};

static const struct mfii_device mfii_devices[] = {
	/* Fusion */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	/* Fury */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	/* Invader */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	/* Intruder */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3316,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3324,
	    &mfii_iop_25 },
	/* Cutlass */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_32XX_1,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_32XX_2,
	    &mfii_iop_25 },
	/* Crusader */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	/* Ventura */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 },
	/* Tomcat */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	/* Harpoon */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 }
};
626
627 static const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
628
629 static const struct mfii_iop *
630 mfii_find_iop(struct pci_attach_args *pa)
631 {
632 const struct mfii_device *mpd;
633 int i;
634
635 for (i = 0; i < __arraycount(mfii_devices); i++) {
636 mpd = &mfii_devices[i];
637
638 if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
639 mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
640 return (mpd->mpd_iop);
641 }
642
643 return (NULL);
644 }
645
646 static int
647 mfii_match(device_t parent, cfdata_t match, void *aux)
648 {
649 return ((mfii_find_iop(aux) != NULL) ? 2 : 0);
650 }
651
652 static void
653 mfii_attach(device_t parent, device_t self, void *aux)
654 {
655 struct mfii_softc *sc = device_private(self);
656 struct pci_attach_args *pa = aux;
657 pcireg_t memtype;
658 pci_intr_handle_t *ihp;
659 char intrbuf[PCI_INTRSTR_LEN];
660 const char *intrstr;
661 u_int32_t status, scpad2, scpad3;
662 int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
663 struct scsipi_adapter *adapt = &sc->sc_adapt;
664 struct scsipi_channel *chan = &sc->sc_chan;
665
666 /* init sc */
667 sc->sc_dev = self;
668 sc->sc_iop = mfii_find_iop(aux);
669 sc->sc_dmat = pa->pa_dmat;
670 if (pci_dma64_available(pa)) {
671 sc->sc_dmat64 = pa->pa_dmat64;
672 sc->sc_64bit_dma = 1;
673 } else {
674 sc->sc_dmat64 = pa->pa_dmat;
675 sc->sc_64bit_dma = 0;
676 }
677 SIMPLEQ_INIT(&sc->sc_ccb_freeq);
678 mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
679 mutex_init(&sc->sc_post_mtx, MUTEX_DEFAULT, IPL_BIO);
680 mutex_init(&sc->sc_reply_postq_mtx, MUTEX_DEFAULT, IPL_BIO);
681
682 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
683
684 sc->sc_aen_ccb = NULL;
685 snprintf(intrbuf, sizeof(intrbuf) - 1, "%saen", device_xname(self));
686 workqueue_create(&sc->sc_aen_wq, intrbuf, mfii_aen, sc,
687 PRI_BIO, IPL_BIO, WQ_MPSAFE);
688
689 snprintf(intrbuf, sizeof(intrbuf) - 1, "%sabrt", device_xname(self));
690 workqueue_create(&sc->sc_abort_wq, intrbuf, mfii_abort_task,
691 sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
692
693 mutex_init(&sc->sc_abort_mtx, MUTEX_DEFAULT, IPL_BIO);
694 SIMPLEQ_INIT(&sc->sc_abort_list);
695
696 /* wire up the bus shizz */
697 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
698 memtype |= PCI_MAPREG_MEM_TYPE_32BIT;
699 if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
700 &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)) {
701 aprint_error(": unable to map registers\n");
702 return;
703 }
704
705 /* disable interrupts */
706 mfii_write(sc, MFI_OMSK, 0xffffffff);
707
708 if (pci_intr_alloc(pa, &ihp, NULL, 0)) {
709 aprint_error(": unable to map interrupt\n");
710 goto pci_unmap;
711 }
712 intrstr = pci_intr_string(pa->pa_pc, ihp[0], intrbuf, sizeof(intrbuf));
713 pci_intr_setattr(pa->pa_pc, &ihp[0], PCI_INTR_MPSAFE, true);
714
715 /* lets get started */
716 if (mfii_transition_firmware(sc))
717 goto pci_unmap;
718 sc->sc_running = true;
719
720 /* determine max_cmds (refer to the Linux megaraid_sas driver) */
721 scpad3 = mfii_read(sc, MFII_OSP3);
722 status = mfii_fw_state(sc);
723 sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
724 if (sc->sc_max_fw_cmds == 0)
725 sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
726 /*
727 * reduce max_cmds by 1 to ensure that the reply queue depth does not
728 * exceed FW supplied max_fw_cmds.
729 */
730 sc->sc_max_cmds = uimin(sc->sc_max_fw_cmds, 1024) - 1;
731
732 /* determine max_sgl (refer to the Linux megaraid_sas driver) */
733 scpad2 = mfii_read(sc, MFII_OSP2);
734 chain_frame_sz =
735 ((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
736 ((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
737 if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
738 chain_frame_sz = MFII_CHAIN_FRAME_MIN;
739
740 nsge_in_io = (MFII_REQUEST_SIZE -
741 sizeof(struct mpii_msg_scsi_io) -
742 sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
743 nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
744
745 /* round down to nearest power of two */
746 sc->sc_max_sgl = 1;
747 while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
748 sc->sc_max_sgl <<= 1;
749
750 DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
751 DEVNAME(sc), status, scpad2, scpad3);
752 DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
753 DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
754 DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
755 "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
756 sc->sc_max_sgl);
757
758 /* sense memory */
759 CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
760 sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
761 if (sc->sc_sense == NULL) {
762 aprint_error(": unable to allocate sense memory\n");
763 goto pci_unmap;
764 }
765
766 /* reply post queue */
767 sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
768
769 sc->sc_reply_postq = mfii_dmamem_alloc(sc,
770 sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
771 if (sc->sc_reply_postq == NULL)
772 goto free_sense;
773
774 memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
775 MFII_DMA_LEN(sc->sc_reply_postq));
776
777 /* MPII request frame array */
778 sc->sc_requests = mfii_dmamem_alloc(sc,
779 MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
780 if (sc->sc_requests == NULL)
781 goto free_reply_postq;
782
783 /* MFI command frame array */
784 sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
785 if (sc->sc_mfi == NULL)
786 goto free_requests;
787
788 /* MPII SGL array */
789 sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
790 sizeof(struct mfii_sge) * sc->sc_max_sgl);
791 if (sc->sc_sgl == NULL)
792 goto free_mfi;
793
794 if (mfii_init_ccb(sc) != 0) {
795 aprint_error(": could not init ccb list\n");
796 goto free_sgl;
797 }
798
799 /* kickstart firmware with all addresses and pointers */
800 if (mfii_initialise_firmware(sc) != 0) {
801 aprint_error(": could not initialize firmware\n");
802 goto free_sgl;
803 }
804
805 mutex_enter(&sc->sc_lock);
806 if (mfii_get_info(sc) != 0) {
807 mutex_exit(&sc->sc_lock);
808 aprint_error(": could not retrieve controller information\n");
809 goto free_sgl;
810 }
811 mutex_exit(&sc->sc_lock);
812
813 aprint_normal(": \"%s\", firmware %s",
814 sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
815 if (le16toh(sc->sc_info.mci_memory_size) > 0) {
816 aprint_normal(", %uMB cache",
817 le16toh(sc->sc_info.mci_memory_size));
818 }
819 aprint_normal("\n");
820 aprint_naive("\n");
821
822 sc->sc_ih = pci_intr_establish_xname(sc->sc_pc, ihp[0], IPL_BIO,
823 mfii_intr, sc, DEVNAME(sc));
824 if (sc->sc_ih == NULL) {
825 aprint_error_dev(self, "can't establish interrupt");
826 if (intrstr)
827 aprint_error(" at %s", intrstr);
828 aprint_error("\n");
829 goto free_sgl;
830 }
831 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
832
833 for (i = 0; i < sc->sc_info.mci_lds_present; i++)
834 sc->sc_ld[i].ld_present = 1;
835
836 memset(adapt, 0, sizeof(*adapt));
837 adapt->adapt_dev = sc->sc_dev;
838 adapt->adapt_nchannels = 1;
839 /* keep a few commands for management */
840 if (sc->sc_max_cmds > 4)
841 adapt->adapt_openings = sc->sc_max_cmds - 4;
842 else
843 adapt->adapt_openings = sc->sc_max_cmds;
844 adapt->adapt_max_periph = adapt->adapt_openings;
845 adapt->adapt_request = mfii_scsipi_request;
846 adapt->adapt_minphys = minphys;
847 adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
848
849 memset(chan, 0, sizeof(*chan));
850 chan->chan_adapter = adapt;
851 chan->chan_bustype = &scsi_sas_bustype;
852 chan->chan_channel = 0;
853 chan->chan_flags = 0;
854 chan->chan_nluns = 8;
855 chan->chan_ntargets = sc->sc_info.mci_max_lds;
856 chan->chan_id = sc->sc_info.mci_max_lds;
857
858 mfii_rescan(sc->sc_dev, "scsi", NULL);
859
860 if (mfii_aen_register(sc) != 0) {
861 /* error printed by mfii_aen_register */
862 goto intr_disestablish;
863 }
864
865 mutex_enter(&sc->sc_lock);
866 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
867 sizeof(sc->sc_ld_list), MFII_DATA_IN, true) != 0) {
868 mutex_exit(&sc->sc_lock);
869 aprint_error_dev(self,
870 "getting list of logical disks failed\n");
871 goto intr_disestablish;
872 }
873 mutex_exit(&sc->sc_lock);
874 memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
875 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
876 int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
877 sc->sc_target_lds[target] = i;
878 }
879
880 /* enable interrupts */
881 mfii_write(sc, MFI_OSTS, 0xffffffff);
882 mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
883
884 #if NBIO > 0
885 if (bio_register(sc->sc_dev, mfii_ioctl) != 0)
886 panic("%s: controller registration failed", DEVNAME(sc));
887 #endif /* NBIO > 0 */
888
889 if (mfii_create_sensors(sc) != 0)
890 aprint_error_dev(self, "unable to create sensors\n");
891
892 if (!pmf_device_register1(sc->sc_dev, mfii_suspend, mfii_resume,
893 mfii_shutdown))
894 aprint_error_dev(self, "couldn't establish power handler\n");
895 return;
896 intr_disestablish:
897 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
898 free_sgl:
899 mfii_dmamem_free(sc, sc->sc_sgl);
900 free_mfi:
901 mfii_dmamem_free(sc, sc->sc_mfi);
902 free_requests:
903 mfii_dmamem_free(sc, sc->sc_requests);
904 free_reply_postq:
905 mfii_dmamem_free(sc, sc->sc_reply_postq);
906 free_sense:
907 mfii_dmamem_free(sc, sc->sc_sense);
908 pci_unmap:
909 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
910 }
911
#if 0
/*
 * NOTE(review): dead code kept for reference.  This appears to be the
 * OpenBSD physical-disk dev-handle cache; it relies on srp(9),
 * mallocarray() and the OpenBSD three-argument free(), none of which
 * are available on NetBSD, and references sc_pd which this softc does
 * not have.  It would need porting before being enabled.
 */
struct srp_gc mfii_dev_handles_gc =
    SRP_GC_INITIALIZER(mfii_dev_handles_dtor, NULL);

static inline uint16_t
mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
{
	struct srp_ref sr;
	uint16_t *map, handle;

	map = srp_enter(&sr, &sc->sc_pd->pd_dev_handles);
	handle = map[target];
	srp_leave(&sr);

	return (handle);
}

static int
mfii_dev_handles_update(struct mfii_softc *sc)
{
	struct mfii_ld_map *lm;
	uint16_t *dev_handles = NULL;
	int i;
	int rv = 0;

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);

	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
	    MFII_DATA_IN, false);

	if (rv != 0) {
		rv = EIO;
		goto free_lm;
	}

	dev_handles = mallocarray(MFI_MAX_PD, sizeof(*dev_handles),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MFI_MAX_PD; i++)
		dev_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;

	/* commit the updated info */
	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
	srp_update_locked(&mfii_dev_handles_gc,
	    &sc->sc_pd->pd_dev_handles, dev_handles);

free_lm:
	free(lm, M_TEMP, sizeof(*lm));

	return (rv);
}

static void
mfii_dev_handles_dtor(void *null, void *v)
{
	uint16_t *dev_handles = v;

	free(dev_handles, M_DEVBUF, sizeof(*dev_handles) * MFI_MAX_PD);
}
#endif /* 0 */
972
/*
 * Detach the controller: detach the scsibus child, tear down sensors
 * and bio(4) registration, shut the firmware down, then release the
 * interrupt and all DMA memory in the reverse order of allocation.
 */
static int
mfii_detach(device_t self, int flags)
{
	struct mfii_softc *sc = device_private(self);
	int error;

	/* attach never established the interrupt: nothing to undo */
	if (sc->sc_ih == NULL)
		return (0);

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

	mfii_destroy_sensors(sc);
#if NBIO > 0
	bio_unregister(sc->sc_dev);
#endif
	/* flush/stop the firmware, then mask all interrupts */
	mfii_shutdown(sc->sc_dev, 0);
	mfii_write(sc, MFI_OMSK, 0xffffffff);

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
1003
1004 static int
1005 mfii_rescan(device_t self, const char *ifattr, const int *locators)
1006 {
1007 struct mfii_softc *sc = device_private(self);
1008 if (sc->sc_child != NULL)
1009 return 0;
1010
1011 sc->sc_child = config_found_sm_loc(self, ifattr, locators,
1012 &sc->sc_chan, scsiprint, NULL);
1013 return 0;
1014 }
1015
1016 static void
1017 mfii_childdetached(device_t self, device_t child)
1018 {
1019 struct mfii_softc *sc = device_private(self);
1020
1021 KASSERT(self == sc->sc_dev);
1022 KASSERT(child == sc->sc_child);
1023
1024 if (child == sc->sc_child)
1025 sc->sc_child = NULL;
1026 }
1027
/* Power-management suspend hook: not implemented, refuse to suspend. */
static bool
mfii_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1034
/* Power-management resume hook: not implemented, report failure. */
static bool
mfii_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1041
1042 static bool
1043 mfii_shutdown(device_t dev, int how)
1044 {
1045 struct mfii_softc *sc = device_private(dev);
1046 struct mfii_ccb *ccb;
1047 union mfi_mbox mbox;
1048 bool rv = true;
1049
1050 memset(&mbox, 0, sizeof(mbox));
1051
1052 mutex_enter(&sc->sc_lock);
1053 DNPRINTF(MFII_D_MISC, "%s: mfii_shutdown\n", DEVNAME(sc));
1054 ccb = mfii_get_ccb(sc);
1055 if (ccb == NULL)
1056 return false;
1057 mutex_enter(&sc->sc_ccb_mtx);
1058 if (sc->sc_running) {
1059 sc->sc_running = 0; /* prevent new commands */
1060 mutex_exit(&sc->sc_ccb_mtx);
1061 #if 0 /* XXX why does this hang ? */
1062 mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1063 mfii_scrub_ccb(ccb);
1064 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
1065 NULL, 0, MFII_DATA_NONE, true)) {
1066 aprint_error_dev(dev,
1067 "shutdown: cache flush failed\n");
1068 rv = false;
1069 goto fail;
1070 }
1071 printf("ok1\n");
1072 #endif
1073 mbox.b[0] = 0;
1074 mfii_scrub_ccb(ccb);
1075 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, &mbox,
1076 NULL, 0, MFII_DATA_NONE, true)) {
1077 aprint_error_dev(dev, "shutdown: "
1078 "firmware shutdown failed\n");
1079 rv = false;
1080 goto fail;
1081 }
1082 } else {
1083 mutex_exit(&sc->sc_ccb_mtx);
1084 }
1085 fail:
1086 mfii_put_ccb(sc, ccb);
1087 mutex_exit(&sc->sc_lock);
1088 return rv;
1089 }
1090
1091 static u_int32_t
1092 mfii_read(struct mfii_softc *sc, bus_size_t r)
1093 {
1094 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1095 BUS_SPACE_BARRIER_READ);
1096 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
1097 }
1098
1099 static void
1100 mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
1101 {
1102 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1103 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1104 BUS_SPACE_BARRIER_WRITE);
1105 }
1106
/*
 * Allocate, map and load a single-segment chunk of DMA-safe memory of
 * the given size, zeroed.  Returns NULL on failure; the goto chain
 * unwinds each successful step in reverse order.  Freed with
 * mfii_dmamem_free().
 */
static struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->mdm_size = size;

	/* One map, one segment: callers get a single contiguous range. */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(m->mdm_kva, 0, size);
	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF);

	return (NULL);
}
1149
1150 static void
1151 mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
1152 {
1153 bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
1154 bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
1155 bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
1156 bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
1157 free(m, M_DEVBUF);
1158 }
1159
/*
 * Post the ccb as an MPI pass-through request whose single chain SGE
 * references the ccb's sense-buffer DMA area.
 * NOTE(review): presumably the MFI DCMD frame built by the caller
 * (see mfii_dcmd_frame()/mfii_dcmd_scrub()) lives in that area --
 * confirm against the ccb layout elsewhere in this file.
 */
static void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	/* Request layout: io header, raid context, then one chain SGE. */
	io->function = MFII_FUNCTION_PASSTHRU_IO;
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	sge->sg_addr = htole64(ccb->ccb_sense_dva);
	sge->sg_len = htole32(sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1180
/*
 * Set up asynchronous event notification (AEN): read the controller's
 * event-log info to learn the boot sequence number, allocate the DMA
 * buffer that will receive event details, and post the first
 * EVENT_WAIT command.  On success the ccb and buffer are handed over
 * to the AEN machinery (mfii_aen_start()) and remain in flight.
 * Returns 0 on success, ENOMEM or EIO on failure.
 */
static int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), MFII_DATA_IN, true);
	if (rv != 0) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to get event info\n");
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate event data\n");
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, le32toh(mel.mel_boot_seq_num));

	return (0);
}
1219
/*
 * (Re)arm the asynchronous event notification command: build an MFI
 * DCMD (MR_DCMD_CTRL_EVENT_WAIT) asking the firmware to complete this
 * command when an event with sequence number >= seq occurs, with the
 * event detail DMA'd into mdm.  Completion is routed to
 * mfii_aen_done().
 */
static void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	/* The AEN machinery owns this ccb; remember it in the softc. */
	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* Request class MFI_EVT_CLASS_DEBUG events from all locales. */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	hdr->mfh_data_len = htole32(MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mdf_mbox.w[0] = htole32(seq);
	dcmd->mdf_mbox.w[1] = htole32(mec.mec_word);
	/* Single 64-bit SG entry covering the event-detail buffer. */
	sgl->sg64[0].addr = htole64(MFII_DMA_DVA(mdm));
	sgl->sg64[0].len = htole32(MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1257
1258 static void
1259 mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1260 {
1261 KASSERT(sc->sc_aen_ccb == ccb);
1262
1263 /*
1264 * defer to a thread with KERNEL_LOCK so we can run autoconf
1265 * We shouldn't have more than one AEN command pending at a time,
1266 * so no need to lock
1267 */
1268 if (sc->sc_running)
1269 workqueue_enqueue(sc->sc_aen_wq, &sc->sc_aen_work, NULL);
1270 }
1271
/*
 * Workqueue handler for asynchronous event notifications: sync and
 * decode the event detail delivered into the AEN DMA buffer, react to
 * physical/logical disk changes, then re-arm the EVENT_WAIT command
 * with the next sequence number.
 */
static void
mfii_aen(struct work *wk, void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);

	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	DNPRINTF(MFII_D_MISC, "%s: %u %08x %02x %s\n", DEVNAME(sc),
	    le32toh(med->med_seq_num), le32toh(med->med_code),
	    med->med_arg_type, med->med_description);

	switch (le32toh(med->med_code)) {
	case MR_EVT_PD_INSERTED_EXT:
		/* Only act when the argument really is a PD address. */
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
	case MR_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MR_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MR_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MR_EVT_LD_CREATED:
	case MR_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	/* Wait for the event following the one just processed. */
	mfii_aen_start(sc, ccb, mdm, le32toh(med->med_seq_num) + 1);
}
1321
1322 static void
1323 mfii_aen_pd_insert(struct mfii_softc *sc,
1324 const struct mfi_evtarg_pd_address *pd)
1325 {
1326 printf("%s: physical disk inserted id %d enclosure %d\n", DEVNAME(sc),
1327 le16toh(pd->device_id), le16toh(pd->encl_id));
1328 }
1329
1330 static void
1331 mfii_aen_pd_remove(struct mfii_softc *sc,
1332 const struct mfi_evtarg_pd_address *pd)
1333 {
1334 printf("%s: physical disk removed id %d enclosure %d\n", DEVNAME(sc),
1335 le16toh(pd->device_id), le16toh(pd->encl_id));
1336 }
1337
/* Physical-disk state-change events are currently not acted upon. */
static void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
}
1344
/*
 * Refresh the logical-disk list after an LD created/deleted event.
 * Builds a new target -> list-index map, compares it with the previous
 * one in sc_target_lds, and attaches/detaches sensors (and detaches
 * scsipi targets) for drives that appeared or disappeared.
 */
static void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	int i, target, old, nld;
	int newlds[MFI_MAX_LD];

	mutex_enter(&sc->sc_lock);
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false) != 0) {
		mutex_exit(&sc->sc_lock);
		DNPRINTF(MFII_D_MISC,
		    "%s: getting list of logical disks failed\n", DEVNAME(sc));
		return;
	}
	mutex_exit(&sc->sc_lock);

	/* -1 == no logical drive at this target. */
	memset(newlds, -1, sizeof(newlds));

	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, sc->sc_ld_list.mll_list[i].mll_state);
		newlds[target] = i;
	}

	for (i = 0; i < MFI_MAX_LD; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		/*
		 * NOTE(review): old/nld are indices into mll_list while
		 * i is the target id, yet the messages below label them
		 * "target" -- the wording looks swapped; confirm.
		 */
		if (old == -1 && nld != -1) {
			printf("%s: logical drive %d added (target %d)\n",
			    DEVNAME(sc), i, nld);

			// XXX scsi_probe_target(sc->sc_scsibus, i);

			mfii_init_ld_sensor(sc, &sc->sc_sensors[i], i);
			mfii_attach_sensor(sc, &sc->sc_sensors[i]);
		} else if (nld == -1 && old != -1) {
			printf("%s: logical drive %d removed (target %d)\n",
			    DEVNAME(sc), i, old);

			scsipi_target_detach(&sc->sc_chan, i, 0, DETACH_FORCE);
			sysmon_envsys_sensor_detach(sc->sc_sme,
			    &sc->sc_sensors[i]);
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1394
/*
 * Tear down asynchronous event notification.
 * XXX not implemented yet.
 */
static void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1400
/*
 * Drive the firmware to MFI_STATE_READY, following the MFI state
 * machine.  For some states a nudge is written to the inbound doorbell
 * first; the state is then polled ten times a second for up to
 * max_wait seconds (DELAY(100000) per iteration).  Returns 0 once the
 * firmware is READY, 1 on fault, unknown state, or timeout.
 */
static int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t fw_state, cur_state;
	int max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* Poll for a state change, 100ms at a time. */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1454
/*
 * Fetch the controller information structure (MR_DCMD_CTRL_GET_INFO)
 * into sc_info and dump the interesting fields via DPRINTF (debug
 * builds only).  Must be called with sc_lock held (mfii_mgmt asserts
 * it).  Returns the mfii_mgmt() status; 0 on success.
 */
static int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), MFII_DATA_IN, true);

	if (rv != 0)
		return (rv);

	/* Everything below is debug output of the freshly read info. */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ",
		    sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ",
		    sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}
1611
/*
 * Poll a legacy MFI frame command to completion; used before the
 * interrupt path exists (e.g. the IOC INIT frame).  Busy-waits up to
 * ~5 seconds for the firmware to change mfh_cmd_status away from
 * MFI_STAT_INVALID_STATUS, then unloads the data DMA map if one was
 * loaded.  Returns 0 on completion, 1 on timeout (the ccb is flagged
 * MFI_CCB_F_ERR).
 */
static int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header *hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	/* Completion is detected by polling; keep it off the reply queue. */
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* Post the frame as a legacy MFA request descriptor. */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	return (rv);
}
1667
/*
 * Run an already-built command synchronously by polling the reply post
 * queue.  The ccb's done callback and cookie are temporarily replaced
 * so mfii_poll_done() can clear the local completion flag; the saved
 * callback is then invoked.  Always returns 0.
 */
static int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	/* Spin until mfii_poll_done() clears rv through the cookie. */
	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
1693
1694 static void
1695 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1696 {
1697 int *rv = ccb->ccb_cookie;
1698
1699 *rv = 0;
1700 }
1701
/*
 * Issue a command and sleep until it completes.  ccb_cookie doubles as
 * the "still running" flag: it is pointed at the ccb itself here and
 * cleared by mfii_exec_done() under ccb_mtx, which also signals
 * ccb_cv.  Always returns 0.
 */
static int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = ccb;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	return (0);
}
1722
1723 static void
1724 mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1725 {
1726 mutex_enter(&ccb->ccb_mtx);
1727 ccb->ccb_cookie = NULL;
1728 cv_signal(&ccb->ccb_cv);
1729 mutex_exit(&ccb->ccb_mtx);
1730 }
1731
1732 static int
1733 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1734 void *buf, size_t len, mfii_direction_t dir, bool poll)
1735 {
1736 struct mfii_ccb *ccb;
1737 int rv;
1738
1739 KASSERT(mutex_owned(&sc->sc_lock));
1740 if (!sc->sc_running)
1741 return EAGAIN;
1742
1743 ccb = mfii_get_ccb(sc);
1744 if (ccb == NULL)
1745 return (ENOMEM);
1746
1747 mfii_scrub_ccb(ccb);
1748 rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, dir, poll);
1749 mfii_put_ccb(sc, ccb);
1750
1751 return (rv);
1752 }
1753
/*
 * Build and issue an MFI DCMD through the MPI pass-through interface
 * using the caller's (already scrubbed) ccb.  The DCMD frame and its
 * 32-bit SGL describing buf live in the ccb's MFI frame area and are
 * referenced from the MPI request by a single chain SGE.  Polls when
 * requested or when the system is cold; otherwise sleeps in
 * mfii_exec().  Returns 0 on MFI_STAT_OK, ENOMEM if the data map
 * could not be loaded, EIO otherwise.
 */
static int
mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
    const union mfi_mbox *mbox, void *buf, size_t len, mfii_direction_t dir,
    bool poll)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	int rv = EIO;

	/* No interrupts before autoconf finishes; force polling. */
	if (cold)
		poll = true;

	ccb->ccb_data = buf;
	ccb->ccb_len = len;
	ccb->ccb_direction = dir;
	switch (dir) {
	case MFII_DATA_IN:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case MFII_DATA_OUT:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		break;
	case MFII_DATA_NONE:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
		break;
	}

	/* Load buf into the 32-bit map and fill the frame's SGL. */
	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl, poll) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap32->dm_nsegs;
	KASSERT(!ccb->ccb_dma64);

	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	/* MPI pass-through request whose chain SGE covers the MFI frame. */
	io->function = MFII_FUNCTION_PASSTHRU_IO;
	io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
	io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;

	sge->sg_addr = htole64(ccb->ccb_mfi_dva);
	sge->sg_len = htole32(MFI_FRAME_SIZE);
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	if (poll) {
		ccb->ccb_done = mfii_empty_done;
		mfii_poll(sc, ccb);
	} else
		mfii_exec(sc, ccb);

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;
	}

done:
	return (rv);
}
1824
/* No-op completion handler used by polled management commands. */
static void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
}
1830
/*
 * Load the ccb's 32-bit data DMA map for an MFI frame command and fill
 * in the frame's 32-bit scatter/gather list (sglp).  No-op when the
 * ccb carries no data.  Returns 0 on success, 1 if the map could not
 * be loaded.
 */
static int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap32;
	int error;
	int i;

	/* MFI frames use 32-bit SGEs; the 64-bit map must not be in use. */
	KASSERT(!ccb->ccb_dma64);
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1863
/*
 * Post the ccb's 64-bit request descriptor to the controller.  The
 * request frame is synced for device access first.  On LP64 hosts the
 * descriptor is written with a single 8-byte access; on 32-bit hosts
 * the two 4-byte halves are written under sc_post_mtx so concurrent
 * posts cannot interleave their low/high words.
 */
static void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	uint32_t *r = (uint32_t *)&ccb->ccb_req;
#if defined(__LP64__)
	uint64_t buf;
#endif

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#if defined(__LP64__)
	buf = ((uint64_t)r[1] << 32) | r[0];
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, buf);
#else
	mutex_enter(&sc->sc_post_mtx);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);
	mutex_exit(&sc->sc_post_mtx);
#endif
}
1888
/*
 * Common completion path for all commands: sync the request frame
 * (plus the chained SGL and the data buffer, when present) after
 * device access, unload whichever data map was in use, and invoke the
 * ccb's done callback.
 */
static void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	/* Data went through either the 64-bit or the 32-bit map. */
	if (ccb->ccb_dma64) {
		KASSERT(ccb->ccb_len > 0);
		bus_dmamap_sync(sc->sc_dmat64, ccb->ccb_dmamap64,
		    0, ccb->ccb_dmamap64->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat64, ccb->ccb_dmamap64);
	} else if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	ccb->ccb_done(sc, ccb);
}
1921
1922 static int
1923 mfii_initialise_firmware(struct mfii_softc *sc)
1924 {
1925 struct mpii_msg_iocinit_request *iiq;
1926 struct mfii_dmamem *m;
1927 struct mfii_ccb *ccb;
1928 struct mfi_init_frame *init;
1929 int rv;
1930
1931 m = mfii_dmamem_alloc(sc, sizeof(*iiq));
1932 if (m == NULL)
1933 return (1);
1934
1935 iiq = MFII_DMA_KVA(m);
1936 memset(iiq, 0, sizeof(*iiq));
1937
1938 iiq->function = MPII_FUNCTION_IOC_INIT;
1939 iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
1940
1941 iiq->msg_version_maj = 0x02;
1942 iiq->msg_version_min = 0x00;
1943 iiq->hdr_version_unit = 0x10;
1944 iiq->hdr_version_dev = 0x0;
1945
1946 iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
1947
1948 iiq->reply_descriptor_post_queue_depth =
1949 htole16(sc->sc_reply_postq_depth);
1950 iiq->reply_free_queue_depth = htole16(0);
1951
1952 iiq->sense_buffer_address_high = htole32(
1953 MFII_DMA_DVA(sc->sc_sense) >> 32);
1954
1955 iiq->reply_descriptor_post_queue_address_lo =
1956 htole32(MFII_DMA_DVA(sc->sc_reply_postq));
1957 iiq->reply_descriptor_post_queue_address_hi =
1958 htole32(MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
1959
1960 iiq->system_request_frame_base_address_lo =
1961 htole32(MFII_DMA_DVA(sc->sc_requests));
1962 iiq->system_request_frame_base_address_hi =
1963 htole32(MFII_DMA_DVA(sc->sc_requests) >> 32);
1964
1965 iiq->timestamp = htole64(time_uptime);
1966
1967 ccb = mfii_get_ccb(sc);
1968 if (ccb == NULL) {
1969 /* shouldn't ever run out of ccbs during attach */
1970 return (1);
1971 }
1972 mfii_scrub_ccb(ccb);
1973 init = ccb->ccb_request;
1974
1975 init->mif_header.mfh_cmd = MFI_CMD_INIT;
1976 init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
1977 init->mif_qinfo_new_addr_lo = htole32(MFII_DMA_DVA(m));
1978 init->mif_qinfo_new_addr_hi = htole32((uint64_t)MFII_DMA_DVA(m) >> 32);
1979
1980 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
1981 0, MFII_DMA_LEN(sc->sc_reply_postq),
1982 BUS_DMASYNC_PREREAD);
1983
1984 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1985 0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
1986
1987 rv = mfii_mfa_poll(sc, ccb);
1988
1989 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1990 0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
1991
1992 mfii_put_ccb(sc, ccb);
1993 mfii_dmamem_free(sc, m);
1994
1995 return (rv);
1996 }
1997
1998 static int
1999 mfii_my_intr(struct mfii_softc *sc)
2000 {
2001 u_int32_t status;
2002
2003 status = mfii_read(sc, MFI_OSTS);
2004
2005 DNPRINTF(MFII_D_INTR, "%s: intr status 0x%x\n", DEVNAME(sc), status);
2006 if (ISSET(status, 0x1)) {
2007 mfii_write(sc, MFI_OSTS, status);
2008 return (1);
2009 }
2010
2011 return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
2012 }
2013
/*
 * Interrupt handler: drain the reply post queue when the interrupt is
 * ours.  Returns 1 if handled, 0 to pass the interrupt on.
 */
static int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc) == 0)
		return 0;

	mfii_postq(sc);

	return 1;
}
2026
/*
 * Drain the reply post queue.  Completed ccbs are collected under
 * sc_reply_postq_mtx onto a local list and the consumer index is
 * written back to the controller; the completions themselves then run
 * without the queue lock held.
 */
static void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mutex_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		DNPRINTF(MFII_D_INTR,
		    "%s: mfii_postq index %d flags 0x%x data 0x%x\n",
		    DEVNAME(sc), sc->sc_reply_postq_index, rdp->reply_flags,
		    rdp->data == 0xffffffff);
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1-based; 0 means unused. */
		ccb = &sc->sc_ccb[le16toh(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		/* Mark the descriptor unused again for the next pass. */
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* Tell the controller how far we have consumed. */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mutex_exit(&sc->sc_reply_postq_mtx);

	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2082
2083 static void
2084 mfii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
2085 void *arg)
2086 {
2087 struct scsipi_periph *periph;
2088 struct scsipi_xfer *xs;
2089 struct scsipi_adapter *adapt = chan->chan_adapter;
2090 struct mfii_softc *sc = device_private(adapt->adapt_dev);
2091 struct mfii_ccb *ccb;
2092 int timeout;
2093 int target;
2094
2095 switch (req) {
2096 case ADAPTER_REQ_GROW_RESOURCES:
2097 /* Not supported. */
2098 return;
2099 case ADAPTER_REQ_SET_XFER_MODE:
2100 {
2101 struct scsipi_xfer_mode *xm = arg;
2102 xm->xm_mode = PERIPH_CAP_TQING;
2103 xm->xm_period = 0;
2104 xm->xm_offset = 0;
2105 scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
2106 return;
2107 }
2108 case ADAPTER_REQ_RUN_XFER:
2109 break;
2110 }
2111
2112 xs = arg;
2113 periph = xs->xs_periph;
2114 target = periph->periph_target;
2115
2116 if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
2117 periph->periph_lun != 0) {
2118 xs->error = XS_SELTIMEOUT;
2119 scsipi_done(xs);
2120 return;
2121 }
2122
2123 if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
2124 xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
2125 /* the cache is stable storage, don't flush */
2126 xs->error = XS_NOERROR;
2127 xs->status = SCSI_OK;
2128 xs->resid = 0;
2129 scsipi_done(xs);
2130 return;
2131 }
2132
2133 ccb = mfii_get_ccb(sc);
2134 if (ccb == NULL) {
2135 xs->error = XS_RESOURCE_SHORTAGE;
2136 scsipi_done(xs);
2137 return;
2138 }
2139 mfii_scrub_ccb(ccb);
2140 ccb->ccb_cookie = xs;
2141 ccb->ccb_done = mfii_scsi_cmd_done;
2142 ccb->ccb_data = xs->data;
2143 ccb->ccb_len = xs->datalen;
2144
2145 timeout = mstohz(xs->timeout);
2146 if (timeout == 0)
2147 timeout = 1;
2148 callout_reset(&xs->xs_callout, timeout, mfii_scsi_cmd_tmo, ccb);
2149
2150 switch (xs->cmd->opcode) {
2151 case SCSI_READ_6_COMMAND:
2152 case READ_10:
2153 case READ_12:
2154 case READ_16:
2155 case SCSI_WRITE_6_COMMAND:
2156 case WRITE_10:
2157 case WRITE_12:
2158 case WRITE_16:
2159 if (mfii_scsi_cmd_io(sc, ccb, xs) != 0)
2160 goto stuffup;
2161 break;
2162
2163 default:
2164 if (mfii_scsi_cmd_cdb(sc, ccb, xs) != 0)
2165 goto stuffup;
2166 break;
2167 }
2168
2169 xs->error = XS_NOERROR;
2170 xs->resid = 0;
2171
2172 DNPRINTF(MFII_D_CMD, "%s: start io %d cmd %d\n", DEVNAME(sc), target,
2173 xs->cmd->opcode);
2174
2175 if (xs->xs_control & XS_CTL_POLL) {
2176 if (mfii_poll(sc, ccb) != 0)
2177 goto stuffup;
2178 return;
2179 }
2180
2181 mfii_start(sc, ccb);
2182
2183 return;
2184
2185 stuffup:
2186 xs->error = XS_DRIVER_STUFFUP;
2187 scsipi_done(xs);
2188 mfii_put_ccb(sc, ccb);
2189 }
2190
2191 static void
2192 mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
2193 {
2194 struct scsipi_xfer *xs = ccb->ccb_cookie;
2195 struct mpii_msg_scsi_io *io = ccb->ccb_request;
2196 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
2197
2198 if (callout_stop(&xs->xs_callout) != 0)
2199 return;
2200
2201 switch (ctx->status) {
2202 case MFI_STAT_OK:
2203 break;
2204
2205 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2206 xs->error = XS_SENSE;
2207 memset(&xs->sense, 0, sizeof(xs->sense));
2208 memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
2209 break;
2210
2211 case MFI_STAT_LD_OFFLINE:
2212 case MFI_STAT_DEVICE_NOT_FOUND:
2213 xs->error = XS_SELTIMEOUT;
2214 break;
2215
2216 default:
2217 xs->error = XS_DRIVER_STUFFUP;
2218 break;
2219 }
2220
2221 scsipi_done(xs);
2222 mfii_put_ccb(sc, ccb);
2223 }
2224
/*
 * mfii_scsi_cmd_io: build an accelerated LD read/write request.
 *
 * Fills in the MPI SCSI I/O frame and the RAID context that follows it
 * in the request slot, loads the data buffer into the 64-bit DMA map
 * via mfii_load_ccb(), and stores the SGE count in whichever context
 * field this IOP generation expects.  Returns 0 on success, 1 if the
 * DMA load failed.
 */
static int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context sits immediately after the MPI I/O frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL starts right after frame + context; offset is in 4-byte units */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	/* mfii_load_ccb() sets ccb_dma64 whenever there was data to map */
	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	/* the SGE count lives in a different context field per IOP rev */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2284
/*
 * mfii_scsi_cmd_cdb: build a generic CDB pass-through request for a
 * logical disk (non-read/write opcodes).  Same frame layout as
 * mfii_scsi_cmd_io(), but submitted with the generic SCSI request
 * type and with fewer RAID context fields filled in.
 * Returns 0 on success, 1 if the DMA load failed.
 */
static int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context sits immediately after the MPI I/O frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL starts right after frame + context; offset is in 4-byte units */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(periph->periph_lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	/* mfii_load_ccb() sets ccb_dma64 whenever there was data to map */
	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2332
2333 #if 0
/*
 * mfii_pd_scsi_cmd: submit a pass-through command to a physical disk.
 *
 * NOTE(review): this function is compiled out (#if 0) and still uses
 * OpenBSD interfaces (struct scsi_link, xs->io, scsi_done); it has not
 * been ported to NetBSD's scsipi layer.  Retained for reference only.
 */
void
mfii_pd_scsi_cmd(struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	// XXX timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	/*
	 * NOTE(review): mfii_pd_scsi_cmd_cdb() below is declared with
	 * three parameters (sc, ccb, xs) but is called here with two;
	 * this would not compile if the #if 0 were removed.
	 */
	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->xs_control, XS_CTL_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	// XXX timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}
2371
/*
 * mfii_pd_scsi_probe: decide whether a physical disk target should be
 * exposed as a SCSI device (only system/JBOD disks, LUN 0 only).
 *
 * NOTE(review): compiled out (#if 0); uses OpenBSD's struct scsi_link
 * and has not been ported to NetBSD.  Retained for reference only.
 */
int
mfii_pd_scsi_probe(struct scsi_link *link)
{
	struct mfii_softc *sc = link->adapter_softc;
	struct mfi_pd_details mpd;
	union mfi_mbox mbox;
	int rv;

	if (link->lun > 0)
		return (0);

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = htole16(link->target);

	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
	    MFII_DATA_IN, true);
	if (rv != 0)
		return (EIO);

	/* only disks in the SYSTEM (JBOD) state are passed through */
	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
		return (ENXIO);

	return (0);
}
2396
/*
 * mfii_pd_scsi_cmd_cdb: build a physical-disk pass-through request
 * (SYSPD I/O) addressed by MPI device handle rather than LD target.
 *
 * NOTE(review): compiled out (#if 0); uses OpenBSD's struct scsi_link
 * and has not been ported to NetBSD.  Retained for reference only.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	/* 0xffff means no device handle is known for this target */
	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
2452 #endif
2453
/*
 * mfii_load_ccb: map ccb_data for DMA and build the scatter/gather
 * list for a request.
 *
 * SGEs are written inline in the request slot starting at `sglp`.  If
 * the mapping needs more SGEs than fit in the remaining request space,
 * the last inline slot becomes a chain element pointing at the ccb's
 * off-frame SGL area and the overflow SGEs are written there.
 * Returns 0 on success (or with no data to map), 1 if the DMA map
 * could not be loaded.
 */
static int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;	/* chain element, if one is needed */
	bus_dmamap_t dmap = ccb->ccb_dmamap64;
	u_int space;
	int i;

	int error;

	if (ccb->ccb_len == 0)
		return (0);

	ccb->ccb_dma64 = true;
	error = bus_dmamap_load(sc->sc_dmat64, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* number of SGE slots left in the request frame after sglp */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last inline slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain_offset is in 16-byte units from the frame start */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* on reaching the chain element, continue in the SGL area */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the last SGE written as end-of-list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat64, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* push the off-frame SGL chunk out to the device as well */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2521
2522 static void
2523 mfii_scsi_cmd_tmo(void *p)
2524 {
2525 struct mfii_ccb *ccb = p;
2526 struct mfii_softc *sc = ccb->ccb_sc;
2527 bool start_abort;
2528
2529 printf("%s: cmd timeout ccb %p\n", DEVNAME(sc), p);
2530
2531 mutex_enter(&sc->sc_abort_mtx);
2532 start_abort = (SIMPLEQ_FIRST(&sc->sc_abort_list) == 0);
2533 SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2534 if (start_abort)
2535 workqueue_enqueue(sc->sc_abort_wq, &sc->sc_abort_work, NULL);
2536 mutex_exit(&sc->sc_abort_mtx);
2537 }
2538
/*
 * mfii_abort_task: workqueue handler that issues a task-management
 * abort for every ccb queued on sc_abort_list by the command timeout
 * handler (mfii_scsi_cmd_tmo).
 */
static void
mfii_abort_task(struct work *wk, void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* atomically take the whole abort list */
	mutex_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mutex_exit(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsipi_xfer *xs = ccb->ccb_cookie;
		struct scsipi_periph *periph = xs->xs_periph;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		if (!sc->sc_ld[periph->periph_target].ld_present) {
			/* device is gone */
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			mfii_put_ccb(sc, ccb);
			continue;
		}

		/*
		 * NOTE(review): mfii_get_ccb() returns NULL when the
		 * free list is empty or the adapter is shutting down,
		 * and accb is used unchecked below -- confirm this
		 * cannot happen on the abort path.
		 */
		accb = mfii_get_ccb(sc);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, periph->periph_target, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		/* completion of the abort finishes the original command */
		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2578
2579 static void
2580 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2581 uint16_t smid, uint8_t type, uint32_t flags)
2582 {
2583 struct mfii_task_mgmt *msg;
2584 struct mpii_msg_scsi_task_request *req;
2585
2586 msg = accb->ccb_request;
2587 req = &msg->mpii_request;
2588 req->dev_handle = dev_handle;
2589 req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2590 req->task_type = type;
2591 req->task_mid = htole16( smid);
2592 msg->flags = flags;
2593
2594 accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2595 accb->ccb_req.smid = le16toh(accb->ccb_smid);
2596 }
2597
2598 static void
2599 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2600 {
2601 struct mfii_ccb *ccb = accb->ccb_cookie;
2602 struct scsipi_xfer *xs = ccb->ccb_cookie;
2603
2604 /* XXX check accb completion? */
2605
2606 mfii_put_ccb(sc, accb);
2607 printf("%s: cmd aborted ccb %p\n", DEVNAME(sc), ccb);
2608
2609 xs->error = XS_TIMEOUT;
2610 scsipi_done(xs);
2611 mfii_put_ccb(sc, ccb);
2612 }
2613
2614 static struct mfii_ccb *
2615 mfii_get_ccb(struct mfii_softc *sc)
2616 {
2617 struct mfii_ccb *ccb;
2618
2619 mutex_enter(&sc->sc_ccb_mtx);
2620 if (!sc->sc_running) {
2621 ccb = NULL;
2622 } else {
2623 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2624 if (ccb != NULL)
2625 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2626 }
2627 mutex_exit(&sc->sc_ccb_mtx);
2628 return (ccb);
2629 }
2630
2631 static void
2632 mfii_scrub_ccb(struct mfii_ccb *ccb)
2633 {
2634 ccb->ccb_cookie = NULL;
2635 ccb->ccb_done = NULL;
2636 ccb->ccb_flags = 0;
2637 ccb->ccb_data = NULL;
2638 ccb->ccb_direction = MFII_DATA_NONE;
2639 ccb->ccb_dma64 = false;
2640 ccb->ccb_len = 0;
2641 ccb->ccb_sgl_len = 0;
2642 memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2643 memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2644 memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2645 }
2646
2647 static void
2648 mfii_put_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb)
2649 {
2650 mutex_enter(&sc->sc_ccb_mtx);
2651 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
2652 mutex_exit(&sc->sc_ccb_mtx);
2653 }
2654
2655 static int
2656 mfii_init_ccb(struct mfii_softc *sc)
2657 {
2658 struct mfii_ccb *ccb;
2659 u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2660 u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2661 u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2662 u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2663 u_int i;
2664 int error;
2665
2666 sc->sc_ccb = malloc(sc->sc_max_cmds * sizeof(struct mfii_ccb),
2667 M_DEVBUF, M_WAITOK|M_ZERO);
2668
2669 for (i = 0; i < sc->sc_max_cmds; i++) {
2670 ccb = &sc->sc_ccb[i];
2671 ccb->ccb_sc = sc;
2672
2673 /* create a dma map for transfer */
2674 error = bus_dmamap_create(sc->sc_dmat,
2675 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2676 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap32);
2677 if (error) {
2678 printf("%s: cannot create ccb dmamap32 (%d)\n",
2679 DEVNAME(sc), error);
2680 goto destroy;
2681 }
2682 error = bus_dmamap_create(sc->sc_dmat64,
2683 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2684 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap64);
2685 if (error) {
2686 printf("%s: cannot create ccb dmamap64 (%d)\n",
2687 DEVNAME(sc), error);
2688 goto destroy32;
2689 }
2690
2691 /* select i + 1'th request. 0 is reserved for events */
2692 ccb->ccb_smid = i + 1;
2693 ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2694 ccb->ccb_request = request + ccb->ccb_request_offset;
2695 ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2696 ccb->ccb_request_offset;
2697
2698 /* select i'th MFI command frame */
2699 ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2700 ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2701 ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2702 ccb->ccb_mfi_offset;
2703
2704 /* select i'th sense */
2705 ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2706 ccb->ccb_sense = (struct mfi_sense *)(sense +
2707 ccb->ccb_sense_offset);
2708 ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2709 ccb->ccb_sense_offset;
2710
2711 /* select i'th sgl */
2712 ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2713 sc->sc_max_sgl * i;
2714 ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2715 ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2716 ccb->ccb_sgl_offset;
2717
2718 mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2719 cv_init(&ccb->ccb_cv, "mfiiexec");
2720
2721 /* add ccb to queue */
2722 mfii_put_ccb(sc, ccb);
2723 }
2724
2725 return (0);
2726
2727 destroy32:
2728 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2729 destroy:
2730 /* free dma maps and ccb memory */
2731 while ((ccb = mfii_get_ccb(sc)) != NULL) {
2732 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2733 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap64);
2734 }
2735
2736 free(sc->sc_ccb, M_DEVBUF);
2737
2738 return (1);
2739 }
2740
2741 #if NBIO > 0
/*
 * mfii_ioctl: bio(4) ioctl dispatch.  Serializes all bio operations
 * under sc_lock and hands each request to its specific handler.
 * Returns 0 or an errno; ENOTTY for unknown ioctls.
 */
static int
mfii_ioctl(device_t dev, u_long cmd, void *addr)
{
	struct mfii_softc *sc = device_private(dev);
	int error = 0;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));

	mutex_enter(&sc->sc_lock);

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MFII_D_IOCTL, "inq\n");
		error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MFII_D_IOCTL, "vol\n");
		error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MFII_D_IOCTL, "disk\n");
		error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MFII_D_IOCTL, "alarm\n");
		error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCBLINK:
		DNPRINTF(MFII_D_IOCTL, "blink\n");
		error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
		break;

	case BIOCSETSTATE:
		DNPRINTF(MFII_D_IOCTL, "setstate\n");
		error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

#if 0
	case BIOCPATROL:
		DNPRINTF(MFII_D_IOCTL, "patrol\n");
		error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
		break;
#endif

	default:
		DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
		error = ENOTTY;
	}

	mutex_exit(&sc->sc_lock);

	return (error);
}
2799
/*
 * mfii_bio_getitall: refresh the cached controller configuration used
 * by the bio handlers.
 *
 * Queries the controller info, reads the full configuration (sizing
 * it with a preliminary one-element read), the logical disk list, and
 * per-LD details, caching the results in the softc (sc_cfg,
 * sc_ld_list, sc_ld_details, sc_no_pd).  Returns 0 on success or
 * EINVAL on any failure.
 */
static int
mfii_bio_getitall(struct mfii_softc *sc)
{
	int i, d, rv = EINVAL;
	size_t size;
	union mfi_mbox mbox;
	struct mfi_conf *cfg = NULL;
	struct mfi_ld_details *ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* mfc_size reports how large the complete configuration is */
	size = cfg->mfc_size;
	free(cfg, M_DEVBUF);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF);
	sc->sc_cfg = cfg;

	/* get all ld info */
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		/* LD count changed; reallocate the details array */
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox,
		    &sc->sc_ld_details[i], size, MFII_DATA_IN, false))
			goto done;

		/* disks per LD = drives per span * number of spans */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
2880
/*
 * mfii_ioctl_inq: BIOCINQ handler.  Reports the number of physical
 * disks and volumes (logical disks plus hot spares) and the device
 * name.  Returns 0 on success or EINVAL.
 */
static int
mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
{
	int rv = EINVAL;
	struct mfi_conf *cfg = NULL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));

	/* refresh the cached controller configuration */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* count unused disks as volumes */
	if (sc->sc_cfg == NULL)
		goto done;
	cfg = sc->sc_cfg;

	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
#if notyet
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
	    (bi->bi_nodisk - sc->sc_no_pd);
#endif
	/* tell bio who we are */
	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));

	rv = 0;
done:
	return (rv);
}
2913
/*
 * mfii_ioctl_vol: BIOCVOL handler.  Fills in status, RAID level,
 * size, disk count and stripe size for the requested volume.  Volume
 * ids beyond the logical disk list are handed to mfii_bio_hs() to
 * report hot spares and unused disks.  Returns 0 on success or EINVAL.
 */
static int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int i, per, rv = EINVAL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	strlcpy(bv->bv_dev, sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_name,
	    sizeof(bv->bv_dev));

	/* map the firmware LD state to a bio volume status */
	switch (sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* consistency check in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* background initialization in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

#if 0
	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;
#endif

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	/* spanned volumes are reported as RAID x0 (10, 50, 60) */
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk =
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */
	bv->bv_stripe_size =
	    (512 << sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_stripe_size)
	    / 1024; /* in KB */

	rv = 0;
done:
	return (rv);
}
3012
/*
 * mfii_ioctl_disk: BIOCDISK handler.  Locates the physical disk
 * backing (volume, disk index) via the array/span configuration and
 * fills in its enclosure position, state, size and vendor string.
 * Disk ids for volumes beyond the LD list are handled as hot spares
 * by mfii_bio_hs().  Returns 0 on success or EINVAL.
 */
static int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf *cfg;
	struct mfi_array *ar;
	struct mfi_ld_cfg *ld;
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	struct scsipi_inquiry_data *inqbuf;
	char vend[8+16+4+1], *vendp;
	int i, rv = EINVAL;
	int arr, vol, disk, span;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	/* 0xffff in the array slot marks a missing disk */
	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    MFII_DATA_IN, false))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false))
				continue;

			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no candidate found; report the missing disk as-is */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    MFII_DATA_IN, false)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* vend[] holds the INQUIRY vendor+product+revision fields */
	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

#if 0
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}
#endif

	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);

	return (rv);
}
3166
3167 static int
3168 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3169 {
3170 uint32_t opc;
3171 int rv = 0;
3172 int8_t ret;
3173 mfii_direction_t dir = MFII_DATA_NONE;
3174
3175 switch (ba->ba_opcode) {
3176 case BIOC_SADISABLE:
3177 opc = MR_DCMD_SPEAKER_DISABLE;
3178 break;
3179
3180 case BIOC_SAENABLE:
3181 opc = MR_DCMD_SPEAKER_ENABLE;
3182 break;
3183
3184 case BIOC_SASILENCE:
3185 opc = MR_DCMD_SPEAKER_SILENCE;
3186 break;
3187
3188 case BIOC_GASTATUS:
3189 opc = MR_DCMD_SPEAKER_GET;
3190 dir = MFII_DATA_IN;
3191 break;
3192
3193 case BIOC_SATEST:
3194 opc = MR_DCMD_SPEAKER_TEST;
3195 break;
3196
3197 default:
3198 DNPRINTF(MFII_D_IOCTL,
3199 "%s: mfii_ioctl_alarm biocalarm invalid opcode %x\n",
3200 DEVNAME(sc), ba->ba_opcode);
3201 return (EINVAL);
3202 }
3203
3204 if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), dir, false))
3205 rv = EINVAL;
3206 else
3207 if (ba->ba_opcode == BIOC_GASTATUS)
3208 ba->ba_status = ret;
3209 else
3210 ba->ba_status = 0;
3211
3212 return (rv);
3213 }
3214
/*
 * mfii_ioctl_blink: BIOCBLINK handler.  Finds the physical disk at
 * the given enclosure/slot in the PD list and issues a locate-LED
 * blink or unblink DCMD for it.  Returns 0 on success or EINVAL.
 */
static int
mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
{
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;
	uint32_t cmd;
	struct mfi_pd_list *pd;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
	    bb->bb_status);

	/* channel 0 means not in an enclosure so can't be blinked */
	if (bb->bb_channel == 0)
		return (EINVAL);

	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	/* search the PD list for the matching enclosure index and slot */
	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;

	switch (bb->bb_status) {
	case BIOC_SBUNBLINK:
		cmd = MR_DCMD_PD_UNBLINK;
		break;

	case BIOC_SBBLINK:
		cmd = MR_DCMD_PD_BLINK;
		break;

	case BIOC_SBALARM:
	default:
		DNPRINTF(MFII_D_IOCTL,
		    "%s: mfii_ioctl_blink biocblink invalid opcode %x\n",
		    DEVNAME(sc), bb->bb_status);
		goto done;
	}


	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, MFII_DATA_NONE, false))
		goto done;

	rv = 0;
done:
	free(pd, M_DEVBUF);
	return (rv);
}
3275
/*
 * mfii_makegood: bring a physical disk to the UNCONFIG_GOOD state.
 *
 * If the disk is UNCONFIG_BAD it is first forced to UNCONFIG_GOOD;
 * if it still carries foreign (DDF) metadata, a foreign-config scan
 * and clear are issued.  The state is re-read between steps because
 * each DCMD changes it.  Returns 0 on success, an mfii_mgmt error, or
 * ENXIO if the disk did not end up clean and UNCONFIG_GOOD.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details *pd;
	union mfi_mbox mbox;
	int rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		/* force the disk from UNCONFIG_BAD to UNCONFIG_GOOD */
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
		    MFII_DATA_NONE, false);
		if (rv != 0)
			goto done;
	}

	/* re-read the state after the possible state change */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		/* disk carries foreign metadata; scan and clear it */
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL,
		    fsi, sizeof(*fsi), MFII_DATA_IN, false);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL,
			    NULL, 0, MFII_DATA_NONE, false);
			if (rv != 0)
				goto done;
		}
	}

	/* final check: the disk must now be clean and UNCONFIG_GOOD */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3342
3343 static int
3344 mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
3345 {
3346 struct mfi_hotspare *hs;
3347 struct mfi_pd_details *pd;
3348 union mfi_mbox mbox;
3349 size_t size;
3350 int rv = EINVAL;
3351
3352 /* we really could skip and expect that inq took care of it */
3353 if (mfii_bio_getitall(sc)) {
3354 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3355 DEVNAME(sc));
3356 return (rv);
3357 }
3358 size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;
3359
3360 hs = malloc(size, M_DEVBUF, M_WAITOK);
3361 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3362
3363 memset(&mbox, 0, sizeof mbox);
3364 mbox.s[0] = pd_id;
3365 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3366 MFII_DATA_IN, false);
3367 if (rv != 0)
3368 goto done;
3369
3370 memset(hs, 0, size);
3371 hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
3372 hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
3373 rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size,
3374 MFII_DATA_OUT, false);
3375
3376 done:
3377 free(hs, M_DEVBUF);
3378 free(pd, M_DEVBUF);
3379
3380 return (rv);
3381 }
3382
/*
 * BIOCSETSTATE ioctl backend: change the firmware state of the physical
 * disk addressed by enclosure (bs_channel) and slot (bs_target) to
 * online, offline, hotspare or rebuild.  Returns 0 on success, EINVAL
 * if the disk cannot be found or the request is unsupported, or the
 * error of a failing firmware command.
 */
static int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
	    MFII_DATA_IN, false))
		goto done;

	/* locate the disk matching the requested enclosure/slot pair */
	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	/* fetch the disk details for its current sequence number */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	/* SET_STATE mbox layout: s[0] disk id, s[1] seq, b[4] new state */
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		/*
		 * A disk can only rebuild from the OFFLINE state.  If it
		 * is in any other state, first make it "unconfigured
		 * good" and turn it into a hotspare, which may itself
		 * kick off the rebuild.
		 */
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* re-read, the state changed under us above */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			/* rebuild the mbox with the fresh sequence number */
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
	    MFII_DATA_NONE, false);
done:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);
	return (rv);
}
3477
3478 #if 0
/*
 * BIOCPATROL ioctl backend: start/stop patrol reads, configure the
 * patrol schedule (manual/disabled/auto with interval), or report the
 * current patrol properties and status.  Currently compiled out
 * (#if 0).  Returns 0 on success, EINVAL on bad arguments or when a
 * firmware command fails.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t opc;
	int rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status status;
	uint32_t time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		/*
		 * NOTE(review): no data is transferred (NULL buffer,
		 * length 0), yet the direction is MFII_DATA_IN; this
		 * presumably should be MFII_DATA_NONE -- confirm before
		 * enabling this code.
		 */
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, MFII_DATA_IN, false))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* read-modify-write the patrol properties */
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			/* bp_autoival == 0 keeps the current interval */
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			/* bp_autonext == 0 keeps the current start time */
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					/* relative to the device's clock */
					prop.next_exec =
					    time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_OUT, false))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* translate the firmware op mode to bioc(4) terms */
		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		/* translate the firmware patrol state to bioc(4) terms */
		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL,
		    "%s: mfii_ioctl_patrol biocpatrol invalid opcode %x\n",
		    DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
3615 #endif
3616
3617 static int
3618 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3619 {
3620 struct mfi_conf *cfg;
3621 struct mfi_hotspare *hs;
3622 struct mfi_pd_details *pd;
3623 struct bioc_disk *sdhs;
3624 struct bioc_vol *vdhs;
3625 struct scsipi_inquiry_data *inqbuf;
3626 char vend[8+16+4+1], *vendp;
3627 int i, rv = EINVAL;
3628 uint32_t size;
3629 union mfi_mbox mbox;
3630
3631 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3632
3633 if (!bio_hs)
3634 return (EINVAL);
3635
3636 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3637
3638 /* send single element command to retrieve size for full structure */
3639 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3640 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
3641 MFII_DATA_IN, false))
3642 goto freeme;
3643
3644 size = cfg->mfc_size;
3645 free(cfg, M_DEVBUF);
3646
3647 /* memory for read config */
3648 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3649 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
3650 MFII_DATA_IN, false))
3651 goto freeme;
3652
3653 /* calculate offset to hs structure */
3654 hs = (struct mfi_hotspare *)(
3655 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3656 cfg->mfc_array_size * cfg->mfc_no_array +
3657 cfg->mfc_ld_size * cfg->mfc_no_ld);
3658
3659 if (volid < cfg->mfc_no_ld)
3660 goto freeme; /* not a hotspare */
3661
3662 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3663 goto freeme; /* not a hotspare */
3664
3665 /* offset into hotspare structure */
3666 i = volid - cfg->mfc_no_ld;
3667
3668 DNPRINTF(MFII_D_IOCTL,
3669 "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3670 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3671 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3672
3673 /* get pd fields */
3674 memset(&mbox, 0, sizeof(mbox));
3675 mbox.s[0] = hs[i].mhs_pd.mfp_id;
3676 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3677 MFII_DATA_IN, false)) {
3678 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3679 DEVNAME(sc));
3680 goto freeme;
3681 }
3682
3683 switch (type) {
3684 case MFI_MGMT_VD:
3685 vdhs = bio_hs;
3686 vdhs->bv_status = BIOC_SVONLINE;
3687 vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3688 vdhs->bv_level = -1; /* hotspare */
3689 vdhs->bv_nodisk = 1;
3690 break;
3691
3692 case MFI_MGMT_SD:
3693 sdhs = bio_hs;
3694 sdhs->bd_status = BIOC_SDHOTSPARE;
3695 sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3696 sdhs->bd_channel = pd->mpd_enc_idx;
3697 sdhs->bd_target = pd->mpd_enc_slot;
3698 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
3699 vendp = inqbuf->vendor;
3700 memcpy(vend, vendp, sizeof vend - 1);
3701 vend[sizeof vend - 1] = '\0';
3702 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3703 break;
3704
3705 default:
3706 goto freeme;
3707 }
3708
3709 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3710 rv = 0;
3711 freeme:
3712 free(pd, M_DEVBUF);
3713 free(cfg, M_DEVBUF);
3714
3715 return (rv);
3716 }
3717
3718 #endif /* NBIO > 0 */
3719
3720 #define MFI_BBU_SENSORS 4
3721
3722 static void
3723 mfii_bbu(struct mfii_softc *sc, envsys_data_t *edata)
3724 {
3725 struct mfi_bbu_status bbu;
3726 u_int32_t status;
3727 u_int32_t mask;
3728 u_int32_t soh_bad;
3729 int rv;
3730
3731 mutex_enter(&sc->sc_lock);
3732 rv = mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
3733 sizeof(bbu), MFII_DATA_IN, false);
3734 mutex_exit(&sc->sc_lock);
3735 if (rv != 0) {
3736 edata->state = ENVSYS_SINVALID;
3737 edata->value_cur = 0;
3738 return;
3739 }
3740
3741 switch (bbu.battery_type) {
3742 case MFI_BBU_TYPE_IBBU:
3743 case MFI_BBU_TYPE_IBBU09:
3744 case MFI_BBU_TYPE_CVPM02:
3745 mask = MFI_BBU_STATE_BAD_IBBU;
3746 soh_bad = 0;
3747 break;
3748 case MFI_BBU_TYPE_BBU:
3749 mask = MFI_BBU_STATE_BAD_BBU;
3750 soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3751 break;
3752
3753 case MFI_BBU_TYPE_NONE:
3754 default:
3755 edata->state = ENVSYS_SCRITICAL;
3756 edata->value_cur = 0;
3757 return;
3758 }
3759
3760 status = le32toh(bbu.fw_status) & mask;
3761 switch (edata->sensor) {
3762 case 0:
3763 edata->value_cur = (status || soh_bad) ? 0 : 1;
3764 edata->state =
3765 edata->value_cur ? ENVSYS_SVALID : ENVSYS_SCRITICAL;
3766 return;
3767 case 1:
3768 edata->value_cur = le16toh(bbu.voltage) * 1000;
3769 edata->state = ENVSYS_SVALID;
3770 return;
3771 case 2:
3772 edata->value_cur = (int16_t)le16toh(bbu.current) * 1000;
3773 edata->state = ENVSYS_SVALID;
3774 return;
3775 case 3:
3776 edata->value_cur =
3777 le16toh(bbu.temperature) * 1000000 + 273150000;
3778 edata->state = ENVSYS_SVALID;
3779 return;
3780 }
3781 }
3782
3783 static void
3784 mfii_refresh_ld_sensor(struct mfii_softc *sc, envsys_data_t *edata)
3785 {
3786 struct bioc_vol bv;
3787 int error;
3788
3789 memset(&bv, 0, sizeof(bv));
3790 bv.bv_volid = edata->sensor - MFI_BBU_SENSORS;
3791 mutex_enter(&sc->sc_lock);
3792 error = mfii_ioctl_vol(sc, &bv);
3793 mutex_exit(&sc->sc_lock);
3794 if (error)
3795 bv.bv_status = BIOC_SVINVALID;
3796 bio_vol_to_envsys(edata, &bv);
3797 }
3798
3799 static void
3800 mfii_init_ld_sensor(struct mfii_softc *sc, envsys_data_t *sensor, int i)
3801 {
3802 sensor->units = ENVSYS_DRIVE;
3803 sensor->state = ENVSYS_SINVALID;
3804 sensor->value_cur = ENVSYS_DRIVE_EMPTY;
3805 /* Enable monitoring for drive state changes */
3806 sensor->flags |= ENVSYS_FMONSTCHANGED;
3807 snprintf(sensor->desc, sizeof(sensor->desc), "%s:%d", DEVNAME(sc), i);
3808 }
3809
3810 static void
3811 mfii_attach_sensor(struct mfii_softc *sc, envsys_data_t *s)
3812 {
3813 if (sysmon_envsys_sensor_attach(sc->sc_sme, s))
3814 aprint_error_dev(sc->sc_dev,
3815 "failed to attach sensor %s\n", s->desc);
3816 }
3817
3818 static int
3819 mfii_create_sensors(struct mfii_softc *sc)
3820 {
3821 int i, rv;
3822 const int nsensors = MFI_BBU_SENSORS + MFI_MAX_LD;
3823
3824 sc->sc_sme = sysmon_envsys_create();
3825 sc->sc_sensors = malloc(sizeof(envsys_data_t) * nsensors,
3826 M_DEVBUF, M_NOWAIT | M_ZERO);
3827
3828 if (sc->sc_sensors == NULL) {
3829 aprint_error_dev(sc->sc_dev, "can't allocate envsys_data_t\n");
3830 return ENOMEM;
3831 }
3832 /* BBU */
3833 sc->sc_sensors[0].units = ENVSYS_INDICATOR;
3834 sc->sc_sensors[0].state = ENVSYS_SINVALID;
3835 sc->sc_sensors[0].value_cur = 0;
3836 sc->sc_sensors[1].units = ENVSYS_SVOLTS_DC;
3837 sc->sc_sensors[1].state = ENVSYS_SINVALID;
3838 sc->sc_sensors[1].value_cur = 0;
3839 sc->sc_sensors[2].units = ENVSYS_SAMPS;
3840 sc->sc_sensors[2].state = ENVSYS_SINVALID;
3841 sc->sc_sensors[2].value_cur = 0;
3842 sc->sc_sensors[3].units = ENVSYS_STEMP;
3843 sc->sc_sensors[3].state = ENVSYS_SINVALID;
3844 sc->sc_sensors[3].value_cur = 0;
3845
3846 if (ISSET(le32toh(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3847 sc->sc_bbuok = true;
3848 sc->sc_sensors[0].flags |= ENVSYS_FMONCRITICAL;
3849 snprintf(sc->sc_sensors[0].desc, sizeof(sc->sc_sensors[0].desc),
3850 "%s BBU state", DEVNAME(sc));
3851 snprintf(sc->sc_sensors[1].desc, sizeof(sc->sc_sensors[1].desc),
3852 "%s BBU voltage", DEVNAME(sc));
3853 snprintf(sc->sc_sensors[2].desc, sizeof(sc->sc_sensors[2].desc),
3854 "%s BBU current", DEVNAME(sc));
3855 snprintf(sc->sc_sensors[3].desc, sizeof(sc->sc_sensors[3].desc),
3856 "%s BBU temperature", DEVNAME(sc));
3857 for (i = 0; i < MFI_BBU_SENSORS; i++) {
3858 mfii_attach_sensor(sc, &sc->sc_sensors[i]);
3859 }
3860 }
3861
3862 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3863 mfii_init_ld_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS], i);
3864 mfii_attach_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS]);
3865 }
3866
3867 sc->sc_sme->sme_name = DEVNAME(sc);
3868 sc->sc_sme->sme_cookie = sc;
3869 sc->sc_sme->sme_refresh = mfii_refresh_sensor;
3870 rv = sysmon_envsys_register(sc->sc_sme);
3871 if (rv) {
3872 aprint_error_dev(sc->sc_dev,
3873 "unable to register with sysmon (rv = %d)\n", rv);
3874 sysmon_envsys_destroy(sc->sc_sme);
3875 sc->sc_sme = NULL;
3876 }
3877 return rv;
3878
3879 }
3880
3881 static int
3882 mfii_destroy_sensors(struct mfii_softc *sc)
3883 {
3884 if (sc->sc_sme == NULL)
3885 return 0;
3886 sysmon_envsys_unregister(sc->sc_sme);
3887 sc->sc_sme = NULL;
3888 free(sc->sc_sensors, M_DEVBUF);
3889 return 0;
3890 }
3891
3892 static void
3893 mfii_refresh_sensor(struct sysmon_envsys *sme, envsys_data_t *edata)
3894 {
3895 struct mfii_softc *sc = sme->sme_cookie;
3896
3897 if (edata->sensor >= MFI_BBU_SENSORS + MFI_MAX_LD)
3898 return;
3899
3900 if (edata->sensor < MFI_BBU_SENSORS) {
3901 if (sc->sc_bbuok)
3902 mfii_bbu(sc, edata);
3903 } else {
3904 mfii_refresh_ld_sensor(sc, edata);
3905 }
3906 }
3907