mfii.c revision 1.17 1 /* $NetBSD: mfii.c,v 1.17 2022/06/27 15:32:30 msaitoh Exp $ */
2 /* $OpenBSD: mfii.c,v 1.58 2018/08/14 05:22:21 jmatthew Exp $ */
3
4 /*
5 * Copyright (c) 2018 Manuel Bouyer <Manuel.Bouyer (at) lip6.fr>
6 * Copyright (c) 2012 David Gwynne <dlg (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include <sys/cdefs.h>
22 __KERNEL_RCSID(0, "$NetBSD: mfii.c,v 1.17 2022/06/27 15:32:30 msaitoh Exp $");
23
24 #include "bio.h"
25
26 #include <sys/atomic.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/ioctl.h>
31 #include <sys/device.h>
32 #include <sys/kernel.h>
33 #include <sys/proc.h>
34 #include <sys/cpu.h>
35 #include <sys/conf.h>
36 #include <sys/kauth.h>
37 #include <sys/workqueue.h>
38 #include <sys/malloc.h>
39
40 #include <uvm/uvm_param.h>
41
42 #include <dev/pci/pcidevs.h>
43 #include <dev/pci/pcivar.h>
44
45 #include <sys/bus.h>
46
47 #include <dev/sysmon/sysmonvar.h>
48 #include <sys/envsys.h>
49
50 #include <dev/scsipi/scsipi_all.h>
51 #include <dev/scsipi/scsi_all.h>
52 #include <dev/scsipi/scsi_spc.h>
53 #include <dev/scsipi/scsipi_disk.h>
54 #include <dev/scsipi/scsi_disk.h>
55 #include <dev/scsipi/scsiconf.h>
56
57 #if NBIO > 0
58 #include <dev/biovar.h>
59 #endif /* NBIO > 0 */
60
61 #include <dev/ic/mfireg.h>
62 #include <dev/pci/mpiireg.h>
63
64 #define MFII_BAR 0x14
65 #define MFII_BAR_35 0x10
66 #define MFII_PCI_MEMSIZE 0x2000 /* 8k */
67
68 #define MFII_OSTS_INTR_VALID 0x00000009
69 #define MFII_RPI 0x6c /* reply post host index */
70 #define MFII_OSP2 0xb4 /* outbound scratch pad 2 */
71 #define MFII_OSP3 0xb8 /* outbound scratch pad 3 */
72
73 #define MFII_REQ_TYPE_SCSI MPII_REQ_DESCR_SCSI_IO
74 #define MFII_REQ_TYPE_LDIO (0x7 << 1)
75 #define MFII_REQ_TYPE_MFA (0x1 << 1)
76 #define MFII_REQ_TYPE_NO_LOCK (0x2 << 1)
77 #define MFII_REQ_TYPE_HI_PRI (0x6 << 1)
78
79 #define MFII_REQ_MFA(_a) htole64((_a) | MFII_REQ_TYPE_MFA)
80
81 #define MFII_FUNCTION_PASSTHRU_IO (0xf0)
82 #define MFII_FUNCTION_LDIO_REQUEST (0xf1)
83
84 #define MFII_MAX_CHAIN_UNIT 0x00400000
85 #define MFII_MAX_CHAIN_MASK 0x000003E0
86 #define MFII_MAX_CHAIN_SHIFT 5
87
88 #define MFII_256K_IO 128
89 #define MFII_1MB_IO (MFII_256K_IO * 4)
90
91 #define MFII_CHAIN_FRAME_MIN 1024
92
93 struct mfii_request_descr {
94 u_int8_t flags;
95 u_int8_t msix_index;
96 u_int16_t smid;
97
98 u_int16_t lmid;
99 u_int16_t dev_handle;
100 } __packed;
101
102 #define MFII_RAID_CTX_IO_TYPE_SYSPD (0x1 << 4)
103 #define MFII_RAID_CTX_TYPE_CUDA (0x2 << 4)
104
105 struct mfii_raid_context {
106 u_int8_t type_nseg;
107 u_int8_t _reserved1;
108 u_int16_t timeout_value;
109
110 u_int16_t reg_lock_flags;
111 #define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN (0x08)
112 #define MFII_RAID_CTX_RL_FLAGS_CPU0 (0x00)
113 #define MFII_RAID_CTX_RL_FLAGS_CPU1 (0x10)
114 #define MFII_RAID_CTX_RL_FLAGS_CUDA (0x80)
115
116 #define MFII_RAID_CTX_ROUTING_FLAGS_SQN (1 << 4)
117 #define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
118 u_int16_t virtual_disk_target_id;
119
120 u_int64_t reg_lock_row_lba;
121
122 u_int32_t reg_lock_length;
123
124 u_int16_t next_lm_id;
125 u_int8_t ex_status;
126 u_int8_t status;
127
128 u_int8_t raid_flags;
129 u_int8_t num_sge;
130 u_int16_t config_seq_num;
131
132 u_int8_t span_arm;
133 u_int8_t _reserved3[3];
134 } __packed;
135
136 struct mfii_sge {
137 u_int64_t sg_addr;
138 u_int32_t sg_len;
139 u_int16_t _reserved;
140 u_int8_t sg_next_chain_offset;
141 u_int8_t sg_flags;
142 } __packed;
143
144 #define MFII_SGE_ADDR_MASK (0x03)
145 #define MFII_SGE_ADDR_SYSTEM (0x00)
146 #define MFII_SGE_ADDR_IOCDDR (0x01)
147 #define MFII_SGE_ADDR_IOCPLB (0x02)
148 #define MFII_SGE_ADDR_IOCPLBNTA (0x03)
149 #define MFII_SGE_END_OF_LIST (0x40)
150 #define MFII_SGE_CHAIN_ELEMENT (0x80)
151
152 #define MFII_REQUEST_SIZE 256
153
154 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
155
156 #define MFII_MAX_ROW 32
157 #define MFII_MAX_ARRAY 128
158
159 struct mfii_array_map {
160 uint16_t mam_pd[MFII_MAX_ROW];
161 } __packed;
162
163 struct mfii_dev_handle {
164 uint16_t mdh_cur_handle;
165 uint8_t mdh_valid;
166 uint8_t mdh_reserved;
167 uint16_t mdh_handle[2];
168 } __packed;
169
170 struct mfii_ld_map {
171 uint32_t mlm_total_size;
172 uint32_t mlm_reserved1[5];
173 uint32_t mlm_num_lds;
174 uint32_t mlm_reserved2;
175 uint8_t mlm_tgtid_to_ld[2 * MFI_MAX_LD];
176 uint8_t mlm_pd_timeout;
177 uint8_t mlm_reserved3[7];
178 struct mfii_array_map mlm_am[MFII_MAX_ARRAY];
179 struct mfii_dev_handle mlm_dev_handle[MFI_MAX_PD];
180 } __packed;
181
182 struct mfii_task_mgmt {
183 union {
184 uint8_t request[128];
185 struct mpii_msg_scsi_task_request
186 mpii_request;
187 } __packed __aligned(8);
188
189 union {
190 uint8_t reply[128];
191 uint32_t flags;
192 #define MFII_TASK_MGMT_FLAGS_LD (1 << 0)
193 #define MFII_TASK_MGMT_FLAGS_PD (1 << 1)
194 struct mpii_msg_scsi_task_reply
195 mpii_reply;
196 } __packed __aligned(8);
197 } __packed __aligned(8);
198
199 /* We currently don't know the full details of the following struct */
200 struct mfii_foreign_scan_cfg {
201 char data[24];
202 } __packed;
203
204 struct mfii_foreign_scan_info {
205 uint32_t count; /* Number of foreign configs found */
206 struct mfii_foreign_scan_cfg cfgs[8];
207 } __packed;
208
209 struct mfii_dmamem {
210 bus_dmamap_t mdm_map;
211 bus_dma_segment_t mdm_seg;
212 size_t mdm_size;
213 void * mdm_kva;
214 };
215 #define MFII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
216 #define MFII_DMA_LEN(_mdm) ((_mdm)->mdm_size)
217 #define MFII_DMA_DVA(_mdm) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
218 #define MFII_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva)
219
220 struct mfii_softc;
221
222 typedef enum mfii_direction {
223 MFII_DATA_NONE = 0,
224 MFII_DATA_IN,
225 MFII_DATA_OUT
226 } mfii_direction_t;
227
/*
 * Per-command control block.  Each ccb owns a fixed slice of the dma
 * memory areas allocated in mfii_attach(): an MPII request frame
 * (ccb_request), an MFI frame (ccb_mfi), a sense buffer (ccb_sense)
 * and an SGL array (ccb_sgl).  For each slice, the *_dva member is its
 * device (bus) address and the *_offset member is its byte offset
 * inside the parent mfii_dmamem, for use with bus_dmamap_sync().
 */
struct mfii_ccb {
	struct mfii_softc	*ccb_sc;	/* owning controller */
	void			*ccb_request;	/* kva of MPII request frame */
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	void			*ccb_mfi;	/* kva of MFI command frame */
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	/* also reused to hold proxied dcmd frames, see mfii_dcmd_frame() */
	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;	/* kva of chain SGL slice */
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;	/* descriptor posted to hw */

	bus_dmamap_t		ccb_dmamap64;	/* data map, 64-bit dma tag */
	bus_dmamap_t		ccb_dmamap32;	/* data map, 32-bit dma tag */
	bool			ccb_dma64;	/* which map is loaded */

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	mfii_direction_t	ccb_direction;

	void			*ccb_cookie;	/* caller's completion context */
	kmutex_t		ccb_mtx;	/* paired with ccb_cv */
	kcondvar_t		ccb_cv;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;	/* SMID used in ccb_req */
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;	/* free list / abort list link */
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
271
272 struct mfii_iop {
273 int bar;
274 int num_sge_loc;
275 #define MFII_IOP_NUM_SGE_LOC_ORIG 0
276 #define MFII_IOP_NUM_SGE_LOC_35 1
277 u_int16_t ldio_ctx_reg_lock_flags;
278 u_int8_t ldio_req_type;
279 u_int8_t ldio_ctx_type_nseg;
280 u_int8_t sge_flag_chain;
281 u_int8_t sge_flag_eol;
282 };
283
/*
 * Per-controller softc.  Holds the register window, the dma tags and
 * the per-command dma areas carved up among the ccbs, plus the scsipi
 * attachment and the bio/envsys management state.
 */
struct mfii_softc {
	device_t		sc_dev;
	struct scsipi_channel	sc_chan;
	struct scsipi_adapter	sc_adapt;

	const struct mfii_iop	*sc_iop;	/* per-chip-family parameters */

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;
	bus_dma_tag_t		sc_dmat64;	/* == sc_dmat if no 64bit dma */
	bool			sc_64bit_dma;

	void			*sc_ih;		/* interrupt handle */

	kmutex_t		sc_ccb_mtx;	/* sc_ccb_freeq, sc_running */
	kmutex_t		sc_post_mtx;

	u_int			sc_max_fw_cmds;	/* commands the fw supports */
	u_int			sc_max_cmds;	/* commands we actually use */
	u_int			sc_max_sgl;	/* max SGEs per command */

	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	kmutex_t		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	struct mfii_dmamem	*sc_requests;	/* MPII request frames */
	struct mfii_dmamem	*sc_mfi;	/* MFI command frames */
	struct mfii_dmamem	*sc_sense;	/* sense bufs (+ dcmd frames) */
	struct mfii_dmamem	*sc_sgl;	/* chain SGL arrays */

	struct mfii_ccb		*sc_ccb;	/* ccb array */
	struct mfii_ccb_list	sc_ccb_freeq;

	struct mfii_ccb		*sc_aen_ccb;	/* ccb dedicated to AENs */
	struct workqueue	*sc_aen_wq;
	struct work		sc_aen_work;

	kmutex_t		sc_abort_mtx;	/* protects sc_abort_list */
	struct mfii_ccb_list	sc_abort_list;
	struct workqueue	*sc_abort_wq;
	struct work		sc_abort_work;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		bool		ld_present;
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFI_MAX_LD];
	/* target id -> sc_ld_list index, -1 when no ld at that target */
	int			sc_target_lds[MFI_MAX_LD];

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfi_ld_list	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details;	/* array to all logical disks */
	int			sc_no_pd;	/* used physical disks */
	int			sc_ld_sz;	/* sizeof sc_ld_details */

	/* mgmt lock */
	kmutex_t		sc_lock;
	bool			sc_running;	/* cleared by mfii_shutdown() */

	/* sensors */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
	bool			sc_bbuok;	/* battery backup unit ok */

	device_t		sc_child;	/* child from mfii_rescan() */
};
360
361 // #define MFII_DEBUG
362 #ifdef MFII_DEBUG
363 #define DPRINTF(x...) do { if (mfii_debug) printf(x); } while(0)
364 #define DNPRINTF(n,x...) do { if (mfii_debug & n) printf(x); } while(0)
365 #define MFII_D_CMD 0x0001
366 #define MFII_D_INTR 0x0002
367 #define MFII_D_MISC 0x0004
368 #define MFII_D_DMA 0x0008
369 #define MFII_D_IOCTL 0x0010
370 #define MFII_D_RW 0x0020
371 #define MFII_D_MEM 0x0040
372 #define MFII_D_CCB 0x0080
373 uint32_t mfii_debug = 0
374 /* | MFII_D_CMD */
375 /* | MFII_D_INTR */
376 | MFII_D_MISC
377 /* | MFII_D_DMA */
378 /* | MFII_D_IOCTL */
379 /* | MFII_D_RW */
380 /* | MFII_D_MEM */
381 /* | MFII_D_CCB */
382 ;
383 #else
384 #define DPRINTF(x...)
385 #define DNPRINTF(n,x...)
386 #endif
387
388 static int mfii_match(device_t, cfdata_t, void *);
389 static void mfii_attach(device_t, device_t, void *);
390 static int mfii_detach(device_t, int);
391 static int mfii_rescan(device_t, const char *, const int *);
392 static void mfii_childdetached(device_t, device_t);
393 static bool mfii_suspend(device_t, const pmf_qual_t *);
394 static bool mfii_resume(device_t, const pmf_qual_t *);
395 static bool mfii_shutdown(device_t, int);
396
397
398 CFATTACH_DECL3_NEW(mfii, sizeof(struct mfii_softc),
399 mfii_match, mfii_attach, mfii_detach, NULL, mfii_rescan,
400 mfii_childdetached, DVF_DETACH_SHUTDOWN);
401
402 static void mfii_scsipi_request(struct scsipi_channel *,
403 scsipi_adapter_req_t, void *);
404 static void mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
405
406 #define DEVNAME(_sc) (device_xname((_sc)->sc_dev))
407
408 static u_int32_t mfii_read(struct mfii_softc *, bus_size_t);
409 static void mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
410
411 static struct mfii_dmamem * mfii_dmamem_alloc(struct mfii_softc *, size_t);
412 static void mfii_dmamem_free(struct mfii_softc *,
413 struct mfii_dmamem *);
414
415 static struct mfii_ccb * mfii_get_ccb(struct mfii_softc *);
416 static void mfii_put_ccb(struct mfii_softc *, struct mfii_ccb *);
417 static int mfii_init_ccb(struct mfii_softc *);
418 static void mfii_scrub_ccb(struct mfii_ccb *);
419
420 static int mfii_transition_firmware(struct mfii_softc *);
421 static int mfii_initialise_firmware(struct mfii_softc *);
422 static int mfii_get_info(struct mfii_softc *);
423
424 static void mfii_start(struct mfii_softc *, struct mfii_ccb *);
425 static void mfii_done(struct mfii_softc *, struct mfii_ccb *);
426 static int mfii_poll(struct mfii_softc *, struct mfii_ccb *);
427 static void mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
428 static int mfii_exec(struct mfii_softc *, struct mfii_ccb *);
429 static void mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
430 static int mfii_my_intr(struct mfii_softc *);
431 static int mfii_intr(void *);
432 static void mfii_postq(struct mfii_softc *);
433
434 static int mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
435 void *, int);
436 static int mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
437 void *, int);
438
439 static int mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
440
441 static int mfii_mgmt(struct mfii_softc *, uint32_t,
442 const union mfi_mbox *, void *, size_t,
443 mfii_direction_t, bool);
444 static int mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
445 uint32_t, const union mfi_mbox *, void *, size_t,
446 mfii_direction_t, bool);
447 static void mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
448
449 static int mfii_scsi_cmd_io(struct mfii_softc *,
450 struct mfii_ccb *, struct scsipi_xfer *);
451 static int mfii_scsi_cmd_cdb(struct mfii_softc *,
452 struct mfii_ccb *, struct scsipi_xfer *);
453 static void mfii_scsi_cmd_tmo(void *);
454
455 static void mfii_abort_task(struct work *, void *);
456 static void mfii_abort(struct mfii_softc *, struct mfii_ccb *,
457 uint16_t, uint16_t, uint8_t, uint32_t);
458 static void mfii_scsi_cmd_abort_done(struct mfii_softc *,
459 struct mfii_ccb *);
460
461 static int mfii_aen_register(struct mfii_softc *);
462 static void mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
463 struct mfii_dmamem *, uint32_t);
464 static void mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
465 static void mfii_aen(struct work *, void *);
466 static void mfii_aen_unregister(struct mfii_softc *);
467
468 static void mfii_aen_pd_insert(struct mfii_softc *,
469 const struct mfi_evtarg_pd_address *);
470 static void mfii_aen_pd_remove(struct mfii_softc *,
471 const struct mfi_evtarg_pd_address *);
472 static void mfii_aen_pd_state_change(struct mfii_softc *,
473 const struct mfi_evtarg_pd_state *);
474 static void mfii_aen_ld_update(struct mfii_softc *);
475
476 #if NBIO > 0
477 static int mfii_ioctl(device_t, u_long, void *);
478 static int mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
479 static int mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
480 static int mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
481 static int mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
482 static int mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
483 static int mfii_ioctl_setstate(struct mfii_softc *,
484 struct bioc_setstate *);
485 static int mfii_bio_hs(struct mfii_softc *, int, int, void *);
486 static int mfii_bio_getitall(struct mfii_softc *);
487 #endif /* NBIO > 0 */
488
489 #if 0
490 static const char *mfi_bbu_indicators[] = {
491 "pack missing",
492 "voltage low",
493 "temp high",
494 "charge active",
495 "discharge active",
496 "learn cycle req'd",
497 "learn cycle active",
498 "learn cycle failed",
499 "learn cycle timeout",
500 "I2C errors",
501 "replace pack",
502 "low capacity",
503 "periodic learn req'd"
504 };
505 #endif
506
507 static void mfii_init_ld_sensor(struct mfii_softc *, envsys_data_t *, int);
508 static void mfii_refresh_ld_sensor(struct mfii_softc *, envsys_data_t *);
509 static void mfii_attach_sensor(struct mfii_softc *, envsys_data_t *);
510 static int mfii_create_sensors(struct mfii_softc *);
511 static int mfii_destroy_sensors(struct mfii_softc *);
512 static void mfii_refresh_sensor(struct sysmon_envsys *, envsys_data_t *);
513 static void mfii_bbu(struct mfii_softc *, envsys_data_t *);
514
515 /*
516 * mfii boards support asynchronous (and non-polled) completion of
517 * dcmds by proxying them through a passthru mpii command that points
518 * at a dcmd frame. since the passthru command is submitted like
519 * the scsi commands using an SMID in the request descriptor,
520 * ccb_request memory * must contain the passthru command because
521 * that is what the SMID refers to. this means ccb_request cannot
522 * contain the dcmd. rather than allocating separate dma memory to
523 * hold the dcmd, we reuse the sense memory buffer for it.
524 */
525
526 static void mfii_dcmd_start(struct mfii_softc *, struct mfii_ccb *);
527
/*
 * Zero the proxied dcmd frame of a ccb.  The dcmd lives in the ccb's
 * sense buffer rather than in its own dma memory (see the comment
 * above about passthru dcmds), so scrubbing the frame means clearing
 * that buffer.
 */
static inline void
mfii_dcmd_scrub(struct mfii_ccb *ccb)
{
	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
}
533
/*
 * Return the ccb's sense buffer reinterpreted as an MFI dcmd frame.
 * The CTASSERT guarantees at compile time that the frame actually
 * fits inside the sense buffer it is overlaid on.
 */
static inline struct mfi_dcmd_frame *
mfii_dcmd_frame(struct mfii_ccb *ccb)
{
	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
}
540
/*
 * bus_dmamap_sync() the slice of the shared sense area that holds this
 * ccb's dcmd frame, with the caller-supplied PRE/POST read/write flags.
 */
static inline void
mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
}
547
548 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
549
550 static const struct mfii_iop mfii_iop_thunderbolt = {
551 MFII_BAR,
552 MFII_IOP_NUM_SGE_LOC_ORIG,
553 0,
554 MFII_REQ_TYPE_LDIO,
555 0,
556 MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
557 0
558 };
559
560 /*
561 * a lot of these values depend on us not implementing fastpath yet.
562 */
563 static const struct mfii_iop mfii_iop_25 = {
564 MFII_BAR,
565 MFII_IOP_NUM_SGE_LOC_ORIG,
566 MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
567 MFII_REQ_TYPE_NO_LOCK,
568 MFII_RAID_CTX_TYPE_CUDA | 0x1,
569 MFII_SGE_CHAIN_ELEMENT,
570 MFII_SGE_END_OF_LIST
571 };
572
573 static const struct mfii_iop mfii_iop_35 = {
574 MFII_BAR_35,
575 MFII_IOP_NUM_SGE_LOC_35,
576 MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
577 MFII_REQ_TYPE_NO_LOCK,
578 MFII_RAID_CTX_TYPE_CUDA | 0x1,
579 MFII_SGE_CHAIN_ELEMENT,
580 MFII_SGE_END_OF_LIST
581 };
582
583 struct mfii_device {
584 pcireg_t mpd_vendor;
585 pcireg_t mpd_product;
586 const struct mfii_iop *mpd_iop;
587 };
588
589 static const struct mfii_device mfii_devices[] = {
590 /* Fusion */
591 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
592 &mfii_iop_thunderbolt },
593 /* Fury */
594 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
595 &mfii_iop_25 },
596 /* Invader */
597 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
598 &mfii_iop_25 },
599 /* Crusader */
600 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
601 &mfii_iop_35 },
602 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
603 &mfii_iop_35 },
604 /* Ventura */
605 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
606 &mfii_iop_35 },
607 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
608 &mfii_iop_35 },
609 /* Tomcat */
610 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
611 &mfii_iop_35 },
612 /* Harpoon */
613 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
614 &mfii_iop_35 }
615 };
616
617 static const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
618
619 static const struct mfii_iop *
620 mfii_find_iop(struct pci_attach_args *pa)
621 {
622 const struct mfii_device *mpd;
623 int i;
624
625 for (i = 0; i < __arraycount(mfii_devices); i++) {
626 mpd = &mfii_devices[i];
627
628 if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
629 mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
630 return (mpd->mpd_iop);
631 }
632
633 return (NULL);
634 }
635
636 static int
637 mfii_match(device_t parent, cfdata_t match, void *aux)
638 {
639 return ((mfii_find_iop(aux) != NULL) ? 2 : 0);
640 }
641
642 static void
643 mfii_attach(device_t parent, device_t self, void *aux)
644 {
645 struct mfii_softc *sc = device_private(self);
646 struct pci_attach_args *pa = aux;
647 pcireg_t memtype;
648 pci_intr_handle_t ih;
649 char intrbuf[PCI_INTRSTR_LEN];
650 const char *intrstr;
651 u_int32_t status, scpad2, scpad3;
652 int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
653 struct scsipi_adapter *adapt = &sc->sc_adapt;
654 struct scsipi_channel *chan = &sc->sc_chan;
655
656 /* init sc */
657 sc->sc_dev = self;
658 sc->sc_iop = mfii_find_iop(aux);
659 sc->sc_dmat = pa->pa_dmat;
660 if (pci_dma64_available(pa)) {
661 sc->sc_dmat64 = pa->pa_dmat64;
662 sc->sc_64bit_dma = 1;
663 } else {
664 sc->sc_dmat64 = pa->pa_dmat;
665 sc->sc_64bit_dma = 0;
666 }
667 SIMPLEQ_INIT(&sc->sc_ccb_freeq);
668 mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
669 mutex_init(&sc->sc_post_mtx, MUTEX_DEFAULT, IPL_BIO);
670 mutex_init(&sc->sc_reply_postq_mtx, MUTEX_DEFAULT, IPL_BIO);
671
672 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
673
674 sc->sc_aen_ccb = NULL;
675 snprintf(intrbuf, sizeof(intrbuf) - 1, "%saen", device_xname(self));
676 workqueue_create(&sc->sc_aen_wq, intrbuf, mfii_aen, sc,
677 PRI_BIO, IPL_BIO, WQ_MPSAFE);
678
679 snprintf(intrbuf, sizeof(intrbuf) - 1, "%sabrt", device_xname(self));
680 workqueue_create(&sc->sc_abort_wq, intrbuf, mfii_abort_task,
681 sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
682
683 mutex_init(&sc->sc_abort_mtx, MUTEX_DEFAULT, IPL_BIO);
684 SIMPLEQ_INIT(&sc->sc_abort_list);
685
686 /* wire up the bus shizz */
687 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
688 memtype |= PCI_MAPREG_MEM_TYPE_32BIT;
689 if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
690 &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)) {
691 aprint_error(": unable to map registers\n");
692 return;
693 }
694
695 /* disable interrupts */
696 mfii_write(sc, MFI_OMSK, 0xffffffff);
697
698 if (pci_intr_map(pa, &ih) != 0) {
699 aprint_error(": unable to map interrupt\n");
700 goto pci_unmap;
701 }
702 intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf));
703 pci_intr_setattr(pa->pa_pc, &ih, PCI_INTR_MPSAFE, true);
704
705 /* lets get started */
706 if (mfii_transition_firmware(sc))
707 goto pci_unmap;
708 sc->sc_running = true;
709
710 /* determine max_cmds (refer to the Linux megaraid_sas driver) */
711 scpad3 = mfii_read(sc, MFII_OSP3);
712 status = mfii_fw_state(sc);
713 sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
714 if (sc->sc_max_fw_cmds == 0)
715 sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
716 /*
717 * reduce max_cmds by 1 to ensure that the reply queue depth does not
718 * exceed FW supplied max_fw_cmds.
719 */
720 sc->sc_max_cmds = uimin(sc->sc_max_fw_cmds, 1024) - 1;
721
722 /* determine max_sgl (refer to the Linux megaraid_sas driver) */
723 scpad2 = mfii_read(sc, MFII_OSP2);
724 chain_frame_sz =
725 ((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
726 ((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
727 if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
728 chain_frame_sz = MFII_CHAIN_FRAME_MIN;
729
730 nsge_in_io = (MFII_REQUEST_SIZE -
731 sizeof(struct mpii_msg_scsi_io) -
732 sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
733 nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
734
735 /* round down to nearest power of two */
736 sc->sc_max_sgl = 1;
737 while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
738 sc->sc_max_sgl <<= 1;
739
740 DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
741 DEVNAME(sc), status, scpad2, scpad3);
742 DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
743 DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
744 DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
745 "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
746 sc->sc_max_sgl);
747
748 /* sense memory */
749 CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
750 sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
751 if (sc->sc_sense == NULL) {
752 aprint_error(": unable to allocate sense memory\n");
753 goto pci_unmap;
754 }
755
756 /* reply post queue */
757 sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
758
759 sc->sc_reply_postq = mfii_dmamem_alloc(sc,
760 sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
761 if (sc->sc_reply_postq == NULL)
762 goto free_sense;
763
764 memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
765 MFII_DMA_LEN(sc->sc_reply_postq));
766
767 /* MPII request frame array */
768 sc->sc_requests = mfii_dmamem_alloc(sc,
769 MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
770 if (sc->sc_requests == NULL)
771 goto free_reply_postq;
772
773 /* MFI command frame array */
774 sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
775 if (sc->sc_mfi == NULL)
776 goto free_requests;
777
778 /* MPII SGL array */
779 sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
780 sizeof(struct mfii_sge) * sc->sc_max_sgl);
781 if (sc->sc_sgl == NULL)
782 goto free_mfi;
783
784 if (mfii_init_ccb(sc) != 0) {
785 aprint_error(": could not init ccb list\n");
786 goto free_sgl;
787 }
788
789 /* kickstart firmware with all addresses and pointers */
790 if (mfii_initialise_firmware(sc) != 0) {
791 aprint_error(": could not initialize firmware\n");
792 goto free_sgl;
793 }
794
795 mutex_enter(&sc->sc_lock);
796 if (mfii_get_info(sc) != 0) {
797 mutex_exit(&sc->sc_lock);
798 aprint_error(": could not retrieve controller information\n");
799 goto free_sgl;
800 }
801 mutex_exit(&sc->sc_lock);
802
803 aprint_normal(": \"%s\", firmware %s",
804 sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
805 if (le16toh(sc->sc_info.mci_memory_size) > 0) {
806 aprint_normal(", %uMB cache",
807 le16toh(sc->sc_info.mci_memory_size));
808 }
809 aprint_normal("\n");
810 aprint_naive("\n");
811
812 sc->sc_ih = pci_intr_establish_xname(sc->sc_pc, ih, IPL_BIO,
813 mfii_intr, sc, DEVNAME(sc));
814 if (sc->sc_ih == NULL) {
815 aprint_error_dev(self, "can't establish interrupt");
816 if (intrstr)
817 aprint_error(" at %s", intrstr);
818 aprint_error("\n");
819 goto free_sgl;
820 }
821 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
822
823 for (i = 0; i < sc->sc_info.mci_lds_present; i++)
824 sc->sc_ld[i].ld_present = 1;
825
826 memset(adapt, 0, sizeof(*adapt));
827 adapt->adapt_dev = sc->sc_dev;
828 adapt->adapt_nchannels = 1;
829 /* keep a few commands for management */
830 if (sc->sc_max_cmds > 4)
831 adapt->adapt_openings = sc->sc_max_cmds - 4;
832 else
833 adapt->adapt_openings = sc->sc_max_cmds;
834 adapt->adapt_max_periph = adapt->adapt_openings;
835 adapt->adapt_request = mfii_scsipi_request;
836 adapt->adapt_minphys = minphys;
837 adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
838
839 memset(chan, 0, sizeof(*chan));
840 chan->chan_adapter = adapt;
841 chan->chan_bustype = &scsi_sas_bustype;
842 chan->chan_channel = 0;
843 chan->chan_flags = 0;
844 chan->chan_nluns = 8;
845 chan->chan_ntargets = sc->sc_info.mci_max_lds;
846 chan->chan_id = sc->sc_info.mci_max_lds;
847
848 mfii_rescan(sc->sc_dev, NULL, NULL);
849
850 if (mfii_aen_register(sc) != 0) {
851 /* error printed by mfii_aen_register */
852 goto intr_disestablish;
853 }
854
855 mutex_enter(&sc->sc_lock);
856 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
857 sizeof(sc->sc_ld_list), MFII_DATA_IN, true) != 0) {
858 mutex_exit(&sc->sc_lock);
859 aprint_error_dev(self,
860 "getting list of logical disks failed\n");
861 goto intr_disestablish;
862 }
863 mutex_exit(&sc->sc_lock);
864 memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
865 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
866 int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
867 sc->sc_target_lds[target] = i;
868 }
869
870 /* enable interrupts */
871 mfii_write(sc, MFI_OSTS, 0xffffffff);
872 mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
873
874 #if NBIO > 0
875 if (bio_register(sc->sc_dev, mfii_ioctl) != 0)
876 panic("%s: controller registration failed", DEVNAME(sc));
877 #endif /* NBIO > 0 */
878
879 if (mfii_create_sensors(sc) != 0)
880 aprint_error_dev(self, "unable to create sensors\n");
881
882 if (!pmf_device_register1(sc->sc_dev, mfii_suspend, mfii_resume,
883 mfii_shutdown))
884 aprint_error_dev(self, "couldn't establish power handler\n");
885 return;
886 intr_disestablish:
887 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
888 free_sgl:
889 mfii_dmamem_free(sc, sc->sc_sgl);
890 free_mfi:
891 mfii_dmamem_free(sc, sc->sc_mfi);
892 free_requests:
893 mfii_dmamem_free(sc, sc->sc_requests);
894 free_reply_postq:
895 mfii_dmamem_free(sc, sc->sc_reply_postq);
896 free_sense:
897 mfii_dmamem_free(sc, sc->sc_sense);
898 pci_unmap:
899 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
900 }
901
902 #if 0
903 struct srp_gc mfii_dev_handles_gc =
904 SRP_GC_INITIALIZER(mfii_dev_handles_dtor, NULL);
905
906 static inline uint16_t
907 mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
908 {
909 struct srp_ref sr;
910 uint16_t *map, handle;
911
912 map = srp_enter(&sr, &sc->sc_pd->pd_dev_handles);
913 handle = map[target];
914 srp_leave(&sr);
915
916 return (handle);
917 }
918
919 static int
920 mfii_dev_handles_update(struct mfii_softc *sc)
921 {
922 struct mfii_ld_map *lm;
923 uint16_t *dev_handles = NULL;
924 int i;
925 int rv = 0;
926
927 lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);
928
929 rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
930 MFII_DATA_IN, false);
931
932 if (rv != 0) {
933 rv = EIO;
934 goto free_lm;
935 }
936
937 dev_handles = mallocarray(MFI_MAX_PD, sizeof(*dev_handles),
938 M_DEVBUF, M_WAITOK);
939
940 for (i = 0; i < MFI_MAX_PD; i++)
941 dev_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;
942
943 /* commit the updated info */
944 sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
945 srp_update_locked(&mfii_dev_handles_gc,
946 &sc->sc_pd->pd_dev_handles, dev_handles);
947
948 free_lm:
949 free(lm, M_TEMP, sizeof(*lm));
950
951 return (rv);
952 }
953
954 static void
955 mfii_dev_handles_dtor(void *null, void *v)
956 {
957 uint16_t *dev_handles = v;
958
959 free(dev_handles, M_DEVBUF, sizeof(*dev_handles) * MFI_MAX_PD);
960 }
961 #endif /* 0 */
962
/*
 * autoconf detach: tear down in roughly the reverse order of
 * mfii_attach().  Children (the scsibus) are detached first; then the
 * firmware is shut down and interrupts are masked before dma
 * resources and the register mapping are released.
 */
static int
mfii_detach(device_t self, int flags)
{
	struct mfii_softc *sc = device_private(self);
	int error;

	/* attach never completed if it did not establish the interrupt */
	if (sc->sc_ih == NULL)
		return (0);

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

	mfii_destroy_sensors(sc);
#if NBIO > 0
	bio_unregister(sc->sc_dev);
#endif
	/* tell the firmware to shut down, then mask all interrupts */
	mfii_shutdown(sc->sc_dev, 0);
	mfii_write(sc, MFI_OMSK, 0xffffffff);

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
993
994 static int
995 mfii_rescan(device_t self, const char *ifattr, const int *locators)
996 {
997 struct mfii_softc *sc = device_private(self);
998
999 if (sc->sc_child != NULL)
1000 return 0;
1001
1002 sc->sc_child = config_found(self, &sc->sc_chan, scsiprint, CFARGS_NONE);
1003 return 0;
1004 }
1005
1006 static void
1007 mfii_childdetached(device_t self, device_t child)
1008 {
1009 struct mfii_softc *sc = device_private(self);
1010
1011 KASSERT(self == sc->sc_dev);
1012 KASSERT(child == sc->sc_child);
1013
1014 if (child == sc->sc_child)
1015 sc->sc_child = NULL;
1016 }
1017
/*
 * pmf suspend hook: not implemented; returning false vetoes the
 * suspend so the system never sleeps with the controller in an
 * unknown state.
 */
static bool
mfii_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1024
/*
 * pmf resume hook: not implemented; returns false (failure) to match
 * the unimplemented suspend path.
 */
static bool
mfii_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1031
1032 static bool
1033 mfii_shutdown(device_t dev, int how)
1034 {
1035 struct mfii_softc *sc = device_private(dev);
1036 struct mfii_ccb *ccb;
1037 union mfi_mbox mbox;
1038 bool rv = true;
1039
1040 memset(&mbox, 0, sizeof(mbox));
1041
1042 mutex_enter(&sc->sc_lock);
1043 DNPRINTF(MFII_D_MISC, "%s: mfii_shutdown\n", DEVNAME(sc));
1044 ccb = mfii_get_ccb(sc);
1045 if (ccb == NULL)
1046 return false;
1047 mutex_enter(&sc->sc_ccb_mtx);
1048 if (sc->sc_running) {
1049 sc->sc_running = 0; /* prevent new commands */
1050 mutex_exit(&sc->sc_ccb_mtx);
1051 #if 0 /* XXX why does this hang ? */
1052 mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1053 mfii_scrub_ccb(ccb);
1054 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
1055 NULL, 0, MFII_DATA_NONE, true)) {
1056 aprint_error_dev(dev, "shutdown: cache flush failed\n");
1057 rv = false;
1058 goto fail;
1059 }
1060 printf("ok1\n");
1061 #endif
1062 mbox.b[0] = 0;
1063 mfii_scrub_ccb(ccb);
1064 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, &mbox,
1065 NULL, 0, MFII_DATA_NONE, true)) {
1066 aprint_error_dev(dev, "shutdown: "
1067 "firmware shutdown failed\n");
1068 rv = false;
1069 goto fail;
1070 }
1071 } else {
1072 mutex_exit(&sc->sc_ccb_mtx);
1073 }
1074 fail:
1075 mfii_put_ccb(sc, ccb);
1076 mutex_exit(&sc->sc_lock);
1077 return rv;
1078 }
1079
1080 static u_int32_t
1081 mfii_read(struct mfii_softc *sc, bus_size_t r)
1082 {
1083 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1084 BUS_SPACE_BARRIER_READ);
1085 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
1086 }
1087
1088 static void
1089 mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
1090 {
1091 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1092 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1093 BUS_SPACE_BARRIER_WRITE);
1094 }
1095
1096 static struct mfii_dmamem *
1097 mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
1098 {
1099 struct mfii_dmamem *m;
1100 int nsegs;
1101
1102 m = malloc(sizeof(*m), M_DEVBUF, M_WAITOK | M_ZERO);
1103 m->mdm_size = size;
1104
1105 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1106 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
1107 goto mdmfree;
1108
1109 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
1110 &nsegs, BUS_DMA_NOWAIT) != 0)
1111 goto destroy;
1112
1113 if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
1114 BUS_DMA_NOWAIT) != 0)
1115 goto free;
1116
1117 if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
1118 BUS_DMA_NOWAIT) != 0)
1119 goto unmap;
1120
1121 memset(m->mdm_kva, 0, size);
1122 return (m);
1123
1124 unmap:
1125 bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
1126 free:
1127 bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
1128 destroy:
1129 bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
1130 mdmfree:
1131 free(m, M_DEVBUF);
1132
1133 return (NULL);
1134 }
1135
/*
 * Release everything mfii_dmamem_alloc() acquired, in strict reverse
 * order: unload the DMA map, unmap the kva, free the memory segment,
 * destroy the map, and finally free the descriptor itself.
 */
static void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF);
}
1145
/*
 * Issue the MFI DCMD frame held in this ccb (see mfii_dcmd_frame())
 * to the controller via an MPII pass-through request.  The request is
 * an io header followed immediately by a raid context and a single
 * chain SGE pointing at the ccb's sense-buffer slot.
 */
static void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 is in 32-bit words; chain_offset in 16-byte units. */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	/* The chain SGE targets the MFI frame at ccb_sense_dva. */
	sge->sg_addr = htole64(ccb->ccb_sense_dva);
	sge->sg_len = htole32(sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1166
1167 static int
1168 mfii_aen_register(struct mfii_softc *sc)
1169 {
1170 struct mfi_evt_log_info mel;
1171 struct mfii_ccb *ccb;
1172 struct mfii_dmamem *mdm;
1173 int rv;
1174
1175 ccb = mfii_get_ccb(sc);
1176 if (ccb == NULL) {
1177 printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
1178 return (ENOMEM);
1179 }
1180
1181 memset(&mel, 0, sizeof(mel));
1182 mfii_scrub_ccb(ccb);
1183
1184 rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
1185 &mel, sizeof(mel), MFII_DATA_IN, true);
1186 if (rv != 0) {
1187 mfii_put_ccb(sc, ccb);
1188 aprint_error_dev(sc->sc_dev, "unable to get event info\n");
1189 return (EIO);
1190 }
1191
1192 mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
1193 if (mdm == NULL) {
1194 mfii_put_ccb(sc, ccb);
1195 aprint_error_dev(sc->sc_dev, "unable to allocate event data\n");
1196 return (ENOMEM);
1197 }
1198
1199 /* replay all the events from boot */
1200 mfii_aen_start(sc, ccb, mdm, le32toh(mel.mel_boot_seq_num));
1201
1202 return (0);
1203 }
1204
/*
 * Build and issue an MR_DCMD_CTRL_EVENT_WAIT frame asking the
 * firmware to deliver the event with sequence number 'seq' (or later)
 * into the DMA buffer 'mdm'.  Completion is handled by mfii_aen_done().
 */
static void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	/* This ccb stays dedicated to AEN; remember it in the softc. */
	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* Subscribe to all locales at debug class (i.e. everything). */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	hdr->mfh_data_len = htole32(MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mdf_mbox.w[0] = htole32(seq);
	dcmd->mdf_mbox.w[1] = htole32(mec.mec_word);
	sgl->sg64[0].addr = htole64(MFII_DMA_DVA(mdm));
	sgl->sg64[0].len = htole32(MFII_DMA_LEN(mdm));

	/* Make the event buffer and the frame visible to the device. */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1242
1243 static void
1244 mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1245 {
1246 KASSERT(sc->sc_aen_ccb == ccb);
1247
1248 /*
1249 * defer to a thread with KERNEL_LOCK so we can run autoconf
1250 * We shouldn't have more than one AEN command pending at a time,
1251 * so no need to lock
1252 */
1253 if (sc->sc_running)
1254 workqueue_enqueue(sc->sc_aen_wq, &sc->sc_aen_work, NULL);
1255 }
1256
/*
 * Workqueue handler: process one firmware event delivered into the
 * AEN DMA buffer, dispatch on its event code, then rearm the AEN
 * command asking for the next sequence number.
 */
static void
mfii_aen(struct work *wk, void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);

	/* Pull the completed frame and event data back from the device. */
	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	DNPRINTF(MFII_D_MISC, "%s: %u %08x %02x %s\n", DEVNAME(sc),
	    le32toh(med->med_seq_num), le32toh(med->med_code),
	    med->med_arg_type, med->med_description);

	switch (le32toh(med->med_code)) {
	case MR_EVT_PD_INSERTED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
	case MR_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MR_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MR_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MR_EVT_LD_CREATED:
	case MR_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	/* Rearm: wait for the event after the one we just handled. */
	mfii_aen_start(sc, ccb, mdm, le32toh(med->med_seq_num) + 1);
}
1306
1307 static void
1308 mfii_aen_pd_insert(struct mfii_softc *sc,
1309 const struct mfi_evtarg_pd_address *pd)
1310 {
1311 printf("%s: physical disk inserted id %d enclosure %d\n", DEVNAME(sc),
1312 le16toh(pd->device_id), le16toh(pd->encl_id));
1313 }
1314
1315 static void
1316 mfii_aen_pd_remove(struct mfii_softc *sc,
1317 const struct mfi_evtarg_pd_address *pd)
1318 {
1319 printf("%s: physical disk removed id %d enclosure %d\n", DEVNAME(sc),
1320 le16toh(pd->device_id), le16toh(pd->encl_id));
1321 }
1322
/* Physical-disk state-change events are currently ignored. */
static void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
}
1329
/*
 * Handle logical-drive create/delete events: refetch the LD list from
 * the firmware, diff it against the cached per-target map, attach
 * sensors for new drives and detach the scsipi target and sensor for
 * removed ones, then store the new map.
 */
static void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	int i, target, old, nld;
	/* newlds[target] = index into sc_ld_list, or -1 if absent. */
	int newlds[MFI_MAX_LD];

	/* mfii_mgmt() asserts sc_lock is held. */
	mutex_enter(&sc->sc_lock);
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false) != 0) {
		mutex_exit(&sc->sc_lock);
		DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
		    DEVNAME(sc));
		return;
	}
	mutex_exit(&sc->sc_lock);

	memset(newlds, -1, sizeof(newlds));

	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, sc->sc_ld_list.mll_list[i].mll_state);
		newlds[target] = i;
	}

	/* Compare the new map with the previous one target by target. */
	for (i = 0; i < MFI_MAX_LD; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			printf("%s: logical drive %d added (target %d)\n",
			    DEVNAME(sc), i, nld);

			// XXX scsi_probe_target(sc->sc_scsibus, i);

			mfii_init_ld_sensor(sc, &sc->sc_sensors[i], i);
			mfii_attach_sensor(sc, &sc->sc_sensors[i]);
		} else if (nld == -1 && old != -1) {
			printf("%s: logical drive %d removed (target %d)\n",
			    DEVNAME(sc), i, old);

			scsipi_target_detach(&sc->sc_chan, i, 0, DETACH_FORCE);
			sysmon_envsys_sensor_detach(sc->sc_sme,
			    &sc->sc_sensors[i]);
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1379
/*
 * Stop asynchronous event notification.
 * XXX not implemented; the outstanding AEN command is not cancelled.
 */
static void
mfii_aen_unregister(struct mfii_softc *sc)
{
}
1385
/*
 * Drive the firmware state machine to MFI_STATE_READY, nudging it
 * with doorbell writes where needed and busy-waiting (DELAY) for each
 * transition.  Returns 0 once ready, 1 on fault, unknown state, or
 * timeout.
 */
static int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t fw_state, cur_state;
	int max_wait, i;	/* max_wait is in seconds */

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* Poll in 100ms steps until the state changes or we time out. */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1439
/*
 * Fetch the controller information page (MR_DCMD_CTRL_GET_INFO) into
 * sc_info and, when debugging is enabled, dump its contents.  The
 * caller must hold sc_lock (asserted by mfii_mgmt()).  Returns 0 on
 * success or the mfii_mgmt() error.
 */
static int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), MFII_DATA_IN, true);

	if (rv != 0)
		return (rv);

	/* Everything below is debug output only. */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ", sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ", sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}
1594
/*
 * Issue a legacy MFI frame (MFA mode) and busy-wait for its
 * completion by watching the frame's status byte.  Used before the
 * reply queues are operational (e.g. IOC init).  Returns 0 on
 * success, 1 on timeout (~5 seconds).
 */
static int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header *hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	/* Completion is detected here by polling, not via the reply queue. */
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		/* Re-sync so we observe the firmware's status update. */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	/* Complete any data transfer attached to the command. */
	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	return (rv);
}
1650
1651 static int
1652 mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
1653 {
1654 void (*done)(struct mfii_softc *, struct mfii_ccb *);
1655 void *cookie;
1656 int rv = 1;
1657
1658 done = ccb->ccb_done;
1659 cookie = ccb->ccb_cookie;
1660
1661 ccb->ccb_done = mfii_poll_done;
1662 ccb->ccb_cookie = &rv;
1663
1664 mfii_start(sc, ccb);
1665
1666 do {
1667 delay(10);
1668 mfii_postq(sc);
1669 } while (rv == 1);
1670
1671 ccb->ccb_cookie = cookie;
1672 done(sc, ccb);
1673
1674 return (0);
1675 }
1676
1677 static void
1678 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1679 {
1680 int *rv = ccb->ccb_cookie;
1681
1682 *rv = 0;
1683 }
1684
/*
 * Issue a command and sleep until it completes.  ccb_cookie is used
 * as the "still in flight" marker: mfii_exec_done() clears it under
 * ccb_mtx and signals ccb_cv.  Always returns 0.
 */
static int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = ccb;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	return (0);
}
1705
/*
 * Completion hook for mfii_exec(): clear the in-flight marker under
 * ccb_mtx and wake the sleeping thread.
 */
static void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&ccb->ccb_mtx);
	ccb->ccb_cookie = NULL;
	cv_signal(&ccb->ccb_cv);
	mutex_exit(&ccb->ccb_mtx);
}
1714
1715 static int
1716 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1717 void *buf, size_t len, mfii_direction_t dir, bool poll)
1718 {
1719 struct mfii_ccb *ccb;
1720 int rv;
1721
1722 KASSERT(mutex_owned(&sc->sc_lock));
1723 if (!sc->sc_running)
1724 return EAGAIN;
1725
1726 ccb = mfii_get_ccb(sc);
1727 if (ccb == NULL)
1728 return (ENOMEM);
1729
1730 mfii_scrub_ccb(ccb);
1731 rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, dir, poll);
1732 mfii_put_ccb(sc, ccb);
1733
1734 return (rv);
1735 }
1736
/*
 * Build an MFI DCMD management command in the ccb's MFI frame, wrap
 * it in an MPII pass-through request whose chain SGE points at that
 * frame, and run it either by polling or by sleeping (mfii_exec).
 * While cold we always poll.  Returns 0 on MFI_STAT_OK, ENOMEM if
 * the data buffer can't be DMA-loaded, EIO otherwise.
 */
static int
mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
    const union mfi_mbox *mbox, void *buf, size_t len, mfii_direction_t dir,
    bool poll)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	int rv = EIO;

	/* Can't sleep before interrupts/scheduling are up. */
	if (cold)
		poll = true;

	ccb->ccb_data = buf;
	ccb->ccb_len = len;
	ccb->ccb_direction = dir;
	switch (dir) {
	case MFII_DATA_IN:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case MFII_DATA_OUT:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		break;
	case MFII_DATA_NONE:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
		break;
	}

	/* Attach the data buffer to the MFI frame's SGL (32-bit map). */
	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl, poll) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap32->dm_nsegs;
	KASSERT(!ccb->ccb_dma64);

	/*
	 * NOTE(review): opc is stored without htole32() here, unlike
	 * mfii_aen_start(); confirm on a big-endian host.
	 */
	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	/* sgl_offset0 is in 32-bit words; chain_offset in 16-byte units. */
	io->function = MFII_FUNCTION_PASSTHRU_IO;
	io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
	io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;

	/* The chain SGE points at the MFI frame itself. */
	sge->sg_addr = htole64(ccb->ccb_mfi_dva);
	sge->sg_len = htole32(MFI_FRAME_SIZE);
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	if (poll) {
		ccb->ccb_done = mfii_empty_done;
		mfii_poll(sc, ccb);
	} else
		mfii_exec(sc, ccb);

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;
	}

done:
	return (rv);
}
1807
/*
 * No-op completion hook: used when the caller checks the frame's
 * status byte itself after polling.
 */
static void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
}
1813
/*
 * DMA-load the ccb's data buffer into the 32-bit map and fill the
 * legacy MFI 32-bit SGL at sglp with the resulting segments, then
 * pre-sync for the transfer.  A zero-length buffer is a successful
 * no-op.  Returns 0 on success, 1 on load failure.
 */
static int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap32;
	int error;
	int i;

	KASSERT(!ccb->ccb_dma64);
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* One 32-bit SGL entry per DMA segment. */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1846
/*
 * Hand a request to the controller by writing its 64-bit request
 * descriptor to the inbound queue port.  Without a 64-bit single
 * write, the low half (IQPL) and high half (IQPH) must be written in
 * that order under sc_post_mtx so descriptors from different CPUs
 * can't interleave.
 */
static void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#if defined(__LP64__) && 0
	u_long *r = (u_long *)&ccb->ccb_req;
#else
	uint32_t *r = (uint32_t *)&ccb->ccb_req;
#endif

	/* Make the request frame visible to the device before posting. */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#if defined(__LP64__) && 0
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
#else
	mutex_enter(&sc->sc_post_mtx);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
	mutex_exit(&sc->sc_post_mtx);
#endif
}
1874
/*
 * Post-process a completed command: pull the request frame and any
 * SGL area back from the device, complete and unload the data
 * transfer on whichever DMA map (64- or 32-bit) was used, then invoke
 * the ccb's completion hook.
 */
static void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_dma64) {
		KASSERT(ccb->ccb_len > 0);
		bus_dmamap_sync(sc->sc_dmat64, ccb->ccb_dmamap64,
		    0, ccb->ccb_dmamap64->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat64, ccb->ccb_dmamap64);
	} else if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	ccb->ccb_done(sc, ccb);
}
1907
1908 static int
1909 mfii_initialise_firmware(struct mfii_softc *sc)
1910 {
1911 struct mpii_msg_iocinit_request *iiq;
1912 struct mfii_dmamem *m;
1913 struct mfii_ccb *ccb;
1914 struct mfi_init_frame *init;
1915 int rv;
1916
1917 m = mfii_dmamem_alloc(sc, sizeof(*iiq));
1918 if (m == NULL)
1919 return (1);
1920
1921 iiq = MFII_DMA_KVA(m);
1922 memset(iiq, 0, sizeof(*iiq));
1923
1924 iiq->function = MPII_FUNCTION_IOC_INIT;
1925 iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
1926
1927 iiq->msg_version_maj = 0x02;
1928 iiq->msg_version_min = 0x00;
1929 iiq->hdr_version_unit = 0x10;
1930 iiq->hdr_version_dev = 0x0;
1931
1932 iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
1933
1934 iiq->reply_descriptor_post_queue_depth =
1935 htole16(sc->sc_reply_postq_depth);
1936 iiq->reply_free_queue_depth = htole16(0);
1937
1938 iiq->sense_buffer_address_high = htole32(
1939 MFII_DMA_DVA(sc->sc_sense) >> 32);
1940
1941 iiq->reply_descriptor_post_queue_address_lo =
1942 htole32(MFII_DMA_DVA(sc->sc_reply_postq));
1943 iiq->reply_descriptor_post_queue_address_hi =
1944 htole32(MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
1945
1946 iiq->system_request_frame_base_address_lo =
1947 htole32(MFII_DMA_DVA(sc->sc_requests));
1948 iiq->system_request_frame_base_address_hi =
1949 htole32(MFII_DMA_DVA(sc->sc_requests) >> 32);
1950
1951 iiq->timestamp = htole64(time_uptime);
1952
1953 ccb = mfii_get_ccb(sc);
1954 if (ccb == NULL) {
1955 /* shouldn't ever run out of ccbs during attach */
1956 return (1);
1957 }
1958 mfii_scrub_ccb(ccb);
1959 init = ccb->ccb_request;
1960
1961 init->mif_header.mfh_cmd = MFI_CMD_INIT;
1962 init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
1963 init->mif_qinfo_new_addr_lo = htole32(MFII_DMA_DVA(m));
1964 init->mif_qinfo_new_addr_hi = htole32((uint64_t)MFII_DMA_DVA(m) >> 32);
1965
1966 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
1967 0, MFII_DMA_LEN(sc->sc_reply_postq),
1968 BUS_DMASYNC_PREREAD);
1969
1970 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1971 0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
1972
1973 rv = mfii_mfa_poll(sc, ccb);
1974
1975 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1976 0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
1977
1978 mfii_put_ccb(sc, ccb);
1979 mfii_dmamem_free(sc, m);
1980
1981 return (rv);
1982 }
1983
1984 static int
1985 mfii_my_intr(struct mfii_softc *sc)
1986 {
1987 u_int32_t status;
1988
1989 status = mfii_read(sc, MFI_OSTS);
1990
1991 DNPRINTF(MFII_D_INTR, "%s: intr status 0x%x\n", DEVNAME(sc), status);
1992 if (ISSET(status, 0x1)) {
1993 mfii_write(sc, MFI_OSTS, status);
1994 return (1);
1995 }
1996
1997 return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
1998 }
1999
/*
 * Interrupt handler: bail out if the interrupt isn't ours, otherwise
 * drain the reply post queue.
 */
static int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc) == 0)
		return (0);

	mfii_postq(sc);
	return (1);
}
2012
/*
 * Drain the reply post queue: collect every completed descriptor onto
 * a local list under sc_reply_postq_mtx, update the host's reply post
 * index register, then run the completions with the mutex dropped.
 */
static void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mutex_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		DNPRINTF(MFII_D_INTR, "%s: mfii_postq index %d flags 0x%x data 0x%x\n",
		    DEVNAME(sc), sc->sc_reply_postq_index, rdp->reply_flags,
		    rdp->data == 0xffffffff);
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1-based; map it back to the ccb array. */
		ccb = &sc->sc_ccb[le16toh(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		/* Mark the slot unused for the next pass around the ring. */
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* Tell the firmware how far we have consumed. */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mutex_exit(&sc->sc_reply_postq_mtx);

	/* Run completions without holding the queue mutex. */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2067
/*
 * scsipi adapter entry point.  Handles xfer-mode and resource
 * requests inline; for ADAPTER_REQ_RUN_XFER it validates the target,
 * short-circuits cache flushes when a battery-backed cache is
 * present, builds either an LD I/O or a pass-through CDB request, and
 * submits it (polled when XS_CTL_POLL is set).
 */
static void
mfii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct mfii_softc *sc = device_private(adapt->adapt_dev);
	struct mfii_ccb *ccb;
	int timeout;
	int target;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	/* Only LUN 0 of present logical drives is addressable. */
	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
	    periph->periph_lun != 0) {
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		return;
	}

	if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
	    xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
		/* the cache is stable storage, don't flush */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		scsipi_done(xs);
		return;
	}

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}
	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	/* Arm the per-xfer timeout (at least one tick). */
	timeout = mstohz(xs->timeout);
	if (timeout == 0)
		timeout = 1;
	callout_reset(&xs->xs_callout, timeout, mfii_scsi_cmd_tmo, ccb);

	switch (xs->cmd->opcode) {
	case SCSI_READ_6_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
	case SCSI_WRITE_6_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		/* Fast path: native LD I/O request. */
		if (mfii_scsi_cmd_io(sc, ccb, xs) != 0)
			goto stuffup;
		break;

	default:
		/* Everything else goes through as a raw CDB. */
		if (mfii_scsi_cmd_cdb(sc, ccb, xs) != 0)
			goto stuffup;
		break;
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	DNPRINTF(MFII_D_CMD, "%s: start io %d cmd %d\n", DEVNAME(sc), target,
	    xs->cmd->opcode);

	if (xs->xs_control & XS_CTL_POLL) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsipi_done(xs);
	mfii_put_ccb(sc, ccb);
}
2175
/*
 * Completion handler for SCSI commands issued by mfii_scsipi_request().
 * Translates the MFI RAID-context status into a scsipi error code and
 * completes the xfer.
 */
static void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context immediately follows the MPI SCSI I/O request */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	/*
	 * If the callout can no longer be stopped it is already running:
	 * mfii_scsi_cmd_tmo() now owns this ccb and will queue it for
	 * abort, so don't complete or free it here.
	 */
	if (callout_stop(&xs->xs_callout) != 0)
		return;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* command reached the device; hand back sense data */
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	scsipi_done(xs);
	mfii_put_ccb(sc, ccb);
}
2209
/*
 * Build an LD I/O fast-path request for a read/write command and load
 * its data transfer.  Returns 0 on success, 1 if the DMA map could not
 * be loaded.
 */
static int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context immediately follows the MPI SCSI I/O request */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL offset is expressed in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	/* per-IOP RAID context parameters */
	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	/* SGEs are placed directly after the RAID context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	/* newer IOPs moved the SGE count to a different context field */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2269
/*
 * Build a generic (non-read/write) SCSI command request for a logical
 * disk and load its data transfer.  Returns 0 on success, 1 if the DMA
 * map could not be loaded.
 */
static int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context immediately follows the MPI SCSI I/O request */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL offset is expressed in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(periph->periph_lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	/* SGEs are placed directly after the RAID context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2317
2318 #if 0
/*
 * Issue a SCSI command to a physical (pass-through) disk.
 * NOTE: compiled out (#if 0) — still uses the OpenBSD scsi_link API
 * and has not been converted to NetBSD scsipi.
 */
void
mfii_pd_scsi_cmd(struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	// XXX timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->xs_control, XS_CTL_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	// XXX timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}
2356
/*
 * Probe for a system (JBOD) physical disk at the given target.
 * Returns 0 if present, ENXIO/EIO otherwise.
 * NOTE: compiled out (#if 0) — still uses the OpenBSD scsi_link API.
 */
int
mfii_pd_scsi_probe(struct scsi_link *link)
{
	struct mfii_softc *sc = link->adapter_softc;
	struct mfi_pd_details mpd;
	union mfi_mbox mbox;
	int rv;

	if (link->lun > 0)
		return (0);

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = htole16(link->target);

	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
	    MFII_DATA_IN, true);
	if (rv != 0)
		return (EIO);

	/* only disks in SYSTEM (JBOD) state are exposed as PDs */
	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
		return (ENXIO);

	return (0);
}
2381
/*
 * Build a pass-through request for a physical disk.
 * Returns an XS_* status code.
 * NOTE: compiled out (#if 0) — still uses the OpenBSD scsi_link API.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context immediately follows the MPI SCSI I/O request */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	/* mark this as a system-PD (pass-through) request */
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
2437 #endif
2438
/*
 * Load the ccb's data buffer into its 64-bit DMA map and build the
 * scatter/gather list starting at sglp (inside the request frame).
 * If the SGEs don't all fit in the request frame, the last in-frame
 * slot becomes a chain element pointing at the ccb's external SGL.
 * Returns 0 on success, 1 on DMA map load failure.
 */
static int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;	/* chain element, if needed */
	bus_dmamap_t dmap = ccb->ccb_dmamap64;
	u_int space;
	int i;

	int error;

	/* nothing to transfer */
	if (ccb->ccb_len == 0)
		return (0);

	ccb->ccb_dma64 = true;
	error = bus_dmamap_load(sc->sc_dmat64, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* number of SGE slots left in the request frame after sglp */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is in 16-byte units */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* when we reach the chain slot, continue in the external SGL */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the final SGE as end-of-list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat64, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* the external SGL lives in the shared sc_sgl allocation */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2506
2507 static void
2508 mfii_scsi_cmd_tmo(void *p)
2509 {
2510 struct mfii_ccb *ccb = p;
2511 struct mfii_softc *sc = ccb->ccb_sc;
2512 bool start_abort;
2513
2514 printf("%s: cmd timeout ccb %p\n", DEVNAME(sc), p);
2515
2516 mutex_enter(&sc->sc_abort_mtx);
2517 start_abort = (SIMPLEQ_FIRST(&sc->sc_abort_list) == 0);
2518 SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2519 if (start_abort)
2520 workqueue_enqueue(sc->sc_abort_wq, &sc->sc_abort_work, NULL);
2521 mutex_exit(&sc->sc_abort_mtx);
2522 }
2523
/*
 * Workqueue task: drain the abort list and issue a task-management
 * abort for each timed-out command.
 */
static void
mfii_abort_task(struct work *wk, void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* atomically take over the whole abort list */
	mutex_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mutex_exit(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsipi_xfer *xs = ccb->ccb_cookie;
		struct scsipi_periph *periph = xs->xs_periph;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		if (!sc->sc_ld[periph->periph_target].ld_present) {
			/* device is gone */
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			mfii_put_ccb(sc, ccb);
			continue;
		}

		/*
		 * NOTE(review): mfii_get_ccb() can return NULL (free
		 * list empty or adapter shutting down); that would be
		 * dereferenced by mfii_scrub_ccb() below — confirm and
		 * add a NULL check.
		 */
		accb = mfii_get_ccb(sc);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, periph->periph_target, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2563
2564 static void
2565 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2566 uint16_t smid, uint8_t type, uint32_t flags)
2567 {
2568 struct mfii_task_mgmt *msg;
2569 struct mpii_msg_scsi_task_request *req;
2570
2571 msg = accb->ccb_request;
2572 req = &msg->mpii_request;
2573 req->dev_handle = dev_handle;
2574 req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2575 req->task_type = type;
2576 req->task_mid = htole16( smid);
2577 msg->flags = flags;
2578
2579 accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2580 accb->ccb_req.smid = le16toh(accb->ccb_smid);
2581 }
2582
2583 static void
2584 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2585 {
2586 struct mfii_ccb *ccb = accb->ccb_cookie;
2587 struct scsipi_xfer *xs = ccb->ccb_cookie;
2588
2589 /* XXX check accb completion? */
2590
2591 mfii_put_ccb(sc, accb);
2592 printf("%s: cmd aborted ccb %p\n", DEVNAME(sc), ccb);
2593
2594 xs->error = XS_TIMEOUT;
2595 scsipi_done(xs);
2596 mfii_put_ccb(sc, ccb);
2597 }
2598
2599 static struct mfii_ccb *
2600 mfii_get_ccb(struct mfii_softc *sc)
2601 {
2602 struct mfii_ccb *ccb;
2603
2604 mutex_enter(&sc->sc_ccb_mtx);
2605 if (!sc->sc_running) {
2606 ccb = NULL;
2607 } else {
2608 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2609 if (ccb != NULL)
2610 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2611 }
2612 mutex_exit(&sc->sc_ccb_mtx);
2613 return (ccb);
2614 }
2615
2616 static void
2617 mfii_scrub_ccb(struct mfii_ccb *ccb)
2618 {
2619 ccb->ccb_cookie = NULL;
2620 ccb->ccb_done = NULL;
2621 ccb->ccb_flags = 0;
2622 ccb->ccb_data = NULL;
2623 ccb->ccb_direction = MFII_DATA_NONE;
2624 ccb->ccb_dma64 = false;
2625 ccb->ccb_len = 0;
2626 ccb->ccb_sgl_len = 0;
2627 memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2628 memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2629 memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2630 }
2631
/* Return a ccb to the free list. */
static void
mfii_put_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
	mutex_exit(&sc->sc_ccb_mtx);
}
2639
2640 static int
2641 mfii_init_ccb(struct mfii_softc *sc)
2642 {
2643 struct mfii_ccb *ccb;
2644 u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2645 u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2646 u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2647 u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2648 u_int i;
2649 int error;
2650
2651 sc->sc_ccb = malloc(sc->sc_max_cmds * sizeof(struct mfii_ccb),
2652 M_DEVBUF, M_WAITOK|M_ZERO);
2653
2654 for (i = 0; i < sc->sc_max_cmds; i++) {
2655 ccb = &sc->sc_ccb[i];
2656 ccb->ccb_sc = sc;
2657
2658 /* create a dma map for transfer */
2659 error = bus_dmamap_create(sc->sc_dmat,
2660 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2661 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap32);
2662 if (error) {
2663 printf("%s: cannot create ccb dmamap32 (%d)\n",
2664 DEVNAME(sc), error);
2665 goto destroy;
2666 }
2667 error = bus_dmamap_create(sc->sc_dmat64,
2668 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2669 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap64);
2670 if (error) {
2671 printf("%s: cannot create ccb dmamap64 (%d)\n",
2672 DEVNAME(sc), error);
2673 goto destroy32;
2674 }
2675
2676 /* select i + 1'th request. 0 is reserved for events */
2677 ccb->ccb_smid = i + 1;
2678 ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2679 ccb->ccb_request = request + ccb->ccb_request_offset;
2680 ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2681 ccb->ccb_request_offset;
2682
2683 /* select i'th MFI command frame */
2684 ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2685 ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2686 ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2687 ccb->ccb_mfi_offset;
2688
2689 /* select i'th sense */
2690 ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2691 ccb->ccb_sense = (struct mfi_sense *)(sense +
2692 ccb->ccb_sense_offset);
2693 ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2694 ccb->ccb_sense_offset;
2695
2696 /* select i'th sgl */
2697 ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2698 sc->sc_max_sgl * i;
2699 ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2700 ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2701 ccb->ccb_sgl_offset;
2702
2703 mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2704 cv_init(&ccb->ccb_cv, "mfiiexec");
2705
2706 /* add ccb to queue */
2707 mfii_put_ccb(sc, ccb);
2708 }
2709
2710 return (0);
2711
2712 destroy32:
2713 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2714 destroy:
2715 /* free dma maps and ccb memory */
2716 while ((ccb = mfii_get_ccb(sc)) != NULL) {
2717 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2718 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap64);
2719 }
2720
2721 free(sc->sc_ccb, M_DEVBUF);
2722
2723 return (1);
2724 }
2725
2726 #if NBIO > 0
/*
 * bio(4) ioctl dispatcher.  All bio requests are serialized by
 * sc_lock.  Returns 0 on success or an errno.
 */
static int
mfii_ioctl(device_t dev, u_long cmd, void *addr)
{
	struct mfii_softc *sc = device_private(dev);
	int error = 0;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));

	mutex_enter(&sc->sc_lock);

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MFII_D_IOCTL, "inq\n");
		error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MFII_D_IOCTL, "vol\n");
		error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MFII_D_IOCTL, "disk\n");
		error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MFII_D_IOCTL, "alarm\n");
		error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCBLINK:
		DNPRINTF(MFII_D_IOCTL, "blink\n");
		error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
		break;

	case BIOCSETSTATE:
		DNPRINTF(MFII_D_IOCTL, "setstate\n");
		error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

#if 0
	case BIOCPATROL:
		DNPRINTF(MFII_D_IOCTL, "patrol\n");
		error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
		break;
#endif

	default:
		DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
		error = ENOTTY;
	}

	mutex_exit(&sc->sc_lock);

	return (error);
}
2784
/*
 * Refresh the cached controller configuration used by the bio ioctls:
 * controller info, the full RAID config (sc_cfg), the LD list and
 * per-LD details (sc_ld_details), plus a count of in-use physical
 * disks (sc_no_pd).  Returns 0 on success, EINVAL on any failure.
 */
static int
mfii_bio_getitall(struct mfii_softc *sc)
{
	int i, d, rv = EINVAL;
	size_t size;
	union mfi_mbox mbox;
	struct mfi_conf *cfg = NULL;
	struct mfi_ld_details *ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* mfc_size is the total size of the variable-length config */
	size = cfg->mfc_size;
	free(cfg, M_DEVBUF);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF);
	sc->sc_cfg = cfg;

	/* get all ld info */
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		/* mailbox byte 0 selects the LD by target id */
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox,
		    &sc->sc_ld_details[i], size, MFII_DATA_IN, false))
			goto done;

		/* drives per span * spans = drives used by this LD */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
2865
2866 static int
2867 mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
2868 {
2869 int rv = EINVAL;
2870 struct mfi_conf *cfg = NULL;
2871
2872 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));
2873
2874 if (mfii_bio_getitall(sc)) {
2875 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
2876 DEVNAME(sc));
2877 goto done;
2878 }
2879
2880 /* count unused disks as volumes */
2881 if (sc->sc_cfg == NULL)
2882 goto done;
2883 cfg = sc->sc_cfg;
2884
2885 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
2886 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
2887 #if notyet
2888 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
2889 (bi->bi_nodisk - sc->sc_no_pd);
2890 #endif
2891 /* tell bio who we are */
2892 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
2893
2894 rv = 0;
2895 done:
2896 return (rv);
2897 }
2898
/*
 * BIOCVOL: report the status, geometry and size of one volume.
 * Volume ids beyond the LD count are mapped onto hotspares/unused
 * disks via mfii_bio_hs().  Returns 0 on success, EINVAL on failure.
 */
static int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int i, per, rv = EINVAL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	strlcpy(bv->bv_dev, sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_name,
	    sizeof(bv->bv_dev));

	/* map MFI LD state onto bio volume status */
	switch (sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* consistency check in progress; 0xffff == 100% */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* background initialization in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

#if 0
	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;
#endif

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	/* spanned raid groups are reported as e.g. RAID 10/50/60 */
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */
	bv->bv_stripe_size =
	    (512 << sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_stripe_size)
	    / 1024; /* in KB */

	rv = 0;
done:
	return (rv);
}
2996
/*
 * BIOCDISK: report details of one physical disk in a volume.
 * Walks the RAID config to map (volid, diskid) through spans/arrays to
 * a physical device id, then queries its details.  Returns 0 on
 * success, EINVAL on failure.
 */
static int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf *cfg;
	struct mfi_array *ar;
	struct mfi_ld_cfg *ld;
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	struct scsipi_inquiry_data *inqbuf;
	char vend[8+16+4+1], *vendp;
	int i, rv = EINVAL;
	int arr, vol, disk, span;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	/* 0xffff in the array slot means no disk is attached there */
	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    MFII_DATA_IN, false))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false))
				continue;

			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no candidate found: return with BIOC_SDFAILED only */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    MFII_DATA_IN, false)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* vendor/product/revision are contiguous in the inquiry data */
	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

#if 0
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}
#endif

	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);

	return (rv);
}
3150
3151 static int
3152 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3153 {
3154 uint32_t opc;
3155 int rv = 0;
3156 int8_t ret;
3157 mfii_direction_t dir = MFII_DATA_NONE;
3158
3159 switch (ba->ba_opcode) {
3160 case BIOC_SADISABLE:
3161 opc = MR_DCMD_SPEAKER_DISABLE;
3162 break;
3163
3164 case BIOC_SAENABLE:
3165 opc = MR_DCMD_SPEAKER_ENABLE;
3166 break;
3167
3168 case BIOC_SASILENCE:
3169 opc = MR_DCMD_SPEAKER_SILENCE;
3170 break;
3171
3172 case BIOC_GASTATUS:
3173 opc = MR_DCMD_SPEAKER_GET;
3174 dir = MFII_DATA_IN;
3175 break;
3176
3177 case BIOC_SATEST:
3178 opc = MR_DCMD_SPEAKER_TEST;
3179 break;
3180
3181 default:
3182 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3183 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3184 return (EINVAL);
3185 }
3186
3187 if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), dir, false))
3188 rv = EINVAL;
3189 else
3190 if (ba->ba_opcode == BIOC_GASTATUS)
3191 ba->ba_status = ret;
3192 else
3193 ba->ba_status = 0;
3194
3195 return (rv);
3196 }
3197
/*
 * BIOCBLINK: blink/unblink the locate LED of a physical disk,
 * identified by enclosure (channel) and slot (target).
 * Returns 0 on success, EINVAL on failure.
 */
static int
mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
{
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;
	uint32_t cmd;
	struct mfi_pd_list *pd;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
	    bb->bb_status);

	/* channel 0 means not in an enclosure so can't be blinked */
	if (bb->bb_channel == 0)
		return (EINVAL);

	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	/* find the pd whose enclosure/slot matches the request */
	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;

	switch (bb->bb_status) {
	case BIOC_SBUNBLINK:
		cmd = MR_DCMD_PD_UNBLINK;
		break;

	case BIOC_SBBLINK:
		cmd = MR_DCMD_PD_BLINK;
		break;

	case BIOC_SBALARM:
	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
		goto done;
	}


	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, MFII_DATA_NONE, false))
		goto done;

	rv = 0;
done:
	free(pd, M_DEVBUF);
	return (rv);
}
3257
/*
 * Transition a physical disk to UNCONFIG_GOOD so it can be used as a
 * spare: clear an UNCONFIG_BAD state, then clear any foreign (DDF)
 * configuration, and finally verify the resulting state.  Returns 0
 * on success or an errno.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details *pd;
	union mfi_mbox mbox;
	int rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	/* force an UNCONFIG_BAD disk to UNCONFIG_GOOD */
	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
		    MFII_DATA_NONE, false);
		if (rv != 0)
			goto done;
	}

	/* re-read the state after the transition */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	/* clear any foreign (previously imported) DDF metadata */
	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL,
		    fsi, sizeof(*fsi), MFII_DATA_IN, false);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL,
			    NULL, 0, MFII_DATA_NONE, false);
			if (rv != 0)
				goto done;
		}
	}

	/* verify the disk ended up clean and unconfigured */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3324
3325 static int
3326 mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
3327 {
3328 struct mfi_hotspare *hs;
3329 struct mfi_pd_details *pd;
3330 union mfi_mbox mbox;
3331 size_t size;
3332 int rv = EINVAL;
3333
3334 /* we really could skip and expect that inq took care of it */
3335 if (mfii_bio_getitall(sc)) {
3336 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3337 DEVNAME(sc));
3338 return (rv);
3339 }
3340 size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;
3341
3342 hs = malloc(size, M_DEVBUF, M_WAITOK);
3343 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3344
3345 memset(&mbox, 0, sizeof mbox);
3346 mbox.s[0] = pd_id;
3347 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3348 MFII_DATA_IN, false);
3349 if (rv != 0)
3350 goto done;
3351
3352 memset(hs, 0, size);
3353 hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
3354 hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
3355 rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size,
3356 MFII_DATA_OUT, false);
3357
3358 done:
3359 free(hs, M_DEVBUF);
3360 free(pd, M_DEVBUF);
3361
3362 return (rv);
3363 }
3364
/*
 * mfii_ioctl_setstate: BIOCSETSTATE ioctl backend.  Look up the physical
 * drive addressed by bs->bs_channel (enclosure index) and bs->bs_target
 * (slot) and move it to the requested firmware state (online, offline,
 * hotspare or rebuild).
 * Returns 0 on success, EINVAL on an unknown drive/opcode or firmware
 * command failure.
 */
static int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	/* fetch the list of physical drives known to the firmware */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
	    MFII_DATA_IN, false))
		goto done;

	/* translate enclosure/slot into a firmware drive id */
	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	/* the drive's sequence number is needed for PD_SET_STATE below */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	/* mbox.b[4] carries the new state for MR_DCMD_PD_SET_STATE */
	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			/*
			 * A drive that is not offline must first be made
			 * good (clear BAD/foreign state) and turned into
			 * a spare before it can become a rebuild target.
			 */
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* re-read the state/sequence changed above */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
	    MFII_DATA_NONE, false);
done:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);
	return (rv);
}
3459
3460 #if 0
3461 int
3462 mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
3463 {
3464 uint32_t opc;
3465 int rv = 0;
3466 struct mfi_pr_properties prop;
3467 struct mfi_pr_status status;
3468 uint32_t time, exec_freq;
3469
3470 switch (bp->bp_opcode) {
3471 case BIOC_SPSTOP:
3472 case BIOC_SPSTART:
3473 if (bp->bp_opcode == BIOC_SPSTART)
3474 opc = MR_DCMD_PR_START;
3475 else
3476 opc = MR_DCMD_PR_STOP;
3477 if (mfii_mgmt(sc, opc, NULL, NULL, 0, MFII_DATA_IN, false))
3478 return (EINVAL);
3479 break;
3480
3481 case BIOC_SPMANUAL:
3482 case BIOC_SPDISABLE:
3483 case BIOC_SPAUTO:
3484 /* Get device's time. */
3485 opc = MR_DCMD_TIME_SECS_GET;
3486 if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
3487 MFII_DATA_IN, false))
3488 return (EINVAL);
3489
3490 opc = MR_DCMD_PR_GET_PROPERTIES;
3491 if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
3492 MFII_DATA_IN, false))
3493 return (EINVAL);
3494
3495 switch (bp->bp_opcode) {
3496 case BIOC_SPMANUAL:
3497 prop.op_mode = MFI_PR_OPMODE_MANUAL;
3498 break;
3499 case BIOC_SPDISABLE:
3500 prop.op_mode = MFI_PR_OPMODE_DISABLED;
3501 break;
3502 case BIOC_SPAUTO:
3503 if (bp->bp_autoival != 0) {
3504 if (bp->bp_autoival == -1)
3505 /* continuously */
3506 exec_freq = 0xffffffffU;
3507 else if (bp->bp_autoival > 0)
3508 exec_freq = bp->bp_autoival;
3509 else
3510 return (EINVAL);
3511 prop.exec_freq = exec_freq;
3512 }
3513 if (bp->bp_autonext != 0) {
3514 if (bp->bp_autonext < 0)
3515 return (EINVAL);
3516 else
3517 prop.next_exec = time + bp->bp_autonext;
3518 }
3519 prop.op_mode = MFI_PR_OPMODE_AUTO;
3520 break;
3521 }
3522
3523 opc = MR_DCMD_PR_SET_PROPERTIES;
3524 if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
3525 MFII_DATA_OUT, false))
3526 return (EINVAL);
3527
3528 break;
3529
3530 case BIOC_GPSTATUS:
3531 opc = MR_DCMD_PR_GET_PROPERTIES;
3532 if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
3533 MFII_DATA_IN, false))
3534 return (EINVAL);
3535
3536 opc = MR_DCMD_PR_GET_STATUS;
3537 if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status),
3538 MFII_DATA_IN, false))
3539 return (EINVAL);
3540
3541 /* Get device's time. */
3542 opc = MR_DCMD_TIME_SECS_GET;
3543 if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
3544 MFII_DATA_IN, false))
3545 return (EINVAL);
3546
3547 switch (prop.op_mode) {
3548 case MFI_PR_OPMODE_AUTO:
3549 bp->bp_mode = BIOC_SPMAUTO;
3550 bp->bp_autoival = prop.exec_freq;
3551 bp->bp_autonext = prop.next_exec;
3552 bp->bp_autonow = time;
3553 break;
3554 case MFI_PR_OPMODE_MANUAL:
3555 bp->bp_mode = BIOC_SPMMANUAL;
3556 break;
3557 case MFI_PR_OPMODE_DISABLED:
3558 bp->bp_mode = BIOC_SPMDISABLED;
3559 break;
3560 default:
3561 printf("%s: unknown patrol mode %d\n",
3562 DEVNAME(sc), prop.op_mode);
3563 break;
3564 }
3565
3566 switch (status.state) {
3567 case MFI_PR_STATE_STOPPED:
3568 bp->bp_status = BIOC_SPSSTOPPED;
3569 break;
3570 case MFI_PR_STATE_READY:
3571 bp->bp_status = BIOC_SPSREADY;
3572 break;
3573 case MFI_PR_STATE_ACTIVE:
3574 bp->bp_status = BIOC_SPSACTIVE;
3575 break;
3576 case MFI_PR_STATE_ABORTED:
3577 bp->bp_status = BIOC_SPSABORTED;
3578 break;
3579 default:
3580 printf("%s: unknown patrol state %d\n",
3581 DEVNAME(sc), status.state);
3582 break;
3583 }
3584
3585 break;
3586
3587 default:
3588 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
3589 "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
3590 return (EINVAL);
3591 }
3592
3593 return (rv);
3594 }
3595 #endif
3596
3597 static int
3598 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3599 {
3600 struct mfi_conf *cfg;
3601 struct mfi_hotspare *hs;
3602 struct mfi_pd_details *pd;
3603 struct bioc_disk *sdhs;
3604 struct bioc_vol *vdhs;
3605 struct scsipi_inquiry_data *inqbuf;
3606 char vend[8+16+4+1], *vendp;
3607 int i, rv = EINVAL;
3608 uint32_t size;
3609 union mfi_mbox mbox;
3610
3611 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3612
3613 if (!bio_hs)
3614 return (EINVAL);
3615
3616 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3617
3618 /* send single element command to retrieve size for full structure */
3619 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3620 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
3621 MFII_DATA_IN, false))
3622 goto freeme;
3623
3624 size = cfg->mfc_size;
3625 free(cfg, M_DEVBUF);
3626
3627 /* memory for read config */
3628 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3629 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
3630 MFII_DATA_IN, false))
3631 goto freeme;
3632
3633 /* calculate offset to hs structure */
3634 hs = (struct mfi_hotspare *)(
3635 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3636 cfg->mfc_array_size * cfg->mfc_no_array +
3637 cfg->mfc_ld_size * cfg->mfc_no_ld);
3638
3639 if (volid < cfg->mfc_no_ld)
3640 goto freeme; /* not a hotspare */
3641
3642 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3643 goto freeme; /* not a hotspare */
3644
3645 /* offset into hotspare structure */
3646 i = volid - cfg->mfc_no_ld;
3647
3648 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3649 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3650 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3651
3652 /* get pd fields */
3653 memset(&mbox, 0, sizeof(mbox));
3654 mbox.s[0] = hs[i].mhs_pd.mfp_id;
3655 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3656 MFII_DATA_IN, false)) {
3657 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3658 DEVNAME(sc));
3659 goto freeme;
3660 }
3661
3662 switch (type) {
3663 case MFI_MGMT_VD:
3664 vdhs = bio_hs;
3665 vdhs->bv_status = BIOC_SVONLINE;
3666 vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3667 vdhs->bv_level = -1; /* hotspare */
3668 vdhs->bv_nodisk = 1;
3669 break;
3670
3671 case MFI_MGMT_SD:
3672 sdhs = bio_hs;
3673 sdhs->bd_status = BIOC_SDHOTSPARE;
3674 sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3675 sdhs->bd_channel = pd->mpd_enc_idx;
3676 sdhs->bd_target = pd->mpd_enc_slot;
3677 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
3678 vendp = inqbuf->vendor;
3679 memcpy(vend, vendp, sizeof vend - 1);
3680 vend[sizeof vend - 1] = '\0';
3681 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3682 break;
3683
3684 default:
3685 goto freeme;
3686 }
3687
3688 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3689 rv = 0;
3690 freeme:
3691 free(pd, M_DEVBUF);
3692 free(cfg, M_DEVBUF);
3693
3694 return (rv);
3695 }
3696
3697 #endif /* NBIO > 0 */
3698
3699 #define MFI_BBU_SENSORS 4
3700
3701 static void
3702 mfii_bbu(struct mfii_softc *sc, envsys_data_t *edata)
3703 {
3704 struct mfi_bbu_status bbu;
3705 u_int32_t status;
3706 u_int32_t mask;
3707 u_int32_t soh_bad;
3708 int rv;
3709
3710 mutex_enter(&sc->sc_lock);
3711 rv = mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
3712 sizeof(bbu), MFII_DATA_IN, false);
3713 mutex_exit(&sc->sc_lock);
3714 if (rv != 0) {
3715 edata->state = ENVSYS_SINVALID;
3716 edata->value_cur = 0;
3717 return;
3718 }
3719
3720 switch (bbu.battery_type) {
3721 case MFI_BBU_TYPE_IBBU:
3722 case MFI_BBU_TYPE_IBBU09:
3723 mask = MFI_BBU_STATE_BAD_IBBU;
3724 soh_bad = 0;
3725 break;
3726 case MFI_BBU_TYPE_BBU:
3727 mask = MFI_BBU_STATE_BAD_BBU;
3728 soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3729 break;
3730
3731 case MFI_BBU_TYPE_NONE:
3732 default:
3733 edata->state = ENVSYS_SCRITICAL;
3734 edata->value_cur = 0;
3735 return;
3736 }
3737
3738 status = le32toh(bbu.fw_status) & mask;
3739 switch (edata->sensor) {
3740 case 0:
3741 edata->value_cur = (status || soh_bad) ? 0 : 1;
3742 edata->state =
3743 edata->value_cur ? ENVSYS_SVALID : ENVSYS_SCRITICAL;
3744 return;
3745 case 1:
3746 edata->value_cur = le16toh(bbu.voltage) * 1000;
3747 edata->state = ENVSYS_SVALID;
3748 return;
3749 case 2:
3750 edata->value_cur = (int16_t)le16toh(bbu.current) * 1000;
3751 edata->state = ENVSYS_SVALID;
3752 return;
3753 case 3:
3754 edata->value_cur = le16toh(bbu.temperature) * 1000000 + 273150000;
3755 edata->state = ENVSYS_SVALID;
3756 return;
3757 }
3758 }
3759
3760 static void
3761 mfii_refresh_ld_sensor(struct mfii_softc *sc, envsys_data_t *edata)
3762 {
3763 struct bioc_vol bv;
3764 int error;
3765
3766 memset(&bv, 0, sizeof(bv));
3767 bv.bv_volid = edata->sensor - MFI_BBU_SENSORS;
3768 mutex_enter(&sc->sc_lock);
3769 error = mfii_ioctl_vol(sc, &bv);
3770 mutex_exit(&sc->sc_lock);
3771 if (error)
3772 bv.bv_status = BIOC_SVINVALID;
3773 bio_vol_to_envsys(edata, &bv);
3774 }
3775
3776 static void
3777 mfii_init_ld_sensor(struct mfii_softc *sc, envsys_data_t *sensor, int i)
3778 {
3779 sensor->units = ENVSYS_DRIVE;
3780 sensor->state = ENVSYS_SINVALID;
3781 sensor->value_cur = ENVSYS_DRIVE_EMPTY;
3782 /* Enable monitoring for drive state changes */
3783 sensor->flags |= ENVSYS_FMONSTCHANGED;
3784 snprintf(sensor->desc, sizeof(sensor->desc), "%s:%d", DEVNAME(sc), i);
3785 }
3786
3787 static void
3788 mfii_attach_sensor(struct mfii_softc *sc, envsys_data_t *s)
3789 {
3790 if (sysmon_envsys_sensor_attach(sc->sc_sme, s))
3791 aprint_error_dev(sc->sc_dev,
3792 "failed to attach sensor %s\n", s->desc);
3793 }
3794
3795 static int
3796 mfii_create_sensors(struct mfii_softc *sc)
3797 {
3798 int i, rv;
3799 const int nsensors = MFI_BBU_SENSORS + MFI_MAX_LD;
3800
3801 sc->sc_sme = sysmon_envsys_create();
3802 sc->sc_sensors = malloc(sizeof(envsys_data_t) * nsensors,
3803 M_DEVBUF, M_WAITOK | M_ZERO);
3804
3805 /* BBU */
3806 sc->sc_sensors[0].units = ENVSYS_INDICATOR;
3807 sc->sc_sensors[0].state = ENVSYS_SINVALID;
3808 sc->sc_sensors[0].value_cur = 0;
3809 sc->sc_sensors[1].units = ENVSYS_SVOLTS_DC;
3810 sc->sc_sensors[1].state = ENVSYS_SINVALID;
3811 sc->sc_sensors[1].value_cur = 0;
3812 sc->sc_sensors[2].units = ENVSYS_SAMPS;
3813 sc->sc_sensors[2].state = ENVSYS_SINVALID;
3814 sc->sc_sensors[2].value_cur = 0;
3815 sc->sc_sensors[3].units = ENVSYS_STEMP;
3816 sc->sc_sensors[3].state = ENVSYS_SINVALID;
3817 sc->sc_sensors[3].value_cur = 0;
3818
3819 if (ISSET(le32toh(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3820 sc->sc_bbuok = true;
3821 sc->sc_sensors[0].flags |= ENVSYS_FMONCRITICAL;
3822 snprintf(sc->sc_sensors[0].desc, sizeof(sc->sc_sensors[0].desc),
3823 "%s BBU state", DEVNAME(sc));
3824 snprintf(sc->sc_sensors[1].desc, sizeof(sc->sc_sensors[1].desc),
3825 "%s BBU voltage", DEVNAME(sc));
3826 snprintf(sc->sc_sensors[2].desc, sizeof(sc->sc_sensors[2].desc),
3827 "%s BBU current", DEVNAME(sc));
3828 snprintf(sc->sc_sensors[3].desc, sizeof(sc->sc_sensors[3].desc),
3829 "%s BBU temperature", DEVNAME(sc));
3830 for (i = 0; i < MFI_BBU_SENSORS; i++) {
3831 mfii_attach_sensor(sc, &sc->sc_sensors[i]);
3832 }
3833 }
3834
3835 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3836 mfii_init_ld_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS], i);
3837 mfii_attach_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS]);
3838 }
3839
3840 sc->sc_sme->sme_name = DEVNAME(sc);
3841 sc->sc_sme->sme_cookie = sc;
3842 sc->sc_sme->sme_refresh = mfii_refresh_sensor;
3843 rv = sysmon_envsys_register(sc->sc_sme);
3844 if (rv) {
3845 aprint_error_dev(sc->sc_dev,
3846 "unable to register with sysmon (rv = %d)\n", rv);
3847 }
3848 return rv;
3849
3850 }
3851
3852 static int
3853 mfii_destroy_sensors(struct mfii_softc *sc)
3854 {
3855 if (sc->sc_sme == NULL)
3856 return 0;
3857 sysmon_envsys_unregister(sc->sc_sme);
3858 sc->sc_sme = NULL;
3859 free(sc->sc_sensors, M_DEVBUF);
3860 return 0;
3861 }
3862
3863 static void
3864 mfii_refresh_sensor(struct sysmon_envsys *sme, envsys_data_t *edata)
3865 {
3866 struct mfii_softc *sc = sme->sme_cookie;
3867
3868 if (edata->sensor >= MFI_BBU_SENSORS + MFI_MAX_LD)
3869 return;
3870
3871 if (edata->sensor < MFI_BBU_SENSORS) {
3872 if (sc->sc_bbuok)
3873 mfii_bbu(sc, edata);
3874 } else {
3875 mfii_refresh_ld_sensor(sc, edata);
3876 }
3877 }
3878