mfii.c revision 1.10 1 /* $NetBSD: mfii.c,v 1.10 2022/05/05 09:14:17 msaitoh Exp $ */
2 /* $OpenBSD: mfii.c,v 1.58 2018/08/14 05:22:21 jmatthew Exp $ */
3
4 /*
5 * Copyright (c) 2018 Manuel Bouyer <Manuel.Bouyer (at) lip6.fr>
6 * Copyright (c) 2012 David Gwynne <dlg (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include <sys/cdefs.h>
22 __KERNEL_RCSID(0, "$NetBSD: mfii.c,v 1.10 2022/05/05 09:14:17 msaitoh Exp $");
23
24 #include "bio.h"
25
26 #include <sys/atomic.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/ioctl.h>
31 #include <sys/device.h>
32 #include <sys/kernel.h>
33 #include <sys/proc.h>
34 #include <sys/cpu.h>
35 #include <sys/conf.h>
36 #include <sys/kauth.h>
37 #include <sys/workqueue.h>
38 #include <sys/malloc.h>
39
40 #include <uvm/uvm_param.h>
41
42 #include <dev/pci/pcidevs.h>
43 #include <dev/pci/pcivar.h>
44
45 #include <sys/bus.h>
46
47 #include <dev/sysmon/sysmonvar.h>
48 #include <sys/envsys.h>
49
50 #include <dev/scsipi/scsipi_all.h>
51 #include <dev/scsipi/scsi_all.h>
52 #include <dev/scsipi/scsi_spc.h>
53 #include <dev/scsipi/scsipi_disk.h>
54 #include <dev/scsipi/scsi_disk.h>
55 #include <dev/scsipi/scsiconf.h>
56
57 #if NBIO > 0
58 #include <dev/biovar.h>
59 #endif /* NBIO > 0 */
60
61 #include <dev/ic/mfireg.h>
62 #include <dev/pci/mpiireg.h>
63
64 #define MFII_BAR 0x14
65 #define MFII_BAR_35 0x10
66 #define MFII_PCI_MEMSIZE 0x2000 /* 8k */
67
68 #define MFII_OSTS_INTR_VALID 0x00000009
69 #define MFII_RPI 0x6c /* reply post host index */
70 #define MFII_OSP2 0xb4 /* outbound scratch pad 2 */
71 #define MFII_OSP3 0xb8 /* outbound scratch pad 3 */
72
73 #define MFII_REQ_TYPE_SCSI MPII_REQ_DESCR_SCSI_IO
74 #define MFII_REQ_TYPE_LDIO (0x7 << 1)
75 #define MFII_REQ_TYPE_MFA (0x1 << 1)
76 #define MFII_REQ_TYPE_NO_LOCK (0x2 << 1)
77 #define MFII_REQ_TYPE_HI_PRI (0x6 << 1)
78
79 #define MFII_REQ_MFA(_a) htole64((_a) | MFII_REQ_TYPE_MFA)
80
81 #define MFII_FUNCTION_PASSTHRU_IO (0xf0)
82 #define MFII_FUNCTION_LDIO_REQUEST (0xf1)
83
84 #define MFII_MAX_CHAIN_UNIT 0x00400000
85 #define MFII_MAX_CHAIN_MASK 0x000003E0
86 #define MFII_MAX_CHAIN_SHIFT 5
87
88 #define MFII_256K_IO 128
89 #define MFII_1MB_IO (MFII_256K_IO * 4)
90
91 #define MFII_CHAIN_FRAME_MIN 1024
92
/*
 * Request descriptor posted to the firmware's inbound queue to start
 * a command; identifies the request frame by its SMID.  Layout is
 * fixed by the hardware interface (hence __packed).
 */
struct mfii_request_descr {
	u_int8_t	flags;		/* MFII_REQ_TYPE_* submission type */
	u_int8_t	msix_index;
	u_int16_t	smid;		/* system message id (frame index) */

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
101
102 #define MFII_RAID_CTX_IO_TYPE_SYSPD (0x1 << 4)
103 #define MFII_RAID_CTX_TYPE_CUDA (0x2 << 4)
104
/*
 * MegaRAID-specific context that follows the MPII SCSI IO message
 * inside each MFII_REQUEST_SIZE frame (see the nsge_in_io computation
 * in mfii_attach()).  Layout fixed by firmware.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

/* gen 3.5 controllers (mfii_iop_35) use this field for routing flags */
#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;	/* extended completion status */
	u_int8_t	status;		/* completion status */

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
135
/*
 * IEEE-style scatter/gather element: 64-bit DMA address plus length.
 * sg_flags takes the MFII_SGE_* values defined below (chain element,
 * end of list, address type).
 */
struct mfii_sge {
	u_int64_t	sg_addr;
	u_int32_t	sg_len;
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;
} __packed;
143
144 #define MFII_SGE_ADDR_MASK (0x03)
145 #define MFII_SGE_ADDR_SYSTEM (0x00)
146 #define MFII_SGE_ADDR_IOCDDR (0x01)
147 #define MFII_SGE_ADDR_IOCPLB (0x02)
148 #define MFII_SGE_ADDR_IOCPLBNTA (0x03)
149 #define MFII_SGE_END_OF_LIST (0x40)
150 #define MFII_SGE_CHAIN_ELEMENT (0x80)
151
152 #define MFII_REQUEST_SIZE 256
153
154 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
155
156 #define MFII_MAX_ROW 32
157 #define MFII_MAX_ARRAY 128
158
/* per-array map of rows to physical device ids (part of the LD map) */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;
162
/* per physical-device handle state reported in the LD map */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;	/* handle currently in use */
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;
169
/* layout of the MR_DCMD_LD_MAP_GET_INFO response buffer */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
181
/*
 * Task management request/reply pair.  The raw 128-byte arrays pin
 * the size of each half; the MPII structures are overlaid on them.
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define MFII_TASK_MGMT_FLAGS_LD				(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD				(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
198
/* We currently don't know the full details of the following struct */
struct mfii_foreign_scan_cfg {
	char data[24];
} __packed;
203
/* result of a foreign-configuration scan */
struct mfii_foreign_scan_info {
	uint32_t count; /* Number of foreign configs found */
	struct mfii_foreign_scan_cfg cfgs[8];
} __packed;
208
/*
 * A contiguous chunk of DMA-safe memory: the dmamap, its (single)
 * segment, the mapped kernel va and the allocation size.  Allocated
 * and freed with mfii_dmamem_alloc()/mfii_dmamem_free(); use the
 * accessor macros below rather than poking at the members.
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	void			*mdm_kva;
};
#define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
219
220 struct mfii_softc;
221
/* data transfer direction of a command, from the host's point of view */
typedef enum mfii_direction {
	MFII_DATA_NONE = 0,
	MFII_DATA_IN,		/* controller -> host */
	MFII_DATA_OUT		/* host -> controller */
} mfii_direction_t;
227
/*
 * Per-command control block.  Each ccb owns a fixed slice of the DMA
 * areas allocated in mfii_attach() (request frames, MFI frames, sense
 * buffers, SGL space); for each slice it keeps the kva pointer, the
 * 64-bit device address and the byte offset within the parent area
 * (the offset is what bus_dmamap_sync() wants).
 */
struct mfii_ccb {
	struct mfii_softc	*ccb_sc;
	/* MPII request frame, MFII_REQUEST_SIZE bytes in sc_requests */
	void			*ccb_request;
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	/* MFI frame in sc_mfi, used for commands proxied as passthru */
	void			*ccb_mfi;
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	/* sense buffer in sc_sense; also reused to hold dcmd frames
	 * (see the comment above mfii_dcmd_start()) */
	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	/* chain SGL space in sc_sgl, sc_max_sgl entries */
	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	/* descriptor written to the firmware to start this ccb */
	struct mfii_request_descr ccb_req;

	bus_dmamap_t		ccb_dmamap64;
	bus_dmamap_t		ccb_dmamap32;
	bool			ccb_dma64;

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	mfii_direction_t	ccb_direction;

	void			*ccb_cookie;	/* completion context */
	kmutex_t		ccb_mtx;
	kcondvar_t		ccb_cv;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
271
/*
 * Per-controller-generation personality: which BAR holds the
 * registers, where the SGE count lives, and the flag values the
 * firmware expects in LD I/O requests and SGEs.  Instances are
 * mfii_iop_thunderbolt/_25/_35 below, selected by PCI id.
 */
struct mfii_iop {
	int bar;
	int num_sge_loc;
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;
	u_int8_t sge_flag_eol;
};
283
/* per-controller state */
struct mfii_softc {
	device_t		sc_dev;
	struct scsipi_channel	sc_chan;
	struct scsipi_adapter	sc_adapt;

	const struct mfii_iop	*sc_iop;	/* controller personality */

	/* PCI glue, saved from the attach args */
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;
	bus_dma_tag_t		sc_dmat64;
	bool			sc_64bit_dma;

	void			*sc_ih;		/* interrupt handle */

	kmutex_t		sc_ccb_mtx;	/* protects sc_ccb_freeq */
	kmutex_t		sc_post_mtx;	/* serializes command posting */

	/* command limits negotiated with the firmware in mfii_attach() */
	u_int			sc_max_fw_cmds;
	u_int			sc_max_cmds;
	u_int			sc_max_sgl;

	/* reply post queue, written by the firmware */
	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	kmutex_t		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* DMA areas sliced up among the ccbs */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_mfi;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;	/* ccb array */
	struct mfii_ccb_list	sc_ccb_freeq;

	/* async event notification */
	struct mfii_ccb		*sc_aen_ccb;
	struct workqueue	*sc_aen_wq;
	struct work		sc_aen_work;

	/* command abort handling, run from a workqueue */
	kmutex_t		sc_abort_mtx;
	struct mfii_ccb_list	sc_abort_list;
	struct workqueue	*sc_abort_wq;
	struct work		sc_abort_work;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		bool		ld_present;
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFI_MAX_LD];
	int			sc_target_lds[MFI_MAX_LD];

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfi_ld_list	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
	int			sc_no_pd; /* used physical disks */
	int			sc_ld_sz; /* sizeof sc_ld_details */

	/* mgmt lock */
	kmutex_t		sc_lock;
	bool			sc_running;	/* accepting new commands */

	/* sensors */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
	bool			sc_bbuok;	/* battery backup unit ok */

	device_t		sc_child;	/* attached scsibus */
};
360
361 // #define MFII_DEBUG
362 #ifdef MFII_DEBUG
363 #define DPRINTF(x...) do { if (mfii_debug) printf(x); } while(0)
364 #define DNPRINTF(n,x...) do { if (mfii_debug & n) printf(x); } while(0)
365 #define MFII_D_CMD 0x0001
366 #define MFII_D_INTR 0x0002
367 #define MFII_D_MISC 0x0004
368 #define MFII_D_DMA 0x0008
369 #define MFII_D_IOCTL 0x0010
370 #define MFII_D_RW 0x0020
371 #define MFII_D_MEM 0x0040
372 #define MFII_D_CCB 0x0080
373 uint32_t mfii_debug = 0
374 /* | MFII_D_CMD */
375 /* | MFII_D_INTR */
376 | MFII_D_MISC
377 /* | MFII_D_DMA */
378 /* | MFII_D_IOCTL */
379 /* | MFII_D_RW */
380 /* | MFII_D_MEM */
381 /* | MFII_D_CCB */
382 ;
383 #else
384 #define DPRINTF(x...)
385 #define DNPRINTF(n,x...)
386 #endif
387
388 static int mfii_match(device_t, cfdata_t, void *);
389 static void mfii_attach(device_t, device_t, void *);
390 static int mfii_detach(device_t, int);
391 static int mfii_rescan(device_t, const char *, const int *);
392 static void mfii_childdetached(device_t, device_t);
393 static bool mfii_suspend(device_t, const pmf_qual_t *);
394 static bool mfii_resume(device_t, const pmf_qual_t *);
395 static bool mfii_shutdown(device_t, int);
396
397
398 CFATTACH_DECL3_NEW(mfii, sizeof(struct mfii_softc),
399 mfii_match, mfii_attach, mfii_detach, NULL, mfii_rescan,
400 mfii_childdetached, DVF_DETACH_SHUTDOWN);
401
402 static void mfii_scsipi_request(struct scsipi_channel *,
403 scsipi_adapter_req_t, void *);
404 static void mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
405
406 #define DEVNAME(_sc) (device_xname((_sc)->sc_dev))
407
408 static u_int32_t mfii_read(struct mfii_softc *, bus_size_t);
409 static void mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
410
411 static struct mfii_dmamem * mfii_dmamem_alloc(struct mfii_softc *, size_t);
412 static void mfii_dmamem_free(struct mfii_softc *,
413 struct mfii_dmamem *);
414
415 static struct mfii_ccb * mfii_get_ccb(struct mfii_softc *);
416 static void mfii_put_ccb(struct mfii_softc *, struct mfii_ccb *);
417 static int mfii_init_ccb(struct mfii_softc *);
418 static void mfii_scrub_ccb(struct mfii_ccb *);
419
420 static int mfii_transition_firmware(struct mfii_softc *);
421 static int mfii_initialise_firmware(struct mfii_softc *);
422 static int mfii_get_info(struct mfii_softc *);
423
424 static void mfii_start(struct mfii_softc *, struct mfii_ccb *);
425 static void mfii_done(struct mfii_softc *, struct mfii_ccb *);
426 static int mfii_poll(struct mfii_softc *, struct mfii_ccb *);
427 static void mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
428 static int mfii_exec(struct mfii_softc *, struct mfii_ccb *);
429 static void mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
430 static int mfii_my_intr(struct mfii_softc *);
431 static int mfii_intr(void *);
432 static void mfii_postq(struct mfii_softc *);
433
434 static int mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
435 void *, int);
436 static int mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
437 void *, int);
438
439 static int mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
440
441 static int mfii_mgmt(struct mfii_softc *, uint32_t,
442 const union mfi_mbox *, void *, size_t,
443 mfii_direction_t, bool);
444 static int mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
445 uint32_t, const union mfi_mbox *, void *, size_t,
446 mfii_direction_t, bool);
447 static void mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
448
449 static int mfii_scsi_cmd_io(struct mfii_softc *,
450 struct mfii_ccb *, struct scsipi_xfer *);
451 static int mfii_scsi_cmd_cdb(struct mfii_softc *,
452 struct mfii_ccb *, struct scsipi_xfer *);
453 static void mfii_scsi_cmd_tmo(void *);
454
455 static void mfii_abort_task(struct work *, void *);
456 static void mfii_abort(struct mfii_softc *, struct mfii_ccb *,
457 uint16_t, uint16_t, uint8_t, uint32_t);
458 static void mfii_scsi_cmd_abort_done(struct mfii_softc *,
459 struct mfii_ccb *);
460
461 static int mfii_aen_register(struct mfii_softc *);
462 static void mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
463 struct mfii_dmamem *, uint32_t);
464 static void mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
465 static void mfii_aen(struct work *, void *);
466 static void mfii_aen_unregister(struct mfii_softc *);
467
468 static void mfii_aen_pd_insert(struct mfii_softc *,
469 const struct mfi_evtarg_pd_address *);
470 static void mfii_aen_pd_remove(struct mfii_softc *,
471 const struct mfi_evtarg_pd_address *);
472 static void mfii_aen_pd_state_change(struct mfii_softc *,
473 const struct mfi_evtarg_pd_state *);
474 static void mfii_aen_ld_update(struct mfii_softc *);
475
476 #if NBIO > 0
477 static int mfii_ioctl(device_t, u_long, void *);
478 static int mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
479 static int mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
480 static int mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
481 static int mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
482 static int mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
483 static int mfii_ioctl_setstate(struct mfii_softc *,
484 struct bioc_setstate *);
485 static int mfii_bio_hs(struct mfii_softc *, int, int, void *);
486 static int mfii_bio_getitall(struct mfii_softc *);
487 #endif /* NBIO > 0 */
488
489 #if 0
490 static const char *mfi_bbu_indicators[] = {
491 "pack missing",
492 "voltage low",
493 "temp high",
494 "charge active",
495 "discharge active",
496 "learn cycle req'd",
497 "learn cycle active",
498 "learn cycle failed",
499 "learn cycle timeout",
500 "I2C errors",
501 "replace pack",
502 "low capacity",
503 "periodic learn req'd"
504 };
505 #endif
506
507 static void mfii_init_ld_sensor(struct mfii_softc *, envsys_data_t *, int);
508 static void mfii_refresh_ld_sensor(struct mfii_softc *, envsys_data_t *);
509 static void mfii_attach_sensor(struct mfii_softc *, envsys_data_t *);
510 static int mfii_create_sensors(struct mfii_softc *);
511 static int mfii_destroy_sensors(struct mfii_softc *);
512 static void mfii_refresh_sensor(struct sysmon_envsys *, envsys_data_t *);
513 static void mfii_bbu(struct mfii_softc *, envsys_data_t *);
514
515 /*
516 * mfii boards support asynchronous (and non-polled) completion of
517 * dcmds by proxying them through a passthru mpii command that points
518 * at a dcmd frame. since the passthru command is submitted like
519 * the scsi commands using an SMID in the request descriptor,
520 * ccb_request memory * must contain the passthru command because
521 * that is what the SMID refers to. this means ccb_request cannot
522 * contain the dcmd. rather than allocating separate dma memory to
523 * hold the dcmd, we reuse the sense memory buffer for it.
524 */
525
526 static void mfii_dcmd_start(struct mfii_softc *, struct mfii_ccb *);
527
528 static inline void
529 mfii_dcmd_scrub(struct mfii_ccb *ccb)
530 {
531 memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
532 }
533
534 static inline struct mfi_dcmd_frame *
535 mfii_dcmd_frame(struct mfii_ccb *ccb)
536 {
537 CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
538 return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
539 }
540
541 static inline void
542 mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
543 {
544 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
545 ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
546 }
547
548 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
549
/* personality for gen-2 "Thunderbolt" (SAS2208) controllers */
static const struct mfii_iop mfii_iop_thunderbolt = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	0,
	MFII_REQ_TYPE_LDIO,
	0,
	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
	0
};
559
560 /*
561 * a lot of these values depend on us not implementing fastpath yet.
562 */
/* personality for gen-3 (SAS3008/3108) controllers */
static const struct mfii_iop mfii_iop_25 = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST
};
572
/* personality for gen-3.5 (SAS34xx/35xx) controllers */
static const struct mfii_iop mfii_iop_35 = {
	MFII_BAR_35,
	MFII_IOP_NUM_SGE_LOC_35,
	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST
};
582
/* entry in the PCI id -> iop personality match table below */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};
588
/* supported controllers, searched linearly by mfii_find_iop() */
static const struct mfii_device mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 }
};
609
610 static const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
611
612 static const struct mfii_iop *
613 mfii_find_iop(struct pci_attach_args *pa)
614 {
615 const struct mfii_device *mpd;
616 int i;
617
618 for (i = 0; i < __arraycount(mfii_devices); i++) {
619 mpd = &mfii_devices[i];
620
621 if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
622 mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
623 return (mpd->mpd_iop);
624 }
625
626 return (NULL);
627 }
628
629 static int
630 mfii_match(device_t parent, cfdata_t match, void *aux)
631 {
632 return ((mfii_find_iop(aux) != NULL) ? 2 : 0);
633 }
634
635 static void
636 mfii_attach(device_t parent, device_t self, void *aux)
637 {
638 struct mfii_softc *sc = device_private(self);
639 struct pci_attach_args *pa = aux;
640 pcireg_t memtype;
641 pci_intr_handle_t ih;
642 char intrbuf[PCI_INTRSTR_LEN];
643 const char *intrstr;
644 u_int32_t status, scpad2, scpad3;
645 int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
646 struct scsipi_adapter *adapt = &sc->sc_adapt;
647 struct scsipi_channel *chan = &sc->sc_chan;
648
649 /* init sc */
650 sc->sc_dev = self;
651 sc->sc_iop = mfii_find_iop(aux);
652 sc->sc_dmat = pa->pa_dmat;
653 if (pci_dma64_available(pa)) {
654 sc->sc_dmat64 = pa->pa_dmat64;
655 sc->sc_64bit_dma = 1;
656 } else {
657 sc->sc_dmat64 = pa->pa_dmat;
658 sc->sc_64bit_dma = 0;
659 }
660 SIMPLEQ_INIT(&sc->sc_ccb_freeq);
661 mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
662 mutex_init(&sc->sc_post_mtx, MUTEX_DEFAULT, IPL_BIO);
663 mutex_init(&sc->sc_reply_postq_mtx, MUTEX_DEFAULT, IPL_BIO);
664
665 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
666
667 sc->sc_aen_ccb = NULL;
668 snprintf(intrbuf, sizeof(intrbuf) - 1, "%saen", device_xname(self));
669 workqueue_create(&sc->sc_aen_wq, intrbuf, mfii_aen, sc,
670 PRI_BIO, IPL_BIO, WQ_MPSAFE);
671
672 snprintf(intrbuf, sizeof(intrbuf) - 1, "%sabrt", device_xname(self));
673 workqueue_create(&sc->sc_abort_wq, intrbuf, mfii_abort_task,
674 sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
675
676 mutex_init(&sc->sc_abort_mtx, MUTEX_DEFAULT, IPL_BIO);
677 SIMPLEQ_INIT(&sc->sc_abort_list);
678
679 /* wire up the bus shizz */
680 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
681 memtype |= PCI_MAPREG_MEM_TYPE_32BIT;
682 if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
683 &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)) {
684 aprint_error(": unable to map registers\n");
685 return;
686 }
687
688 /* disable interrupts */
689 mfii_write(sc, MFI_OMSK, 0xffffffff);
690
691 if (pci_intr_map(pa, &ih) != 0) {
692 aprint_error(": unable to map interrupt\n");
693 goto pci_unmap;
694 }
695 intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf));
696 pci_intr_setattr(pa->pa_pc, &ih, PCI_INTR_MPSAFE, true);
697
698 /* lets get started */
699 if (mfii_transition_firmware(sc))
700 goto pci_unmap;
701 sc->sc_running = true;
702
703 /* determine max_cmds (refer to the Linux megaraid_sas driver) */
704 scpad3 = mfii_read(sc, MFII_OSP3);
705 status = mfii_fw_state(sc);
706 sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
707 if (sc->sc_max_fw_cmds == 0)
708 sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
709 /*
710 * reduce max_cmds by 1 to ensure that the reply queue depth does not
711 * exceed FW supplied max_fw_cmds.
712 */
713 sc->sc_max_cmds = uimin(sc->sc_max_fw_cmds, 1024) - 1;
714
715 /* determine max_sgl (refer to the Linux megaraid_sas driver) */
716 scpad2 = mfii_read(sc, MFII_OSP2);
717 chain_frame_sz =
718 ((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
719 ((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
720 if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
721 chain_frame_sz = MFII_CHAIN_FRAME_MIN;
722
723 nsge_in_io = (MFII_REQUEST_SIZE -
724 sizeof(struct mpii_msg_scsi_io) -
725 sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
726 nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
727
728 /* round down to nearest power of two */
729 sc->sc_max_sgl = 1;
730 while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
731 sc->sc_max_sgl <<= 1;
732
733 DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
734 DEVNAME(sc), status, scpad2, scpad3);
735 DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
736 DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
737 DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
738 "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
739 sc->sc_max_sgl);
740
741 /* sense memory */
742 CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
743 sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
744 if (sc->sc_sense == NULL) {
745 aprint_error(": unable to allocate sense memory\n");
746 goto pci_unmap;
747 }
748
749 /* reply post queue */
750 sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
751
752 sc->sc_reply_postq = mfii_dmamem_alloc(sc,
753 sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
754 if (sc->sc_reply_postq == NULL)
755 goto free_sense;
756
757 memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
758 MFII_DMA_LEN(sc->sc_reply_postq));
759
760 /* MPII request frame array */
761 sc->sc_requests = mfii_dmamem_alloc(sc,
762 MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
763 if (sc->sc_requests == NULL)
764 goto free_reply_postq;
765
766 /* MFI command frame array */
767 sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
768 if (sc->sc_mfi == NULL)
769 goto free_requests;
770
771 /* MPII SGL array */
772 sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
773 sizeof(struct mfii_sge) * sc->sc_max_sgl);
774 if (sc->sc_sgl == NULL)
775 goto free_mfi;
776
777 if (mfii_init_ccb(sc) != 0) {
778 aprint_error(": could not init ccb list\n");
779 goto free_sgl;
780 }
781
782 /* kickstart firmware with all addresses and pointers */
783 if (mfii_initialise_firmware(sc) != 0) {
784 aprint_error(": could not initialize firmware\n");
785 goto free_sgl;
786 }
787
788 mutex_enter(&sc->sc_lock);
789 if (mfii_get_info(sc) != 0) {
790 mutex_exit(&sc->sc_lock);
791 aprint_error(": could not retrieve controller information\n");
792 goto free_sgl;
793 }
794 mutex_exit(&sc->sc_lock);
795
796 aprint_normal(": \"%s\", firmware %s",
797 sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
798 if (le16toh(sc->sc_info.mci_memory_size) > 0) {
799 aprint_normal(", %uMB cache",
800 le16toh(sc->sc_info.mci_memory_size));
801 }
802 aprint_normal("\n");
803 aprint_naive("\n");
804
805 sc->sc_ih = pci_intr_establish_xname(sc->sc_pc, ih, IPL_BIO,
806 mfii_intr, sc, DEVNAME(sc));
807 if (sc->sc_ih == NULL) {
808 aprint_error_dev(self, "can't establish interrupt");
809 if (intrstr)
810 aprint_error(" at %s", intrstr);
811 aprint_error("\n");
812 goto free_sgl;
813 }
814 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
815
816 for (i = 0; i < sc->sc_info.mci_lds_present; i++)
817 sc->sc_ld[i].ld_present = 1;
818
819 memset(adapt, 0, sizeof(*adapt));
820 adapt->adapt_dev = sc->sc_dev;
821 adapt->adapt_nchannels = 1;
822 /* keep a few commands for management */
823 if (sc->sc_max_cmds > 4)
824 adapt->adapt_openings = sc->sc_max_cmds - 4;
825 else
826 adapt->adapt_openings = sc->sc_max_cmds;
827 adapt->adapt_max_periph = adapt->adapt_openings;
828 adapt->adapt_request = mfii_scsipi_request;
829 adapt->adapt_minphys = minphys;
830 adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
831
832 memset(chan, 0, sizeof(*chan));
833 chan->chan_adapter = adapt;
834 chan->chan_bustype = &scsi_sas_bustype;
835 chan->chan_channel = 0;
836 chan->chan_flags = 0;
837 chan->chan_nluns = 8;
838 chan->chan_ntargets = sc->sc_info.mci_max_lds;
839 chan->chan_id = sc->sc_info.mci_max_lds;
840
841 mfii_rescan(sc->sc_dev, NULL, NULL);
842
843 if (mfii_aen_register(sc) != 0) {
844 /* error printed by mfii_aen_register */
845 goto intr_disestablish;
846 }
847
848 mutex_enter(&sc->sc_lock);
849 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
850 sizeof(sc->sc_ld_list), MFII_DATA_IN, true) != 0) {
851 mutex_exit(&sc->sc_lock);
852 aprint_error_dev(self,
853 "getting list of logical disks failed\n");
854 goto intr_disestablish;
855 }
856 mutex_exit(&sc->sc_lock);
857 memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
858 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
859 int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
860 sc->sc_target_lds[target] = i;
861 }
862
863 /* enable interrupts */
864 mfii_write(sc, MFI_OSTS, 0xffffffff);
865 mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
866
867 #if NBIO > 0
868 if (bio_register(sc->sc_dev, mfii_ioctl) != 0)
869 panic("%s: controller registration failed", DEVNAME(sc));
870 #endif /* NBIO > 0 */
871
872 if (mfii_create_sensors(sc) != 0)
873 aprint_error_dev(self, "unable to create sensors\n");
874
875 if (!pmf_device_register1(sc->sc_dev, mfii_suspend, mfii_resume,
876 mfii_shutdown))
877 aprint_error_dev(self, "couldn't establish power handler\n");
878 return;
879 intr_disestablish:
880 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
881 free_sgl:
882 mfii_dmamem_free(sc, sc->sc_sgl);
883 free_mfi:
884 mfii_dmamem_free(sc, sc->sc_mfi);
885 free_requests:
886 mfii_dmamem_free(sc, sc->sc_requests);
887 free_reply_postq:
888 mfii_dmamem_free(sc, sc->sc_reply_postq);
889 free_sense:
890 mfii_dmamem_free(sc, sc->sc_sense);
891 pci_unmap:
892 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
893 }
894
895 #if 0
896 struct srp_gc mfii_dev_handles_gc =
897 SRP_GC_INITIALIZER(mfii_dev_handles_dtor, NULL);
898
899 static inline uint16_t
900 mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
901 {
902 struct srp_ref sr;
903 uint16_t *map, handle;
904
905 map = srp_enter(&sr, &sc->sc_pd->pd_dev_handles);
906 handle = map[target];
907 srp_leave(&sr);
908
909 return (handle);
910 }
911
912 static int
913 mfii_dev_handles_update(struct mfii_softc *sc)
914 {
915 struct mfii_ld_map *lm;
916 uint16_t *dev_handles = NULL;
917 int i;
918 int rv = 0;
919
920 lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);
921
922 rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
923 MFII_DATA_IN, false);
924
925 if (rv != 0) {
926 rv = EIO;
927 goto free_lm;
928 }
929
930 dev_handles = mallocarray(MFI_MAX_PD, sizeof(*dev_handles),
931 M_DEVBUF, M_WAITOK);
932
933 for (i = 0; i < MFI_MAX_PD; i++)
934 dev_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;
935
936 /* commit the updated info */
937 sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
938 srp_update_locked(&mfii_dev_handles_gc,
939 &sc->sc_pd->pd_dev_handles, dev_handles);
940
941 free_lm:
942 free(lm, M_TEMP, sizeof(*lm));
943
944 return (rv);
945 }
946
947 static void
948 mfii_dev_handles_dtor(void *null, void *v)
949 {
950 uint16_t *dev_handles = v;
951
952 free(dev_handles, M_DEVBUF, sizeof(*dev_handles) * MFI_MAX_PD);
953 }
954 #endif /* 0 */
955
/*
 * Detach the controller: detach the scsibus child first, then tear
 * everything down in roughly the reverse order of mfii_attach().
 */
static int
mfii_detach(device_t self, int flags)
{
	struct mfii_softc *sc = device_private(self);
	int error;

	/* sc_ih is only set once attach fully succeeded */
	if (sc->sc_ih == NULL)
		return (0);

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

	mfii_destroy_sensors(sc);
#if NBIO > 0
	bio_unregister(sc->sc_dev);
#endif
	/* tell the firmware to shut down, then mask interrupts */
	mfii_shutdown(sc->sc_dev, 0);
	mfii_write(sc, MFI_OMSK, 0xffffffff);

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	/* release DMA areas in reverse order of allocation */
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
986
987 static int
988 mfii_rescan(device_t self, const char *ifattr, const int *locators)
989 {
990 struct mfii_softc *sc = device_private(self);
991
992 if (sc->sc_child != NULL)
993 return 0;
994
995 sc->sc_child = config_found(self, &sc->sc_chan, scsiprint, CFARGS_NONE);
996 return 0;
997 }
998
999 static void
1000 mfii_childdetached(device_t self, device_t child)
1001 {
1002 struct mfii_softc *sc = device_private(self);
1003
1004 KASSERT(self == sc->sc_dev);
1005 KASSERT(child == sc->sc_child);
1006
1007 if (child == sc->sc_child)
1008 sc->sc_child = NULL;
1009 }
1010
/*
 * pmf suspend hook: not implemented, so always refuse to suspend
 * while this driver is attached.
 */
static bool
mfii_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1017
/*
 * pmf resume hook: not implemented; returns failure to match the
 * suspend hook above.
 */
static bool
mfii_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1024
1025 static bool
1026 mfii_shutdown(device_t dev, int how)
1027 {
1028 struct mfii_softc *sc = device_private(dev);
1029 struct mfii_ccb *ccb;
1030 union mfi_mbox mbox;
1031 bool rv = true;
1032
1033 memset(&mbox, 0, sizeof(mbox));
1034
1035 mutex_enter(&sc->sc_lock);
1036 DNPRINTF(MFI_D_MISC, "%s: mfii_shutdown\n", DEVNAME(sc));
1037 ccb = mfii_get_ccb(sc);
1038 if (ccb == NULL)
1039 return false;
1040 mutex_enter(&sc->sc_ccb_mtx);
1041 if (sc->sc_running) {
1042 sc->sc_running = 0; /* prevent new commands */
1043 mutex_exit(&sc->sc_ccb_mtx);
1044 #if 0 /* XXX why does this hang ? */
1045 mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1046 mfii_scrub_ccb(ccb);
1047 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
1048 NULL, 0, MFII_DATA_NONE, true)) {
1049 aprint_error_dev(dev, "shutdown: cache flush failed\n");
1050 rv = false;
1051 goto fail;
1052 }
1053 printf("ok1\n");
1054 #endif
1055 mbox.b[0] = 0;
1056 mfii_scrub_ccb(ccb);
1057 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, &mbox,
1058 NULL, 0, MFII_DATA_NONE, true)) {
1059 aprint_error_dev(dev, "shutdown: "
1060 "firmware shutdown failed\n");
1061 rv = false;
1062 goto fail;
1063 }
1064 } else {
1065 mutex_exit(&sc->sc_ccb_mtx);
1066 }
1067 fail:
1068 mfii_put_ccb(sc, ccb);
1069 mutex_exit(&sc->sc_lock);
1070 return rv;
1071 }
1072
/*
 * Read a 32-bit controller register, with a read barrier first so the
 * access is not reordered against earlier register accesses.
 */
static u_int32_t
mfii_read(struct mfii_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}
1080
/*
 * Write a 32-bit controller register, followed by a write barrier so
 * the store is posted before any subsequent register access.
 */
static void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1088
/*
 * Allocate "size" bytes of DMA-safe memory as a single segment, map it
 * into kernel virtual space, load it into a DMA map and zero it.
 * Returns NULL on failure, unwinding partial work via the goto chain.
 * Freed with mfii_dmamem_free().
 */
static struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_WAITOK | M_ZERO);
	m->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(m->mdm_kva, 0, size);
	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF);

	return (NULL);
}
1128
/*
 * Release everything mfii_dmamem_alloc() set up, in reverse order of
 * acquisition: unload, unmap, free the segment, destroy the map, then
 * free the descriptor itself.
 */
static void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF);
}
1138
/*
 * Post a legacy MFI DCMD frame to the controller wrapped in an MPII
 * pass-through request.  The chain SGE points at the ccb's sense
 * buffer area, which holds the DCMD frame (presumably prepared by
 * mfii_dcmd_scrub() — confirm against callers such as
 * mfii_aen_start()).
 */
static void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	sge->sg_addr = htole64(ccb->ccb_sense_dva);
	sge->sg_len = htole32(sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	/*
	 * NOTE(review): le16toh() here looks like it should be htole16();
	 * the two are the same byte-swap on all supported byte orders, so
	 * this is harmless, but the intent should be confirmed.
	 */
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1159
/*
 * Register for asynchronous event notifications (AEN): fetch the
 * current event-log sequence numbers, then arm the long-lived AEN
 * command starting from the boot sequence number.  On success the ccb
 * and the event-detail DMA buffer are handed over to the AEN machinery
 * (mfii_aen_start() keeps them as sc_aen_ccb / ccb_cookie) and are not
 * returned to the free pool here.
 */
static int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), MFII_DATA_IN, true);
	if (rv != 0) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to get event info\n");
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to allocate event data\n");
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, le32toh(mel.mel_boot_seq_num));

	return (0);
}
1197
/*
 * (Re)arm the AEN command: build a MR_DCMD_CTRL_EVENT_WAIT DCMD frame
 * that asks the firmware to complete once an event with sequence
 * number >= "seq" occurs, with the event detail DMA'd into "mdm".
 * Completion is handled by mfii_aen_done().  The same ccb/mdm pair is
 * reused for the lifetime of the driver.
 */
static void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* Ask for everything: all locales, debug class and above. */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	hdr->mfh_data_len = htole32(MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mdf_mbox.w[0] = htole32(seq);
	dcmd->mdf_mbox.w[1] = htole32(mec.mec_word);
	sgl->sg64[0].addr = htole64(MFII_DMA_DVA(mdm));
	sgl->sg64[0].len = htole32(MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1235
/*
 * Interrupt-side completion of the AEN command: defer the actual event
 * processing to the workqueue (mfii_aen()).  Skipped once sc_running
 * is cleared at shutdown, which terminates the AEN cycle.
 */
static void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/*
	 * defer to a thread with KERNEL_LOCK so we can run autoconf
	 * We shouldn't have more than one AEN command pending at a time,
	 * so no need to lock
	 */
	if (sc->sc_running)
		workqueue_enqueue(sc->sc_aen_wq, &sc->sc_aen_work, NULL);
}
1249
/*
 * Workqueue handler for asynchronous events: sync the DMA'd event
 * detail, dispatch on the event code (physical-disk hotplug, state
 * changes, logical-drive create/delete), then re-arm the AEN command
 * at the next sequence number.
 */
static void
mfii_aen(struct work *wk, void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);

	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	DNPRINTF(MFII_D_MISC, "%s: %u %08x %02x %s\n", DEVNAME(sc),
	    le32toh(med->med_seq_num), le32toh(med->med_code),
	    med->med_arg_type, med->med_description);

	switch (le32toh(med->med_code)) {
	case MR_EVT_PD_INSERTED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
	case MR_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MR_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MR_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MR_EVT_LD_CREATED:
	case MR_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	/* Wait for the next event after this one. */
	mfii_aen_start(sc, ccb, mdm, le32toh(med->med_seq_num) + 1);
}
1299
1300 static void
1301 mfii_aen_pd_insert(struct mfii_softc *sc,
1302 const struct mfi_evtarg_pd_address *pd)
1303 {
1304 printf("%s: physical disk inserted id %d enclosure %d\n", DEVNAME(sc),
1305 le16toh(pd->device_id), le16toh(pd->encl_id));
1306 }
1307
1308 static void
1309 mfii_aen_pd_remove(struct mfii_softc *sc,
1310 const struct mfi_evtarg_pd_address *pd)
1311 {
1312 printf("%s: physical disk removed id %d enclosure %d\n", DEVNAME(sc),
1313 le16toh(pd->device_id), le16toh(pd->encl_id));
1314 }
1315
/*
 * AEN handler: physical-disk state change.  Currently ignored.
 */
static void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
}
1322
/*
 * AEN handler for logical-drive create/delete: refresh the LD list
 * from the firmware and diff it against sc_target_lds, attaching or
 * detaching sensors (and detaching scsipi targets) as needed.
 */
static void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	int i, target, old, nld;
	int newlds[MFI_MAX_LD];

	/* sc_lock is only needed around the firmware query. */
	mutex_enter(&sc->sc_lock);
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false) != 0) {
		mutex_exit(&sc->sc_lock);
		DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
		    DEVNAME(sc));
		return;
	}
	mutex_exit(&sc->sc_lock);

	/* -1 marks "no LD at this target". */
	memset(newlds, -1, sizeof(newlds));

	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, sc->sc_ld_list.mll_list[i].mll_state);
		newlds[target] = i;
	}

	for (i = 0; i < MFI_MAX_LD; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			printf("%s: logical drive %d added (target %d)\n",
			    DEVNAME(sc), i, nld);

			// XXX scsi_probe_target(sc->sc_scsibus, i);

			mfii_init_ld_sensor(sc, &sc->sc_sensors[i], i);
			mfii_attach_sensor(sc, &sc->sc_sensors[i]);
		} else if (nld == -1 && old != -1) {
			printf("%s: logical drive %d removed (target %d)\n",
			    DEVNAME(sc), i, old);

			scsipi_target_detach(&sc->sc_chan, i, 0, DETACH_FORCE);
			sysmon_envsys_sensor_detach(sc->sc_sme,
			    &sc->sc_sensors[i]);
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1372
static void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/*
	 * XXX nothing is torn down here; the AEN ccb and its DMA buffer
	 * remain allocated.  The pending firmware EVENT_WAIT command is
	 * quiesced elsewhere (mfii_aen_done() checks sc_running).
	 */
}
1378
/*
 * Drive the firmware state machine to MFI_STATE_READY, nudging it out
 * of intermediate states via the inbound doorbell and polling (100ms
 * steps, per-state timeout in seconds).  Returns 0 on success, 1 if
 * the firmware faults, reports an unknown state, or gets stuck.
 */
static int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t fw_state, cur_state;
	int max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* Poll every 100ms for up to max_wait seconds. */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1432
/*
 * Fetch the controller information structure (MR_DCMD_CTRL_GET_INFO)
 * into sc_info and, when debugging is enabled, dump most of it.
 * Returns the mfii_mgmt() error code; 0 on success.
 */
static int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), MFII_DATA_IN, true);

	if (rv != 0)
		return (rv);

	/* Everything below is debug output only. */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ", sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ", sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}
1587
/*
 * Issue a request in legacy MFI frame format and busy-wait for the
 * firmware to overwrite the frame's status byte.  Used before/outside
 * the MPII reply queues (e.g. mfii_initialise_firmware()).  Returns
 * non-zero on timeout (~5 seconds of 1ms polls).
 */
static int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header *hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* The request descriptor carries the MFA address of the frame. */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		/* Sync so we observe the firmware's status update. */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	/* Tear down the data transfer, if any. */
	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	return (rv);
}
1643
/*
 * Run a ccb to completion by polling the reply post queue instead of
 * waiting for an interrupt.  The ccb's original done callback is
 * invoked once the command completes.  Always returns 0.
 */
static int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	/* Temporarily hijack the completion hooks; restored below. */
	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	/* mfii_poll_done() clears rv via ccb_cookie when done. */
	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
1669
1670 static void
1671 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1672 {
1673 int *rv = ccb->ccb_cookie;
1674
1675 *rv = 0;
1676 }
1677
/*
 * Start a ccb and sleep until it completes.  mfii_exec_done() signals
 * completion by clearing ccb_cookie under ccb_mtx.  Always returns 0.
 */
static int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	/* Use the ccb itself as a non-NULL "in flight" marker. */
	ccb->ccb_cookie = ccb;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	return (0);
}
1698
/*
 * Completion hook for mfii_exec(): clear the in-flight marker and wake
 * the sleeping thread.
 */
static void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&ccb->ccb_mtx);
	ccb->ccb_cookie = NULL;
	cv_signal(&ccb->ccb_cv);
	mutex_exit(&ccb->ccb_mtx);
}
1707
1708 static int
1709 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1710 void *buf, size_t len, mfii_direction_t dir, bool poll)
1711 {
1712 struct mfii_ccb *ccb;
1713 int rv;
1714
1715 KASSERT(mutex_owned(&sc->sc_lock));
1716 if (!sc->sc_running)
1717 return EAGAIN;
1718
1719 ccb = mfii_get_ccb(sc);
1720 if (ccb == NULL)
1721 return (ENOMEM);
1722
1723 mfii_scrub_ccb(ccb);
1724 rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, dir, poll);
1725 mfii_put_ccb(sc, ccb);
1726
1727 return (rv);
1728 }
1729
/*
 * Build and run a management (DCMD) command: a legacy MFI DCMD frame
 * is constructed in the ccb's MFI area and delivered to the firmware
 * as an MPII pass-through request whose chain SGE points at that
 * frame.  "buf"/"len"/"dir" describe the optional data transfer; the
 * optional "mbox" is copied into the DCMD mailbox.  Returns 0 when
 * the firmware reports MFI_STAT_OK, otherwise EIO or ENOMEM.
 */
static int
mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
    const union mfi_mbox *mbox, void *buf, size_t len, mfii_direction_t dir,
    bool poll)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	int rv = EIO;

	/* Can't sleep on a cv before interrupts/scheduler are up. */
	if (cold)
		poll = true;

	ccb->ccb_data = buf;
	ccb->ccb_len = len;
	ccb->ccb_direction = dir;
	switch (dir) {
	case MFII_DATA_IN:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case MFII_DATA_OUT:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		break;
	case MFII_DATA_NONE:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
		break;
	}

	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl, poll) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap32->dm_nsegs;
	KASSERT(!ccb->ccb_dma64);

	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	/* sgl_offset0 is in 32-bit words, chain_offset in 16-byte units. */
	io->function = MFII_FUNCTION_PASSTHRU_IO;
	io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
	io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;

	sge->sg_addr = htole64(ccb->ccb_mfi_dva);
	sge->sg_len = htole32(MFI_FRAME_SIZE);
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	if (poll) {
		ccb->ccb_done = mfii_empty_done;
		mfii_poll(sc, ccb);
	} else
		mfii_exec(sc, ccb);

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;
	}

done:
	return (rv);
}
1800
/*
 * No-op done callback, used when completion is detected by polling
 * the frame's status byte rather than via the callback.
 */
static void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
}
1806
/*
 * Load the ccb's data buffer into the 32-bit DMA map and fill in the
 * legacy MFI 32-bit scatter/gather list at "sglp".  "nosleep" selects
 * BUS_DMA_NOWAIT.  A zero-length transfer succeeds trivially.
 * Returns 0 on success, 1 on dmamap load failure.
 */
static int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap32;
	int error;
	int i;

	KASSERT(!ccb->ccb_dma64);
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1839
1840 static void
1841 mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
1842 {
1843 u_long *r = (u_long *)&ccb->ccb_req;
1844
1845 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
1846 ccb->ccb_request_offset, MFII_REQUEST_SIZE,
1847 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1848
1849 #if defined(__LP64__) && 0
1850 bus_space_write_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
1851 #else
1852 mutex_enter(&sc->sc_post_mtx);
1853 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
1854 bus_space_barrier(sc->sc_iot, sc->sc_ioh,
1855 MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);
1856
1857 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
1858 bus_space_barrier(sc->sc_iot, sc->sc_ioh,
1859 MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
1860 mutex_exit(&sc->sc_post_mtx);
1861 #endif
1862 }
1863
/*
 * Common completion path for a ccb pulled off the reply post queue:
 * post-sync the request frame and any chained SGL, tear down the data
 * DMA mapping (64-bit or 32-bit map as appropriate), then invoke the
 * ccb's done callback.
 */
static void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_dma64) {
		KASSERT(ccb->ccb_len > 0);
		bus_dmamap_sync(sc->sc_dmat64, ccb->ccb_dmamap64,
		    0, ccb->ccb_dmamap64->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat64, ccb->ccb_dmamap64);
	} else if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	ccb->ccb_done(sc, ccb);
}
1896
1897 static int
1898 mfii_initialise_firmware(struct mfii_softc *sc)
1899 {
1900 struct mpii_msg_iocinit_request *iiq;
1901 struct mfii_dmamem *m;
1902 struct mfii_ccb *ccb;
1903 struct mfi_init_frame *init;
1904 int rv;
1905
1906 m = mfii_dmamem_alloc(sc, sizeof(*iiq));
1907 if (m == NULL)
1908 return (1);
1909
1910 iiq = MFII_DMA_KVA(m);
1911 memset(iiq, 0, sizeof(*iiq));
1912
1913 iiq->function = MPII_FUNCTION_IOC_INIT;
1914 iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
1915
1916 iiq->msg_version_maj = 0x02;
1917 iiq->msg_version_min = 0x00;
1918 iiq->hdr_version_unit = 0x10;
1919 iiq->hdr_version_dev = 0x0;
1920
1921 iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
1922
1923 iiq->reply_descriptor_post_queue_depth =
1924 htole16(sc->sc_reply_postq_depth);
1925 iiq->reply_free_queue_depth = htole16(0);
1926
1927 iiq->sense_buffer_address_high = htole32(
1928 MFII_DMA_DVA(sc->sc_sense) >> 32);
1929
1930 iiq->reply_descriptor_post_queue_address_lo =
1931 htole32(MFII_DMA_DVA(sc->sc_reply_postq));
1932 iiq->reply_descriptor_post_queue_address_hi =
1933 htole32(MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
1934
1935 iiq->system_request_frame_base_address_lo =
1936 htole32(MFII_DMA_DVA(sc->sc_requests));
1937 iiq->system_request_frame_base_address_hi =
1938 htole32(MFII_DMA_DVA(sc->sc_requests) >> 32);
1939
1940 iiq->timestamp = htole64(time_uptime);
1941
1942 ccb = mfii_get_ccb(sc);
1943 if (ccb == NULL) {
1944 /* shouldn't ever run out of ccbs during attach */
1945 return (1);
1946 }
1947 mfii_scrub_ccb(ccb);
1948 init = ccb->ccb_request;
1949
1950 init->mif_header.mfh_cmd = MFI_CMD_INIT;
1951 init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
1952 init->mif_qinfo_new_addr_lo = htole32(MFII_DMA_DVA(m));
1953 init->mif_qinfo_new_addr_hi = htole32((uint64_t)MFII_DMA_DVA(m) >> 32);
1954
1955 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
1956 0, MFII_DMA_LEN(sc->sc_reply_postq),
1957 BUS_DMASYNC_PREREAD);
1958
1959 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1960 0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
1961
1962 rv = mfii_mfa_poll(sc, ccb);
1963
1964 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1965 0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
1966
1967 mfii_put_ccb(sc, ccb);
1968 mfii_dmamem_free(sc, m);
1969
1970 return (rv);
1971 }
1972
1973 static int
1974 mfii_my_intr(struct mfii_softc *sc)
1975 {
1976 u_int32_t status;
1977
1978 status = mfii_read(sc, MFI_OSTS);
1979
1980 DNPRINTF(MFII_D_INTR, "%s: intr status 0x%x\n", DEVNAME(sc), status);
1981 if (ISSET(status, 0x1)) {
1982 mfii_write(sc, MFI_OSTS, status);
1983 return (1);
1984 }
1985
1986 return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
1987 }
1988
/*
 * PCI interrupt handler: if the interrupt is ours, drain the reply
 * post queue.  Returns 1 when handled, 0 otherwise.
 */
static int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc)) {
		mfii_postq(sc);
		return (1);
	}

	return (0);
}
2001
/*
 * Drain the reply post queue: collect completed ccbs onto a local list
 * under sc_reply_postq_mtx, reset each consumed descriptor to the
 * "unused" pattern (all 0xff), update the reply post index register,
 * then run the completions without the queue lock held.
 */
static void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mutex_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		DNPRINTF(MFII_D_INTR, "%s: mfii_postq index %d flags 0x%x data 0x%x\n",
		    DEVNAME(sc), sc->sc_reply_postq_index, rdp->reply_flags,
			rdp->data == 0xffffffff);
		if ((rdp->reply_flags &  MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1-based; 0 would be an unused descriptor. */
		ccb = &sc->sc_ccb[le16toh(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mutex_exit(&sc->sc_reply_postq_mtx);

	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2056
/*
 * scsipi adapter entry point: dispatch adapter requests and run SCSI
 * transfers against logical drives.  Read/write CDBs go through the
 * fast LDIO path (mfii_scsi_cmd_io()); everything else is sent as a
 * pass-through CDB (mfii_scsi_cmd_cdb()).  Completion (including
 * error reporting via scsipi_done()) happens in mfii_scsi_cmd_done().
 */
static void
mfii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct mfii_softc *sc = device_private(adapt->adapt_dev);
	struct mfii_ccb *ccb;
	int timeout;
	int target;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	/* Only LUN 0 of present logical drives is addressable. */
	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
	    periph->periph_lun != 0) {
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		return;
	}

	if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
	    xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
		/* the cache is stable storage, don't flush */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		scsipi_done(xs);
		return;
	}

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}
	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	/* Arm the command timeout before the command can complete. */
	timeout = mstohz(xs->timeout);
	if (timeout == 0)
		timeout = 1;
	callout_reset(&xs->xs_callout, timeout, mfii_scsi_cmd_tmo, ccb);

	switch (xs->cmd->opcode) {
	case SCSI_READ_6_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
	case SCSI_WRITE_6_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		if (mfii_scsi_cmd_io(sc, ccb, xs) != 0)
			goto stuffup;
		break;

	default:
		if (mfii_scsi_cmd_cdb(sc, ccb, xs) != 0)
			goto stuffup;
		break;
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	DNPRINTF(MFII_D_CMD, "%s: start io %d cmd %d\n", DEVNAME(sc), target,
	    xs->cmd->opcode);

	if (xs->xs_control & XS_CTL_POLL) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsipi_done(xs);
	mfii_put_ccb(sc, ccb);
}
2164
2165 static void
2166 mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
2167 {
2168 struct scsipi_xfer *xs = ccb->ccb_cookie;
2169 struct mpii_msg_scsi_io *io = ccb->ccb_request;
2170 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
2171
2172 if (callout_stop(&xs->xs_callout) != 0)
2173 return;
2174
2175 switch (ctx->status) {
2176 case MFI_STAT_OK:
2177 break;
2178
2179 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2180 xs->error = XS_SENSE;
2181 memset(&xs->sense, 0, sizeof(xs->sense));
2182 memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
2183 break;
2184
2185 case MFI_STAT_LD_OFFLINE:
2186 case MFI_STAT_DEVICE_NOT_FOUND:
2187 xs->error = XS_SELTIMEOUT;
2188 break;
2189
2190 default:
2191 xs->error = XS_DRIVER_STUFFUP;
2192 break;
2193 }
2194
2195 scsipi_done(xs);
2196 mfii_put_ccb(sc, ccb);
2197 }
2198
/*
 * Build an LDIO (fast-path logical-disk I/O) request for xs in ccb.
 * Only plain READ/WRITE opcodes come through here; everything else
 * uses mfii_scsi_cmd_cdb().  Returns 0 on success, 1 if the data
 * buffer could not be DMA-mapped.
 */
static int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* RAID context immediately follows the MPI I/O header */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL starts after the header and RAID context, in 4-byte units */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	/* map the data buffer and build the SGL right after the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	/* where the SGE count lives differs between controller generations */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2258
/*
 * Build a generic (non-fast-path) CDB passthrough request for xs in
 * ccb; used for every opcode mfii_scsi_cmd_io() does not handle.
 * Returns 0 on success, 1 if the data buffer could not be DMA-mapped.
 */
static int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* RAID context immediately follows the MPI I/O header */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL starts after the header and RAID context, in 4-byte units */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	/* LUN field is big-endian in the MPI frame */
	io->lun[0] = htobe16(periph->periph_lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	/* map the data buffer and build the SGL right after the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2306
2307 #if 0
/*
 * Disabled (#if 0): physical-disk (JBOD) command path carried over
 * from OpenBSD.  Still uses the OpenBSD scsi_link API; needs to be
 * converted to scsipi before it can be enabled.
 */
void
mfii_pd_scsi_cmd(struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	// XXX timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	/*
	 * NOTE(review): this calls mfii_pd_scsi_cmd_cdb(sc, xs) but the
	 * function below is declared (sc, ccb, xs) -- the call must be
	 * fixed up when this code is re-enabled.
	 */
	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->xs_control, XS_CTL_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	// XXX timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}
2345
/*
 * Disabled (#if 0): probe helper for the physical-disk path.  Accepts
 * a target only if the firmware reports the disk in the SYSTEM (JBOD)
 * state; LUNs other than 0 are rejected.
 */
int
mfii_pd_scsi_probe(struct scsi_link *link)
{
	struct mfii_softc *sc = link->adapter_softc;
	struct mfi_pd_details mpd;
	union mfi_mbox mbox;
	int rv;

	if (link->lun > 0)
		return (0);

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = htole16(link->target);

	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
	    MFII_DATA_IN, true);
	if (rv != 0)
		return (EIO);

	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
		return (ENXIO);

	return (0);
}
2370
/*
 * Disabled (#if 0): build a passthrough request addressed to a
 * physical disk via its device handle.  Returns an XS_* status code
 * rather than 0/1 like the logical-disk builders.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	dev_handle = mfii_dev_handle(sc, link->target);
	/* 0xffff means no handle: the disk is gone */
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	/* NOTE(review): no htole16() on pd_timeout here -- confirm intent */
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
2426 #endif
2427
/*
 * DMA-map ccb->ccb_data/ccb_len and build the hardware scatter/gather
 * list starting at sglp (inside the request frame).  If the transfer
 * needs more SGEs than fit in the frame, the last in-frame slot is
 * turned into a chain element pointing at the ccb's external SGL
 * area.  Returns 0 on success, 1 if the buffer could not be loaded.
 */
static int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;	/* chain element, if one is needed */
	bus_dmamap_t dmap = ccb->ccb_dmamap64;
	u_int space;
	int i;

	int error;

	/* dataless command: nothing to map */
	if (ccb->ccb_len == 0)
		return (0);

	ccb->ccb_dma64 = true;
	error = bus_dmamap_load(sc->sc_dmat64, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* how many SGEs fit in the remainder of the request frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is expressed in 16-byte units */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* on reaching the chain slot, continue in the external SGL */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* dm_nsegs >= 1 here (len != 0), so sge is non-NULL */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	/* make the data buffer visible to the device */
	bus_dmamap_sync(sc->sc_dmat64, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* flush the external SGL area too, if it was used */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2495
2496 static void
2497 mfii_scsi_cmd_tmo(void *p)
2498 {
2499 struct mfii_ccb *ccb = p;
2500 struct mfii_softc *sc = ccb->ccb_sc;
2501 bool start_abort;
2502
2503 printf("%s: cmd timeout ccb %p\n", DEVNAME(sc), p);
2504
2505 mutex_enter(&sc->sc_abort_mtx);
2506 start_abort = (SIMPLEQ_FIRST(&sc->sc_abort_list) == 0);
2507 SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2508 if (start_abort)
2509 workqueue_enqueue(sc->sc_abort_wq, &sc->sc_abort_work, NULL);
2510 mutex_exit(&sc->sc_abort_mtx);
2511 }
2512
/*
 * Abort worker (runs on sc_abort_wq).  Takes ownership of the entire
 * abort list in one shot, then issues a task-management ABORT_TASK
 * request for each timed-out command that still has its device.
 */
static void
mfii_abort_task(struct work *wk, void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* atomically detach the pending list so the callout can refill it */
	mutex_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mutex_exit(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsipi_xfer *xs = ccb->ccb_cookie;
		struct scsipi_periph *periph = xs->xs_periph;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		if (!sc->sc_ld[periph->periph_target].ld_present) {
			/* device is gone */
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			mfii_put_ccb(sc, ccb);
			continue;
		}

		/*
		 * NOTE(review): mfii_get_ccb() can return NULL (free
		 * pool exhausted or adapter stopped), which would be
		 * dereferenced by mfii_scrub_ccb() below.  A NULL
		 * check with a suitable recovery path is needed here.
		 */
		accb = mfii_get_ccb(sc);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, periph->periph_target, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2552
2553 static void
2554 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2555 uint16_t smid, uint8_t type, uint32_t flags)
2556 {
2557 struct mfii_task_mgmt *msg;
2558 struct mpii_msg_scsi_task_request *req;
2559
2560 msg = accb->ccb_request;
2561 req = &msg->mpii_request;
2562 req->dev_handle = dev_handle;
2563 req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2564 req->task_type = type;
2565 req->task_mid = htole16( smid);
2566 msg->flags = flags;
2567
2568 accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2569 accb->ccb_req.smid = le16toh(accb->ccb_smid);
2570 }
2571
2572 static void
2573 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2574 {
2575 struct mfii_ccb *ccb = accb->ccb_cookie;
2576 struct scsipi_xfer *xs = ccb->ccb_cookie;
2577
2578 /* XXX check accb completion? */
2579
2580 mfii_put_ccb(sc, accb);
2581 printf("%s: cmd aborted ccb %p\n", DEVNAME(sc), ccb);
2582
2583 xs->error = XS_TIMEOUT;
2584 scsipi_done(xs);
2585 mfii_put_ccb(sc, ccb);
2586 }
2587
2588 static struct mfii_ccb *
2589 mfii_get_ccb(struct mfii_softc *sc)
2590 {
2591 struct mfii_ccb *ccb;
2592
2593 mutex_enter(&sc->sc_ccb_mtx);
2594 if (!sc->sc_running) {
2595 ccb = NULL;
2596 } else {
2597 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2598 if (ccb != NULL)
2599 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2600 }
2601 mutex_exit(&sc->sc_ccb_mtx);
2602 return (ccb);
2603 }
2604
2605 static void
2606 mfii_scrub_ccb(struct mfii_ccb *ccb)
2607 {
2608 ccb->ccb_cookie = NULL;
2609 ccb->ccb_done = NULL;
2610 ccb->ccb_flags = 0;
2611 ccb->ccb_data = NULL;
2612 ccb->ccb_direction = MFII_DATA_NONE;
2613 ccb->ccb_dma64 = false;
2614 ccb->ccb_len = 0;
2615 ccb->ccb_sgl_len = 0;
2616 memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2617 memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2618 memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2619 }
2620
2621 static void
2622 mfii_put_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb)
2623 {
2624 mutex_enter(&sc->sc_ccb_mtx);
2625 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
2626 mutex_exit(&sc->sc_ccb_mtx);
2627 }
2628
2629 static int
2630 mfii_init_ccb(struct mfii_softc *sc)
2631 {
2632 struct mfii_ccb *ccb;
2633 u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2634 u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2635 u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2636 u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2637 u_int i;
2638 int error;
2639
2640 sc->sc_ccb = malloc(sc->sc_max_cmds * sizeof(struct mfii_ccb),
2641 M_DEVBUF, M_WAITOK|M_ZERO);
2642
2643 for (i = 0; i < sc->sc_max_cmds; i++) {
2644 ccb = &sc->sc_ccb[i];
2645 ccb->ccb_sc = sc;
2646
2647 /* create a dma map for transfer */
2648 error = bus_dmamap_create(sc->sc_dmat,
2649 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2650 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap32);
2651 if (error) {
2652 printf("%s: cannot create ccb dmamap32 (%d)\n",
2653 DEVNAME(sc), error);
2654 goto destroy;
2655 }
2656 error = bus_dmamap_create(sc->sc_dmat64,
2657 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2658 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap64);
2659 if (error) {
2660 printf("%s: cannot create ccb dmamap64 (%d)\n",
2661 DEVNAME(sc), error);
2662 goto destroy32;
2663 }
2664
2665 /* select i + 1'th request. 0 is reserved for events */
2666 ccb->ccb_smid = i + 1;
2667 ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2668 ccb->ccb_request = request + ccb->ccb_request_offset;
2669 ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2670 ccb->ccb_request_offset;
2671
2672 /* select i'th MFI command frame */
2673 ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2674 ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2675 ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2676 ccb->ccb_mfi_offset;
2677
2678 /* select i'th sense */
2679 ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2680 ccb->ccb_sense = (struct mfi_sense *)(sense +
2681 ccb->ccb_sense_offset);
2682 ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2683 ccb->ccb_sense_offset;
2684
2685 /* select i'th sgl */
2686 ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2687 sc->sc_max_sgl * i;
2688 ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2689 ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2690 ccb->ccb_sgl_offset;
2691
2692 mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2693 cv_init(&ccb->ccb_cv, "mfiiexec");
2694
2695 /* add ccb to queue */
2696 mfii_put_ccb(sc, ccb);
2697 }
2698
2699 return (0);
2700
2701 destroy32:
2702 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2703 destroy:
2704 /* free dma maps and ccb memory */
2705 while ((ccb = mfii_get_ccb(sc)) != NULL) {
2706 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2707 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap64);
2708 }
2709
2710 free(sc->sc_ccb, M_DEVBUF);
2711
2712 return (1);
2713 }
2714
2715 #if NBIO > 0
/*
 * bio(4) ioctl entry point.  Dispatches the BIOC* commands to their
 * handlers, serialized under sc_lock.  Returns 0 on success, ENOTTY
 * for unknown commands, or the handler's error code.
 */
static int
mfii_ioctl(device_t dev, u_long cmd, void *addr)
{
	struct mfii_softc *sc = device_private(dev);
	int error = 0;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));

	mutex_enter(&sc->sc_lock);

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MFII_D_IOCTL, "inq\n");
		error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MFII_D_IOCTL, "vol\n");
		error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MFII_D_IOCTL, "disk\n");
		error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MFII_D_IOCTL, "alarm\n");
		error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCBLINK:
		DNPRINTF(MFII_D_IOCTL, "blink\n");
		error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
		break;

	case BIOCSETSTATE:
		DNPRINTF(MFII_D_IOCTL, "setstate\n");
		error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

#if 0
	case BIOCPATROL:
		DNPRINTF(MFII_D_IOCTL, "patrol\n");
		error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
		break;
#endif

	default:
		DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
		error = ENOTTY;
	}

	mutex_exit(&sc->sc_lock);

	return (error);
}
2773
2774 static int
2775 mfii_bio_getitall(struct mfii_softc *sc)
2776 {
2777 int i, d, rv = EINVAL;
2778 size_t size;
2779 union mfi_mbox mbox;
2780 struct mfi_conf *cfg = NULL;
2781 struct mfi_ld_details *ld_det = NULL;
2782
2783 /* get info */
2784 if (mfii_get_info(sc)) {
2785 DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
2786 DEVNAME(sc));
2787 goto done;
2788 }
2789
2790 /* send single element command to retrieve size for full structure */
2791 cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
2792 if (cfg == NULL)
2793 goto done;
2794 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
2795 MFII_DATA_IN, false)) {
2796 free(cfg, M_DEVBUF);
2797 goto done;
2798 }
2799
2800 size = cfg->mfc_size;
2801 free(cfg, M_DEVBUF);
2802
2803 /* memory for read config */
2804 cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
2805 if (cfg == NULL)
2806 goto done;
2807 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
2808 MFII_DATA_IN, false)) {
2809 free(cfg, M_DEVBUF);
2810 goto done;
2811 }
2812
2813 /* replace current pointer with new one */
2814 if (sc->sc_cfg)
2815 free(sc->sc_cfg, M_DEVBUF);
2816 sc->sc_cfg = cfg;
2817
2818 /* get all ld info */
2819 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
2820 sizeof(sc->sc_ld_list), MFII_DATA_IN, false))
2821 goto done;
2822
2823 /* get memory for all ld structures */
2824 size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
2825 if (sc->sc_ld_sz != size) {
2826 if (sc->sc_ld_details)
2827 free(sc->sc_ld_details, M_DEVBUF);
2828
2829 ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
2830 if (ld_det == NULL)
2831 goto done;
2832 sc->sc_ld_sz = size;
2833 sc->sc_ld_details = ld_det;
2834 }
2835
2836 /* find used physical disks */
2837 size = sizeof(struct mfi_ld_details);
2838 for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
2839 memset(&mbox, 0, sizeof(mbox));
2840 mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
2841 if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox,
2842 &sc->sc_ld_details[i], size, MFII_DATA_IN, false))
2843 goto done;
2844
2845 d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
2846 sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
2847 }
2848 sc->sc_no_pd = d;
2849
2850 rv = 0;
2851 done:
2852 return (rv);
2853 }
2854
2855 static int
2856 mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
2857 {
2858 int rv = EINVAL;
2859 struct mfi_conf *cfg = NULL;
2860
2861 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));
2862
2863 if (mfii_bio_getitall(sc)) {
2864 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
2865 DEVNAME(sc));
2866 goto done;
2867 }
2868
2869 /* count unused disks as volumes */
2870 if (sc->sc_cfg == NULL)
2871 goto done;
2872 cfg = sc->sc_cfg;
2873
2874 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
2875 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
2876 #if notyet
2877 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
2878 (bi->bi_nodisk - sc->sc_no_pd);
2879 #endif
2880 /* tell bio who we are */
2881 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
2882
2883 rv = 0;
2884 done:
2885 return (rv);
2886 }
2887
/*
 * BIOCVOL handler: report the name, state, progress, RAID level and
 * size of one logical disk; out-of-range volume ids are forwarded to
 * the hotspare/unused-disk reporting in mfii_bio_hs().
 */
static int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int i, per, rv = EINVAL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	strlcpy(bv->bv_dev, sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_name,
	    sizeof(bv->bv_dev));

	/* map the firmware LD state to a bio volume status */
	switch (sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
	case MFI_LD_PROG_BGI:
		/* consistency check / background init: report as scrubbing */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

#if 0
	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;
#endif

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	/* spanned groups are reported as the "x10" level (e.g. 1 -> 10) */
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
2975
/*
 * BIOCDISK handler: report one physical disk of volume bd->bd_volid
 * (channel/target, status, size, vendor).  Out-of-range volume ids
 * are forwarded to the hotspare reporting in mfii_bio_hs().  For a
 * missing array member, an unused disk is searched for so the caller
 * still gets a plausible rebuild target.
 */
static int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf *cfg;
	struct mfi_array *ar;
	struct mfi_ld_cfg *ld;
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	struct scsipi_inquiry_data *inqbuf;
	char vend[8+16+4+1], *vendp;
	int i, rv = EINVAL;
	int arr, vol, disk, span;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    MFII_DATA_IN, false))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false))
				continue;

			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no unused disk found: report the failure as-is */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    MFII_DATA_IN, false)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/*
	 * vend[] is sized 8+16+4+1: the copy deliberately reads past
	 * the 8-byte vendor field into what is presumably the adjacent
	 * product and revision fields of the inquiry data -- assumes
	 * those fields are contiguous (TODO confirm against
	 * struct scsipi_inquiry_data).
	 */
	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

#if 0
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}
#endif

	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);

	return (rv);
}
3129
3130 static int
3131 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3132 {
3133 uint32_t opc;
3134 int rv = 0;
3135 int8_t ret;
3136 mfii_direction_t dir = MFII_DATA_NONE;
3137
3138 switch (ba->ba_opcode) {
3139 case BIOC_SADISABLE:
3140 opc = MR_DCMD_SPEAKER_DISABLE;
3141 break;
3142
3143 case BIOC_SAENABLE:
3144 opc = MR_DCMD_SPEAKER_ENABLE;
3145 break;
3146
3147 case BIOC_SASILENCE:
3148 opc = MR_DCMD_SPEAKER_SILENCE;
3149 break;
3150
3151 case BIOC_GASTATUS:
3152 opc = MR_DCMD_SPEAKER_GET;
3153 dir = MFII_DATA_IN;
3154 break;
3155
3156 case BIOC_SATEST:
3157 opc = MR_DCMD_SPEAKER_TEST;
3158 break;
3159
3160 default:
3161 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3162 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3163 return (EINVAL);
3164 }
3165
3166 if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), dir, false))
3167 rv = EINVAL;
3168 else
3169 if (ba->ba_opcode == BIOC_GASTATUS)
3170 ba->ba_status = ret;
3171 else
3172 ba->ba_status = 0;
3173
3174 return (rv);
3175 }
3176
3177 static int
3178 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3179 {
3180 int i, found, rv = EINVAL;
3181 union mfi_mbox mbox;
3182 uint32_t cmd;
3183 struct mfi_pd_list *pd;
3184
3185 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3186 bb->bb_status);
3187
3188 /* channel 0 means not in an enclosure so can't be blinked */
3189 if (bb->bb_channel == 0)
3190 return (EINVAL);
3191
3192 pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3193
3194 if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd),
3195 MFII_DATA_IN, false))
3196 goto done;
3197
3198 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3199 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3200 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3201 found = 1;
3202 break;
3203 }
3204
3205 if (!found)
3206 goto done;
3207
3208 memset(&mbox, 0, sizeof(mbox));
3209 mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3210
3211 switch (bb->bb_status) {
3212 case BIOC_SBUNBLINK:
3213 cmd = MR_DCMD_PD_UNBLINK;
3214 break;
3215
3216 case BIOC_SBBLINK:
3217 cmd = MR_DCMD_PD_BLINK;
3218 break;
3219
3220 case BIOC_SBALARM:
3221 default:
3222 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
3223 "opcode %x\n", DEVNAME(sc), bb->bb_status);
3224 goto done;
3225 }
3226
3227
3228 if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, MFII_DATA_NONE, false))
3229 goto done;
3230
3231 rv = 0;
3232 done:
3233 free(pd, M_DEVBUF);
3234 return (rv);
3235 }
3236
/*
 * Drive a physical disk to the UNCONFIG_GOOD state so it can be used
 * (e.g. turned into a hotspare): clear an UNCONFIG_BAD state first,
 * then clear any foreign DDF metadata left by a previous controller.
 * Returns 0 on success, ENXIO if the disk still is not usable, or
 * the error from the failing firmware command.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details *pd;
	union mfi_mbox mbox;
	int rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* fetch the current state of the disk */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	/* step 1: force UNCONFIG_BAD disks to UNCONFIG_GOOD */
	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
		    MFII_DATA_NONE, false);
		if (rv != 0)
			goto done;
	}

	/* re-read the state after the possible transition */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	/* step 2: scan for and clear foreign configurations */
	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL,
		    fsi, sizeof(*fsi), MFII_DATA_IN, false);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL,
			    NULL, 0, MFII_DATA_NONE, false);
			if (rv != 0)
				goto done;
		}
	}

	/* final check: the disk must now be UNCONFIG_GOOD and not foreign */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3303
/*
 * Turn the physical disk identified by pd_id into a global hot spare.
 * Refreshes the cached controller config first (for mfc_no_array),
 * then issues MR_DCMD_CFG_MAKE_SPARE with the disk's id/sequence pair.
 * Returns 0 on success or an errno.
 */
static int
mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfi_hotspare *hs;
	struct mfi_pd_details *pd;
	union mfi_mbox mbox;
	size_t size;
	int rv = EINVAL;

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	/*
	 * The hotspare structure carries a trailing array with one
	 * uint16_t per array on the controller.
	 */
	size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;

	hs = malloc(size, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* Get the disk's details; we need its current sequence number. */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	/* Zero trailing array => global spare (not dedicated to an array). */
	memset(hs, 0, size);
	hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
	hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
	rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size,
	    MFII_DATA_OUT, false);

done:
	free(hs, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3343
/*
 * BIOCSETSTATE ioctl backend: change the state of the physical disk
 * addressed by enclosure/slot in *bs to online, offline, hotspare or
 * rebuild.  Returns 0 on success, EINVAL (or an mfii_mgmt() error) on
 * failure.
 */
static int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	/* Map the (channel, target) = (enclosure, slot) address to a pd id. */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
	    MFII_DATA_IN, false))
		goto done;

	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	/* SET_STATE needs the disk's current sequence number. */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			/*
			 * A disk that is not already OFFLINE must first
			 * be made usable (UNCONFIG_GOOD, no foreign
			 * metadata) and turned into a spare before a
			 * rebuild can be requested.
			 */
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* Re-read state/sequence after the transitions. */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
	    MFII_DATA_NONE, false);
done:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);
	return (rv);
}
3438
3439 #if 0
3440 int
3441 mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
3442 {
3443 uint32_t opc;
3444 int rv = 0;
3445 struct mfi_pr_properties prop;
3446 struct mfi_pr_status status;
3447 uint32_t time, exec_freq;
3448
3449 switch (bp->bp_opcode) {
3450 case BIOC_SPSTOP:
3451 case BIOC_SPSTART:
3452 if (bp->bp_opcode == BIOC_SPSTART)
3453 opc = MR_DCMD_PR_START;
3454 else
3455 opc = MR_DCMD_PR_STOP;
3456 if (mfii_mgmt(sc, opc, NULL, NULL, 0, MFII_DATA_IN, false))
3457 return (EINVAL);
3458 break;
3459
3460 case BIOC_SPMANUAL:
3461 case BIOC_SPDISABLE:
3462 case BIOC_SPAUTO:
3463 /* Get device's time. */
3464 opc = MR_DCMD_TIME_SECS_GET;
3465 if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
3466 MFII_DATA_IN, false))
3467 return (EINVAL);
3468
3469 opc = MR_DCMD_PR_GET_PROPERTIES;
3470 if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
3471 MFII_DATA_IN, false))
3472 return (EINVAL);
3473
3474 switch (bp->bp_opcode) {
3475 case BIOC_SPMANUAL:
3476 prop.op_mode = MFI_PR_OPMODE_MANUAL;
3477 break;
3478 case BIOC_SPDISABLE:
3479 prop.op_mode = MFI_PR_OPMODE_DISABLED;
3480 break;
3481 case BIOC_SPAUTO:
3482 if (bp->bp_autoival != 0) {
3483 if (bp->bp_autoival == -1)
3484 /* continuously */
3485 exec_freq = 0xffffffffU;
3486 else if (bp->bp_autoival > 0)
3487 exec_freq = bp->bp_autoival;
3488 else
3489 return (EINVAL);
3490 prop.exec_freq = exec_freq;
3491 }
3492 if (bp->bp_autonext != 0) {
3493 if (bp->bp_autonext < 0)
3494 return (EINVAL);
3495 else
3496 prop.next_exec = time + bp->bp_autonext;
3497 }
3498 prop.op_mode = MFI_PR_OPMODE_AUTO;
3499 break;
3500 }
3501
3502 opc = MR_DCMD_PR_SET_PROPERTIES;
3503 if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
3504 MFII_DATA_OUT, false))
3505 return (EINVAL);
3506
3507 break;
3508
3509 case BIOC_GPSTATUS:
3510 opc = MR_DCMD_PR_GET_PROPERTIES;
3511 if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
3512 MFII_DATA_IN, false))
3513 return (EINVAL);
3514
3515 opc = MR_DCMD_PR_GET_STATUS;
3516 if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status),
3517 MFII_DATA_IN, false))
3518 return (EINVAL);
3519
3520 /* Get device's time. */
3521 opc = MR_DCMD_TIME_SECS_GET;
3522 if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
3523 MFII_DATA_IN, false))
3524 return (EINVAL);
3525
3526 switch (prop.op_mode) {
3527 case MFI_PR_OPMODE_AUTO:
3528 bp->bp_mode = BIOC_SPMAUTO;
3529 bp->bp_autoival = prop.exec_freq;
3530 bp->bp_autonext = prop.next_exec;
3531 bp->bp_autonow = time;
3532 break;
3533 case MFI_PR_OPMODE_MANUAL:
3534 bp->bp_mode = BIOC_SPMMANUAL;
3535 break;
3536 case MFI_PR_OPMODE_DISABLED:
3537 bp->bp_mode = BIOC_SPMDISABLED;
3538 break;
3539 default:
3540 printf("%s: unknown patrol mode %d\n",
3541 DEVNAME(sc), prop.op_mode);
3542 break;
3543 }
3544
3545 switch (status.state) {
3546 case MFI_PR_STATE_STOPPED:
3547 bp->bp_status = BIOC_SPSSTOPPED;
3548 break;
3549 case MFI_PR_STATE_READY:
3550 bp->bp_status = BIOC_SPSREADY;
3551 break;
3552 case MFI_PR_STATE_ACTIVE:
3553 bp->bp_status = BIOC_SPSACTIVE;
3554 break;
3555 case MFI_PR_STATE_ABORTED:
3556 bp->bp_status = BIOC_SPSABORTED;
3557 break;
3558 default:
3559 printf("%s: unknown patrol state %d\n",
3560 DEVNAME(sc), status.state);
3561 break;
3562 }
3563
3564 break;
3565
3566 default:
3567 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
3568 "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
3569 return (EINVAL);
3570 }
3571
3572 return (rv);
3573 }
3574 #endif
3575
3576 static int
3577 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3578 {
3579 struct mfi_conf *cfg;
3580 struct mfi_hotspare *hs;
3581 struct mfi_pd_details *pd;
3582 struct bioc_disk *sdhs;
3583 struct bioc_vol *vdhs;
3584 struct scsipi_inquiry_data *inqbuf;
3585 char vend[8+16+4+1], *vendp;
3586 int i, rv = EINVAL;
3587 uint32_t size;
3588 union mfi_mbox mbox;
3589
3590 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3591
3592 if (!bio_hs)
3593 return (EINVAL);
3594
3595 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3596
3597 /* send single element command to retrieve size for full structure */
3598 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3599 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
3600 MFII_DATA_IN, false))
3601 goto freeme;
3602
3603 size = cfg->mfc_size;
3604 free(cfg, M_DEVBUF);
3605
3606 /* memory for read config */
3607 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3608 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
3609 MFII_DATA_IN, false))
3610 goto freeme;
3611
3612 /* calculate offset to hs structure */
3613 hs = (struct mfi_hotspare *)(
3614 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3615 cfg->mfc_array_size * cfg->mfc_no_array +
3616 cfg->mfc_ld_size * cfg->mfc_no_ld);
3617
3618 if (volid < cfg->mfc_no_ld)
3619 goto freeme; /* not a hotspare */
3620
3621 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3622 goto freeme; /* not a hotspare */
3623
3624 /* offset into hotspare structure */
3625 i = volid - cfg->mfc_no_ld;
3626
3627 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3628 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3629 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3630
3631 /* get pd fields */
3632 memset(&mbox, 0, sizeof(mbox));
3633 mbox.s[0] = hs[i].mhs_pd.mfp_id;
3634 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3635 MFII_DATA_IN, false)) {
3636 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3637 DEVNAME(sc));
3638 goto freeme;
3639 }
3640
3641 switch (type) {
3642 case MFI_MGMT_VD:
3643 vdhs = bio_hs;
3644 vdhs->bv_status = BIOC_SVONLINE;
3645 vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3646 vdhs->bv_level = -1; /* hotspare */
3647 vdhs->bv_nodisk = 1;
3648 break;
3649
3650 case MFI_MGMT_SD:
3651 sdhs = bio_hs;
3652 sdhs->bd_status = BIOC_SDHOTSPARE;
3653 sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3654 sdhs->bd_channel = pd->mpd_enc_idx;
3655 sdhs->bd_target = pd->mpd_enc_slot;
3656 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
3657 vendp = inqbuf->vendor;
3658 memcpy(vend, vendp, sizeof vend - 1);
3659 vend[sizeof vend - 1] = '\0';
3660 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3661 break;
3662
3663 default:
3664 goto freeme;
3665 }
3666
3667 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3668 rv = 0;
3669 freeme:
3670 free(pd, M_DEVBUF);
3671 free(cfg, M_DEVBUF);
3672
3673 return (rv);
3674 }
3675
3676 #endif /* NBIO > 0 */
3677
3678 #define MFI_BBU_SENSORS 4
3679
3680 static void
3681 mfii_bbu(struct mfii_softc *sc, envsys_data_t *edata)
3682 {
3683 struct mfi_bbu_status bbu;
3684 u_int32_t status;
3685 u_int32_t mask;
3686 u_int32_t soh_bad;
3687 int rv;
3688
3689 mutex_enter(&sc->sc_lock);
3690 rv = mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
3691 sizeof(bbu), MFII_DATA_IN, false);
3692 mutex_exit(&sc->sc_lock);
3693 if (rv != 0) {
3694 edata->state = ENVSYS_SINVALID;
3695 edata->value_cur = 0;
3696 return;
3697 }
3698
3699 switch (bbu.battery_type) {
3700 case MFI_BBU_TYPE_IBBU:
3701 mask = MFI_BBU_STATE_BAD_IBBU;
3702 soh_bad = 0;
3703 break;
3704 case MFI_BBU_TYPE_BBU:
3705 mask = MFI_BBU_STATE_BAD_BBU;
3706 soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3707 break;
3708
3709 case MFI_BBU_TYPE_NONE:
3710 default:
3711 edata->state = ENVSYS_SCRITICAL;
3712 edata->value_cur = 0;
3713 return;
3714 }
3715
3716 status = le32toh(bbu.fw_status) & mask;
3717 switch (edata->sensor) {
3718 case 0:
3719 edata->value_cur = (status || soh_bad) ? 0 : 1;
3720 edata->state =
3721 edata->value_cur ? ENVSYS_SVALID : ENVSYS_SCRITICAL;
3722 return;
3723 case 1:
3724 edata->value_cur = le16toh(bbu.voltage) * 1000;
3725 edata->state = ENVSYS_SVALID;
3726 return;
3727 case 2:
3728 edata->value_cur = (int16_t)le16toh(bbu.current) * 1000;
3729 edata->state = ENVSYS_SVALID;
3730 return;
3731 case 3:
3732 edata->value_cur = le16toh(bbu.temperature) * 1000000 + 273150000;
3733 edata->state = ENVSYS_SVALID;
3734 return;
3735 }
3736 }
3737
3738 static void
3739 mfii_refresh_ld_sensor(struct mfii_softc *sc, envsys_data_t *edata)
3740 {
3741 struct bioc_vol bv;
3742 int error;
3743
3744 memset(&bv, 0, sizeof(bv));
3745 bv.bv_volid = edata->sensor - MFI_BBU_SENSORS;
3746 mutex_enter(&sc->sc_lock);
3747 error = mfii_ioctl_vol(sc, &bv);
3748 mutex_exit(&sc->sc_lock);
3749 if (error)
3750 bv.bv_status = BIOC_SVINVALID;
3751 bio_vol_to_envsys(edata, &bv);
3752 }
3753
3754 static void
3755 mfii_init_ld_sensor(struct mfii_softc *sc, envsys_data_t *sensor, int i)
3756 {
3757 sensor->units = ENVSYS_DRIVE;
3758 sensor->state = ENVSYS_SINVALID;
3759 sensor->value_cur = ENVSYS_DRIVE_EMPTY;
3760 /* Enable monitoring for drive state changes */
3761 sensor->flags |= ENVSYS_FMONSTCHANGED;
3762 snprintf(sensor->desc, sizeof(sensor->desc), "%s:%d", DEVNAME(sc), i);
3763 }
3764
3765 static void
3766 mfii_attach_sensor(struct mfii_softc *sc, envsys_data_t *s)
3767 {
3768 if (sysmon_envsys_sensor_attach(sc->sc_sme, s))
3769 aprint_error_dev(sc->sc_dev,
3770 "failed to attach sensor %s\n", s->desc);
3771 }
3772
3773 static int
3774 mfii_create_sensors(struct mfii_softc *sc)
3775 {
3776 int i, rv;
3777 const int nsensors = MFI_BBU_SENSORS + MFI_MAX_LD;
3778
3779 sc->sc_sme = sysmon_envsys_create();
3780 sc->sc_sensors = malloc(sizeof(envsys_data_t) * nsensors,
3781 M_DEVBUF, M_WAITOK | M_ZERO);
3782
3783 /* BBU */
3784 sc->sc_sensors[0].units = ENVSYS_INDICATOR;
3785 sc->sc_sensors[0].state = ENVSYS_SINVALID;
3786 sc->sc_sensors[0].value_cur = 0;
3787 sc->sc_sensors[1].units = ENVSYS_SVOLTS_DC;
3788 sc->sc_sensors[1].state = ENVSYS_SINVALID;
3789 sc->sc_sensors[1].value_cur = 0;
3790 sc->sc_sensors[2].units = ENVSYS_SAMPS;
3791 sc->sc_sensors[2].state = ENVSYS_SINVALID;
3792 sc->sc_sensors[2].value_cur = 0;
3793 sc->sc_sensors[3].units = ENVSYS_STEMP;
3794 sc->sc_sensors[3].state = ENVSYS_SINVALID;
3795 sc->sc_sensors[3].value_cur = 0;
3796
3797 if (ISSET(le32toh(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3798 sc->sc_bbuok = true;
3799 sc->sc_sensors[0].flags |= ENVSYS_FMONCRITICAL;
3800 snprintf(sc->sc_sensors[0].desc, sizeof(sc->sc_sensors[0].desc),
3801 "%s BBU state", DEVNAME(sc));
3802 snprintf(sc->sc_sensors[1].desc, sizeof(sc->sc_sensors[1].desc),
3803 "%s BBU voltage", DEVNAME(sc));
3804 snprintf(sc->sc_sensors[2].desc, sizeof(sc->sc_sensors[2].desc),
3805 "%s BBU current", DEVNAME(sc));
3806 snprintf(sc->sc_sensors[3].desc, sizeof(sc->sc_sensors[3].desc),
3807 "%s BBU temperature", DEVNAME(sc));
3808 for (i = 0; i < MFI_BBU_SENSORS; i++) {
3809 mfii_attach_sensor(sc, &sc->sc_sensors[i]);
3810 }
3811 }
3812
3813 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3814 mfii_init_ld_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS], i);
3815 mfii_attach_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS]);
3816 }
3817
3818 sc->sc_sme->sme_name = DEVNAME(sc);
3819 sc->sc_sme->sme_cookie = sc;
3820 sc->sc_sme->sme_refresh = mfii_refresh_sensor;
3821 rv = sysmon_envsys_register(sc->sc_sme);
3822 if (rv) {
3823 aprint_error_dev(sc->sc_dev,
3824 "unable to register with sysmon (rv = %d)\n", rv);
3825 }
3826 return rv;
3827
3828 }
3829
3830 static int
3831 mfii_destroy_sensors(struct mfii_softc *sc)
3832 {
3833 if (sc->sc_sme == NULL)
3834 return 0;
3835 sysmon_envsys_unregister(sc->sc_sme);
3836 sc->sc_sme = NULL;
3837 free(sc->sc_sensors, M_DEVBUF);
3838 return 0;
3839 }
3840
3841 static void
3842 mfii_refresh_sensor(struct sysmon_envsys *sme, envsys_data_t *edata)
3843 {
3844 struct mfii_softc *sc = sme->sme_cookie;
3845
3846 if (edata->sensor >= MFI_BBU_SENSORS + MFI_MAX_LD)
3847 return;
3848
3849 if (edata->sensor < MFI_BBU_SENSORS) {
3850 if (sc->sc_bbuok)
3851 mfii_bbu(sc, edata);
3852 } else {
3853 mfii_refresh_ld_sensor(sc, edata);
3854 }
3855 }
3856