/*	$NetBSD: mfii.c,v 1.7.4.2 2021/03/22 16:23:45 thorpej Exp $ */
2 /* $OpenBSD: mfii.c,v 1.58 2018/08/14 05:22:21 jmatthew Exp $ */
3
4 /*
5 * Copyright (c) 2018 Manuel Bouyer <Manuel.Bouyer (at) lip6.fr>
6 * Copyright (c) 2012 David Gwynne <dlg (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include <sys/cdefs.h>
22 __KERNEL_RCSID(0, "$NetBSD: mfii.c,v 1.7.4.2 2021/03/22 16:23:45 thorpej Exp $");
23
24 #include "bio.h"
25
26 #include <sys/atomic.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/ioctl.h>
31 #include <sys/device.h>
32 #include <sys/kernel.h>
33 #include <sys/proc.h>
34 #include <sys/cpu.h>
35 #include <sys/conf.h>
36 #include <sys/kauth.h>
37 #include <sys/workqueue.h>
38 #include <sys/malloc.h>
39
40 #include <uvm/uvm_param.h>
41
42 #include <dev/pci/pcidevs.h>
43 #include <dev/pci/pcivar.h>
44
45 #include <sys/bus.h>
46
47 #include <dev/sysmon/sysmonvar.h>
48 #include <sys/envsys.h>
49
50 #include <dev/scsipi/scsipi_all.h>
51 #include <dev/scsipi/scsi_all.h>
52 #include <dev/scsipi/scsi_spc.h>
53 #include <dev/scsipi/scsipi_disk.h>
54 #include <dev/scsipi/scsi_disk.h>
55 #include <dev/scsipi/scsiconf.h>
56
57 #if NBIO > 0
58 #include <dev/biovar.h>
59 #endif /* NBIO > 0 */
60
61 #include <dev/ic/mfireg.h>
62 #include <dev/pci/mpiireg.h>
63
64 #define MFII_BAR 0x14
65 #define MFII_BAR_35 0x10
66 #define MFII_PCI_MEMSIZE 0x2000 /* 8k */
67
68 #define MFII_OSTS_INTR_VALID 0x00000009
69 #define MFII_RPI 0x6c /* reply post host index */
70 #define MFII_OSP2 0xb4 /* outbound scratch pad 2 */
71 #define MFII_OSP3 0xb8 /* outbound scratch pad 3 */
72
73 #define MFII_REQ_TYPE_SCSI MPII_REQ_DESCR_SCSI_IO
74 #define MFII_REQ_TYPE_LDIO (0x7 << 1)
75 #define MFII_REQ_TYPE_MFA (0x1 << 1)
76 #define MFII_REQ_TYPE_NO_LOCK (0x2 << 1)
77 #define MFII_REQ_TYPE_HI_PRI (0x6 << 1)
78
79 #define MFII_REQ_MFA(_a) htole64((_a) | MFII_REQ_TYPE_MFA)
80
81 #define MFII_FUNCTION_PASSTHRU_IO (0xf0)
82 #define MFII_FUNCTION_LDIO_REQUEST (0xf1)
83
84 #define MFII_MAX_CHAIN_UNIT 0x00400000
85 #define MFII_MAX_CHAIN_MASK 0x000003E0
86 #define MFII_MAX_CHAIN_SHIFT 5
87
88 #define MFII_256K_IO 128
89 #define MFII_1MB_IO (MFII_256K_IO * 4)
90
91 #define MFII_CHAIN_FRAME_MIN 1024
92
/*
 * Request descriptor written to the controller to post a command.
 * The smid selects a slot in the MPII request frame array.
 */
struct mfii_request_descr {
	u_int8_t	flags;		/* MFII_REQ_TYPE_* request type */
	u_int8_t	msix_index;
	u_int16_t	smid;		/* system message id (frame slot) */

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
101
102 #define MFII_RAID_CTX_IO_TYPE_SYSPD (0x1 << 4)
103 #define MFII_RAID_CTX_TYPE_CUDA (0x2 << 4)
104
/*
 * RAID context appended to the MPII scsi io request; carries the
 * per-IOP register locking flags and returns the command status.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;	/* MFII_RAID_CTX_TYPE_* | nseg */
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;	/* extended status from firmware */
	u_int8_t	status;		/* command completion status */

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
135
/* MPII-style scatter/gather element (64-bit address + flags) */
struct mfii_sge {
	u_int64_t	sg_addr;
	u_int32_t	sg_len;
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;	/* MFII_SGE_* flags below */
} __packed;
143
144 #define MFII_SGE_ADDR_MASK (0x03)
145 #define MFII_SGE_ADDR_SYSTEM (0x00)
146 #define MFII_SGE_ADDR_IOCDDR (0x01)
147 #define MFII_SGE_ADDR_IOCPLB (0x02)
148 #define MFII_SGE_ADDR_IOCPLBNTA (0x03)
149 #define MFII_SGE_END_OF_LIST (0x40)
150 #define MFII_SGE_CHAIN_ELEMENT (0x80)
151
152 #define MFII_REQUEST_SIZE 256
153
154 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
155
156 #define MFII_MAX_ROW 32
157 #define MFII_MAX_ARRAY 128
158
/* physical disk ids making up one array row */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;
162
/* per-physical-disk device handle entry in the firmware LD map */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;	/* handle currently in use */
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;
169
/* logical disk map returned by the MR_DCMD_LD_MAP_GET_INFO dcmd */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
181
/*
 * Task management request/reply pair; the unions overlay the raw
 * 128 byte frames on the structured MPII messages.
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define MFII_TASK_MGMT_FLAGS_LD			(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD			(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
198
199 /* We currently don't know the full details of the following struct */
/* opaque per-config blob; full layout unknown (see comment above) */
struct mfii_foreign_scan_cfg {
	char data[24];
} __packed;
203
/* result of a foreign-configuration scan */
struct mfii_foreign_scan_info {
	uint32_t count;			/* Number of foreign configs found */
	struct mfii_foreign_scan_cfg cfgs[8];
} __packed;
208
/* a bus_dma(9) allocation: its map, segment, size and kernel mapping */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	void			*mdm_kva;
};
215 #define MFII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
216 #define MFII_DMA_LEN(_mdm) ((_mdm)->mdm_size)
217 #define MFII_DMA_DVA(_mdm) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
218 #define MFII_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva)
219
220 struct mfii_softc;
221
/* data transfer direction of a command, from the host's point of view */
typedef enum mfii_direction {
	MFII_DATA_NONE = 0,	/* no data phase */
	MFII_DATA_IN,		/* device to host */
	MFII_DATA_OUT		/* host to device */
} mfii_direction_t;
227
/*
 * Per-command control block.  Each ccb owns a slot in the request,
 * mfi, sense and sgl dma areas; the *_dva fields are the device
 * addresses and the *_offset fields the offsets into those areas.
 */
struct mfii_ccb {
	struct mfii_softc	*ccb_sc;
	void			*ccb_request;		/* MPII request frame */
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	void			*ccb_mfi;		/* MFI command frame */
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;		/* posted descriptor */

	bus_dmamap_t		ccb_dmamap64;
	bus_dmamap_t		ccb_dmamap32;
	bool			ccb_dma64;		/* using the 64bit map */

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	mfii_direction_t	ccb_direction;

	void			*ccb_cookie;		/* caller's context */
	kmutex_t		ccb_mtx;
	kcondvar_t		ccb_cv;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
270 SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
271
/* per-controller-generation parameters, selected by PCI id */
struct mfii_iop {
	int bar;			/* register BAR to map */
	int num_sge_loc;		/* where nseg lives in the request */
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;		/* request descriptor type for LD io */
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;	/* sg_flags marking a chain element */
	u_int8_t sge_flag_eol;		/* sg_flags marking the last element */
};
283
/* per-controller state */
struct mfii_softc {
	device_t		sc_dev;
	struct scsipi_channel	sc_chan;
	struct scsipi_adapter	sc_adapt;

	const struct mfii_iop	*sc_iop;	/* generation parameters */

	pci_chipset_tag_t	sc_pc;		/* pci chipset tag */
	pcitag_t		sc_tag;		/* our pci device tag */

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;
	bus_dma_tag_t		sc_dmat64;
	bool			sc_64bit_dma;	/* sc_dmat64 is really 64bit */

	void			*sc_ih;		/* interrupt handle */

	kmutex_t		sc_ccb_mtx;	/* protects sc_ccb_freeq */
	kmutex_t		sc_post_mtx;

	u_int			sc_max_fw_cmds;	/* limit from firmware */
	u_int			sc_max_cmds;	/* limit we actually use */
	u_int			sc_max_sgl;

	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	kmutex_t		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	struct mfii_dmamem	*sc_requests;	/* MPII request frames */
	struct mfii_dmamem	*sc_mfi;	/* MFI command frames */
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;
	struct mfii_ccb_list	sc_ccb_freeq;

	struct mfii_ccb		*sc_aen_ccb;	/* ccb reserved for AENs */
	struct workqueue	*sc_aen_wq;
	struct work		sc_aen_work;

	kmutex_t		sc_abort_mtx;	/* protects sc_abort_list */
	struct mfii_ccb_list	sc_abort_list;
	struct workqueue	*sc_abort_wq;
	struct work		sc_abort_work;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		bool		ld_present;
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFI_MAX_LD];
	int			sc_target_lds[MFI_MAX_LD];

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfi_ld_list	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
	int			sc_no_pd; /* used physical disks */
	int			sc_ld_sz; /* sizeof sc_ld_details */

	/* mgmt lock */
	kmutex_t		sc_lock;
	bool			sc_running;	/* accepting new commands */

	/* sensors */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
	bool			sc_bbuok;	/* battery backup unit is ok */

	device_t		sc_child;	/* attached scsibus */
};
360
361 // #define MFII_DEBUG
362 #ifdef MFII_DEBUG
363 #define DPRINTF(x...) do { if (mfii_debug) printf(x); } while(0)
364 #define DNPRINTF(n,x...) do { if (mfii_debug & n) printf(x); } while(0)
365 #define MFII_D_CMD 0x0001
366 #define MFII_D_INTR 0x0002
367 #define MFII_D_MISC 0x0004
368 #define MFII_D_DMA 0x0008
369 #define MFII_D_IOCTL 0x0010
370 #define MFII_D_RW 0x0020
371 #define MFII_D_MEM 0x0040
372 #define MFII_D_CCB 0x0080
373 uint32_t mfii_debug = 0
374 /* | MFII_D_CMD */
375 /* | MFII_D_INTR */
376 | MFII_D_MISC
377 /* | MFII_D_DMA */
378 /* | MFII_D_IOCTL */
379 /* | MFII_D_RW */
380 /* | MFII_D_MEM */
381 /* | MFII_D_CCB */
382 ;
383 #else
384 #define DPRINTF(x...)
385 #define DNPRINTF(n,x...)
386 #endif
387
388 static int mfii_match(device_t, cfdata_t, void *);
389 static void mfii_attach(device_t, device_t, void *);
390 static int mfii_detach(device_t, int);
391 static int mfii_rescan(device_t, const char *, const int *);
392 static void mfii_childdetached(device_t, device_t);
393 static bool mfii_suspend(device_t, const pmf_qual_t *);
394 static bool mfii_resume(device_t, const pmf_qual_t *);
395 static bool mfii_shutdown(device_t, int);
396
397
398 CFATTACH_DECL3_NEW(mfii, sizeof(struct mfii_softc),
399 mfii_match, mfii_attach, mfii_detach, NULL, mfii_rescan,
400 mfii_childdetached, DVF_DETACH_SHUTDOWN);
401
402 static void mfii_scsipi_request(struct scsipi_channel *,
403 scsipi_adapter_req_t, void *);
404 static void mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
405
406 #define DEVNAME(_sc) (device_xname((_sc)->sc_dev))
407
408 static u_int32_t mfii_read(struct mfii_softc *, bus_size_t);
409 static void mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
410
411 static struct mfii_dmamem * mfii_dmamem_alloc(struct mfii_softc *, size_t);
412 static void mfii_dmamem_free(struct mfii_softc *,
413 struct mfii_dmamem *);
414
415 static struct mfii_ccb * mfii_get_ccb(struct mfii_softc *);
416 static void mfii_put_ccb(struct mfii_softc *, struct mfii_ccb *);
417 static int mfii_init_ccb(struct mfii_softc *);
418 static void mfii_scrub_ccb(struct mfii_ccb *);
419
420 static int mfii_transition_firmware(struct mfii_softc *);
421 static int mfii_initialise_firmware(struct mfii_softc *);
422 static int mfii_get_info(struct mfii_softc *);
423
424 static void mfii_start(struct mfii_softc *, struct mfii_ccb *);
425 static void mfii_done(struct mfii_softc *, struct mfii_ccb *);
426 static int mfii_poll(struct mfii_softc *, struct mfii_ccb *);
427 static void mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
428 static int mfii_exec(struct mfii_softc *, struct mfii_ccb *);
429 static void mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
430 static int mfii_my_intr(struct mfii_softc *);
431 static int mfii_intr(void *);
432 static void mfii_postq(struct mfii_softc *);
433
434 static int mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
435 void *, int);
436 static int mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
437 void *, int);
438
439 static int mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
440
441 static int mfii_mgmt(struct mfii_softc *, uint32_t,
442 const union mfi_mbox *, void *, size_t,
443 mfii_direction_t, bool);
444 static int mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
445 uint32_t, const union mfi_mbox *, void *, size_t,
446 mfii_direction_t, bool);
447 static void mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
448
449 static int mfii_scsi_cmd_io(struct mfii_softc *,
450 struct mfii_ccb *, struct scsipi_xfer *);
451 static int mfii_scsi_cmd_cdb(struct mfii_softc *,
452 struct mfii_ccb *, struct scsipi_xfer *);
453 static void mfii_scsi_cmd_tmo(void *);
454
455 static void mfii_abort_task(struct work *, void *);
456 static void mfii_abort(struct mfii_softc *, struct mfii_ccb *,
457 uint16_t, uint16_t, uint8_t, uint32_t);
458 static void mfii_scsi_cmd_abort_done(struct mfii_softc *,
459 struct mfii_ccb *);
460
461 static int mfii_aen_register(struct mfii_softc *);
462 static void mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
463 struct mfii_dmamem *, uint32_t);
464 static void mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
465 static void mfii_aen(struct work *, void *);
466 static void mfii_aen_unregister(struct mfii_softc *);
467
468 static void mfii_aen_pd_insert(struct mfii_softc *,
469 const struct mfi_evtarg_pd_address *);
470 static void mfii_aen_pd_remove(struct mfii_softc *,
471 const struct mfi_evtarg_pd_address *);
472 static void mfii_aen_pd_state_change(struct mfii_softc *,
473 const struct mfi_evtarg_pd_state *);
474 static void mfii_aen_ld_update(struct mfii_softc *);
475
476 #if NBIO > 0
477 static int mfii_ioctl(device_t, u_long, void *);
478 static int mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
479 static int mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
480 static int mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
481 static int mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
482 static int mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
483 static int mfii_ioctl_setstate(struct mfii_softc *,
484 struct bioc_setstate *);
485 static int mfii_bio_hs(struct mfii_softc *, int, int, void *);
486 static int mfii_bio_getitall(struct mfii_softc *);
487 #endif /* NBIO > 0 */
488
489 #if 0
490 static const char *mfi_bbu_indicators[] = {
491 "pack missing",
492 "voltage low",
493 "temp high",
494 "charge active",
495 "discharge active",
496 "learn cycle req'd",
497 "learn cycle active",
498 "learn cycle failed",
499 "learn cycle timeout",
500 "I2C errors",
501 "replace pack",
502 "low capacity",
503 "periodic learn req'd"
504 };
505 #endif
506
507 static void mfii_init_ld_sensor(struct mfii_softc *, envsys_data_t *, int);
508 static void mfii_refresh_ld_sensor(struct mfii_softc *, envsys_data_t *);
509 static void mfii_attach_sensor(struct mfii_softc *, envsys_data_t *);
510 static int mfii_create_sensors(struct mfii_softc *);
511 static int mfii_destroy_sensors(struct mfii_softc *);
512 static void mfii_refresh_sensor(struct sysmon_envsys *, envsys_data_t *);
513 static void mfii_bbu(struct mfii_softc *, envsys_data_t *);
514
/*
 * mfii boards support asynchronous (and non-polled) completion of
 * dcmds by proxying them through a passthru mpii command that points
 * at a dcmd frame.  since the passthru command is submitted like
 * the scsi commands using an SMID in the request descriptor,
 * ccb_request memory must contain the passthru command because
 * that is what the SMID refers to.  this means ccb_request cannot
 * contain the dcmd.  rather than allocating separate dma memory to
 * hold the dcmd, we reuse the sense memory buffer for it.
 */
525
526 static void mfii_dcmd_start(struct mfii_softc *, struct mfii_ccb *);
527
/* clear the dcmd frame, which lives in the ccb's sense buffer */
static inline void
mfii_dcmd_scrub(struct mfii_ccb *ccb)
{
	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
}
533
/* return the dcmd frame overlaid on the ccb's sense buffer */
static inline struct mfi_dcmd_frame *
mfii_dcmd_frame(struct mfii_ccb *ccb)
{
	/* the frame must fit in the sense buffer it is overlaid on */
	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
}
540
/* sync the dcmd frame (in the sense dma area) for cpu/device access */
static inline void
mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
}
547
548 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
549
/* IOP parameters for Thunderbolt (SAS2208) controllers */
static const struct mfii_iop mfii_iop_thunderbolt = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	0,
	MFII_REQ_TYPE_LDIO,
	0,
	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
	0
};
559
/*
 * IOP parameters for Invader/Fury (gen 2.5) controllers.
 * a lot of these values depend on us not implementing fastpath yet.
 */
static const struct mfii_iop mfii_iop_25 = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST
};
572
/* IOP parameters for Ventura (gen 3.5) controllers */
static const struct mfii_iop mfii_iop_35 = {
	MFII_BAR_35,
	MFII_IOP_NUM_SGE_LOC_35,
	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST
};
582
/* PCI id to IOP-parameter mapping entry */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};
588
/* table of supported controllers, searched by mfii_find_iop() */
static const struct mfii_device mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 }
};
609
610 static const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
611
612 static const struct mfii_iop *
613 mfii_find_iop(struct pci_attach_args *pa)
614 {
615 const struct mfii_device *mpd;
616 int i;
617
618 for (i = 0; i < __arraycount(mfii_devices); i++) {
619 mpd = &mfii_devices[i];
620
621 if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
622 mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
623 return (mpd->mpd_iop);
624 }
625
626 return (NULL);
627 }
628
629 static int
630 mfii_match(device_t parent, cfdata_t match, void *aux)
631 {
632 return ((mfii_find_iop(aux) != NULL) ? 2 : 0);
633 }
634
635 static void
636 mfii_attach(device_t parent, device_t self, void *aux)
637 {
638 struct mfii_softc *sc = device_private(self);
639 struct pci_attach_args *pa = aux;
640 pcireg_t memtype;
641 pci_intr_handle_t ih;
642 char intrbuf[PCI_INTRSTR_LEN];
643 const char *intrstr;
644 u_int32_t status, scpad2, scpad3;
645 int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
646 struct scsipi_adapter *adapt = &sc->sc_adapt;
647 struct scsipi_channel *chan = &sc->sc_chan;
648
649 /* init sc */
650 sc->sc_dev = self;
651 sc->sc_iop = mfii_find_iop(aux);
652 sc->sc_dmat = pa->pa_dmat;
653 if (pci_dma64_available(pa)) {
654 sc->sc_dmat64 = pa->pa_dmat64;
655 sc->sc_64bit_dma = 1;
656 } else {
657 sc->sc_dmat64 = pa->pa_dmat;
658 sc->sc_64bit_dma = 0;
659 }
660 SIMPLEQ_INIT(&sc->sc_ccb_freeq);
661 mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
662 mutex_init(&sc->sc_post_mtx, MUTEX_DEFAULT, IPL_BIO);
663 mutex_init(&sc->sc_reply_postq_mtx, MUTEX_DEFAULT, IPL_BIO);
664
665 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
666
667 sc->sc_aen_ccb = NULL;
668 snprintf(intrbuf, sizeof(intrbuf) - 1, "%saen", device_xname(self));
669 workqueue_create(&sc->sc_aen_wq, intrbuf, mfii_aen, sc,
670 PRI_BIO, IPL_BIO, WQ_MPSAFE);
671
672 snprintf(intrbuf, sizeof(intrbuf) - 1, "%sabrt", device_xname(self));
673 workqueue_create(&sc->sc_abort_wq, intrbuf, mfii_abort_task,
674 sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
675
676 mutex_init(&sc->sc_abort_mtx, MUTEX_DEFAULT, IPL_BIO);
677 SIMPLEQ_INIT(&sc->sc_abort_list);
678
679 /* wire up the bus shizz */
680 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
681 memtype |= PCI_MAPREG_MEM_TYPE_32BIT;
682 if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
683 &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)) {
684 aprint_error(": unable to map registers\n");
685 return;
686 }
687
688 /* disable interrupts */
689 mfii_write(sc, MFI_OMSK, 0xffffffff);
690
691 if (pci_intr_map(pa, &ih) != 0) {
692 aprint_error(": unable to map interrupt\n");
693 goto pci_unmap;
694 }
695 intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf));
696 pci_intr_setattr(pa->pa_pc, &ih, PCI_INTR_MPSAFE, true);
697
698 /* lets get started */
699 if (mfii_transition_firmware(sc))
700 goto pci_unmap;
701 sc->sc_running = true;
702
703 /* determine max_cmds (refer to the Linux megaraid_sas driver) */
704 scpad3 = mfii_read(sc, MFII_OSP3);
705 status = mfii_fw_state(sc);
706 sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
707 if (sc->sc_max_fw_cmds == 0)
708 sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
709 /*
710 * reduce max_cmds by 1 to ensure that the reply queue depth does not
711 * exceed FW supplied max_fw_cmds.
712 */
713 sc->sc_max_cmds = uimin(sc->sc_max_fw_cmds, 1024) - 1;
714
715 /* determine max_sgl (refer to the Linux megaraid_sas driver) */
716 scpad2 = mfii_read(sc, MFII_OSP2);
717 chain_frame_sz =
718 ((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
719 ((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
720 if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
721 chain_frame_sz = MFII_CHAIN_FRAME_MIN;
722
723 nsge_in_io = (MFII_REQUEST_SIZE -
724 sizeof(struct mpii_msg_scsi_io) -
725 sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
726 nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
727
728 /* round down to nearest power of two */
729 sc->sc_max_sgl = 1;
730 while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
731 sc->sc_max_sgl <<= 1;
732
733 DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
734 DEVNAME(sc), status, scpad2, scpad3);
735 DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
736 DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
737 DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
738 "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
739 sc->sc_max_sgl);
740
741 /* sense memory */
742 CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
743 sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
744 if (sc->sc_sense == NULL) {
745 aprint_error(": unable to allocate sense memory\n");
746 goto pci_unmap;
747 }
748
749 /* reply post queue */
750 sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
751
752 sc->sc_reply_postq = mfii_dmamem_alloc(sc,
753 sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
754 if (sc->sc_reply_postq == NULL)
755 goto free_sense;
756
757 memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
758 MFII_DMA_LEN(sc->sc_reply_postq));
759
760 /* MPII request frame array */
761 sc->sc_requests = mfii_dmamem_alloc(sc,
762 MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
763 if (sc->sc_requests == NULL)
764 goto free_reply_postq;
765
766 /* MFI command frame array */
767 sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
768 if (sc->sc_mfi == NULL)
769 goto free_requests;
770
771 /* MPII SGL array */
772 sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
773 sizeof(struct mfii_sge) * sc->sc_max_sgl);
774 if (sc->sc_sgl == NULL)
775 goto free_mfi;
776
777 if (mfii_init_ccb(sc) != 0) {
778 aprint_error(": could not init ccb list\n");
779 goto free_sgl;
780 }
781
782 /* kickstart firmware with all addresses and pointers */
783 if (mfii_initialise_firmware(sc) != 0) {
784 aprint_error(": could not initialize firmware\n");
785 goto free_sgl;
786 }
787
788 mutex_enter(&sc->sc_lock);
789 if (mfii_get_info(sc) != 0) {
790 mutex_exit(&sc->sc_lock);
791 aprint_error(": could not retrieve controller information\n");
792 goto free_sgl;
793 }
794 mutex_exit(&sc->sc_lock);
795
796 aprint_normal(": \"%s\", firmware %s",
797 sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
798 if (le16toh(sc->sc_info.mci_memory_size) > 0) {
799 aprint_normal(", %uMB cache",
800 le16toh(sc->sc_info.mci_memory_size));
801 }
802 aprint_normal("\n");
803 aprint_naive("\n");
804
805 sc->sc_ih = pci_intr_establish_xname(sc->sc_pc, ih, IPL_BIO,
806 mfii_intr, sc, DEVNAME(sc));
807 if (sc->sc_ih == NULL) {
808 aprint_error_dev(self, "can't establish interrupt");
809 if (intrstr)
810 aprint_error(" at %s", intrstr);
811 aprint_error("\n");
812 goto free_sgl;
813 }
814 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
815
816 for (i = 0; i < sc->sc_info.mci_lds_present; i++)
817 sc->sc_ld[i].ld_present = 1;
818
819 memset(adapt, 0, sizeof(*adapt));
820 adapt->adapt_dev = sc->sc_dev;
821 adapt->adapt_nchannels = 1;
822 /* keep a few commands for management */
823 if (sc->sc_max_cmds > 4)
824 adapt->adapt_openings = sc->sc_max_cmds - 4;
825 else
826 adapt->adapt_openings = sc->sc_max_cmds;
827 adapt->adapt_max_periph = adapt->adapt_openings;
828 adapt->adapt_request = mfii_scsipi_request;
829 adapt->adapt_minphys = minphys;
830 adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
831
832 memset(chan, 0, sizeof(*chan));
833 chan->chan_adapter = adapt;
834 chan->chan_bustype = &scsi_sas_bustype;
835 chan->chan_channel = 0;
836 chan->chan_flags = 0;
837 chan->chan_nluns = 8;
838 chan->chan_ntargets = sc->sc_info.mci_max_lds;
839 chan->chan_id = sc->sc_info.mci_max_lds;
840
841 mfii_rescan(sc->sc_dev, NULL, NULL);
842
843 if (mfii_aen_register(sc) != 0) {
844 /* error printed by mfii_aen_register */
845 goto intr_disestablish;
846 }
847
848 mutex_enter(&sc->sc_lock);
849 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
850 sizeof(sc->sc_ld_list), MFII_DATA_IN, true) != 0) {
851 mutex_exit(&sc->sc_lock);
852 aprint_error_dev(self,
853 "getting list of logical disks failed\n");
854 goto intr_disestablish;
855 }
856 mutex_exit(&sc->sc_lock);
857 memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
858 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
859 int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
860 sc->sc_target_lds[target] = i;
861 }
862
863 /* enable interrupts */
864 mfii_write(sc, MFI_OSTS, 0xffffffff);
865 mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
866
867 #if NBIO > 0
868 if (bio_register(sc->sc_dev, mfii_ioctl) != 0)
869 panic("%s: controller registration failed", DEVNAME(sc));
870 #endif /* NBIO > 0 */
871
872 if (mfii_create_sensors(sc) != 0)
873 aprint_error_dev(self, "unable to create sensors\n");
874
875 if (!pmf_device_register1(sc->sc_dev, mfii_suspend, mfii_resume,
876 mfii_shutdown))
877 aprint_error_dev(self, "couldn't establish power handler\n");
878 return;
879 intr_disestablish:
880 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
881 free_sgl:
882 mfii_dmamem_free(sc, sc->sc_sgl);
883 free_mfi:
884 mfii_dmamem_free(sc, sc->sc_mfi);
885 free_requests:
886 mfii_dmamem_free(sc, sc->sc_requests);
887 free_reply_postq:
888 mfii_dmamem_free(sc, sc->sc_reply_postq);
889 free_sense:
890 mfii_dmamem_free(sc, sc->sc_sense);
891 pci_unmap:
892 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
893 }
894
895 #if 0
896 struct srp_gc mfii_dev_handles_gc =
897 SRP_GC_INITIALIZER(mfii_dev_handles_dtor, NULL);
898
899 static inline uint16_t
900 mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
901 {
902 struct srp_ref sr;
903 uint16_t *map, handle;
904
905 map = srp_enter(&sr, &sc->sc_pd->pd_dev_handles);
906 handle = map[target];
907 srp_leave(&sr);
908
909 return (handle);
910 }
911
912 static int
913 mfii_dev_handles_update(struct mfii_softc *sc)
914 {
915 struct mfii_ld_map *lm;
916 uint16_t *dev_handles = NULL;
917 int i;
918 int rv = 0;
919
920 lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);
921
922 rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
923 MFII_DATA_IN, false);
924
925 if (rv != 0) {
926 rv = EIO;
927 goto free_lm;
928 }
929
930 dev_handles = mallocarray(MFI_MAX_PD, sizeof(*dev_handles),
931 M_DEVBUF, M_WAITOK);
932
933 for (i = 0; i < MFI_MAX_PD; i++)
934 dev_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;
935
936 /* commit the updated info */
937 sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
938 srp_update_locked(&mfii_dev_handles_gc,
939 &sc->sc_pd->pd_dev_handles, dev_handles);
940
941 free_lm:
942 free(lm, M_TEMP, sizeof(*lm));
943
944 return (rv);
945 }
946
947 static void
948 mfii_dev_handles_dtor(void *null, void *v)
949 {
950 uint16_t *dev_handles = v;
951
952 free(dev_handles, M_DEVBUF, sizeof(*dev_handles) * MFI_MAX_PD);
953 }
954 #endif /* 0 */
955
/*
 * autoconf detach: tear down in the reverse order of mfii_attach().
 * Children (the scsibus) must detach first; then sensors, bio, the
 * firmware (via shutdown), the interrupt and finally the dma areas.
 */
static int
mfii_detach(device_t self, int flags)
{
	struct mfii_softc *sc = device_private(self);
	int error;

	/* attach bailed out before the interrupt was set up */
	if (sc->sc_ih == NULL)
		return (0);

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

	mfii_destroy_sensors(sc);
#if NBIO > 0
	bio_unregister(sc->sc_dev);
#endif
	/* tell the firmware to shut down, then mask interrupts */
	mfii_shutdown(sc->sc_dev, 0);
	mfii_write(sc, MFI_OMSK, 0xffffffff);

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
986
987 static int
988 mfii_rescan(device_t self, const char *ifattr, const int *locators)
989 {
990 struct mfii_softc *sc = device_private(self);
991 if (sc->sc_child != NULL)
992 return 0;
993
994 sc->sc_child = config_found(self, &sc->sc_chan, scsiprint,
995 CFARG_IATTR, ifattr,
996 CFARG_LOCATORS, locators,
997 CFARG_EOL);
998 return 0;
999 }
1000
1001 static void
1002 mfii_childdetached(device_t self, device_t child)
1003 {
1004 struct mfii_softc *sc = device_private(self);
1005
1006 KASSERT(self == sc->sc_dev);
1007 KASSERT(child == sc->sc_child);
1008
1009 if (child == sc->sc_child)
1010 sc->sc_child = NULL;
1011 }
1012
/* pmf suspend hook: not implemented, so refuse to suspend */
static bool
mfii_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1019
/* pmf resume hook: not implemented, so report failure */
static bool
mfii_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1026
1027 static bool
1028 mfii_shutdown(device_t dev, int how)
1029 {
1030 struct mfii_softc *sc = device_private(dev);
1031 struct mfii_ccb *ccb;
1032 union mfi_mbox mbox;
1033 bool rv = true;
1034
1035 memset(&mbox, 0, sizeof(mbox));
1036
1037 mutex_enter(&sc->sc_lock);
1038 DNPRINTF(MFI_D_MISC, "%s: mfii_shutdown\n", DEVNAME(sc));
1039 ccb = mfii_get_ccb(sc);
1040 if (ccb == NULL)
1041 return false;
1042 mutex_enter(&sc->sc_ccb_mtx);
1043 if (sc->sc_running) {
1044 sc->sc_running = 0; /* prevent new commands */
1045 mutex_exit(&sc->sc_ccb_mtx);
1046 #if 0 /* XXX why does this hang ? */
1047 mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1048 mfii_scrub_ccb(ccb);
1049 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
1050 NULL, 0, MFII_DATA_NONE, true)) {
1051 aprint_error_dev(dev, "shutdown: cache flush failed\n");
1052 rv = false;
1053 goto fail;
1054 }
1055 printf("ok1\n");
1056 #endif
1057 mbox.b[0] = 0;
1058 mfii_scrub_ccb(ccb);
1059 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, &mbox,
1060 NULL, 0, MFII_DATA_NONE, true)) {
1061 aprint_error_dev(dev, "shutdown: "
1062 "firmware shutdown failed\n");
1063 rv = false;
1064 goto fail;
1065 }
1066 } else {
1067 mutex_exit(&sc->sc_ccb_mtx);
1068 }
1069 fail:
1070 mfii_put_ccb(sc, ccb);
1071 mutex_exit(&sc->sc_lock);
1072 return rv;
1073 }
1074
1075 static u_int32_t
1076 mfii_read(struct mfii_softc *sc, bus_size_t r)
1077 {
1078 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1079 BUS_SPACE_BARRIER_READ);
1080 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
1081 }
1082
/* Write a 4-byte controller register, then barrier so it is posted. */
static void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1090
/*
 * Allocate a single physically-contiguous, zeroed DMA buffer of the
 * given size, mapped into kernel VA and loaded into a dmamap.
 * Returns NULL on failure; on success the caller frees it with
 * mfii_dmamem_free().
 */
static struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_WAITOK | M_ZERO);
	m->mdm_size = size;

	/* nsegments == 1: the controller needs contiguous DMA memory. */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(m->mdm_kva, 0, size);
	return (m);

	/* Unwind in the reverse order of the acquisitions above. */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF);

	return (NULL);
}
1130
/*
 * Release a buffer obtained from mfii_dmamem_alloc().  Teardown is the
 * exact reverse of the allocation order: unload, unmap, free the
 * segment, destroy the map, then free the descriptor itself.
 */
static void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF);
}
1140
/*
 * Submit an MFI DCMD wrapped in an MPII pass-through request.  The
 * request frame layout is: scsi_io header, then raid context, then the
 * chain SGE pointing at the sense/DCMD area.
 */
static void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* offsets are expressed in 32-bit words / 16-byte chain units */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	sge->sg_addr = htole64(ccb->ccb_sense_dva);
	sge->sg_len = htole32(sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1161
/*
 * Register for Asynchronous Event Notifications: fetch the firmware's
 * event-log info, then arm the first AEN request starting at the boot
 * sequence number.  On success the ccb and the event buffer stay owned
 * by the AEN machinery (sc_aen_ccb / ccb_cookie) until detach.
 */
static int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	/* ask the firmware where the event log currently stands */
	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), MFII_DATA_IN, true);
	if (rv != 0) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to get event info\n");
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to allocate event data\n");
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, le32toh(mel.mel_boot_seq_num));

	return (0);
}
1199
/*
 * Queue an MR_DCMD_CTRL_EVENT_WAIT DCMD that completes when the
 * firmware logs an event with sequence number >= seq.  The event
 * detail is DMAed into mdm; completion runs mfii_aen_done().
 */
static void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* subscribe to everything: all locales, debug class and up */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	hdr->mfh_data_len = htole32(MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mdf_mbox.w[0] = htole32(seq);
	dcmd->mdf_mbox.w[1] = htole32(mec.mec_word);
	sgl->sg64[0].addr = htole64(MFII_DMA_DVA(mdm));
	sgl->sg64[0].len = htole32(MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1237
/*
 * AEN completion in interrupt context: defer the actual event handling
 * to the workqueue thread (mfii_aen).
 */
static void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/*
	 * defer to a thread with KERNEL_LOCK so we can run autoconf
	 * We shouldn't have more than one AEN command pending at a time,
	 * so no need to lock
	 */
	if (sc->sc_running)
		workqueue_enqueue(sc->sc_aen_wq, &sc->sc_aen_work, NULL);
}
1251
/*
 * Workqueue handler for a completed AEN: sync the DMA buffers,
 * dispatch on the event code, then re-arm the AEN request for the
 * next sequence number.
 */
static void
mfii_aen(struct work *wk, void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);

	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	DNPRINTF(MFII_D_MISC, "%s: %u %08x %02x %s\n", DEVNAME(sc),
	    le32toh(med->med_seq_num), le32toh(med->med_code),
	    med->med_arg_type, med->med_description);

	switch (le32toh(med->med_code)) {
	case MR_EVT_PD_INSERTED_EXT:
		/* only handle events that carry a PD address argument */
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
	case MR_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MR_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MR_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MR_EVT_LD_CREATED:
	case MR_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	/* re-arm for the event following the one we just processed */
	mfii_aen_start(sc, ccb, mdm, le32toh(med->med_seq_num) + 1);
}
1301
/* Log insertion of a physical disk reported via AEN. */
static void
mfii_aen_pd_insert(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
	printf("%s: physical disk inserted id %d enclosure %d\n", DEVNAME(sc),
	    le16toh(pd->device_id), le16toh(pd->encl_id));
}
1309
/* Log removal of a physical disk reported via AEN. */
static void
mfii_aen_pd_remove(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
	printf("%s: physical disk removed id %d enclosure %d\n", DEVNAME(sc),
	    le16toh(pd->device_id), le16toh(pd->encl_id));
}
1317
static void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
	/* Physical-disk state-change events are ignored for now. */
}
1324
/*
 * Re-read the logical drive list after an LD create/delete event and
 * reconcile it with our cached sc_target_lds: attach sensors for new
 * drives, detach the scsipi target and sensor for removed ones.
 */
static void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	int i, target, old, nld;
	int newlds[MFI_MAX_LD];

	/* sc_lock is only needed around the firmware query */
	mutex_enter(&sc->sc_lock);
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false) != 0) {
		mutex_exit(&sc->sc_lock);
		DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
		    DEVNAME(sc));
		return;
	}
	mutex_exit(&sc->sc_lock);

	/* -1 means "no LD at this target" */
	memset(newlds, -1, sizeof(newlds));

	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, sc->sc_ld_list.mll_list[i].mll_state);
		newlds[target] = i;
	}

	/* diff old vs. new per-target maps */
	for (i = 0; i < MFI_MAX_LD; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			printf("%s: logical drive %d added (target %d)\n",
			    DEVNAME(sc), i, nld);

			// XXX scsi_probe_target(sc->sc_scsibus, i);

			mfii_init_ld_sensor(sc, &sc->sc_sensors[i], i);
			mfii_attach_sensor(sc, &sc->sc_sensors[i]);
		} else if (nld == -1 && old != -1) {
			printf("%s: logical drive %d removed (target %d)\n",
			    DEVNAME(sc), i, old);

			scsipi_target_detach(&sc->sc_chan, i, 0, DETACH_FORCE);
			sysmon_envsys_sensor_detach(sc->sc_sme,
			    &sc->sc_sensors[i]);
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1374
/*
 * Tear down AEN state at detach.  Currently a stub (XXX): the pending
 * EVENT_WAIT DCMD and its DMA buffer are not cancelled/freed here.
 */
static void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1380
/*
 * Drive the firmware state machine to MFI_STATE_READY, issuing the
 * appropriate doorbell writes for each intermediate state and polling
 * (max_wait seconds, in 100ms steps) for each transition.
 * Returns 0 when READY, 1 on fault/timeout/unknown state.
 */
static int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t fw_state, cur_state;
	int max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll up to max_wait seconds for the state to change */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1434
/*
 * Fetch the controller information structure (MR_DCMD_CTRL_GET_INFO)
 * into sc_info and, when debugging is enabled, dump its contents.
 * Returns 0 on success or the mfii_mgmt() error.
 * Caller must hold sc_lock (mfii_mgmt asserts it).
 */
static int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), MFII_DATA_IN, true);

	if (rv != 0)
		return (rv);

	/* everything below is debug output only */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ", sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ", sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}
1589
/*
 * Issue an MFI frame through the legacy MFA doorbell and busy-wait for
 * completion (used before interrupts are available, e.g. IOC init).
 * Returns 0 on completion, 1 on timeout (~5 seconds).
 */
static int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header *hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	/* sentinel: firmware overwrites this when the command completes */
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		/* sync so we observe the firmware's status update */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	/* tear down any data mapping that accompanied the frame */
	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	return (rv);
}
1645
1646 static int
1647 mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
1648 {
1649 void (*done)(struct mfii_softc *, struct mfii_ccb *);
1650 void *cookie;
1651 int rv = 1;
1652
1653 done = ccb->ccb_done;
1654 cookie = ccb->ccb_cookie;
1655
1656 ccb->ccb_done = mfii_poll_done;
1657 ccb->ccb_cookie = &rv;
1658
1659 mfii_start(sc, ccb);
1660
1661 do {
1662 delay(10);
1663 mfii_postq(sc);
1664 } while (rv == 1);
1665
1666 ccb->ccb_cookie = cookie;
1667 done(sc, ccb);
1668
1669 return (0);
1670 }
1671
1672 static void
1673 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1674 {
1675 int *rv = ccb->ccb_cookie;
1676
1677 *rv = 0;
1678 }
1679
/*
 * Run a command and sleep until it completes.  Uses ccb_cookie as the
 * wait channel marker: mfii_exec_done() NULLs it and signals the cv.
 * Always returns 0.
 */
static int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = ccb;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	return (0);
}
1700
/* Completion hook for mfii_exec(): clear the cookie and wake the waiter. */
static void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&ccb->ccb_mtx);
	ccb->ccb_cookie = NULL;
	cv_signal(&ccb->ccb_cv);
	mutex_exit(&ccb->ccb_mtx);
}
1709
1710 static int
1711 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1712 void *buf, size_t len, mfii_direction_t dir, bool poll)
1713 {
1714 struct mfii_ccb *ccb;
1715 int rv;
1716
1717 KASSERT(mutex_owned(&sc->sc_lock));
1718 if (!sc->sc_running)
1719 return EAGAIN;
1720
1721 ccb = mfii_get_ccb(sc);
1722 if (ccb == NULL)
1723 return (ENOMEM);
1724
1725 mfii_scrub_ccb(ccb);
1726 rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, dir, poll);
1727 mfii_put_ccb(sc, ccb);
1728
1729 return (rv);
1730 }
1731
/*
 * Build and execute an MFI DCMD on the supplied ccb: the DCMD frame
 * lives in the ccb's MFI area and is referenced from an MPII
 * pass-through request via a chain SGE.  Optional data buffer buf/len
 * is mapped through the 32-bit dmamap.  Returns 0 on MFI_STAT_OK,
 * EIO/ENOMEM otherwise.
 */
static int
mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
    const union mfi_mbox *mbox, void *buf, size_t len, mfii_direction_t dir,
    bool poll)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	int rv = EIO;

	/* can't sleep on a cv before the scheduler is running */
	if (cold)
		poll = true;

	ccb->ccb_data = buf;
	ccb->ccb_len = len;
	ccb->ccb_direction = dir;
	switch (dir) {
	case MFII_DATA_IN:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case MFII_DATA_OUT:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		break;
	case MFII_DATA_NONE:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
		break;
	}

	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl, poll) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap32->dm_nsegs;
	KASSERT(!ccb->ccb_dma64);

	/*
	 * NOTE(review): opc is stored without htole32(), unlike
	 * mfii_aen_start() which byte-swaps its opcode — confirm
	 * whether this matters on big-endian hosts.
	 */
	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	/* point the MPII pass-through at the MFI frame via a chain SGE */
	io->function = MFII_FUNCTION_PASSTHRU_IO;
	io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
	io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;

	sge->sg_addr = htole64(ccb->ccb_mfi_dva);
	sge->sg_len = htole32(MFI_FRAME_SIZE);
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	if (poll) {
		ccb->ccb_done = mfii_empty_done;
		mfii_poll(sc, ccb);
	} else
		mfii_exec(sc, ccb);

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;
	}

done:
	return (rv);
}
1802
/* No-op completion handler used by the polled mfii_do_mgmt() path. */
static void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
}
1808
/*
 * Map ccb_data/ccb_len through the 32-bit dmamap and fill the legacy
 * MFI 32-bit scatter/gather list at sglp.  Returns 0 on success or if
 * there is no data, 1 on dmamap load failure.
 */
static int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap32;
	int error;
	int i;

	KASSERT(!ccb->ccb_dma64);
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* translate the dmamap segments into 32-bit MFI SG entries */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1841
/*
 * Hand a request to the controller by writing its 64-bit request
 * descriptor to the inbound queue port registers.  The two 32-bit
 * halves must be written low-then-high under sc_post_mtx so concurrent
 * submitters cannot interleave their halves.
 */
static void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	u_long *r = (u_long *)&ccb->ccb_req;

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#if defined(__LP64__) && 0
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
#else
	mutex_enter(&sc->sc_post_mtx);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
	mutex_exit(&sc->sc_post_mtx);
#endif
}
1865
/*
 * Common completion path: sync the request frame and any SGL/data DMA
 * areas, unload the data map that was used (64-bit or 32-bit), then
 * invoke the ccb's completion callback.
 */
static void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_dma64) {
		KASSERT(ccb->ccb_len > 0);
		bus_dmamap_sync(sc->sc_dmat64, ccb->ccb_dmamap64,
		    0, ccb->ccb_dmamap64->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat64, ccb->ccb_dmamap64);
	} else if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	ccb->ccb_done(sc, ccb);
}
1898
1899 static int
1900 mfii_initialise_firmware(struct mfii_softc *sc)
1901 {
1902 struct mpii_msg_iocinit_request *iiq;
1903 struct mfii_dmamem *m;
1904 struct mfii_ccb *ccb;
1905 struct mfi_init_frame *init;
1906 int rv;
1907
1908 m = mfii_dmamem_alloc(sc, sizeof(*iiq));
1909 if (m == NULL)
1910 return (1);
1911
1912 iiq = MFII_DMA_KVA(m);
1913 memset(iiq, 0, sizeof(*iiq));
1914
1915 iiq->function = MPII_FUNCTION_IOC_INIT;
1916 iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
1917
1918 iiq->msg_version_maj = 0x02;
1919 iiq->msg_version_min = 0x00;
1920 iiq->hdr_version_unit = 0x10;
1921 iiq->hdr_version_dev = 0x0;
1922
1923 iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
1924
1925 iiq->reply_descriptor_post_queue_depth =
1926 htole16(sc->sc_reply_postq_depth);
1927 iiq->reply_free_queue_depth = htole16(0);
1928
1929 iiq->sense_buffer_address_high = htole32(
1930 MFII_DMA_DVA(sc->sc_sense) >> 32);
1931
1932 iiq->reply_descriptor_post_queue_address_lo =
1933 htole32(MFII_DMA_DVA(sc->sc_reply_postq));
1934 iiq->reply_descriptor_post_queue_address_hi =
1935 htole32(MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
1936
1937 iiq->system_request_frame_base_address_lo =
1938 htole32(MFII_DMA_DVA(sc->sc_requests));
1939 iiq->system_request_frame_base_address_hi =
1940 htole32(MFII_DMA_DVA(sc->sc_requests) >> 32);
1941
1942 iiq->timestamp = htole64(time_uptime);
1943
1944 ccb = mfii_get_ccb(sc);
1945 if (ccb == NULL) {
1946 /* shouldn't ever run out of ccbs during attach */
1947 return (1);
1948 }
1949 mfii_scrub_ccb(ccb);
1950 init = ccb->ccb_request;
1951
1952 init->mif_header.mfh_cmd = MFI_CMD_INIT;
1953 init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
1954 init->mif_qinfo_new_addr_lo = htole32(MFII_DMA_DVA(m));
1955 init->mif_qinfo_new_addr_hi = htole32((uint64_t)MFII_DMA_DVA(m) >> 32);
1956
1957 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
1958 0, MFII_DMA_LEN(sc->sc_reply_postq),
1959 BUS_DMASYNC_PREREAD);
1960
1961 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1962 0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
1963
1964 rv = mfii_mfa_poll(sc, ccb);
1965
1966 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1967 0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
1968
1969 mfii_put_ccb(sc, ccb);
1970 mfii_dmamem_free(sc, m);
1971
1972 return (rv);
1973 }
1974
1975 static int
1976 mfii_my_intr(struct mfii_softc *sc)
1977 {
1978 u_int32_t status;
1979
1980 status = mfii_read(sc, MFI_OSTS);
1981
1982 DNPRINTF(MFII_D_INTR, "%s: intr status 0x%x\n", DEVNAME(sc), status);
1983 if (ISSET(status, 0x1)) {
1984 mfii_write(sc, MFI_OSTS, status);
1985 return (1);
1986 }
1987
1988 return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
1989 }
1990
/* Interrupt handler: drain the reply post queue if the intr is ours. */
static int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc)) {
		mfii_postq(sc);
		return (1);
	}

	return (0);
}
2003
/*
 * Drain the reply post queue: collect completed ccbs under the queue
 * mutex, acknowledge consumed descriptors to the controller via
 * MFII_RPI, then run the completions outside the mutex.
 */
static void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mutex_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		DNPRINTF(MFII_D_INTR, "%s: mfii_postq index %d flags 0x%x data 0x%x\n",
		    DEVNAME(sc), sc->sc_reply_postq_index, rdp->reply_flags,
			rdp->data == 0xffffffff);
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1-based; 0 is reserved */
		ccb = &sc->sc_ccb[le16toh(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		/* mark the descriptor unused for the next pass */
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mutex_exit(&sc->sc_reply_postq_mtx);

	/* run completions without holding the reply queue mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2058
/*
 * scsipi adapter entry point: dispatch xfer-mode queries and SCSI
 * command submissions to the controller.  I/O opcodes take the fast
 * LDIO path, everything else is sent as a raw CDB pass-through.
 */
static void
mfii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct mfii_softc *sc = device_private(adapt->adapt_dev);
	struct mfii_ccb *ccb;
	int timeout;
	int target;

	switch(req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	/* only LUN 0 of present logical drives is addressable */
	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
	    periph->periph_lun != 0) {
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		return;
	}

	if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
	    xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
		/* the cache is stable storage, don't flush */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		scsipi_done(xs);
		return;
	}

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}
	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	/* arm the per-xfer timeout; mfii_scsi_cmd_done stops it */
	timeout = mstohz(xs->timeout);
	if (timeout == 0)
		timeout = 1;
	callout_reset(&xs->xs_callout, timeout, mfii_scsi_cmd_tmo, ccb);

	switch (xs->cmd->opcode) {
	case SCSI_READ_6_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
	case SCSI_WRITE_6_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		/* block I/O uses the accelerated LDIO request format */
		if (mfii_scsi_cmd_io(sc, ccb, xs) != 0)
			goto stuffup;
		break;

	default:
		if (mfii_scsi_cmd_cdb(sc, ccb, xs) != 0)
			goto stuffup;
		break;
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	DNPRINTF(MFII_D_CMD, "%s: start io %d cmd %d\n", DEVNAME(sc), target,
	    xs->cmd->opcode);

	if (xs->xs_control & XS_CTL_POLL) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsipi_done(xs);
	mfii_put_ccb(sc, ccb);
}
2166
/*
 * Completion handler for SCSI I/O ccbs issued via mfii_scsipi_request().
 * Translates the MFI RAID-context status into a scsipi error code and
 * completes the xfer.
 */
static void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context immediately follows the MPI request in the frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	/*
	 * If callout_stop() says the timeout already fired, the abort
	 * path (mfii_scsi_cmd_tmo/mfii_abort_task) owns this ccb and
	 * will complete the xfer and release the ccb itself.
	 */
	if (callout_stop(&xs->xs_callout) != 0)
		return;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* firmware supplied sense data; copy it out for scsipi */
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	scsipi_done(xs);
	mfii_put_ccb(sc, ccb);
}
2200
/*
 * Build a fast-path LD read/write request for xs in ccb.
 * Returns 0 on success, 1 if the data buffer could not be DMA-mapped.
 */
static int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context immediately follows the MPI request */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL starts right after the RAID context; offset in 4-byte units */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	/* map the data buffer and emit the SGL right after the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	/* the SGE count lives in a different context field per IOP flavour */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2260
/*
 * Build a generic (non-read/write) SCSI command for an LD in ccb.
 * Returns 0 on success, 1 if the data buffer could not be DMA-mapped.
 */
static int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context immediately follows the MPI request */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL starts right after the RAID context; offset in 4-byte units */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(periph->periph_lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	/* map the data buffer and emit the SGL right after the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2308
/*
 * Physical-disk (system PD / JBOD) passthrough support inherited from
 * the OpenBSD driver.  It still uses OpenBSD's scsi_link interface and
 * has not been ported to the NetBSD scsipi layer, hence compiled out.
 */
#if 0
void
mfii_pd_scsi_cmd(struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	// XXX timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->xs_control, XS_CTL_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	// XXX timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}

int
mfii_pd_scsi_probe(struct scsi_link *link)
{
	struct mfii_softc *sc = link->adapter_softc;
	struct mfi_pd_details mpd;
	union mfi_mbox mbox;
	int rv;

	if (link->lun > 0)
		return (0);

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = htole16(link->target);

	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
	    MFII_DATA_IN, true);
	if (rv != 0)
		return (EIO);

	/* only disks in the SYSTEM (passthrough) state are exposed */
	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
		return (ENXIO);

	return (0);
}

int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
#endif
2429
/*
 * Map ccb->ccb_data for DMA and write the scatter/gather list for it
 * starting at sglp (inside the request frame).  When the segments do
 * not all fit in the frame, the tail goes into the ccb's external SGL
 * area, linked from the frame with a chain SGE.  Returns 0 on success,
 * 1 if the dmamap load failed.
 */
static int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;	/* chain SGE, if one is needed */
	bus_dmamap_t dmap = ccb->ccb_dmamap64;
	u_int space;
	int i;

	int error;

	/* no data transfer, no SGL needed */
	if (ccb->ccb_len == 0)
		return (0);

	ccb->ccb_dma64 = true;
	error = bus_dmamap_load(sc->sc_dmat64, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* how many SGEs fit in the remainder of the request frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain SGE */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is expressed in 16-byte units */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* on reaching the chain slot, continue in the external SGL */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the final SGE as end-of-list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat64, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* flush the external SGL area as well if we spilled into it */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2497
2498 static void
2499 mfii_scsi_cmd_tmo(void *p)
2500 {
2501 struct mfii_ccb *ccb = p;
2502 struct mfii_softc *sc = ccb->ccb_sc;
2503 bool start_abort;
2504
2505 printf("%s: cmd timeout ccb %p\n", DEVNAME(sc), p);
2506
2507 mutex_enter(&sc->sc_abort_mtx);
2508 start_abort = (SIMPLEQ_FIRST(&sc->sc_abort_list) == 0);
2509 SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2510 if (start_abort)
2511 workqueue_enqueue(sc->sc_abort_wq, &sc->sc_abort_work, NULL);
2512 mutex_exit(&sc->sc_abort_mtx);
2513 }
2514
/*
 * Abort worker (runs on sc_abort_wq): drains the list of timed-out
 * ccbs queued by mfii_scsi_cmd_tmo() and issues a task-management
 * abort for each one still belonging to a present logical disk.
 */
static void
mfii_abort_task(struct work *wk, void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* atomically steal the whole abort list */
	mutex_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mutex_exit(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsipi_xfer *xs = ccb->ccb_cookie;
		struct scsipi_periph *periph = xs->xs_periph;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		if (!sc->sc_ld[periph->periph_target].ld_present) {
			/* device is gone */
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			mfii_put_ccb(sc, ccb);
			continue;
		}

		/*
		 * NOTE(review): mfii_get_ccb() can return NULL (free list
		 * empty or adapter not running), which would be dereferenced
		 * by mfii_scrub_ccb() below -- confirm whether a check is
		 * needed here.
		 */
		accb = mfii_get_ccb(sc);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, periph->periph_target, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2554
2555 static void
2556 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2557 uint16_t smid, uint8_t type, uint32_t flags)
2558 {
2559 struct mfii_task_mgmt *msg;
2560 struct mpii_msg_scsi_task_request *req;
2561
2562 msg = accb->ccb_request;
2563 req = &msg->mpii_request;
2564 req->dev_handle = dev_handle;
2565 req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2566 req->task_type = type;
2567 req->task_mid = htole16( smid);
2568 msg->flags = flags;
2569
2570 accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2571 accb->ccb_req.smid = le16toh(accb->ccb_smid);
2572 }
2573
2574 static void
2575 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2576 {
2577 struct mfii_ccb *ccb = accb->ccb_cookie;
2578 struct scsipi_xfer *xs = ccb->ccb_cookie;
2579
2580 /* XXX check accb completion? */
2581
2582 mfii_put_ccb(sc, accb);
2583 printf("%s: cmd aborted ccb %p\n", DEVNAME(sc), ccb);
2584
2585 xs->error = XS_TIMEOUT;
2586 scsipi_done(xs);
2587 mfii_put_ccb(sc, ccb);
2588 }
2589
2590 static struct mfii_ccb *
2591 mfii_get_ccb(struct mfii_softc *sc)
2592 {
2593 struct mfii_ccb *ccb;
2594
2595 mutex_enter(&sc->sc_ccb_mtx);
2596 if (!sc->sc_running) {
2597 ccb = NULL;
2598 } else {
2599 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2600 if (ccb != NULL)
2601 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2602 }
2603 mutex_exit(&sc->sc_ccb_mtx);
2604 return (ccb);
2605 }
2606
2607 static void
2608 mfii_scrub_ccb(struct mfii_ccb *ccb)
2609 {
2610 ccb->ccb_cookie = NULL;
2611 ccb->ccb_done = NULL;
2612 ccb->ccb_flags = 0;
2613 ccb->ccb_data = NULL;
2614 ccb->ccb_direction = MFII_DATA_NONE;
2615 ccb->ccb_dma64 = false;
2616 ccb->ccb_len = 0;
2617 ccb->ccb_sgl_len = 0;
2618 memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2619 memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2620 memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2621 }
2622
/*
 * Return a ccb to the head of the free list (LIFO for cache warmth).
 */
static void
mfii_put_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
	mutex_exit(&sc->sc_ccb_mtx);
}
2630
2631 static int
2632 mfii_init_ccb(struct mfii_softc *sc)
2633 {
2634 struct mfii_ccb *ccb;
2635 u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2636 u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2637 u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2638 u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2639 u_int i;
2640 int error;
2641
2642 sc->sc_ccb = malloc(sc->sc_max_cmds * sizeof(struct mfii_ccb),
2643 M_DEVBUF, M_WAITOK|M_ZERO);
2644
2645 for (i = 0; i < sc->sc_max_cmds; i++) {
2646 ccb = &sc->sc_ccb[i];
2647 ccb->ccb_sc = sc;
2648
2649 /* create a dma map for transfer */
2650 error = bus_dmamap_create(sc->sc_dmat,
2651 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2652 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap32);
2653 if (error) {
2654 printf("%s: cannot create ccb dmamap32 (%d)\n",
2655 DEVNAME(sc), error);
2656 goto destroy;
2657 }
2658 error = bus_dmamap_create(sc->sc_dmat64,
2659 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2660 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap64);
2661 if (error) {
2662 printf("%s: cannot create ccb dmamap64 (%d)\n",
2663 DEVNAME(sc), error);
2664 goto destroy32;
2665 }
2666
2667 /* select i + 1'th request. 0 is reserved for events */
2668 ccb->ccb_smid = i + 1;
2669 ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2670 ccb->ccb_request = request + ccb->ccb_request_offset;
2671 ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2672 ccb->ccb_request_offset;
2673
2674 /* select i'th MFI command frame */
2675 ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2676 ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2677 ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2678 ccb->ccb_mfi_offset;
2679
2680 /* select i'th sense */
2681 ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2682 ccb->ccb_sense = (struct mfi_sense *)(sense +
2683 ccb->ccb_sense_offset);
2684 ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2685 ccb->ccb_sense_offset;
2686
2687 /* select i'th sgl */
2688 ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2689 sc->sc_max_sgl * i;
2690 ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2691 ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2692 ccb->ccb_sgl_offset;
2693
2694 mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2695 cv_init(&ccb->ccb_cv, "mfiiexec");
2696
2697 /* add ccb to queue */
2698 mfii_put_ccb(sc, ccb);
2699 }
2700
2701 return (0);
2702
2703 destroy32:
2704 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2705 destroy:
2706 /* free dma maps and ccb memory */
2707 while ((ccb = mfii_get_ccb(sc)) != NULL) {
2708 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2709 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap64);
2710 }
2711
2712 free(sc->sc_ccb, M_DEVBUF);
2713
2714 return (1);
2715 }
2716
2717 #if NBIO > 0
2718 static int
2719 mfii_ioctl(device_t dev, u_long cmd, void *addr)
2720 {
2721 struct mfii_softc *sc = device_private(dev);
2722 int error = 0;
2723
2724 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));
2725
2726 mutex_enter(&sc->sc_lock);
2727
2728 switch (cmd) {
2729 case BIOCINQ:
2730 DNPRINTF(MFII_D_IOCTL, "inq\n");
2731 error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
2732 break;
2733
2734 case BIOCVOL:
2735 DNPRINTF(MFII_D_IOCTL, "vol\n");
2736 error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
2737 break;
2738
2739 case BIOCDISK:
2740 DNPRINTF(MFII_D_IOCTL, "disk\n");
2741 error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
2742 break;
2743
2744 case BIOCALARM:
2745 DNPRINTF(MFII_D_IOCTL, "alarm\n");
2746 error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
2747 break;
2748
2749 case BIOCBLINK:
2750 DNPRINTF(MFII_D_IOCTL, "blink\n");
2751 error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
2752 break;
2753
2754 case BIOCSETSTATE:
2755 DNPRINTF(MFII_D_IOCTL, "setstate\n");
2756 error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
2757 break;
2758
2759 #if 0
2760 case BIOCPATROL:
2761 DNPRINTF(MFII_D_IOCTL, "patrol\n");
2762 error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
2763 break;
2764 #endif
2765
2766 default:
2767 DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
2768 error = ENOTTY;
2769 }
2770
2771 mutex_exit(&sc->sc_lock);
2772
2773 return (error);
2774 }
2775
/*
 * Refresh the cached controller state consumed by the bio(4) handlers:
 * controller info (sc_info), the full firmware configuration (sc_cfg),
 * the logical disk list (sc_ld_list), per-LD details (sc_ld_details)
 * and the count of configured physical disks (sc_no_pd).
 * Returns 0 on success, EINVAL on any failure.
 */
static int
mfii_bio_getitall(struct mfii_softc *sc)
{
	int i, d, rv = EINVAL;
	size_t size;
	union mfi_mbox mbox;
	struct mfi_conf *cfg = NULL;
	struct mfi_ld_details *ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* mfc_size is the total size the firmware needs for the full config */
	size = cfg->mfc_size;
	free(cfg, M_DEVBUF);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF);
	sc->sc_cfg = cfg;

	/* get all ld info */
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false))
		goto done;

	/* get memory for all ld structures; reallocated only when it grows */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		/* mailbox byte 0 selects the LD target id to query */
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox,
		    &sc->sc_ld_details[i], size, MFII_DATA_IN, false))
			goto done;

		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
2856
2857 static int
2858 mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
2859 {
2860 int rv = EINVAL;
2861 struct mfi_conf *cfg = NULL;
2862
2863 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));
2864
2865 if (mfii_bio_getitall(sc)) {
2866 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
2867 DEVNAME(sc));
2868 goto done;
2869 }
2870
2871 /* count unused disks as volumes */
2872 if (sc->sc_cfg == NULL)
2873 goto done;
2874 cfg = sc->sc_cfg;
2875
2876 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
2877 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
2878 #if notyet
2879 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
2880 (bi->bi_nodisk - sc->sc_no_pd);
2881 #endif
2882 /* tell bio who we are */
2883 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
2884
2885 rv = 0;
2886 done:
2887 return (rv);
2888 }
2889
/*
 * BIOCVOL handler: fill in the bioc_vol structure for the requested
 * volume id.  Ids beyond the logical disk list refer to hotspares and
 * unused disks and are handled by mfii_bio_hs().
 */
static int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int i, per, rv = EINVAL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	strlcpy(bv->bv_dev, sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_name,
	    sizeof(bv->bv_dev));

	/* map the firmware LD state to a bio volume status */
	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
	case MFI_LD_PROG_BGI:
		/* consistency check / background init in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

#if 0
	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;
#endif

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	/* spanned volumes are reported as level * 10, e.g. RAID 10/50/60 */
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
2977
/*
 * BIOCDISK handler: locate the physical disk that backs position
 * bd_diskid of volume bd_volid (walking the array/span layout in the
 * cached firmware config) and fill in the bioc_disk structure.
 * Volume ids beyond the configured LDs are handed to mfii_bio_hs().
 */
static int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf *cfg;
	struct mfi_array *ar;
	struct mfi_ld_cfg *ld;
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	struct scsipi_inquiry_data *inqbuf;
	char vend[8+16+4+1], *vendp;
	int i, rv = EINVAL;
	int arr, vol, disk, span;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	/* 0xffff in the array slot means no disk is mapped there */
	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    MFII_DATA_IN, false))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false))
				continue;

			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no rebuild candidate found; report the missing disk */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    MFII_DATA_IN, false)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/*
	 * Copy sizeof(vend)-1 (28) bytes starting at the INQUIRY vendor
	 * field -- presumably grabbing the contiguous vendor+product+
	 * revision fields in one go; TODO confirm struct layout.
	 */
	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

#if 0
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}
#endif

	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);

	return (rv);
}
3131
3132 static int
3133 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3134 {
3135 uint32_t opc;
3136 int rv = 0;
3137 int8_t ret;
3138 mfii_direction_t dir = MFII_DATA_NONE;
3139
3140 switch(ba->ba_opcode) {
3141 case BIOC_SADISABLE:
3142 opc = MR_DCMD_SPEAKER_DISABLE;
3143 break;
3144
3145 case BIOC_SAENABLE:
3146 opc = MR_DCMD_SPEAKER_ENABLE;
3147 break;
3148
3149 case BIOC_SASILENCE:
3150 opc = MR_DCMD_SPEAKER_SILENCE;
3151 break;
3152
3153 case BIOC_GASTATUS:
3154 opc = MR_DCMD_SPEAKER_GET;
3155 dir = MFII_DATA_IN;
3156 break;
3157
3158 case BIOC_SATEST:
3159 opc = MR_DCMD_SPEAKER_TEST;
3160 break;
3161
3162 default:
3163 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3164 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3165 return (EINVAL);
3166 }
3167
3168 if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), dir, false))
3169 rv = EINVAL;
3170 else
3171 if (ba->ba_opcode == BIOC_GASTATUS)
3172 ba->ba_status = ret;
3173 else
3174 ba->ba_status = 0;
3175
3176 return (rv);
3177 }
3178
/*
 * BIOCBLINK handler: locate the physical disk at the given enclosure/
 * slot in the firmware's PD list and issue a locate-LED blink/unblink
 * DCMD for it.  Returns 0 on success, EINVAL otherwise.
 */
static int
mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
{
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;
	uint32_t cmd;
	struct mfi_pd_list *pd;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
	    bb->bb_status);

	/* channel 0 means not in an enclosure so can't be blinked */
	if (bb->bb_channel == 0)
		return (EINVAL);

	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	/* match the requested enclosure/slot against the PD list */
	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	/* mailbox word 0 carries the device id of the PD to blink */
	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;

	switch (bb->bb_status) {
	case BIOC_SBUNBLINK:
		cmd = MR_DCMD_PD_UNBLINK;
		break;

	case BIOC_SBBLINK:
		cmd = MR_DCMD_PD_BLINK;
		break;

	case BIOC_SBALARM:
	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
		goto done;
	}


	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, MFII_DATA_NONE, false))
		goto done;

	rv = 0;
done:
	free(pd, M_DEVBUF);
	return (rv);
}
3238
/*
 * Bring the physical disk pd_id into the UNCONFIG_GOOD state so it can
 * be reused: flip UNCONFIG_BAD to UNCONFIG_GOOD, then clear any foreign
 * (imported) configuration.  Re-reads the PD state between steps and
 * verifies the final state.  Returns 0 on success, an errno otherwise.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details *pd;
	union mfi_mbox mbox;
	int rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		/* state change needs the current sequence number as well */
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
		    MFII_DATA_NONE, false);
		if (rv != 0)
			goto done;
	}

	/* re-read the PD state after the (possible) state change */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		/* scan for foreign configs and clear them if any exist */
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL,
		    fsi, sizeof(*fsi), MFII_DATA_IN, false);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL,
			    NULL, 0, MFII_DATA_NONE, false);
			if (rv != 0)
				goto done;
		}
	}

	/* final verification that the disk ended up usable */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3305
3306 static int
3307 mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
3308 {
3309 struct mfi_hotspare *hs;
3310 struct mfi_pd_details *pd;
3311 union mfi_mbox mbox;
3312 size_t size;
3313 int rv = EINVAL;
3314
3315 /* we really could skip and expect that inq took care of it */
3316 if (mfii_bio_getitall(sc)) {
3317 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3318 DEVNAME(sc));
3319 return (rv);
3320 }
3321 size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;
3322
3323 hs = malloc(size, M_DEVBUF, M_WAITOK);
3324 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3325
3326 memset(&mbox, 0, sizeof mbox);
3327 mbox.s[0] = pd_id;
3328 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3329 MFII_DATA_IN, false);
3330 if (rv != 0)
3331 goto done;
3332
3333 memset(hs, 0, size);
3334 hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
3335 hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
3336 rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size,
3337 MFII_DATA_OUT, false);
3338
3339 done:
3340 free(hs, M_DEVBUF);
3341 free(pd, M_DEVBUF);
3342
3343 return (rv);
3344 }
3345
/*
 * BIOCSETSTATE handler: change the firmware state of the physical disk
 * addressed by (bs_channel = enclosure index, bs_target = slot).
 * Supported targets: online, offline, hotspare, rebuild.
 * Returns 0 on success or an errno-style value.
 */
static int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
	    MFII_DATA_IN, false))
		goto done;

	/* Translate enclosure/slot addressing into a firmware PD id. */
	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	/* Fetch PD details; we need the current sequence number. */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	/* b[4] carries the requested new firmware state. */
	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		/*
		 * A drive that is not already OFFLINE must first be made
		 * "good" (clear UNCONFIG_BAD/foreign config) and turned
		 * into a spare before a rebuild can be requested.
		 */
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* Re-read: state and seq changed above. */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
	    MFII_DATA_NONE, false);
done:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);
	return (rv);
}
3440
#if 0
/*
 * BIOCPATROL handler: control and query the firmware "patrol read"
 * background media scan.  Currently compiled out (#if 0).
 *
 * NOTE(review): the BIOC_SPSTOP/BIOC_SPSTART case passes MFII_DATA_IN
 * with a NULL buffer and zero length; MFII_DATA_NONE looks intended —
 * verify before re-enabling this code.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t opc;
	int rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status status;
	uint32_t time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, MFII_DATA_IN, false))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* Read-modify-write the patrol properties. */
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			/* bp_autoival: -1 = continuous, >0 = interval. */
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			/* bp_autonext: delay of next run, relative to now. */
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					prop.next_exec = time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_OUT, false))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* Map firmware mode/state to the bio(4) reply fields. */
		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
#endif
3577
3578 static int
3579 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3580 {
3581 struct mfi_conf *cfg;
3582 struct mfi_hotspare *hs;
3583 struct mfi_pd_details *pd;
3584 struct bioc_disk *sdhs;
3585 struct bioc_vol *vdhs;
3586 struct scsipi_inquiry_data *inqbuf;
3587 char vend[8+16+4+1], *vendp;
3588 int i, rv = EINVAL;
3589 uint32_t size;
3590 union mfi_mbox mbox;
3591
3592 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3593
3594 if (!bio_hs)
3595 return (EINVAL);
3596
3597 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3598
3599 /* send single element command to retrieve size for full structure */
3600 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3601 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
3602 MFII_DATA_IN, false))
3603 goto freeme;
3604
3605 size = cfg->mfc_size;
3606 free(cfg, M_DEVBUF);
3607
3608 /* memory for read config */
3609 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3610 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
3611 MFII_DATA_IN, false))
3612 goto freeme;
3613
3614 /* calculate offset to hs structure */
3615 hs = (struct mfi_hotspare *)(
3616 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3617 cfg->mfc_array_size * cfg->mfc_no_array +
3618 cfg->mfc_ld_size * cfg->mfc_no_ld);
3619
3620 if (volid < cfg->mfc_no_ld)
3621 goto freeme; /* not a hotspare */
3622
3623 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3624 goto freeme; /* not a hotspare */
3625
3626 /* offset into hotspare structure */
3627 i = volid - cfg->mfc_no_ld;
3628
3629 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3630 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3631 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3632
3633 /* get pd fields */
3634 memset(&mbox, 0, sizeof(mbox));
3635 mbox.s[0] = hs[i].mhs_pd.mfp_id;
3636 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3637 MFII_DATA_IN, false)) {
3638 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3639 DEVNAME(sc));
3640 goto freeme;
3641 }
3642
3643 switch (type) {
3644 case MFI_MGMT_VD:
3645 vdhs = bio_hs;
3646 vdhs->bv_status = BIOC_SVONLINE;
3647 vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3648 vdhs->bv_level = -1; /* hotspare */
3649 vdhs->bv_nodisk = 1;
3650 break;
3651
3652 case MFI_MGMT_SD:
3653 sdhs = bio_hs;
3654 sdhs->bd_status = BIOC_SDHOTSPARE;
3655 sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3656 sdhs->bd_channel = pd->mpd_enc_idx;
3657 sdhs->bd_target = pd->mpd_enc_slot;
3658 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
3659 vendp = inqbuf->vendor;
3660 memcpy(vend, vendp, sizeof vend - 1);
3661 vend[sizeof vend - 1] = '\0';
3662 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3663 break;
3664
3665 default:
3666 goto freeme;
3667 }
3668
3669 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3670 rv = 0;
3671 freeme:
3672 free(pd, M_DEVBUF);
3673 free(cfg, M_DEVBUF);
3674
3675 return (rv);
3676 }
3677
3678 #endif /* NBIO > 0 */
3679
3680 #define MFI_BBU_SENSORS 4
3681
3682 static void
3683 mfii_bbu(struct mfii_softc *sc, envsys_data_t *edata)
3684 {
3685 struct mfi_bbu_status bbu;
3686 u_int32_t status;
3687 u_int32_t mask;
3688 u_int32_t soh_bad;
3689 int rv;
3690
3691 mutex_enter(&sc->sc_lock);
3692 rv = mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
3693 sizeof(bbu), MFII_DATA_IN, false);
3694 mutex_exit(&sc->sc_lock);
3695 if (rv != 0) {
3696 edata->state = ENVSYS_SINVALID;
3697 edata->value_cur = 0;
3698 return;
3699 }
3700
3701 switch (bbu.battery_type) {
3702 case MFI_BBU_TYPE_IBBU:
3703 mask = MFI_BBU_STATE_BAD_IBBU;
3704 soh_bad = 0;
3705 break;
3706 case MFI_BBU_TYPE_BBU:
3707 mask = MFI_BBU_STATE_BAD_BBU;
3708 soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3709 break;
3710
3711 case MFI_BBU_TYPE_NONE:
3712 default:
3713 edata->state = ENVSYS_SCRITICAL;
3714 edata->value_cur = 0;
3715 return;
3716 }
3717
3718 status = le32toh(bbu.fw_status) & mask;
3719 switch(edata->sensor) {
3720 case 0:
3721 edata->value_cur = (status || soh_bad) ? 0 : 1;
3722 edata->state =
3723 edata->value_cur ? ENVSYS_SVALID : ENVSYS_SCRITICAL;
3724 return;
3725 case 1:
3726 edata->value_cur = le16toh(bbu.voltage) * 1000;
3727 edata->state = ENVSYS_SVALID;
3728 return;
3729 case 2:
3730 edata->value_cur = (int16_t)le16toh(bbu.current) * 1000;
3731 edata->state = ENVSYS_SVALID;
3732 return;
3733 case 3:
3734 edata->value_cur = le16toh(bbu.temperature) * 1000000 + 273150000;
3735 edata->state = ENVSYS_SVALID;
3736 return;
3737 }
3738 }
3739
3740 static void
3741 mfii_refresh_ld_sensor(struct mfii_softc *sc, envsys_data_t *edata)
3742 {
3743 struct bioc_vol bv;
3744 int error;
3745
3746 memset(&bv, 0, sizeof(bv));
3747 bv.bv_volid = edata->sensor - MFI_BBU_SENSORS;
3748 mutex_enter(&sc->sc_lock);
3749 error = mfii_ioctl_vol(sc, &bv);
3750 mutex_exit(&sc->sc_lock);
3751 if (error)
3752 bv.bv_status = BIOC_SVINVALID;
3753 bio_vol_to_envsys(edata, &bv);
3754 }
3755
3756 static void
3757 mfii_init_ld_sensor(struct mfii_softc *sc, envsys_data_t *sensor, int i)
3758 {
3759 sensor->units = ENVSYS_DRIVE;
3760 sensor->state = ENVSYS_SINVALID;
3761 sensor->value_cur = ENVSYS_DRIVE_EMPTY;
3762 /* Enable monitoring for drive state changes */
3763 sensor->flags |= ENVSYS_FMONSTCHANGED;
3764 snprintf(sensor->desc, sizeof(sensor->desc), "%s:%d", DEVNAME(sc), i);
3765 }
3766
3767 static void
3768 mfii_attach_sensor(struct mfii_softc *sc, envsys_data_t *s)
3769 {
3770 if (sysmon_envsys_sensor_attach(sc->sc_sme, s))
3771 aprint_error_dev(sc->sc_dev,
3772 "failed to attach sensor %s\n", s->desc);
3773 }
3774
3775 static int
3776 mfii_create_sensors(struct mfii_softc *sc)
3777 {
3778 int i, rv;
3779 const int nsensors = MFI_BBU_SENSORS + MFI_MAX_LD;
3780
3781 sc->sc_sme = sysmon_envsys_create();
3782 sc->sc_sensors = malloc(sizeof(envsys_data_t) * nsensors,
3783 M_DEVBUF, M_WAITOK | M_ZERO);
3784
3785 /* BBU */
3786 sc->sc_sensors[0].units = ENVSYS_INDICATOR;
3787 sc->sc_sensors[0].state = ENVSYS_SINVALID;
3788 sc->sc_sensors[0].value_cur = 0;
3789 sc->sc_sensors[1].units = ENVSYS_SVOLTS_DC;
3790 sc->sc_sensors[1].state = ENVSYS_SINVALID;
3791 sc->sc_sensors[1].value_cur = 0;
3792 sc->sc_sensors[2].units = ENVSYS_SAMPS;
3793 sc->sc_sensors[2].state = ENVSYS_SINVALID;
3794 sc->sc_sensors[2].value_cur = 0;
3795 sc->sc_sensors[3].units = ENVSYS_STEMP;
3796 sc->sc_sensors[3].state = ENVSYS_SINVALID;
3797 sc->sc_sensors[3].value_cur = 0;
3798
3799 if (ISSET(le32toh(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3800 sc->sc_bbuok = true;
3801 sc->sc_sensors[0].flags |= ENVSYS_FMONCRITICAL;
3802 snprintf(sc->sc_sensors[0].desc, sizeof(sc->sc_sensors[0].desc),
3803 "%s BBU state", DEVNAME(sc));
3804 snprintf(sc->sc_sensors[1].desc, sizeof(sc->sc_sensors[1].desc),
3805 "%s BBU voltage", DEVNAME(sc));
3806 snprintf(sc->sc_sensors[2].desc, sizeof(sc->sc_sensors[2].desc),
3807 "%s BBU current", DEVNAME(sc));
3808 snprintf(sc->sc_sensors[3].desc, sizeof(sc->sc_sensors[3].desc),
3809 "%s BBU temperature", DEVNAME(sc));
3810 for (i = 0; i < MFI_BBU_SENSORS; i++) {
3811 mfii_attach_sensor(sc, &sc->sc_sensors[i]);
3812 }
3813 }
3814
3815 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3816 mfii_init_ld_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS], i);
3817 mfii_attach_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS]);
3818 }
3819
3820 sc->sc_sme->sme_name = DEVNAME(sc);
3821 sc->sc_sme->sme_cookie = sc;
3822 sc->sc_sme->sme_refresh = mfii_refresh_sensor;
3823 rv = sysmon_envsys_register(sc->sc_sme);
3824 if (rv) {
3825 aprint_error_dev(sc->sc_dev,
3826 "unable to register with sysmon (rv = %d)\n", rv);
3827 }
3828 return rv;
3829
3830 }
3831
3832 static int
3833 mfii_destroy_sensors(struct mfii_softc *sc)
3834 {
3835 if (sc->sc_sme == NULL)
3836 return 0;
3837 sysmon_envsys_unregister(sc->sc_sme);
3838 sc->sc_sme = NULL;
3839 free(sc->sc_sensors, M_DEVBUF);
3840 return 0;
3841 }
3842
3843 static void
3844 mfii_refresh_sensor(struct sysmon_envsys *sme, envsys_data_t *edata)
3845 {
3846 struct mfii_softc *sc = sme->sme_cookie;
3847
3848 if (edata->sensor >= MFI_BBU_SENSORS + MFI_MAX_LD)
3849 return;
3850
3851 if (edata->sensor < MFI_BBU_SENSORS) {
3852 if (sc->sc_bbuok)
3853 mfii_bbu(sc, edata);
3854 } else {
3855 mfii_refresh_ld_sensor(sc, edata);
3856 }
3857 }
3858