mfii.c revision 1.2.2.2 1 /* $NetBSD: mfii.c,v 1.2.2.2 2018/11/26 01:52:32 pgoyette Exp $ */
2 /* $OpenBSD: mfii.c,v 1.58 2018/08/14 05:22:21 jmatthew Exp $ */
3
4 /*
5 * Copyright (c) 2018 Manuel Bouyer <Manuel.Bouyer (at) lip6.fr>
6 * Copyright (c) 2012 David Gwynne <dlg (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include <sys/cdefs.h>
22 __KERNEL_RCSID(0, "$NetBSD: mfii.c,v 1.2.2.2 2018/11/26 01:52:32 pgoyette Exp $");
23
24 #include "bio.h"
25
26 #include <sys/atomic.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/ioctl.h>
31 #include <sys/device.h>
32 #include <sys/kernel.h>
33 #include <sys/proc.h>
34 #include <sys/cpu.h>
35 #include <sys/conf.h>
36 #include <sys/kauth.h>
37 #include <sys/workqueue.h>
38 #include <sys/malloc.h>
39
40 #include <uvm/uvm_param.h>
41
42 #include <dev/pci/pcidevs.h>
43 #include <dev/pci/pcivar.h>
44
45 #include <sys/bus.h>
46
47 #include <dev/sysmon/sysmonvar.h>
48 #include <sys/envsys.h>
49
50 #include <dev/scsipi/scsipi_all.h>
51 #include <dev/scsipi/scsi_all.h>
52 #include <dev/scsipi/scsi_spc.h>
53 #include <dev/scsipi/scsipi_disk.h>
54 #include <dev/scsipi/scsi_disk.h>
55 #include <dev/scsipi/scsiconf.h>
56
57 #if NBIO > 0
58 #include <dev/biovar.h>
59 #endif /* NBIO > 0 */
60
61 #include <dev/ic/mfireg.h>
62 #include <dev/pci/mpiireg.h>
63
64 #define MFII_BAR 0x14
65 #define MFII_BAR_35 0x10
66 #define MFII_PCI_MEMSIZE 0x2000 /* 8k */
67
68 #define MFII_OSTS_INTR_VALID 0x00000009
69 #define MFII_RPI 0x6c /* reply post host index */
70 #define MFII_OSP2 0xb4 /* outbound scratch pad 2 */
71 #define MFII_OSP3 0xb8 /* outbound scratch pad 3 */
72
73 #define MFII_REQ_TYPE_SCSI MPII_REQ_DESCR_SCSI_IO
74 #define MFII_REQ_TYPE_LDIO (0x7 << 1)
75 #define MFII_REQ_TYPE_MFA (0x1 << 1)
76 #define MFII_REQ_TYPE_NO_LOCK (0x2 << 1)
77 #define MFII_REQ_TYPE_HI_PRI (0x6 << 1)
78
79 #define MFII_REQ_MFA(_a) htole64((_a) | MFII_REQ_TYPE_MFA)
80
81 #define MFII_FUNCTION_PASSTHRU_IO (0xf0)
82 #define MFII_FUNCTION_LDIO_REQUEST (0xf1)
83
84 #define MFII_MAX_CHAIN_UNIT 0x00400000
85 #define MFII_MAX_CHAIN_MASK 0x000003E0
86 #define MFII_MAX_CHAIN_SHIFT 5
87
88 #define MFII_256K_IO 128
89 #define MFII_1MB_IO (MFII_256K_IO * 4)
90
91 #define MFII_CHAIN_FRAME_MIN 1024
92
/*
 * Request descriptor written to the controller to start a command.
 * Layout is fixed by the firmware interface (hence __packed); the
 * smid selects the MPII request frame the descriptor refers to.
 */
struct mfii_request_descr {
	u_int8_t	flags;		/* MFII_REQ_TYPE_* */
	u_int8_t	msix_index;
	u_int16_t	smid;		/* system message id */

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
101
102 #define MFII_RAID_CTX_IO_TYPE_SYSPD (0x1 << 4)
103 #define MFII_RAID_CTX_TYPE_CUDA (0x2 << 4)
104
/*
 * RAID context appended to the MPII SCSI IO request inside the
 * MFII_REQUEST_SIZE frame (see the nsge_in_io computation in attach).
 * Layout is fixed by the firmware; fields are little-endian on the wire.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;	/* per-iop ldio_ctx_type_nseg */
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

/* gen-3.5 controllers reuse this field for routing flags */
#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;	/* extended status from firmware */
	u_int8_t	status;		/* MFI status from firmware */

	u_int8_t	raid_flags;	/* MFII_RAID_CTX_IO_TYPE_* */
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
135
/*
 * MPII-style scatter/gather element; hardware layout.
 * sg_flags takes the MFII_SGE_* values defined below.
 */
struct mfii_sge {
	u_int64_t	sg_addr;	/* segment bus address */
	u_int32_t	sg_len;		/* segment length in bytes */
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;	/* MFII_SGE_* */
} __packed;
143
144 #define MFII_SGE_ADDR_MASK (0x03)
145 #define MFII_SGE_ADDR_SYSTEM (0x00)
146 #define MFII_SGE_ADDR_IOCDDR (0x01)
147 #define MFII_SGE_ADDR_IOCPLB (0x02)
148 #define MFII_SGE_ADDR_IOCPLBNTA (0x03)
149 #define MFII_SGE_END_OF_LIST (0x40)
150 #define MFII_SGE_CHAIN_ELEMENT (0x80)
151
152 #define MFII_REQUEST_SIZE 256
153
154 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
155
156 #define MFII_MAX_ROW 32
157 #define MFII_MAX_ARRAY 128
158
/* one row of an array map; presumably physical drive ids — TODO confirm */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;

/* firmware device-handle entry for one physical drive */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;

/*
 * Reply buffer for MR_DCMD_LD_MAP_GET_INFO (see the disabled
 * mfii_dev_handles_update() below); layout fixed by the firmware.
 */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;

/* task-management (e.g. abort) request/reply pair, MPII format */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define MFII_TASK_MGMT_FLAGS_LD			(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD			(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);

/* We currently don't know the full details of the following struct */
struct mfii_foreign_scan_cfg {
	char			data[24];
} __packed;

struct mfii_foreign_scan_info {
	uint32_t		count;	/* Number of foreign configs found */
	struct mfii_foreign_scan_cfg cfgs[8];
} __packed;
208
/*
 * One contiguous DMA-safe allocation: map, backing segment, size and
 * kernel virtual address.  Accessed through the MFII_DMA_* macros.
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;	/* loaded dmamap */
	bus_dma_segment_t	mdm_seg;	/* single backing segment */
	size_t			mdm_size;	/* length in bytes */
	void			*mdm_kva;	/* kernel virtual address */
};
215 #define MFII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
216 #define MFII_DMA_LEN(_mdm) ((_mdm)->mdm_size)
217 #define MFII_DMA_DVA(_mdm) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
218 #define MFII_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva)
219
220 struct mfii_softc;
221
/* direction of a command's data phase, from the host's point of view */
typedef enum mfii_direction {
	MFII_DATA_NONE = 0,	/* no data transfer */
	MFII_DATA_IN,		/* device -> host */
	MFII_DATA_OUT		/* host -> device */
} mfii_direction_t;
227
/*
 * Per-command control block.  Each of the request/mfi/sense/sgl
 * resources is a slice of the corresponding shared mfii_dmamem area
 * in the softc, recorded as kva, bus address and offset so individual
 * commands can be synced separately.
 */
struct mfii_ccb {
	struct mfii_softc	*ccb_sc;	/* owning controller */

	/* slice of sc_requests: the MPII request frame */
	void			*ccb_request;
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	/* slice of sc_mfi: the MFI command frame */
	void			*ccb_mfi;
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	/* slice of sc_sense (also reused for dcmds, see below) */
	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	/* slice of sc_sgl: chained scatter/gather elements */
	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;	/* descriptor posted to hw */

	bus_dmamap_t		ccb_dmamap64;
	bus_dmamap_t		ccb_dmamap32;
	bool			ccb_dma64;	/* which map is loaded */

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	mfii_direction_t	ccb_direction;

	void			*ccb_cookie;	/* caller context */
	kmutex_t		ccb_mtx;
	kcondvar_t		ccb_cv;		/* waited on for completion */
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;	/* hw slot id of this ccb */
	u_int			ccb_refcnt;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;	/* freeq / abort list link */
};
271 SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
272
/*
 * Parameters that differ between controller generations.  One constant
 * instance exists per IOP family (thunderbolt / 25 / 35, below) and is
 * selected by PCI id in mfii_find_iop().
 */
struct mfii_iop {
	int bar;		/* which PCI BAR carries the registers */
	int num_sge_loc;	/* MFII_IOP_NUM_SGE_LOC_* selector */
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags; /* raid-context lock/route flags */
	u_int8_t ldio_req_type;		/* descriptor flags for LD I/O */
	u_int8_t ldio_ctx_type_nseg;	/* raid-context type_nseg value */
	u_int8_t sge_flag_chain;	/* sg_flags marking a chain element */
	u_int8_t sge_flag_eol;		/* sg_flags marking end of list */
};
284
/*
 * Per-controller softc.  Lock order as used in this file: sc_lock
 * (adaptive, IPL_NONE, serializes management commands) is taken
 * before the IPL_BIO spin mutexes.
 */
struct mfii_softc {
	device_t		sc_dev;
	struct scsipi_channel	sc_chan;
	struct scsipi_adapter	sc_adapt;

	const struct mfii_iop	*sc_iop;	/* generation parameters */

	pci_chipset_tag_t	sc_pc;		/* for pci_intr_(dis)establish */
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;		/* mapped register window */
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;
	bus_dma_tag_t		sc_dmat64;	/* == sc_dmat if no 64-bit DMA */
	bool			sc_64bit_dma;	/* pci_dma64_available() */

	void			*sc_ih;		/* interrupt handle */

	kmutex_t		sc_ccb_mtx;	/* sc_running; presumably sc_ccb_freeq too — confirm in mfii_get_ccb() */
	kmutex_t		sc_post_mtx;	/* presumably serializes posting — confirm in mfii_start() */

	u_int			sc_max_fw_cmds;	/* limit reported by firmware */
	u_int			sc_max_cmds;	/* commands we actually use */
	u_int			sc_max_sgl;	/* SGEs per command (pow2) */

	/* hardware reply post queue */
	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	kmutex_t		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* shared DMA areas, sliced per-ccb (see struct mfii_ccb) */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_mfi;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;	/* ccb array */
	struct mfii_ccb_list	sc_ccb_freeq;

	/* asynchronous event notification */
	struct mfii_ccb		*sc_aen_ccb;
	struct workqueue	*sc_aen_wq;
	struct work		sc_aen_work;

	/* commands queued for abortion, drained by mfii_abort_task() */
	kmutex_t		sc_abort_mtx;
	struct mfii_ccb_list	sc_abort_list;
	struct workqueue	*sc_abort_wq;
	struct work		sc_abort_work;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		bool		ld_present;
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFI_MAX_LD];
	/* scsipi target -> index into sc_ld_list, -1 when absent */
	int			sc_target_lds[MFI_MAX_LD];

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfi_ld_list	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details;	/* array to all logical disks */
	int			sc_no_pd;	/* used physical disks */
	int			sc_ld_sz;	/* sizeof sc_ld_details */

	/* mgmt lock */
	kmutex_t		sc_lock;
	bool			sc_running;	/* cleared at shutdown */

	/* sensors */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
	bool			sc_bbuok;	/* battery backup health; see mfii_bbu() */

	device_t		sc_child;	/* attached scsibus, if any */
};
361
362 // #define MFII_DEBUG
363 #ifdef MFII_DEBUG
364 #define DPRINTF(x...) do { if (mfii_debug) printf(x); } while(0)
365 #define DNPRINTF(n,x...) do { if (mfii_debug & n) printf(x); } while(0)
366 #define MFII_D_CMD 0x0001
367 #define MFII_D_INTR 0x0002
368 #define MFII_D_MISC 0x0004
369 #define MFII_D_DMA 0x0008
370 #define MFII_D_IOCTL 0x0010
371 #define MFII_D_RW 0x0020
372 #define MFII_D_MEM 0x0040
373 #define MFII_D_CCB 0x0080
374 uint32_t mfii_debug = 0
375 /* | MFII_D_CMD */
376 /* | MFII_D_INTR */
377 | MFII_D_MISC
378 /* | MFII_D_DMA */
379 /* | MFII_D_IOCTL */
380 /* | MFII_D_RW */
381 /* | MFII_D_MEM */
382 /* | MFII_D_CCB */
383 ;
384 #else
385 #define DPRINTF(x...)
386 #define DNPRINTF(n,x...)
387 #endif
388
389 int mfii_match(device_t, cfdata_t, void *);
390 void mfii_attach(device_t, device_t, void *);
391 int mfii_detach(device_t, int);
392 int mfii_rescan(device_t, const char *, const int *);
393 void mfii_childdetached(device_t, device_t);
394 static bool mfii_suspend(device_t, const pmf_qual_t *);
395 static bool mfii_resume(device_t, const pmf_qual_t *);
396 static bool mfii_shutdown(device_t, int);
397
398
399 CFATTACH_DECL3_NEW(mfii, sizeof(struct mfii_softc),
400 mfii_match, mfii_attach, mfii_detach, NULL, mfii_rescan,
401 mfii_childdetached, DVF_DETACH_SHUTDOWN);
402
403 void mfii_scsipi_request(struct scsipi_channel *,
404 scsipi_adapter_req_t, void *);
405 void mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
406
407 #define DEVNAME(_sc) (device_xname((_sc)->sc_dev))
408
409 static u_int32_t mfii_read(struct mfii_softc *, bus_size_t);
410 static void mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
411
412 struct mfii_dmamem * mfii_dmamem_alloc(struct mfii_softc *, size_t);
413 void mfii_dmamem_free(struct mfii_softc *,
414 struct mfii_dmamem *);
415
416 struct mfii_ccb * mfii_get_ccb(struct mfii_softc *);
417 void mfii_put_ccb(struct mfii_softc *, struct mfii_ccb *);
418 int mfii_init_ccb(struct mfii_softc *);
419 void mfii_scrub_ccb(struct mfii_ccb *);
420
421 int mfii_transition_firmware(struct mfii_softc *);
422 int mfii_initialise_firmware(struct mfii_softc *);
423 int mfii_get_info(struct mfii_softc *);
424
425 void mfii_start(struct mfii_softc *, struct mfii_ccb *);
426 void mfii_done(struct mfii_softc *, struct mfii_ccb *);
427 int mfii_poll(struct mfii_softc *, struct mfii_ccb *);
428 void mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
429 int mfii_exec(struct mfii_softc *, struct mfii_ccb *);
430 void mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
431 int mfii_my_intr(struct mfii_softc *);
432 int mfii_intr(void *);
433 void mfii_postq(struct mfii_softc *);
434
435 int mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
436 void *, int);
437 int mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
438 void *, int);
439
440 int mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
441
442 int mfii_mgmt(struct mfii_softc *, uint32_t,
443 const union mfi_mbox *, void *, size_t,
444 mfii_direction_t, bool);
445 int mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
446 uint32_t, const union mfi_mbox *, void *, size_t,
447 mfii_direction_t, bool);
448 void mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
449
450 int mfii_scsi_cmd_io(struct mfii_softc *,
451 struct mfii_ccb *, struct scsipi_xfer *);
452 int mfii_scsi_cmd_cdb(struct mfii_softc *,
453 struct mfii_ccb *, struct scsipi_xfer *);
454 void mfii_scsi_cmd_tmo(void *);
455
456 int mfii_dev_handles_update(struct mfii_softc *sc);
457 void mfii_dev_handles_dtor(void *, void *);
458
459 void mfii_abort_task(struct work *, void *);
460 void mfii_abort(struct mfii_softc *, struct mfii_ccb *,
461 uint16_t, uint16_t, uint8_t, uint32_t);
462 void mfii_scsi_cmd_abort_done(struct mfii_softc *,
463 struct mfii_ccb *);
464
465 int mfii_aen_register(struct mfii_softc *);
466 void mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
467 struct mfii_dmamem *, uint32_t);
468 void mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
469 void mfii_aen(struct work *, void *);
470 void mfii_aen_unregister(struct mfii_softc *);
471
472 void mfii_aen_pd_insert(struct mfii_softc *,
473 const struct mfi_evtarg_pd_address *);
474 void mfii_aen_pd_remove(struct mfii_softc *,
475 const struct mfi_evtarg_pd_address *);
476 void mfii_aen_pd_state_change(struct mfii_softc *,
477 const struct mfi_evtarg_pd_state *);
478 void mfii_aen_ld_update(struct mfii_softc *);
479
480 #if NBIO > 0
481 int mfii_ioctl(device_t, u_long, void *);
482 int mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
483 int mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
484 int mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
485 int mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
486 int mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
487 int mfii_ioctl_setstate(struct mfii_softc *,
488 struct bioc_setstate *);
489 int mfii_bio_hs(struct mfii_softc *, int, int, void *);
490 int mfii_bio_getitall(struct mfii_softc *);
491 #endif /* NBIO > 0 */
492
493 #if 0
494 static const char *mfi_bbu_indicators[] = {
495 "pack missing",
496 "voltage low",
497 "temp high",
498 "charge active",
499 "discharge active",
500 "learn cycle req'd",
501 "learn cycle active",
502 "learn cycle failed",
503 "learn cycle timeout",
504 "I2C errors",
505 "replace pack",
506 "low capacity",
507 "periodic learn req'd"
508 };
509 #endif
510
511 void mfii_init_ld_sensor(struct mfii_softc *, envsys_data_t *, int);
512 void mfii_refresh_ld_sensor(struct mfii_softc *, envsys_data_t *);
513 static void mfii_attach_sensor(struct mfii_softc *, envsys_data_t *);
514 int mfii_create_sensors(struct mfii_softc *);
515 static int mfii_destroy_sensors(struct mfii_softc *);
516 void mfii_refresh_sensor(struct sysmon_envsys *, envsys_data_t *);
517 void mfii_bbu(struct mfii_softc *, envsys_data_t *);
518
519 /*
520 * mfii boards support asynchronous (and non-polled) completion of
521 * dcmds by proxying them through a passthru mpii command that points
522 * at a dcmd frame. since the passthru command is submitted like
523 * the scsi commands using an SMID in the request descriptor,
524 * ccb_request memory * must contain the passthru command because
525 * that is what the SMID refers to. this means ccb_request cannot
526 * contain the dcmd. rather than allocating separate dma memory to
527 * hold the dcmd, we reuse the sense memory buffer for it.
528 */
529
530 void mfii_dcmd_start(struct mfii_softc *,
531 struct mfii_ccb *);
532
533 static inline void
534 mfii_dcmd_scrub(struct mfii_ccb *ccb)
535 {
536 memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
537 }
538
539 static inline struct mfi_dcmd_frame *
540 mfii_dcmd_frame(struct mfii_ccb *ccb)
541 {
542 CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
543 return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
544 }
545
546 static inline void
547 mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
548 {
549 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
550 ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
551 }
552
553 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
554
555 const struct mfii_iop mfii_iop_thunderbolt = {
556 MFII_BAR,
557 MFII_IOP_NUM_SGE_LOC_ORIG,
558 0,
559 MFII_REQ_TYPE_LDIO,
560 0,
561 MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
562 0
563 };
564
565 /*
566 * a lot of these values depend on us not implementing fastpath yet.
567 */
568 const struct mfii_iop mfii_iop_25 = {
569 MFII_BAR,
570 MFII_IOP_NUM_SGE_LOC_ORIG,
571 MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
572 MFII_REQ_TYPE_NO_LOCK,
573 MFII_RAID_CTX_TYPE_CUDA | 0x1,
574 MFII_SGE_CHAIN_ELEMENT,
575 MFII_SGE_END_OF_LIST
576 };
577
578 const struct mfii_iop mfii_iop_35 = {
579 MFII_BAR_35,
580 MFII_IOP_NUM_SGE_LOC_35,
581 MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
582 MFII_REQ_TYPE_NO_LOCK,
583 MFII_RAID_CTX_TYPE_CUDA | 0x1,
584 MFII_SGE_CHAIN_ELEMENT,
585 MFII_SGE_END_OF_LIST
586 };
587
/* match-table entry: one supported PCI id and its iop parameters */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};

/* MegaRAID SAS Fusion controllers supported by this driver */
const struct mfii_device mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 }
};
614
615 const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
616
617 const struct mfii_iop *
618 mfii_find_iop(struct pci_attach_args *pa)
619 {
620 const struct mfii_device *mpd;
621 int i;
622
623 for (i = 0; i < __arraycount(mfii_devices); i++) {
624 mpd = &mfii_devices[i];
625
626 if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
627 mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
628 return (mpd->mpd_iop);
629 }
630
631 return (NULL);
632 }
633
634 int
635 mfii_match(device_t parent, cfdata_t match, void *aux)
636 {
637 return ((mfii_find_iop(aux) != NULL) ? 2 : 0);
638 }
639
640 void
641 mfii_attach(device_t parent, device_t self, void *aux)
642 {
643 struct mfii_softc *sc = device_private(self);
644 struct pci_attach_args *pa = aux;
645 pcireg_t memtype;
646 pci_intr_handle_t ih;
647 char intrbuf[PCI_INTRSTR_LEN];
648 const char *intrstr;
649 u_int32_t status, scpad2, scpad3;
650 int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
651 struct scsipi_adapter *adapt = &sc->sc_adapt;
652 struct scsipi_channel *chan = &sc->sc_chan;
653
654 /* init sc */
655 sc->sc_dev = self;
656 sc->sc_iop = mfii_find_iop(aux);
657 sc->sc_dmat = pa->pa_dmat;
658 if (pci_dma64_available(pa)) {
659 sc->sc_dmat64 = pa->pa_dmat64;
660 sc->sc_64bit_dma = 1;
661 } else {
662 sc->sc_dmat64 = pa->pa_dmat;
663 sc->sc_64bit_dma = 0;
664 }
665 SIMPLEQ_INIT(&sc->sc_ccb_freeq);
666 mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
667 mutex_init(&sc->sc_post_mtx, MUTEX_DEFAULT, IPL_BIO);
668 mutex_init(&sc->sc_reply_postq_mtx, MUTEX_DEFAULT, IPL_BIO);
669
670 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
671
672 sc->sc_aen_ccb = NULL;
673 snprintf(intrbuf, sizeof(intrbuf) - 1, "%saen", device_xname(self));
674 workqueue_create(&sc->sc_aen_wq, intrbuf, mfii_aen, sc,
675 PRI_BIO, IPL_BIO, WQ_MPSAFE);
676
677 snprintf(intrbuf, sizeof(intrbuf) - 1, "%sabrt", device_xname(self));
678 workqueue_create(&sc->sc_abort_wq, intrbuf, mfii_abort_task,
679 sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
680
681 mutex_init(&sc->sc_abort_mtx, MUTEX_DEFAULT, IPL_BIO);
682 SIMPLEQ_INIT(&sc->sc_abort_list);
683
684 /* wire up the bus shizz */
685 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
686 memtype |= PCI_MAPREG_MEM_TYPE_32BIT;
687 if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
688 &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)) {
689 aprint_error(": unable to map registers\n");
690 return;
691 }
692
693 /* disable interrupts */
694 mfii_write(sc, MFI_OMSK, 0xffffffff);
695
696 if (pci_intr_map(pa, &ih) != 0) {
697 aprint_error(": unable to map interrupt\n");
698 goto pci_unmap;
699 }
700 intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf));
701 pci_intr_setattr(pa->pa_pc, &ih, PCI_INTR_MPSAFE, true);
702
703 /* lets get started */
704 if (mfii_transition_firmware(sc))
705 goto pci_unmap;
706 sc->sc_running = true;
707
708 /* determine max_cmds (refer to the Linux megaraid_sas driver) */
709 scpad3 = mfii_read(sc, MFII_OSP3);
710 status = mfii_fw_state(sc);
711 sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
712 if (sc->sc_max_fw_cmds == 0)
713 sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
714 /*
715 * reduce max_cmds by 1 to ensure that the reply queue depth does not
716 * exceed FW supplied max_fw_cmds.
717 */
718 sc->sc_max_cmds = uimin(sc->sc_max_fw_cmds, 1024) - 1;
719
720 /* determine max_sgl (refer to the Linux megaraid_sas driver) */
721 scpad2 = mfii_read(sc, MFII_OSP2);
722 chain_frame_sz =
723 ((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
724 ((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
725 if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
726 chain_frame_sz = MFII_CHAIN_FRAME_MIN;
727
728 nsge_in_io = (MFII_REQUEST_SIZE -
729 sizeof(struct mpii_msg_scsi_io) -
730 sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
731 nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
732
733 /* round down to nearest power of two */
734 sc->sc_max_sgl = 1;
735 while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
736 sc->sc_max_sgl <<= 1;
737
738 DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
739 DEVNAME(sc), status, scpad2, scpad3);
740 DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
741 DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
742 DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
743 "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
744 sc->sc_max_sgl);
745
746 /* sense memory */
747 CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
748 sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
749 if (sc->sc_sense == NULL) {
750 aprint_error(": unable to allocate sense memory\n");
751 goto pci_unmap;
752 }
753
754 /* reply post queue */
755 sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
756
757 sc->sc_reply_postq = mfii_dmamem_alloc(sc,
758 sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
759 if (sc->sc_reply_postq == NULL)
760 goto free_sense;
761
762 memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
763 MFII_DMA_LEN(sc->sc_reply_postq));
764
765 /* MPII request frame array */
766 sc->sc_requests = mfii_dmamem_alloc(sc,
767 MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
768 if (sc->sc_requests == NULL)
769 goto free_reply_postq;
770
771 /* MFI command frame array */
772 sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
773 if (sc->sc_mfi == NULL)
774 goto free_requests;
775
776 /* MPII SGL array */
777 sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
778 sizeof(struct mfii_sge) * sc->sc_max_sgl);
779 if (sc->sc_sgl == NULL)
780 goto free_mfi;
781
782 if (mfii_init_ccb(sc) != 0) {
783 aprint_error(": could not init ccb list\n");
784 goto free_sgl;
785 }
786
787 /* kickstart firmware with all addresses and pointers */
788 if (mfii_initialise_firmware(sc) != 0) {
789 aprint_error(": could not initialize firmware\n");
790 goto free_sgl;
791 }
792
793 mutex_enter(&sc->sc_lock);
794 if (mfii_get_info(sc) != 0) {
795 mutex_exit(&sc->sc_lock);
796 aprint_error(": could not retrieve controller information\n");
797 goto free_sgl;
798 }
799 mutex_exit(&sc->sc_lock);
800
801 aprint_normal(": \"%s\", firmware %s",
802 sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
803 if (le16toh(sc->sc_info.mci_memory_size) > 0) {
804 aprint_normal(", %uMB cache",
805 le16toh(sc->sc_info.mci_memory_size));
806 }
807 aprint_normal("\n");
808 aprint_naive("\n");
809
810 sc->sc_ih = pci_intr_establish_xname(sc->sc_pc, ih, IPL_BIO,
811 mfii_intr, sc, DEVNAME(sc));
812 if (sc->sc_ih == NULL) {
813 aprint_error_dev(self, "can't establish interrupt");
814 if (intrstr)
815 aprint_error(" at %s", intrstr);
816 aprint_error("\n");
817 goto free_sgl;
818 }
819 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
820
821 for (i = 0; i < sc->sc_info.mci_lds_present; i++)
822 sc->sc_ld[i].ld_present = 1;
823
824 memset(adapt, 0, sizeof(*adapt));
825 adapt->adapt_dev = sc->sc_dev;
826 adapt->adapt_nchannels = 1;
827 /* keep a few commands for management */
828 if (sc->sc_max_cmds > 4)
829 adapt->adapt_openings = sc->sc_max_cmds - 4;
830 else
831 adapt->adapt_openings = sc->sc_max_cmds;
832 adapt->adapt_max_periph = adapt->adapt_openings;
833 adapt->adapt_request = mfii_scsipi_request;
834 adapt->adapt_minphys = minphys;
835 adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
836
837 memset(chan, 0, sizeof(*chan));
838 chan->chan_adapter = adapt;
839 chan->chan_bustype = &scsi_sas_bustype;
840 chan->chan_channel = 0;
841 chan->chan_flags = 0;
842 chan->chan_nluns = 8;
843 chan->chan_ntargets = sc->sc_info.mci_max_lds;
844 chan->chan_id = sc->sc_info.mci_max_lds;
845
846 mfii_rescan(sc->sc_dev, "scsi", NULL);
847
848 if (mfii_aen_register(sc) != 0) {
849 /* error printed by mfii_aen_register */
850 goto intr_disestablish;
851 }
852
853 mutex_enter(&sc->sc_lock);
854 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
855 sizeof(sc->sc_ld_list), MFII_DATA_IN, true) != 0) {
856 mutex_exit(&sc->sc_lock);
857 aprint_error_dev(self,
858 "getting list of logical disks failed\n");
859 goto intr_disestablish;
860 }
861 mutex_exit(&sc->sc_lock);
862 memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
863 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
864 int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
865 sc->sc_target_lds[target] = i;
866 }
867
868 /* enable interrupts */
869 mfii_write(sc, MFI_OSTS, 0xffffffff);
870 mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
871
872 #if NBIO > 0
873 if (bio_register(sc->sc_dev, mfii_ioctl) != 0)
874 panic("%s: controller registration failed", DEVNAME(sc));
875 #endif /* NBIO > 0 */
876
877 if (mfii_create_sensors(sc) != 0)
878 aprint_error_dev(self, "unable to create sensors\n");
879
880 if (!pmf_device_register1(sc->sc_dev, mfii_suspend, mfii_resume,
881 mfii_shutdown))
882 aprint_error_dev(self, "couldn't establish power handler\n");
883 return;
884 intr_disestablish:
885 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
886 free_sgl:
887 mfii_dmamem_free(sc, sc->sc_sgl);
888 free_mfi:
889 mfii_dmamem_free(sc, sc->sc_mfi);
890 free_requests:
891 mfii_dmamem_free(sc, sc->sc_requests);
892 free_reply_postq:
893 mfii_dmamem_free(sc, sc->sc_reply_postq);
894 free_sense:
895 mfii_dmamem_free(sc, sc->sc_sense);
896 pci_unmap:
897 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
898 }
899
#if 0
/*
 * Unported OpenBSD code: maintains a cache of physical-disk device
 * handles (from MR_DCMD_LD_MAP_GET_INFO) using OpenBSD's srp(9) API,
 * which NetBSD does not provide.  Kept for reference until syspd
 * pass-through support is ported.
 */
struct srp_gc mfii_dev_handles_gc =
    SRP_GC_INITIALIZER(mfii_dev_handles_dtor, NULL);

static inline uint16_t
mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
{
	struct srp_ref sr;
	uint16_t *map, handle;

	map = srp_enter(&sr, &sc->sc_pd->pd_dev_handles);
	handle = map[target];
	srp_leave(&sr);

	return (handle);
}

int
mfii_dev_handles_update(struct mfii_softc *sc)
{
	struct mfii_ld_map *lm;
	uint16_t *dev_handles = NULL;
	int i;
	int rv = 0;

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);

	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
	    MFII_DATA_IN, false);

	if (rv != 0) {
		rv = EIO;
		goto free_lm;
	}

	dev_handles = mallocarray(MFI_MAX_PD, sizeof(*dev_handles),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MFI_MAX_PD; i++)
		dev_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;

	/* commit the updated info */
	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
	srp_update_locked(&mfii_dev_handles_gc,
	    &sc->sc_pd->pd_dev_handles, dev_handles);

free_lm:
	free(lm, M_TEMP, sizeof(*lm));

	return (rv);
}

void
mfii_dev_handles_dtor(void *null, void *v)
{
	uint16_t *dev_handles = v;

	free(dev_handles, M_DEVBUF, sizeof(*dev_handles) * MFI_MAX_PD);
}
#endif /* 0 */
960
/*
 * Autoconf detach: tear down in roughly the reverse order of attach —
 * children first, then sensors/bio, firmware shutdown, interrupt,
 * and finally the DMA memory and register mapping.
 */
int
mfii_detach(device_t self, int flags)
{
	struct mfii_softc *sc = device_private(self);
	int error;

	/* attach bailed out before establishing the interrupt */
	if (sc->sc_ih == NULL)
		return (0);

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

	mfii_destroy_sensors(sc);
#if NBIO > 0
	bio_unregister(sc->sc_dev);
#endif
	/* ask the firmware to shut down before pulling resources */
	mfii_shutdown(sc->sc_dev, 0);
	mfii_write(sc, MFI_OMSK, 0xffffffff);	/* mask all interrupts */

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
991
992 int
993 mfii_rescan(device_t self, const char *ifattr, const int *locators)
994 {
995 struct mfii_softc *sc = device_private(self);
996 if (sc->sc_child != NULL)
997 return 0;
998
999 sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
1000 scsiprint, NULL);
1001 return 0;
1002 }
1003
1004 void
1005 mfii_childdetached(device_t self, device_t child)
1006 {
1007 struct mfii_softc *sc = device_private(self);
1008
1009 KASSERT(self == sc->sc_dev);
1010 KASSERT(child == sc->sc_child);
1011
1012 if (child == sc->sc_child)
1013 sc->sc_child = NULL;
1014 }
1015
1016 static bool
1017 mfii_suspend(device_t dev, const pmf_qual_t *q)
1018 {
1019 /* XXX to be implemented */
1020 return false;
1021 }
1022
1023 static bool
1024 mfii_resume(device_t dev, const pmf_qual_t *q)
1025 {
1026 /* XXX to be implemented */
1027 return false;
1028 }
1029
1030 static bool
1031 mfii_shutdown(device_t dev, int how)
1032 {
1033 struct mfii_softc *sc = device_private(dev);
1034 struct mfii_ccb *ccb;
1035 union mfi_mbox mbox;
1036 bool rv = true;;
1037
1038 memset(&mbox, 0, sizeof(mbox));
1039
1040 mutex_enter(&sc->sc_lock);
1041 DNPRINTF(MFI_D_MISC, "%s: mfii_shutdown\n", DEVNAME(sc));
1042 ccb = mfii_get_ccb(sc);
1043 if (ccb == NULL)
1044 return false;
1045 mutex_enter(&sc->sc_ccb_mtx);
1046 if (sc->sc_running) {
1047 sc->sc_running = 0; /* prevent new commands */
1048 mutex_exit(&sc->sc_ccb_mtx);
1049 #if 0 /* XXX why does this hang ? */
1050 mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1051 mfii_scrub_ccb(ccb);
1052 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
1053 NULL, 0, MFII_DATA_NONE, true)) {
1054 aprint_error_dev(dev, "shutdown: cache flush failed\n");
1055 rv = false;
1056 goto fail;
1057 }
1058 printf("ok1\n");
1059 #endif
1060 mbox.b[0] = 0;
1061 mfii_scrub_ccb(ccb);
1062 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, &mbox,
1063 NULL, 0, MFII_DATA_NONE, true)) {
1064 aprint_error_dev(dev, "shutdown: "
1065 "firmware shutdown failed\n");
1066 rv = false;
1067 goto fail;
1068 }
1069 } else {
1070 mutex_exit(&sc->sc_ccb_mtx);
1071 }
1072 fail:
1073 mfii_put_ccb(sc, ccb);
1074 mutex_exit(&sc->sc_lock);
1075 return rv;
1076 }
1077
/*
 * Read a 32-bit controller register at offset r, issuing a read
 * barrier first so the access is properly ordered.
 */
static u_int32_t
mfii_read(struct mfii_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}
1085
/*
 * Write a 32-bit controller register at offset r, followed by a write
 * barrier so the store is pushed out before subsequent accesses.
 */
static void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1093
1094 struct mfii_dmamem *
1095 mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
1096 {
1097 struct mfii_dmamem *m;
1098 int nsegs;
1099
1100 m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1101 if (m == NULL)
1102 return (NULL);
1103
1104 m->mdm_size = size;
1105
1106 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1107 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
1108 goto mdmfree;
1109
1110 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
1111 &nsegs, BUS_DMA_NOWAIT) != 0)
1112 goto destroy;
1113
1114 if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
1115 BUS_DMA_NOWAIT) != 0)
1116 goto free;
1117
1118 if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
1119 BUS_DMA_NOWAIT) != 0)
1120 goto unmap;
1121
1122 memset(m->mdm_kva, 0, size);
1123 return (m);
1124
1125 unmap:
1126 bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
1127 free:
1128 bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
1129 destroy:
1130 bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
1131 mdmfree:
1132 free(m, M_DEVBUF);
1133
1134 return (NULL);
1135 }
1136
1137 void
1138 mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
1139 {
1140 bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
1141 bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
1142 bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
1143 bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
1144 free(m, M_DEVBUF);
1145 }
1146
/*
 * Post an MFI passthru command: build a MPII SCSI IO frame whose
 * single chain SGE points at the ccb's sense-buffer DMA area and hand
 * it to the controller via mfii_start().
 */
void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* raid context and chain SGE are laid out right after the io frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 is in 32-bit words, chain_offset in 16-byte units */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	/* chain element covering the ccb's sense-buffer DMA area */
	sge->sg_addr = htole64(ccb->ccb_sense_dva);
	sge->sg_len = htole32(sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1167
1168 int
1169 mfii_aen_register(struct mfii_softc *sc)
1170 {
1171 struct mfi_evt_log_info mel;
1172 struct mfii_ccb *ccb;
1173 struct mfii_dmamem *mdm;
1174 int rv;
1175
1176 ccb = mfii_get_ccb(sc);
1177 if (ccb == NULL) {
1178 printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
1179 return (ENOMEM);
1180 }
1181
1182 memset(&mel, 0, sizeof(mel));
1183 mfii_scrub_ccb(ccb);
1184
1185 rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
1186 &mel, sizeof(mel), MFII_DATA_IN, true);
1187 if (rv != 0) {
1188 mfii_put_ccb(sc, ccb);
1189 aprint_error_dev(sc->sc_dev, "unable to get event info\n");
1190 return (EIO);
1191 }
1192
1193 mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
1194 if (mdm == NULL) {
1195 mfii_put_ccb(sc, ccb);
1196 aprint_error_dev(sc->sc_dev, "unable to allocate event data\n");
1197 return (ENOMEM);
1198 }
1199
1200 /* replay all the events from boot */
1201 mfii_aen_start(sc, ccb, mdm, le32toh(mel.mel_boot_seq_num));
1202
1203 return (0);
1204 }
1205
/*
 * (Re)arm the asynchronous event wait: build an MR_DCMD_CTRL_EVENT_WAIT
 * MFI frame requesting the first event with sequence number >= seq,
 * with the event-detail buffer mdm as the single SG element, and post
 * it.  Completion is routed to mfii_aen_done().
 */
void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* class DEBUG + locale ALL -- presumably selects every event */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	hdr->mfh_data_len = htole32(MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	/* mbox word 0: starting sequence number, word 1: class/locale */
	dcmd->mdf_mbox.w[0] = htole32(seq);
	dcmd->mdf_mbox.w[1] = htole32(mec.mec_word);
	sgl->sg64[0].addr = htole64(MFII_DMA_DVA(mdm));
	sgl->sg64[0].len = htole32(MFII_DMA_LEN(mdm));

	/* make the event buffer visible to the device before posting */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1243
/*
 * Completion handler for the AEN command.  Runs from the command
 * completion path; hands the actual event processing off to the
 * sc_aen_wq workqueue thread (mfii_aen).
 */
void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/*
	 * defer to a thread with KERNEL_LOCK so we can run autoconf
	 * We shouldn't have more than one AEN command pending at a time,
	 * so no need to lock
	 */
	if (sc->sc_running)
		workqueue_enqueue(sc->sc_aen_wq, &sc->sc_aen_work, NULL);
}
1257
/*
 * AEN workqueue handler: sync the completed event-detail buffer,
 * dispatch on the event code (physical disk insert/remove/state
 * change, logical drive create/delete), then re-arm the AEN command
 * starting at the next sequence number.
 */
void
mfii_aen(struct work *wk, void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);

	/* pull the DMA'd event detail into host view before reading it */
	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	DNPRINTF(MFII_D_MISC, "%s: %u %08x %02x %s\n", DEVNAME(sc),
	    le32toh(med->med_seq_num), le32toh(med->med_code),
	    med->med_arg_type, med->med_description);

	switch (le32toh(med->med_code)) {
	case MR_EVT_PD_INSERTED_EXT:
		/* only act when the argument carries a PD address */
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
	case MR_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MR_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MR_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MR_EVT_LD_CREATED:
	case MR_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	/* re-arm for the event following the one just handled */
	mfii_aen_start(sc, ccb, mdm, le32toh(med->med_seq_num) + 1);
}
1307
1308 void
1309 mfii_aen_pd_insert(struct mfii_softc *sc,
1310 const struct mfi_evtarg_pd_address *pd)
1311 {
1312 printf("%s: physical disk inserted id %d enclosure %d\n", DEVNAME(sc),
1313 le16toh(pd->device_id), le16toh(pd->encl_id));
1314 }
1315
1316 void
1317 mfii_aen_pd_remove(struct mfii_softc *sc,
1318 const struct mfi_evtarg_pd_address *pd)
1319 {
1320 printf("%s: physical disk removed id %d enclosure %d\n", DEVNAME(sc),
1321 le16toh(pd->device_id), le16toh(pd->encl_id));
1322 }
1323
/*
 * AEN handler for physical-disk state changes.  Currently a no-op.
 */
void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
	return;
}
1330
/*
 * AEN handler for logical drive creation/deletion: re-read the LD
 * list from the firmware, diff it against sc_target_lds, announce
 * additions and removals, attach/detach the matching envsys sensors
 * and detach scsipi targets for drives that disappeared.
 */
void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	int i, target, old, nld;
	int newlds[MFI_MAX_LD];

	/* sc_lock only protects the firmware query, not the diff below */
	mutex_enter(&sc->sc_lock);
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false) != 0) {
		mutex_exit(&sc->sc_lock);
		DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
		    DEVNAME(sc));
		return;
	}
	mutex_exit(&sc->sc_lock);

	/* -1 == target not present; memset works since -1 is all-ones */
	memset(newlds, -1, sizeof(newlds));

	/* build target -> ld-list-index map from the fresh list */
	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, sc->sc_ld_list.mll_list[i].mll_state);
		newlds[target] = i;
	}

	/* diff against the previous snapshot */
	for (i = 0; i < MFI_MAX_LD; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			printf("%s: logical drive %d added (target %d)\n",
			    DEVNAME(sc), i, nld);

			// XXX scsi_probe_target(sc->sc_scsibus, i);

			mfii_init_ld_sensor(sc, &sc->sc_sensors[i], i);
			mfii_attach_sensor(sc, &sc->sc_sensors[i]);
		} else if (nld == -1 && old != -1) {
			printf("%s: logical drive %d removed (target %d)\n",
			    DEVNAME(sc), i, old);

			scsipi_target_detach(&sc->sc_chan, i, 0, DETACH_FORCE);
			sysmon_envsys_sensor_detach(sc->sc_sme,
			    &sc->sc_sensors[i]);
		}
	}

	/* remember the new state for the next diff */
	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1380
/*
 * Tear down AEN handling.  Not implemented yet: the pending
 * CTRL_EVENT_WAIT command and its resources are not reclaimed.
 */
void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1386
/*
 * Drive the firmware to the READY state, nudging it out of transient
 * states via the inbound doorbell where required.  For each state a
 * budget of max_wait seconds is allowed, polled in 100ms steps.
 *
 * Returns 0 once READY, 1 on firmware fault, unknown state or timeout.
 */
int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t fw_state, cur_state;
	int max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			/* acknowledge the handshake request */
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			/* ask the firmware to go (back) to READY */
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll every 100ms for up to max_wait seconds */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1440
1441 int
1442 mfii_get_info(struct mfii_softc *sc)
1443 {
1444 int i, rv;
1445
1446 rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
1447 sizeof(sc->sc_info), MFII_DATA_IN, true);
1448
1449 if (rv != 0)
1450 return (rv);
1451
1452 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
1453 DPRINTF("%s: active FW %s Version %s date %s time %s\n",
1454 DEVNAME(sc),
1455 sc->sc_info.mci_image_component[i].mic_name,
1456 sc->sc_info.mci_image_component[i].mic_version,
1457 sc->sc_info.mci_image_component[i].mic_build_date,
1458 sc->sc_info.mci_image_component[i].mic_build_time);
1459 }
1460
1461 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
1462 DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
1463 DEVNAME(sc),
1464 sc->sc_info.mci_pending_image_component[i].mic_name,
1465 sc->sc_info.mci_pending_image_component[i].mic_version,
1466 sc->sc_info.mci_pending_image_component[i].mic_build_date,
1467 sc->sc_info.mci_pending_image_component[i].mic_build_time);
1468 }
1469
1470 DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
1471 DEVNAME(sc),
1472 sc->sc_info.mci_max_arms,
1473 sc->sc_info.mci_max_spans,
1474 sc->sc_info.mci_max_arrays,
1475 sc->sc_info.mci_max_lds,
1476 sc->sc_info.mci_product_name);
1477
1478 DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
1479 DEVNAME(sc),
1480 sc->sc_info.mci_serial_number,
1481 sc->sc_info.mci_hw_present,
1482 sc->sc_info.mci_current_fw_time,
1483 sc->sc_info.mci_max_cmds,
1484 sc->sc_info.mci_max_sg_elements);
1485
1486 DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
1487 DEVNAME(sc),
1488 sc->sc_info.mci_max_request_size,
1489 sc->sc_info.mci_lds_present,
1490 sc->sc_info.mci_lds_degraded,
1491 sc->sc_info.mci_lds_offline,
1492 sc->sc_info.mci_pd_present);
1493
1494 DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
1495 DEVNAME(sc),
1496 sc->sc_info.mci_pd_disks_present,
1497 sc->sc_info.mci_pd_disks_pred_failure,
1498 sc->sc_info.mci_pd_disks_failed);
1499
1500 DPRINTF("%s: nvram %d mem %d flash %d\n",
1501 DEVNAME(sc),
1502 sc->sc_info.mci_nvram_size,
1503 sc->sc_info.mci_memory_size,
1504 sc->sc_info.mci_flash_size);
1505
1506 DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
1507 DEVNAME(sc),
1508 sc->sc_info.mci_ram_correctable_errors,
1509 sc->sc_info.mci_ram_uncorrectable_errors,
1510 sc->sc_info.mci_cluster_allowed,
1511 sc->sc_info.mci_cluster_active);
1512
1513 DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
1514 DEVNAME(sc),
1515 sc->sc_info.mci_max_strips_per_io,
1516 sc->sc_info.mci_raid_levels,
1517 sc->sc_info.mci_adapter_ops,
1518 sc->sc_info.mci_ld_ops);
1519
1520 DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
1521 DEVNAME(sc),
1522 sc->sc_info.mci_stripe_sz_ops.min,
1523 sc->sc_info.mci_stripe_sz_ops.max,
1524 sc->sc_info.mci_pd_ops,
1525 sc->sc_info.mci_pd_mix_support);
1526
1527 DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
1528 DEVNAME(sc),
1529 sc->sc_info.mci_ecc_bucket_count,
1530 sc->sc_info.mci_package_version);
1531
1532 DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
1533 DEVNAME(sc),
1534 sc->sc_info.mci_properties.mcp_seq_num,
1535 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
1536 sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
1537 sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
1538
1539 DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
1540 DEVNAME(sc),
1541 sc->sc_info.mci_properties.mcp_rebuild_rate,
1542 sc->sc_info.mci_properties.mcp_patrol_read_rate,
1543 sc->sc_info.mci_properties.mcp_bgi_rate,
1544 sc->sc_info.mci_properties.mcp_cc_rate);
1545
1546 DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
1547 DEVNAME(sc),
1548 sc->sc_info.mci_properties.mcp_recon_rate,
1549 sc->sc_info.mci_properties.mcp_cache_flush_interval,
1550 sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
1551 sc->sc_info.mci_properties.mcp_spinup_delay,
1552 sc->sc_info.mci_properties.mcp_cluster_enable);
1553
1554 DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
1555 DEVNAME(sc),
1556 sc->sc_info.mci_properties.mcp_coercion_mode,
1557 sc->sc_info.mci_properties.mcp_alarm_enable,
1558 sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
1559 sc->sc_info.mci_properties.mcp_disable_battery_warn,
1560 sc->sc_info.mci_properties.mcp_ecc_bucket_size);
1561
1562 DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
1563 DEVNAME(sc),
1564 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
1565 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
1566 sc->sc_info.mci_properties.mcp_expose_encl_devices);
1567
1568 DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
1569 DEVNAME(sc),
1570 sc->sc_info.mci_pci.mip_vendor,
1571 sc->sc_info.mci_pci.mip_device,
1572 sc->sc_info.mci_pci.mip_subvendor,
1573 sc->sc_info.mci_pci.mip_subdevice);
1574
1575 DPRINTF("%s: type %#x port_count %d port_addr ",
1576 DEVNAME(sc),
1577 sc->sc_info.mci_host.mih_type,
1578 sc->sc_info.mci_host.mih_port_count);
1579
1580 for (i = 0; i < 8; i++)
1581 DPRINTF("%.0" PRIx64 " ", sc->sc_info.mci_host.mih_port_addr[i]);
1582 DPRINTF("\n");
1583
1584 DPRINTF("%s: type %.x port_count %d port_addr ",
1585 DEVNAME(sc),
1586 sc->sc_info.mci_device.mid_type,
1587 sc->sc_info.mci_device.mid_port_count);
1588
1589 for (i = 0; i < 8; i++)
1590 DPRINTF("%.0" PRIx64 " ", sc->sc_info.mci_device.mid_port_addr[i]);
1591 DPRINTF("\n");
1592
1593 return (0);
1594 }
1595
/*
 * Issue a legacy MFI frame and busy-wait for its completion by
 * watching mfh_cmd_status, polling every 1ms for up to ~5 seconds.
 * Used where interrupts are not available (e.g. IOC init).
 *
 * Returns 0 on completion, 1 on timeout (MFI_CCB_F_ERR is then set).
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header *hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	/* sentinel value; overwritten by the firmware on completion */
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* post as a raw MFA request descriptor */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		/* pull the frame back into host view to check the status */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		/* give the frame back to the device before waiting again */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	/* complete any data transfer attached to the command */
	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	return (rv);
}
1651
/*
 * Run a command to completion by polling the reply post queue instead
 * of waiting for an interrupt.  The ccb's done/cookie fields are
 * temporarily borrowed to detect completion and restored before the
 * original done callback is invoked.  Always returns 0.
 */
int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	/* save the caller's completion hook and completion argument */
	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	/* mfii_poll_done() clears rv via the cookie when the ccb finishes */
	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	/* restore and invoke the original completion handler */
	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
1677
1678 void
1679 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1680 {
1681 int *rv = ccb->ccb_cookie;
1682
1683 *rv = 0;
1684 }
1685
/*
 * Run a command and sleep until it completes.  ccb_cookie is pointed
 * at the ccb itself as an "in flight" marker; mfii_exec_done() clears
 * it and signals ccb_cv.  Always returns 0.
 */
int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = ccb;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	/* wait for mfii_exec_done() to clear the cookie */
	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	return (0);
}
1706
/*
 * Completion hook for mfii_exec(): clear the in-flight marker under
 * the ccb mutex and wake the sleeping thread.
 */
void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&ccb->ccb_mtx);
	ccb->ccb_cookie = NULL;
	cv_signal(&ccb->ccb_cv);
	mutex_exit(&ccb->ccb_mtx);
}
1715
1716 int
1717 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1718 void *buf, size_t len, mfii_direction_t dir, bool poll)
1719 {
1720 struct mfii_ccb *ccb;
1721 int rv;
1722
1723 KASSERT(mutex_owned(&sc->sc_lock));
1724 if (!sc->sc_running)
1725 return EAGAIN;
1726
1727 ccb = mfii_get_ccb(sc);
1728 if (ccb == NULL)
1729 return (ENOMEM);
1730
1731 mfii_scrub_ccb(ccb);
1732 rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, dir, poll);
1733 mfii_put_ccb(sc, ccb);
1734
1735 return (rv);
1736 }
1737
/*
 * Execute a management DCMD on a caller-supplied ccb: build the legacy
 * MFI DCMD frame in the ccb's MFI area, wrap it in a MPII passthru
 * request whose chain SGE points at that frame, and run it either
 * polled or via mfii_exec().
 *
 * buf/len describe an optional data buffer transferred in direction
 * dir.  Returns 0 when the firmware reports MFI_STAT_OK, EIO on any
 * other status, ENOMEM if the data buffer could not be DMA-loaded.
 */
int
mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
    const union mfi_mbox *mbox, void *buf, size_t len, mfii_direction_t dir,
    bool poll)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	int rv = EIO;

	/* during early boot interrupts may not be available: force polling */
	if (cold)
		poll = true;

	ccb->ccb_data = buf;
	ccb->ccb_len = len;
	ccb->ccb_direction = dir;
	switch (dir) {
	case MFII_DATA_IN:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case MFII_DATA_OUT:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		break;
	case MFII_DATA_NONE:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
		break;
	}

	/* load the data buffer into the MFI frame's 32-bit SG list */
	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl, poll) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap32->dm_nsegs;
	KASSERT(!ccb->ccb_dma64);

	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 is in 32-bit words, chain_offset in 16-byte units */
	io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
	io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;

	/* chain element pointing at the MFI frame itself */
	sge->sg_addr = htole64(ccb->ccb_mfi_dva);
	sge->sg_len = htole32(MFI_FRAME_SIZE);
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	if (poll) {
		ccb->ccb_done = mfii_empty_done;
		mfii_poll(sc, ccb);
	} else
		mfii_exec(sc, ccb);

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;
	}

done:
	return (rv);
}
1808
/*
 * No-op completion callback, used by mfii_do_mgmt() for polled
 * commands where mfii_poll() drives completion itself.
 */
void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	return;
}
1814
/*
 * DMA-load the ccb's data buffer and fill the legacy MFI 32-bit SG
 * list at sglp with the resulting segments.  nosleep selects
 * BUS_DMA_NOWAIT.  Returns 0 on success (or if there is no data),
 * 1 if the map could not be loaded.
 */
int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap32;
	int error;
	int i;

	/* MFI frames only carry 32-bit SG entries */
	KASSERT(!ccb->ccb_dma64);
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* copy the DMA segments into the frame's SG list */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1847
/*
 * Hand a prepared ccb to the controller by writing its 64-bit request
 * descriptor to the inbound queue port.  On targets without an atomic
 * 8-byte write the descriptor is written as two 32-bit halves (low
 * first) under sc_post_mtx so descriptors from different CPUs cannot
 * interleave.
 */
void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	u_long *r = (u_long *)&ccb->ccb_req;

	/* make the request frame visible to the device before posting */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#if defined(__LP64__) && 0
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
#else
	mutex_enter(&sc->sc_post_mtx);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
	mutex_exit(&sc->sc_post_mtx);
#endif
}
1871
/*
 * Common command completion: sync the request frame and any SGL area
 * back to host view, complete and unload the data DMA map (64- or
 * 32-bit, whichever was used), then call the ccb's done callback.
 */
void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_dma64) {
		KASSERT(ccb->ccb_len > 0);
		bus_dmamap_sync(sc->sc_dmat64, ccb->ccb_dmamap64,
		    0, ccb->ccb_dmamap64->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat64, ccb->ccb_dmamap64);
	} else if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	ccb->ccb_done(sc, ccb);
}
1904
1905 int
1906 mfii_initialise_firmware(struct mfii_softc *sc)
1907 {
1908 struct mpii_msg_iocinit_request *iiq;
1909 struct mfii_dmamem *m;
1910 struct mfii_ccb *ccb;
1911 struct mfi_init_frame *init;
1912 int rv;
1913
1914 m = mfii_dmamem_alloc(sc, sizeof(*iiq));
1915 if (m == NULL)
1916 return (1);
1917
1918 iiq = MFII_DMA_KVA(m);
1919 memset(iiq, 0, sizeof(*iiq));
1920
1921 iiq->function = MPII_FUNCTION_IOC_INIT;
1922 iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
1923
1924 iiq->msg_version_maj = 0x02;
1925 iiq->msg_version_min = 0x00;
1926 iiq->hdr_version_unit = 0x10;
1927 iiq->hdr_version_dev = 0x0;
1928
1929 iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
1930
1931 iiq->reply_descriptor_post_queue_depth =
1932 htole16(sc->sc_reply_postq_depth);
1933 iiq->reply_free_queue_depth = htole16(0);
1934
1935 iiq->sense_buffer_address_high = htole32(
1936 MFII_DMA_DVA(sc->sc_sense) >> 32);
1937
1938 iiq->reply_descriptor_post_queue_address = htole64(
1939 MFII_DMA_DVA(sc->sc_reply_postq));
1940
1941 iiq->system_request_frame_base_address =
1942 htole64(MFII_DMA_DVA(sc->sc_requests));
1943
1944 iiq->timestamp = htole64(time_uptime);
1945
1946 ccb = mfii_get_ccb(sc);
1947 if (ccb == NULL) {
1948 /* shouldn't ever run out of ccbs during attach */
1949 return (1);
1950 }
1951 mfii_scrub_ccb(ccb);
1952 init = ccb->ccb_request;
1953
1954 init->mif_header.mfh_cmd = MFI_CMD_INIT;
1955 init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
1956 init->mif_qinfo_new_addr_lo = htole32(MFII_DMA_DVA(m));
1957 init->mif_qinfo_new_addr_hi = htole32((uint64_t)MFII_DMA_DVA(m) >> 32);
1958
1959 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
1960 0, MFII_DMA_LEN(sc->sc_reply_postq),
1961 BUS_DMASYNC_PREREAD);
1962
1963 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1964 0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
1965
1966 rv = mfii_mfa_poll(sc, ccb);
1967
1968 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1969 0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
1970
1971 mfii_put_ccb(sc, ccb);
1972 mfii_dmamem_free(sc, m);
1973
1974 return (rv);
1975 }
1976
1977 int
1978 mfii_my_intr(struct mfii_softc *sc)
1979 {
1980 u_int32_t status;
1981
1982 status = mfii_read(sc, MFI_OSTS);
1983
1984 DNPRINTF(MFII_D_INTR, "%s: intr status 0x%x\n", DEVNAME(sc), status);
1985 if (ISSET(status, 0x1)) {
1986 mfii_write(sc, MFI_OSTS, status);
1987 return (1);
1988 }
1989
1990 return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
1991 }
1992
/*
 * Interrupt handler: claim the interrupt and drain the reply post
 * queue.  Returns 1 if the interrupt was ours, 0 otherwise.
 */
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc) == 0)
		return (0);

	mfii_postq(sc);
	return (1);
}
2005
2006 void
2007 mfii_postq(struct mfii_softc *sc)
2008 {
2009 struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
2010 struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
2011 struct mpii_reply_descr *rdp;
2012 struct mfii_ccb *ccb;
2013 int rpi = 0;
2014
2015 mutex_enter(&sc->sc_reply_postq_mtx);
2016
2017 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2018 0, MFII_DMA_LEN(sc->sc_reply_postq),
2019 BUS_DMASYNC_POSTREAD);
2020
2021 for (;;) {
2022 rdp = &postq[sc->sc_reply_postq_index];
2023 DNPRINTF(MFII_D_INTR, "%s: mfii_postq index %d flags 0x%x data 0x%x\n",
2024 DEVNAME(sc), sc->sc_reply_postq_index, rdp->reply_flags,
2025 rdp->data == 0xffffffff);
2026 if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
2027 MPII_REPLY_DESCR_UNUSED)
2028 break;
2029 if (rdp->data == 0xffffffff) {
2030 /*
2031 * ioc is still writing to the reply post queue
2032 * race condition - bail!
2033 */
2034 break;
2035 }
2036
2037 ccb = &sc->sc_ccb[le16toh(rdp->smid) - 1];
2038 SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
2039 memset(rdp, 0xff, sizeof(*rdp));
2040
2041 sc->sc_reply_postq_index++;
2042 sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
2043 rpi = 1;
2044 }
2045
2046 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2047 0, MFII_DMA_LEN(sc->sc_reply_postq),
2048 BUS_DMASYNC_PREREAD);
2049
2050 if (rpi)
2051 mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);
2052
2053 mutex_exit(&sc->sc_reply_postq_mtx);
2054
2055 while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
2056 SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
2057 mfii_done(sc, ccb);
2058 }
2059 }
2060
/*
 * scsipi adapter request entry point.  For RUN_XFER, validate the
 * target, short-circuit cache flushes when a battery-backed cache is
 * present, build either a fast-path LD I/O or a CDB passthru command
 * and start it (polled if XS_CTL_POLL).  A per-xfer callout provides
 * the command timeout; ccb_refcnt arbitrates between normal
 * completion and the timeout handler.
 */
void
mfii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct mfii_softc *sc = device_private(adapt->adapt_dev);
	struct mfii_ccb *ccb;
	int timeout;
	int target;

	switch(req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	/* only LUN 0 of present logical drives is addressable */
	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
	    periph->periph_lun != 0) {
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		return;
	}

	if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
	    xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
		/* the cache is stable storage, don't flush */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		scsipi_done(xs);
		return;
	}

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}
	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	/* arm the command timeout; minimum one tick */
	timeout = mstohz(xs->timeout);
	if (timeout == 0)
		timeout = 1;
	callout_reset(&xs->xs_callout, timeout, mfii_scsi_cmd_tmo, ccb);

	switch (xs->cmd->opcode) {
	case SCSI_READ_6_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
	case SCSI_WRITE_6_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		/* read/write get the fast-path LD I/O frame */
		if (mfii_scsi_cmd_io(sc, ccb, xs) != 0)
			goto stuffup;
		break;

	default:
		/* everything else goes through the generic CDB path */
		if (mfii_scsi_cmd_cdb(sc, ccb, xs) != 0)
			goto stuffup;
		break;
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	DNPRINTF(MFII_D_CMD, "%s: start io %d cmd %d\n", DEVNAME(sc), target,
	    xs->cmd->opcode);

	if (xs->xs_control & XS_CTL_POLL) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	mfii_start(sc, ccb);

	return;

stuffup:
	/*
	 * NOTE(review): the callout armed above is not stopped on this
	 * path; it looks like it could fire on a completed xfer --
	 * confirm against mfii_scsi_cmd_tmo().
	 */
	xs->error = XS_DRIVER_STUFFUP;
	scsipi_done(xs);
	mfii_put_ccb(sc, ccb);
}
2169
/*
 * Completion handler for scsipi commands: translate the RAID context
 * status into a scsipi error code and finish the xfer once both the
 * chip reference and the timeout reference have been released.
 */
void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	u_int refs = 2;

	/*
	 * if the callout could not be stopped it is already running, so
	 * only drop our own reference and let the timeout drop the other
	 */
	if (callout_stop(&xs->xs_callout) != 0)
		refs = 1;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* firmware supplied sense data in the ccb's sense buffer */
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* last reference finishes the xfer and recycles the ccb */
	if (atomic_add_int_nv(&ccb->ccb_refcnt, -refs) == 0) {
		scsipi_done(xs);
		mfii_put_ccb(sc, ccb);
	}
}
2206
/*
 * Build a fast-path logical-disk read/write request
 * (MFII_FUNCTION_LDIO_REQUEST) for the given xfer and load its data
 * buffer into the 64-bit SGL.  Returns 0 on success, 1 if the dma map
 * could not be loaded.
 */
int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL starts right after the RAID context, counted in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	/* where the SGE count is stored differs between iop generations */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2266
/*
 * Build a generic (non-read/write) SCSI command for a logical disk
 * using the LDIO request format, and load the data buffer (if any)
 * into the 64-bit SGL.  Returns 0 on success, 1 on dma load failure.
 */
int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL starts right after the RAID context, counted in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(periph->periph_lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2314
#if 0
/*
 * Disabled physical-disk ("system PD") passthrough path.  This code
 * still uses OpenBSD interfaces (struct scsi_link, xs->io,
 * scsi_done()) and has not been converted to NetBSD's scsipi layer;
 * left here as a reference for a future port.
 */
void
mfii_pd_scsi_cmd(struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	// XXX timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->xs_control, XS_CTL_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	// XXX timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}

/*
 * Probe hook: only claim lun 0 of drives the firmware reports as
 * MFI_PD_SYSTEM (i.e. passthrough/JBOD) disks.
 */
int
mfii_pd_scsi_probe(struct scsi_link *link)
{
	struct mfii_softc *sc = link->adapter_softc;
	struct mfi_pd_details mpd;
	union mfi_mbox mbox;
	int rv;

	if (link->lun > 0)
		return (0);

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = htole16(link->target);

	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
	    MFII_DATA_IN, true);
	if (rv != 0)
		return (EIO);

	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
		return (ENXIO);

	return (0);
}

/*
 * Build a passthrough (SYSPD) request for a physical disk; the device
 * handle comes from the firmware's PD map rather than the target id.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
#endif
2436
/*
 * Load the ccb's data buffer into its 64-bit dma map and write the
 * scatter/gather list starting at 'sglp' inside the request frame.
 * Segments that do not fit in the frame spill into the ccb's external
 * SGL area, linked in via a chain entry.  Returns 0 on success, 1 if
 * the dma map could not be loaded.
 */
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;
	bus_dmamap_t dmap = ccb->ccb_dmamap64;
	u_int space;
	int i;

	int error;

	/* no data phase: nothing to map */
	if (ccb->ccb_len == 0)
		return (0);

	ccb->ccb_dma64 = true;
	error = bus_dmamap_load(sc->sc_dmat64, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* number of SGEs that still fit inside the request frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain entry */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		/* chain entry points at the external SGL area */
		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is in 16-byte units from the frame start */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* on reaching the chain slot, continue in the external list */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the last SGE written as end-of-list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat64, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* flush the external SGL tail so the chip sees it */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2504
2505 void
2506 mfii_scsi_cmd_tmo(void *p)
2507 {
2508 struct mfii_ccb *ccb = p;
2509 struct mfii_softc *sc = ccb->ccb_sc;
2510 bool start_abort;
2511
2512 printf("%s: cmd timeout ccb %p\n", DEVNAME(sc), p);
2513
2514 mutex_enter(&sc->sc_abort_mtx);
2515 start_abort = (SIMPLEQ_FIRST(&sc->sc_abort_list) == 0);
2516 SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2517 if (start_abort)
2518 workqueue_enqueue(sc->sc_abort_wq, &sc->sc_abort_work, NULL);
2519 mutex_exit(&sc->sc_abort_mtx);
2520 }
2521
2522 void
2523 mfii_abort_task(struct work *wk, void *scp)
2524 {
2525 struct mfii_softc *sc = scp;
2526 struct mfii_ccb *list;
2527
2528 mutex_enter(&sc->sc_abort_mtx);
2529 list = SIMPLEQ_FIRST(&sc->sc_abort_list);
2530 SIMPLEQ_INIT(&sc->sc_abort_list);
2531 mutex_exit(&sc->sc_abort_mtx);
2532
2533 while (list != NULL) {
2534 struct mfii_ccb *ccb = list;
2535 struct scsipi_xfer *xs = ccb->ccb_cookie;
2536 struct scsipi_periph *periph = xs->xs_periph;
2537 struct mfii_ccb *accb;
2538
2539 list = SIMPLEQ_NEXT(ccb, ccb_link);
2540
2541 if (!sc->sc_ld[periph->periph_target].ld_present) {
2542 /* device is gone */
2543 if (atomic_dec_uint_nv(&ccb->ccb_refcnt) == 0) {
2544 scsipi_done(xs);
2545 mfii_put_ccb(sc, ccb);
2546 }
2547 continue;
2548 }
2549
2550 accb = mfii_get_ccb(sc);
2551 mfii_scrub_ccb(accb);
2552 mfii_abort(sc, accb, periph->periph_target, ccb->ccb_smid,
2553 MPII_SCSI_TASK_ABORT_TASK,
2554 htole32(MFII_TASK_MGMT_FLAGS_PD));
2555
2556 accb->ccb_cookie = ccb;
2557 accb->ccb_done = mfii_scsi_cmd_abort_done;
2558
2559 mfii_start(sc, accb);
2560 }
2561 }
2562
2563 void
2564 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2565 uint16_t smid, uint8_t type, uint32_t flags)
2566 {
2567 struct mfii_task_mgmt *msg;
2568 struct mpii_msg_scsi_task_request *req;
2569
2570 msg = accb->ccb_request;
2571 req = &msg->mpii_request;
2572 req->dev_handle = dev_handle;
2573 req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2574 req->task_type = type;
2575 req->task_mid = htole16( smid);
2576 msg->flags = flags;
2577
2578 accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2579 accb->ccb_req.smid = le16toh(accb->ccb_smid);
2580 }
2581
2582 void
2583 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2584 {
2585 struct mfii_ccb *ccb = accb->ccb_cookie;
2586 struct scsipi_xfer *xs = ccb->ccb_cookie;
2587
2588 /* XXX check accb completion? */
2589
2590 mfii_put_ccb(sc, accb);
2591
2592 if (atomic_dec_uint_nv(&ccb->ccb_refcnt) == 0) {
2593 xs->error = XS_TIMEOUT;
2594 scsipi_done(xs);
2595 mfii_put_ccb(sc, ccb);
2596 }
2597 }
2598
2599 struct mfii_ccb *
2600 mfii_get_ccb(struct mfii_softc *sc)
2601 {
2602 struct mfii_ccb *ccb;
2603
2604 mutex_enter(&sc->sc_ccb_mtx);
2605 if (!sc->sc_running) {
2606 ccb = NULL;
2607 } else {
2608 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2609 if (ccb != NULL)
2610 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2611 }
2612 mutex_exit(&sc->sc_ccb_mtx);
2613 return (ccb);
2614 }
2615
2616 void
2617 mfii_scrub_ccb(struct mfii_ccb *ccb)
2618 {
2619 ccb->ccb_cookie = NULL;
2620 ccb->ccb_done = NULL;
2621 ccb->ccb_flags = 0;
2622 ccb->ccb_data = NULL;
2623 ccb->ccb_direction = MFII_DATA_NONE;
2624 ccb->ccb_dma64 = false;
2625 ccb->ccb_len = 0;
2626 ccb->ccb_sgl_len = 0;
2627 ccb->ccb_refcnt = 1;
2628
2629 memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2630 memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2631 memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2632 }
2633
/*
 * Return a ccb to the head of the free list; the list is protected by
 * sc_ccb_mtx.
 */
void
mfii_put_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
	mutex_exit(&sc->sc_ccb_mtx);
}
2641
2642 int
2643 mfii_init_ccb(struct mfii_softc *sc)
2644 {
2645 struct mfii_ccb *ccb;
2646 u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2647 u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2648 u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2649 u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2650 u_int i;
2651 int error;
2652
2653 sc->sc_ccb = malloc(sc->sc_max_cmds * sizeof(struct mfii_ccb),
2654 M_DEVBUF, M_WAITOK|M_ZERO);
2655
2656 for (i = 0; i < sc->sc_max_cmds; i++) {
2657 ccb = &sc->sc_ccb[i];
2658 ccb->ccb_sc = sc;
2659
2660 /* create a dma map for transfer */
2661 error = bus_dmamap_create(sc->sc_dmat,
2662 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2663 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap32);
2664 if (error) {
2665 printf("%s: cannot create ccb dmamap32 (%d)\n",
2666 DEVNAME(sc), error);
2667 goto destroy;
2668 }
2669 error = bus_dmamap_create(sc->sc_dmat64,
2670 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2671 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap64);
2672 if (error) {
2673 printf("%s: cannot create ccb dmamap64 (%d)\n",
2674 DEVNAME(sc), error);
2675 goto destroy32;
2676 }
2677
2678 /* select i + 1'th request. 0 is reserved for events */
2679 ccb->ccb_smid = i + 1;
2680 ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2681 ccb->ccb_request = request + ccb->ccb_request_offset;
2682 ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2683 ccb->ccb_request_offset;
2684
2685 /* select i'th MFI command frame */
2686 ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2687 ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2688 ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2689 ccb->ccb_mfi_offset;
2690
2691 /* select i'th sense */
2692 ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2693 ccb->ccb_sense = (struct mfi_sense *)(sense +
2694 ccb->ccb_sense_offset);
2695 ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2696 ccb->ccb_sense_offset;
2697
2698 /* select i'th sgl */
2699 ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2700 sc->sc_max_sgl * i;
2701 ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2702 ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2703 ccb->ccb_sgl_offset;
2704
2705 mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2706 cv_init(&ccb->ccb_cv, "mfiiexec");
2707
2708 /* add ccb to queue */
2709 mfii_put_ccb(sc, ccb);
2710 }
2711
2712 return (0);
2713
2714 destroy32:
2715 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2716 destroy:
2717 /* free dma maps and ccb memory */
2718 while ((ccb = mfii_get_ccb(sc)) != NULL) {
2719 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2720 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap64);
2721 }
2722
2723 free(sc->sc_ccb, M_DEVBUF);
2724
2725 return (1);
2726 }
2727
2728 #if NBIO > 0
2729 int
2730 mfii_ioctl(device_t dev, u_long cmd, void *addr)
2731 {
2732 struct mfii_softc *sc = device_private(dev);
2733 int error = 0;
2734
2735 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));
2736
2737 mutex_enter(&sc->sc_lock);
2738
2739 switch (cmd) {
2740 case BIOCINQ:
2741 DNPRINTF(MFII_D_IOCTL, "inq\n");
2742 error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
2743 break;
2744
2745 case BIOCVOL:
2746 DNPRINTF(MFII_D_IOCTL, "vol\n");
2747 error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
2748 break;
2749
2750 case BIOCDISK:
2751 DNPRINTF(MFII_D_IOCTL, "disk\n");
2752 error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
2753 break;
2754
2755 case BIOCALARM:
2756 DNPRINTF(MFII_D_IOCTL, "alarm\n");
2757 error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
2758 break;
2759
2760 case BIOCBLINK:
2761 DNPRINTF(MFII_D_IOCTL, "blink\n");
2762 error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
2763 break;
2764
2765 case BIOCSETSTATE:
2766 DNPRINTF(MFII_D_IOCTL, "setstate\n");
2767 error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
2768 break;
2769
2770 #if 0
2771 case BIOCPATROL:
2772 DNPRINTF(MFII_D_IOCTL, "patrol\n");
2773 error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
2774 break;
2775 #endif
2776
2777 default:
2778 DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
2779 error = ENOTTY;
2780 }
2781
2782 mutex_exit(&sc->sc_lock);
2783
2784 return (error);
2785 }
2786
/*
 * Refresh the cached controller configuration used by the bio(4)
 * handlers: adapter info, RAID config (sc_cfg), logical disk list
 * (sc_ld_list), per-LD details (sc_ld_details) and the derived count
 * of configured physical disks (sc_no_pd).  Returns 0 on success,
 * EINVAL on any failure.  Called with sc_lock held by mfii_ioctl().
 */
int
mfii_bio_getitall(struct mfii_softc *sc)
{
	int i, d, rv = EINVAL;
	size_t size;
	union mfi_mbox mbox;
	struct mfi_conf *cfg = NULL;
	struct mfi_ld_details *ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* firmware reports how large the full config really is */
	size = cfg->mfc_size;
	free(cfg, M_DEVBUF);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF);
	sc->sc_cfg = cfg;

	/* get all ld info */
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		/* LD count changed; grow/shrink the details array */
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox,
		    &sc->sc_ld_details[i], size, MFII_DATA_IN, false))
			goto done;

		/* drives per span times spans = drives in this LD */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
2867
2868 int
2869 mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
2870 {
2871 int rv = EINVAL;
2872 struct mfi_conf *cfg = NULL;
2873
2874 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));
2875
2876 if (mfii_bio_getitall(sc)) {
2877 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
2878 DEVNAME(sc));
2879 goto done;
2880 }
2881
2882 /* count unused disks as volumes */
2883 if (sc->sc_cfg == NULL)
2884 goto done;
2885 cfg = sc->sc_cfg;
2886
2887 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
2888 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
2889 #if notyet
2890 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
2891 (bi->bi_nodisk - sc->sc_no_pd);
2892 #endif
2893 /* tell bio who we are */
2894 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
2895
2896 rv = 0;
2897 done:
2898 return (rv);
2899 }
2900
/*
 * BIOCVOL handler: translate the firmware's state for the requested
 * logical volume into bioc_vol fields.  Volume ids beyond the LD list
 * are forwarded to mfii_bio_hs() for hotspare/unused-disk reporting.
 */
int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int i, per, rv = EINVAL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	strlcpy(bv->bv_dev, sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_name,
	    sizeof(bv->bv_dev));

	/* map the firmware LD state to the bio volume status */
	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
	case MFI_LD_PROG_BGI:
		/* consistency check / background init in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

#if 0
	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;
#endif

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
2988
/*
 * BIOCDISK handler: map a (volume, disk index) pair back to the
 * physical drive and report its enclosure/slot, state, size and
 * vendor.  For a missing array member, searches the PD list for an
 * unconfigured disk that could serve as a rebuild target instead.
 */
int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf *cfg;
	struct mfi_array *ar;
	struct mfi_ld_cfg *ld;
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	struct scsipi_inquiry_data *inqbuf;
	char vend[8+16+4+1], *vendp;
	int i, rv = EINVAL;
	int arr, vol, disk, span;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    MFII_DATA_IN, false))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			/* skip non-disk entries */
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false))
				continue;

			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no rebuild candidate found; report the missing disk */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    MFII_DATA_IN, false)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/*
	 * vend[] is sized to hold vendor + product + revision from the
	 * inquiry data as one string; copy all three fields at once.
	 */
	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

#if 0
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}
#endif

	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);

	return (rv);
}
3142
3143 int
3144 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3145 {
3146 uint32_t opc;
3147 int rv = 0;
3148 int8_t ret;
3149 mfii_direction_t dir = MFII_DATA_NONE;
3150
3151 switch(ba->ba_opcode) {
3152 case BIOC_SADISABLE:
3153 opc = MR_DCMD_SPEAKER_DISABLE;
3154 break;
3155
3156 case BIOC_SAENABLE:
3157 opc = MR_DCMD_SPEAKER_ENABLE;
3158 break;
3159
3160 case BIOC_SASILENCE:
3161 opc = MR_DCMD_SPEAKER_SILENCE;
3162 break;
3163
3164 case BIOC_GASTATUS:
3165 opc = MR_DCMD_SPEAKER_GET;
3166 dir = MFII_DATA_IN;
3167 break;
3168
3169 case BIOC_SATEST:
3170 opc = MR_DCMD_SPEAKER_TEST;
3171 break;
3172
3173 default:
3174 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3175 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3176 return (EINVAL);
3177 }
3178
3179 if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), dir, false))
3180 rv = EINVAL;
3181 else
3182 if (ba->ba_opcode == BIOC_GASTATUS)
3183 ba->ba_status = ret;
3184 else
3185 ba->ba_status = 0;
3186
3187 return (rv);
3188 }
3189
3190 int
3191 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3192 {
3193 int i, found, rv = EINVAL;
3194 union mfi_mbox mbox;
3195 uint32_t cmd;
3196 struct mfi_pd_list *pd;
3197
3198 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3199 bb->bb_status);
3200
3201 /* channel 0 means not in an enclosure so can't be blinked */
3202 if (bb->bb_channel == 0)
3203 return (EINVAL);
3204
3205 pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3206
3207 if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd),
3208 MFII_DATA_IN, false))
3209 goto done;
3210
3211 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3212 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3213 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3214 found = 1;
3215 break;
3216 }
3217
3218 if (!found)
3219 goto done;
3220
3221 memset(&mbox, 0, sizeof(mbox));
3222 mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3223
3224 switch (bb->bb_status) {
3225 case BIOC_SBUNBLINK:
3226 cmd = MR_DCMD_PD_UNBLINK;
3227 break;
3228
3229 case BIOC_SBBLINK:
3230 cmd = MR_DCMD_PD_BLINK;
3231 break;
3232
3233 case BIOC_SBALARM:
3234 default:
3235 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
3236 "opcode %x\n", DEVNAME(sc), bb->bb_status);
3237 goto done;
3238 }
3239
3240
3241 if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, MFII_DATA_NONE, false))
3242 goto done;
3243
3244 rv = 0;
3245 done:
3246 free(pd, M_DEVBUF);
3247 return (rv);
3248 }
3249
/*
 * Bring an UNCONFIG_BAD or foreign drive back to UNCONFIG_GOOD so it
 * can be used again: flip the firmware state if needed, clear any
 * foreign configuration metadata, then re-read the drive to verify
 * the result.  Returns 0 on success, ENXIO if the drive did not end
 * up unconfigured-good, or the mgmt error.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details *pd;
	union mfi_mbox mbox;
	int rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		/* flip the drive from UNCONFIG_BAD to UNCONFIG_GOOD */
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
		    MFII_DATA_NONE, false);
		if (rv != 0)
			goto done;
	}

	/* re-read the drive state after the (possible) state change */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		/* drive carries a foreign config; scan and clear it */
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL,
		    fsi, sizeof(*fsi), MFII_DATA_IN, false);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL,
			    NULL, 0, MFII_DATA_NONE, false);
			if (rv != 0)
				goto done;
		}
	}

	/* verify the drive really ended up unconfigured-good */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3316
/*
 * Turn the drive identified by pd_id into a global hot spare.  The
 * spare command carries a variable-length array map sized from the
 * current number of arrays, so the cached config must be refreshed
 * first.  Returns 0 on success or an mgmt/EINVAL error.
 */
static int
mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfi_hotspare *hs;
	struct mfi_pd_details *pd;
	union mfi_mbox mbox;
	size_t size;
	int rv = EINVAL;

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	/* room for the trailing per-array map (see struct mfi_hotspare) */
	size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;

	hs = malloc(size, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	/* identify the drive by id and current sequence number */
	memset(hs, 0, size);
	hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
	hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
	rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size,
	    MFII_DATA_OUT, false);

done:
	free(hs, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3356
/*
 * BIOCSETSTATE ioctl backend: change the firmware state of the
 * physical disk addressed by enclosure index/slot in bs to online,
 * offline, hotspare or rebuild.  Returns 0 on success or an errno.
 */
int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	/* map the caller's enclosure/slot address to a firmware device id */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
	    MFII_DATA_IN, false))
		goto done;

	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	/* SET_STATE needs the disk's current sequence number */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	/* mbox.b[4] carries the requested target state */
	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			/*
			 * Disk isn't offline: clean it up and turn it
			 * into a spare first, then re-fetch its state.
			 */
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			/* refresh the id/sequence pair for SET_STATE */
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
	    MFII_DATA_NONE, false);
done:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);
	return (rv);
}
3451
#if 0
/*
 * BIOCPATROL ioctl backend: start/stop the controller's patrol read
 * (background media scan), configure its schedule, or report its
 * status.  Currently compiled out.
 *
 * NOTE(review): the start/stop case below passes MFII_DATA_IN with a
 * NULL buffer and zero length; MFII_DATA_NONE looks like the intended
 * direction -- verify against mfii_mgmt() before enabling this code.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t opc;
	int rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status status;
	uint32_t time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, MFII_DATA_IN, false))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* read-modify-write the patrol properties */
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					/* next run is relative to device time */
					prop.next_exec = time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_OUT, false))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* translate the firmware opmode into bioc terms */
		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		/* translate the firmware patrol state into bioc terms */
		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
#endif
3588
3589 int
3590 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3591 {
3592 struct mfi_conf *cfg;
3593 struct mfi_hotspare *hs;
3594 struct mfi_pd_details *pd;
3595 struct bioc_disk *sdhs;
3596 struct bioc_vol *vdhs;
3597 struct scsipi_inquiry_data *inqbuf;
3598 char vend[8+16+4+1], *vendp;
3599 int i, rv = EINVAL;
3600 uint32_t size;
3601 union mfi_mbox mbox;
3602
3603 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3604
3605 if (!bio_hs)
3606 return (EINVAL);
3607
3608 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3609
3610 /* send single element command to retrieve size for full structure */
3611 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3612 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
3613 MFII_DATA_IN, false))
3614 goto freeme;
3615
3616 size = cfg->mfc_size;
3617 free(cfg, M_DEVBUF);
3618
3619 /* memory for read config */
3620 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3621 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
3622 MFII_DATA_IN, false))
3623 goto freeme;
3624
3625 /* calculate offset to hs structure */
3626 hs = (struct mfi_hotspare *)(
3627 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3628 cfg->mfc_array_size * cfg->mfc_no_array +
3629 cfg->mfc_ld_size * cfg->mfc_no_ld);
3630
3631 if (volid < cfg->mfc_no_ld)
3632 goto freeme; /* not a hotspare */
3633
3634 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3635 goto freeme; /* not a hotspare */
3636
3637 /* offset into hotspare structure */
3638 i = volid - cfg->mfc_no_ld;
3639
3640 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3641 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3642 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3643
3644 /* get pd fields */
3645 memset(&mbox, 0, sizeof(mbox));
3646 mbox.s[0] = hs[i].mhs_pd.mfp_id;
3647 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3648 MFII_DATA_IN, false)) {
3649 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3650 DEVNAME(sc));
3651 goto freeme;
3652 }
3653
3654 switch (type) {
3655 case MFI_MGMT_VD:
3656 vdhs = bio_hs;
3657 vdhs->bv_status = BIOC_SVONLINE;
3658 vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3659 vdhs->bv_level = -1; /* hotspare */
3660 vdhs->bv_nodisk = 1;
3661 break;
3662
3663 case MFI_MGMT_SD:
3664 sdhs = bio_hs;
3665 sdhs->bd_status = BIOC_SDHOTSPARE;
3666 sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3667 sdhs->bd_channel = pd->mpd_enc_idx;
3668 sdhs->bd_target = pd->mpd_enc_slot;
3669 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
3670 vendp = inqbuf->vendor;
3671 memcpy(vend, vendp, sizeof vend - 1);
3672 vend[sizeof vend - 1] = '\0';
3673 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3674 break;
3675
3676 default:
3677 goto freeme;
3678 }
3679
3680 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3681 rv = 0;
3682 freeme:
3683 free(pd, M_DEVBUF);
3684 free(cfg, M_DEVBUF);
3685
3686 return (rv);
3687 }
3688
3689 #endif /* NBIO > 0 */
3690
3691 #define MFI_BBU_SENSORS 4
3692
/*
 * Refresh one of the MFI_BBU_SENSORS battery backup unit sensors:
 * sensor 0 = health indicator, 1 = voltage, 2 = current,
 * 3 = temperature.  Queries the firmware on every call.
 */
void
mfii_bbu(struct mfii_softc *sc, envsys_data_t *edata)
{
	struct mfi_bbu_status bbu;
	u_int32_t status;
	u_int32_t mask;
	u_int32_t soh_bad;
	int rv;

	mutex_enter(&sc->sc_lock);
	rv = mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
	    sizeof(bbu), MFII_DATA_IN, false);
	mutex_exit(&sc->sc_lock);
	if (rv != 0) {
		/* firmware query failed: mark the sensor unusable */
		edata->state = ENVSYS_SINVALID;
		edata->value_cur = 0;
		return;
	}

	/* select the "battery bad" status bits for this battery type */
	switch (bbu.battery_type) {
	case MFI_BBU_TYPE_IBBU:
		mask = MFI_BBU_STATE_BAD_IBBU;
		soh_bad = 0;
		break;
	case MFI_BBU_TYPE_BBU:
		mask = MFI_BBU_STATE_BAD_BBU;
		/* this battery type also reports a state-of-health flag */
		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
		break;

	case MFI_BBU_TYPE_NONE:
	default:
		/* no (recognized) battery present: report critical */
		edata->state = ENVSYS_SCRITICAL;
		edata->value_cur = 0;
		return;
	}

	status = le32toh(bbu.fw_status) & mask;
	switch(edata->sensor) {
	case 0:
		/* health indicator: 1 = good, 0 = bad */
		edata->value_cur = (status || soh_bad) ? 0 : 1;
		edata->state =
		    edata->value_cur ? ENVSYS_SVALID : ENVSYS_SCRITICAL;
		return;
	case 1:
		/* assumes bbu.voltage is in mV; scaled to uV — TODO confirm */
		edata->value_cur = le16toh(bbu.voltage) * 1000;
		edata->state = ENVSYS_SVALID;
		return;
	case 2:
		/* assumes bbu.current is signed mA; scaled to uA — TODO confirm */
		edata->value_cur = (int16_t)le16toh(bbu.current) * 1000;
		edata->state = ENVSYS_SVALID;
		return;
	case 3:
		/* degrees C converted to the microkelvin envsys expects */
		edata->value_cur = le16toh(bbu.temperature) * 1000000 + 273150000;
		edata->state = ENVSYS_SVALID;
		return;
	}
}
3750
3751 void
3752 mfii_refresh_ld_sensor(struct mfii_softc *sc, envsys_data_t *edata)
3753 {
3754 struct bioc_vol bv;
3755 int error;
3756
3757 memset(&bv, 0, sizeof(bv));
3758 bv.bv_volid = edata->sensor - MFI_BBU_SENSORS;
3759 mutex_enter(&sc->sc_lock);
3760 error = mfii_ioctl_vol(sc, &bv);
3761 mutex_exit(&sc->sc_lock);
3762 if (error)
3763 bv.bv_status = BIOC_SVINVALID;
3764 bio_vol_to_envsys(edata, &bv);
3765 }
3766
3767 void
3768 mfii_init_ld_sensor(struct mfii_softc *sc, envsys_data_t *sensor, int i)
3769 {
3770 sensor->units = ENVSYS_DRIVE;
3771 sensor->state = ENVSYS_SINVALID;
3772 sensor->value_cur = ENVSYS_DRIVE_EMPTY;
3773 /* Enable monitoring for drive state changes */
3774 sensor->flags |= ENVSYS_FMONSTCHANGED;
3775 snprintf(sensor->desc, sizeof(sensor->desc), "%s:%d", DEVNAME(sc), i);
3776 }
3777
3778 static void
3779 mfii_attach_sensor(struct mfii_softc *sc, envsys_data_t *s)
3780 {
3781 if (sysmon_envsys_sensor_attach(sc->sc_sme, s))
3782 aprint_error_dev(sc->sc_dev,
3783 "failed to attach sensor %s\n", s->desc);
3784 }
3785
3786 int
3787 mfii_create_sensors(struct mfii_softc *sc)
3788 {
3789 int i, rv;
3790 const int nsensors = MFI_BBU_SENSORS + MFI_MAX_LD;
3791
3792 sc->sc_sme = sysmon_envsys_create();
3793 sc->sc_sensors = malloc(sizeof(envsys_data_t) * nsensors,
3794 M_DEVBUF, M_NOWAIT | M_ZERO);
3795
3796 if (sc->sc_sensors == NULL) {
3797 aprint_error_dev(sc->sc_dev, "can't allocate envsys_data_t\n");
3798 return ENOMEM;
3799 }
3800 /* BBU */
3801 sc->sc_sensors[0].units = ENVSYS_INDICATOR;
3802 sc->sc_sensors[0].state = ENVSYS_SINVALID;
3803 sc->sc_sensors[0].value_cur = 0;
3804 sc->sc_sensors[1].units = ENVSYS_SVOLTS_DC;
3805 sc->sc_sensors[1].state = ENVSYS_SINVALID;
3806 sc->sc_sensors[1].value_cur = 0;
3807 sc->sc_sensors[2].units = ENVSYS_SAMPS;
3808 sc->sc_sensors[2].state = ENVSYS_SINVALID;
3809 sc->sc_sensors[2].value_cur = 0;
3810 sc->sc_sensors[3].units = ENVSYS_STEMP;
3811 sc->sc_sensors[3].state = ENVSYS_SINVALID;
3812 sc->sc_sensors[3].value_cur = 0;
3813
3814 if (ISSET(le32toh(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3815 sc->sc_bbuok = true;
3816 sc->sc_sensors[0].flags |= ENVSYS_FMONCRITICAL;
3817 snprintf(sc->sc_sensors[0].desc, sizeof(sc->sc_sensors[0].desc),
3818 "%s BBU state", DEVNAME(sc));
3819 snprintf(sc->sc_sensors[1].desc, sizeof(sc->sc_sensors[1].desc),
3820 "%s BBU voltage", DEVNAME(sc));
3821 snprintf(sc->sc_sensors[2].desc, sizeof(sc->sc_sensors[2].desc),
3822 "%s BBU current", DEVNAME(sc));
3823 snprintf(sc->sc_sensors[3].desc, sizeof(sc->sc_sensors[3].desc),
3824 "%s BBU temperature", DEVNAME(sc));
3825 for (i = 0; i < MFI_BBU_SENSORS; i++) {
3826 mfii_attach_sensor(sc, &sc->sc_sensors[i]);
3827 }
3828 }
3829
3830 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3831 mfii_init_ld_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS], i);
3832 mfii_attach_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS]);
3833 }
3834
3835 sc->sc_sme->sme_name = DEVNAME(sc);
3836 sc->sc_sme->sme_cookie = sc;
3837 sc->sc_sme->sme_refresh = mfii_refresh_sensor;
3838 rv = sysmon_envsys_register(sc->sc_sme);
3839 if (rv) {
3840 aprint_error_dev(sc->sc_dev,
3841 "unable to register with sysmon (rv = %d)\n", rv);
3842 }
3843 return rv;
3844
3845 }
3846
3847 static int
3848 mfii_destroy_sensors(struct mfii_softc *sc)
3849 {
3850 if (sc->sc_sme == NULL)
3851 return 0;
3852 sysmon_envsys_unregister(sc->sc_sme);
3853 sc->sc_sme = NULL;
3854 free(sc->sc_sensors, M_DEVBUF);
3855 return 0;
3856 }
3857
3858 void
3859 mfii_refresh_sensor(struct sysmon_envsys *sme, envsys_data_t *edata)
3860 {
3861 struct mfii_softc *sc = sme->sme_cookie;
3862
3863 if (edata->sensor >= MFI_BBU_SENSORS + MFI_MAX_LD)
3864 return;
3865
3866 if (edata->sensor < MFI_BBU_SENSORS) {
3867 if (sc->sc_bbuok)
3868 mfii_bbu(sc, edata);
3869 } else {
3870 mfii_refresh_ld_sensor(sc, edata);
3871 }
3872 }
3873