/*	$NetBSD: mfii.c,v 1.3.2.2 2018/12/07 17:11:37 martin Exp $ */
2 /* $OpenBSD: mfii.c,v 1.58 2018/08/14 05:22:21 jmatthew Exp $ */
3
4 /*
5 * Copyright (c) 2012 David Gwynne <dlg (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include <sys/cdefs.h>
21 __KERNEL_RCSID(0, "$NetBSD: mfii.c,v 1.3.2.2 2018/12/07 17:11:37 martin Exp $");
22
23 #include "bio.h"
24
25 #include <sys/atomic.h>
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/buf.h>
29 #include <sys/ioctl.h>
30 #include <sys/device.h>
31 #include <sys/kernel.h>
32 #include <sys/proc.h>
33 #include <sys/cpu.h>
34 #include <sys/conf.h>
35 #include <sys/kauth.h>
36 #include <sys/workqueue.h>
37 #include <sys/malloc.h>
38
39 #include <uvm/uvm_param.h>
40
41 #include <dev/pci/pcidevs.h>
42 #include <dev/pci/pcivar.h>
43
44 #include <sys/bus.h>
45
46 #include <dev/sysmon/sysmonvar.h>
47 #include <sys/envsys.h>
48
49 #include <dev/scsipi/scsipi_all.h>
50 #include <dev/scsipi/scsi_all.h>
51 #include <dev/scsipi/scsi_spc.h>
52 #include <dev/scsipi/scsipi_disk.h>
53 #include <dev/scsipi/scsi_disk.h>
54 #include <dev/scsipi/scsiconf.h>
55
56 #if NBIO > 0
57 #include <dev/biovar.h>
58 #endif /* NBIO > 0 */
59
60 #include <dev/ic/mfireg.h>
61 #include <dev/pci/mpiireg.h>
62
63 #define MFII_BAR 0x14
64 #define MFII_BAR_35 0x10
65 #define MFII_PCI_MEMSIZE 0x2000 /* 8k */
66
67 #define MFII_OSTS_INTR_VALID 0x00000009
68 #define MFII_RPI 0x6c /* reply post host index */
69 #define MFII_OSP2 0xb4 /* outbound scratch pad 2 */
70 #define MFII_OSP3 0xb8 /* outbound scratch pad 3 */
71
72 #define MFII_REQ_TYPE_SCSI MPII_REQ_DESCR_SCSI_IO
73 #define MFII_REQ_TYPE_LDIO (0x7 << 1)
74 #define MFII_REQ_TYPE_MFA (0x1 << 1)
75 #define MFII_REQ_TYPE_NO_LOCK (0x2 << 1)
76 #define MFII_REQ_TYPE_HI_PRI (0x6 << 1)
77
78 #define MFII_REQ_MFA(_a) htole64((_a) | MFII_REQ_TYPE_MFA)
79
80 #define MFII_FUNCTION_PASSTHRU_IO (0xf0)
81 #define MFII_FUNCTION_LDIO_REQUEST (0xf1)
82
83 #define MFII_MAX_CHAIN_UNIT 0x00400000
84 #define MFII_MAX_CHAIN_MASK 0x000003E0
85 #define MFII_MAX_CHAIN_SHIFT 5
86
87 #define MFII_256K_IO 128
88 #define MFII_1MB_IO (MFII_256K_IO * 4)
89
90 #define MFII_CHAIN_FRAME_MIN 1024
91
/*
 * Request descriptor posted to the controller to kick off a command;
 * routes the request frame identified by its SMID (see MFII_REQ_TYPE_*).
 */
struct mfii_request_descr {
	u_int8_t	flags;
	u_int8_t	msix_index;
	u_int16_t	smid;

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
100
101 #define MFII_RAID_CTX_IO_TYPE_SYSPD (0x1 << 4)
102 #define MFII_RAID_CTX_TYPE_CUDA (0x2 << 4)
103
/*
 * RAID context that follows the MPII SCSI I/O frame in an LDIO request;
 * carries MegaRAID-specific locking/routing state for the command.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

/* gen-3.5 controllers reuse this field for routing flags (see mfii_iop_35) */
#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;
	u_int8_t	status;

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
134
/*
 * Hardware scatter/gather list element; sg_flags takes the
 * MFII_SGE_* bits defined below.
 */
struct mfii_sge {
	u_int64_t	sg_addr;
	u_int32_t	sg_len;
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;
} __packed;
142
143 #define MFII_SGE_ADDR_MASK (0x03)
144 #define MFII_SGE_ADDR_SYSTEM (0x00)
145 #define MFII_SGE_ADDR_IOCDDR (0x01)
146 #define MFII_SGE_ADDR_IOCPLB (0x02)
147 #define MFII_SGE_ADDR_IOCPLBNTA (0x03)
148 #define MFII_SGE_END_OF_LIST (0x40)
149 #define MFII_SGE_CHAIN_ELEMENT (0x80)
150
151 #define MFII_REQUEST_SIZE 256
152
153 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
154
155 #define MFII_MAX_ROW 32
156 #define MFII_MAX_ARRAY 128
157
/* per-array row -> physical device id map */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;
161
/* per physical device handle state as reported by the firmware */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;
168
/*
 * Logical drive map; presumably the layout returned by the
 * MR_DCMD_LD_MAP_GET_INFO dcmd (see mfii_dev_handles_update()).
 */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
180
/*
 * Task management frame: fixed-size request and reply areas overlaid
 * with the MPII task management message structures.
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define MFII_TASK_MGMT_FLAGS_LD			(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD			(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
197
/* We currently don't know the full details of the following struct */
struct mfii_foreign_scan_cfg {
	char data[24];
} __packed;

struct mfii_foreign_scan_info {
	uint32_t count; /* Number of foreign configs found */
	struct mfii_foreign_scan_cfg cfgs[8];
} __packed;
207
/*
 * A loaded, single-segment DMA allocation: map, segment, size and
 * kernel mapping together.  Allocated by mfii_dmamem_alloc().
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	void			*mdm_kva;
};
/* accessors for the map, length, device and kernel addresses */
#define MFII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
#define MFII_DMA_LEN(_mdm) ((_mdm)->mdm_size)
#define MFII_DMA_DVA(_mdm) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MFII_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva)
218
219 struct mfii_softc;
220
/* DMA direction of a command's data phase */
typedef enum mfii_direction {
	MFII_DATA_NONE = 0,
	MFII_DATA_IN,
	MFII_DATA_OUT
} mfii_direction_t;
226
/*
 * Per-command control block.  Each ccb owns a slice of the shared
 * request, mfi, sense and sgl DMA areas (kva pointer, device address
 * and offset into the parent allocation for partial syncs).
 */
struct mfii_ccb {
	struct mfii_softc	*ccb_sc;
	/* MPII request frame */
	void			*ccb_request;
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	/* MFI frame, for commands proxied through the MFA path */
	void			*ccb_mfi;
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	/* sense buffer; also reused to hold dcmd frames (see mfii_dcmd_frame) */
	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	/* external SGL chain */
	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;

	/* one map per DMA tag; ccb_dma64 records which one is loaded */
	bus_dmamap_t		ccb_dmamap64;
	bus_dmamap_t		ccb_dmamap32;
	bool			ccb_dma64;

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	mfii_direction_t	ccb_direction;

	/* completion state: caller cookie, wait channel and callback */
	void			*ccb_cookie;
	kmutex_t		ccb_mtx;
	kcondvar_t		ccb_cv;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;
	u_int			ccb_refcnt;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
271
/*
 * Per-controller-generation parameters (register BAR, where the SGE
 * count lives, and the LDIO/SGE flag values to use); one instance per
 * supported chip family, selected via the mfii_devices table.
 */
struct mfii_iop {
	int bar;
	int num_sge_loc;
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;
	u_int8_t sge_flag_eol;
};
283
/* per-controller state */
struct mfii_softc {
	device_t		sc_dev;
	struct scsipi_channel	sc_chan;
	struct scsipi_adapter	sc_adapt;

	/* chip-generation parameters, set at match/attach time */
	const struct mfii_iop	*sc_iop;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;
	bus_dma_tag_t		sc_dmat64;
	bool			sc_64bit_dma;

	void			*sc_ih;		/* interrupt handle */

	kmutex_t		sc_ccb_mtx;
	kmutex_t		sc_post_mtx;

	/* command limits derived from the firmware at attach time */
	u_int			sc_max_fw_cmds;
	u_int			sc_max_cmds;
	u_int			sc_max_sgl;

	/* reply post queue shared with the controller */
	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	kmutex_t		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* DMA memory carved up among the ccbs */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_mfi;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;
	struct mfii_ccb_list	sc_ccb_freeq;

	/* asynchronous event notification */
	struct mfii_ccb		*sc_aen_ccb;
	struct workqueue	*sc_aen_wq;
	struct work		sc_aen_work;

	/* deferred command aborts */
	kmutex_t		sc_abort_mtx;
	struct mfii_ccb_list	sc_abort_list;
	struct workqueue	*sc_abort_wq;
	struct work		sc_abort_work;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		bool		ld_present;
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFI_MAX_LD];
	int			sc_target_lds[MFI_MAX_LD];

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfi_ld_list	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
	int			sc_no_pd; /* used physical disks */
	int			sc_ld_sz; /* sizeof sc_ld_details */

	/* mgmt lock */
	kmutex_t		sc_lock;
	bool			sc_running;

	/* sensors */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
	bool			sc_bbuok;

	device_t		sc_child;	/* attached scsibus, if any */
};
360
361 // #define MFII_DEBUG
362 #ifdef MFII_DEBUG
363 #define DPRINTF(x...) do { if (mfii_debug) printf(x); } while(0)
364 #define DNPRINTF(n,x...) do { if (mfii_debug & n) printf(x); } while(0)
365 #define MFII_D_CMD 0x0001
366 #define MFII_D_INTR 0x0002
367 #define MFII_D_MISC 0x0004
368 #define MFII_D_DMA 0x0008
369 #define MFII_D_IOCTL 0x0010
370 #define MFII_D_RW 0x0020
371 #define MFII_D_MEM 0x0040
372 #define MFII_D_CCB 0x0080
373 uint32_t mfii_debug = 0
374 /* | MFII_D_CMD */
375 /* | MFII_D_INTR */
376 | MFII_D_MISC
377 /* | MFII_D_DMA */
378 /* | MFII_D_IOCTL */
379 /* | MFII_D_RW */
380 /* | MFII_D_MEM */
381 /* | MFII_D_CCB */
382 ;
383 #else
384 #define DPRINTF(x...)
385 #define DNPRINTF(n,x...)
386 #endif
387
388 int mfii_match(device_t, cfdata_t, void *);
389 void mfii_attach(device_t, device_t, void *);
390 int mfii_detach(device_t, int);
391 int mfii_rescan(device_t, const char *, const int *);
392 void mfii_childdetached(device_t, device_t);
393 static bool mfii_suspend(device_t, const pmf_qual_t *);
394 static bool mfii_resume(device_t, const pmf_qual_t *);
395 static bool mfii_shutdown(device_t, int);
396
397
398 CFATTACH_DECL3_NEW(mfii, sizeof(struct mfii_softc),
399 mfii_match, mfii_attach, mfii_detach, NULL, mfii_rescan,
400 mfii_childdetached, DVF_DETACH_SHUTDOWN);
401
402 void mfii_scsipi_request(struct scsipi_channel *,
403 scsipi_adapter_req_t, void *);
404 void mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
405
406 #define DEVNAME(_sc) (device_xname((_sc)->sc_dev))
407
408 static u_int32_t mfii_read(struct mfii_softc *, bus_size_t);
409 static void mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
410
411 struct mfii_dmamem * mfii_dmamem_alloc(struct mfii_softc *, size_t);
412 void mfii_dmamem_free(struct mfii_softc *,
413 struct mfii_dmamem *);
414
415 struct mfii_ccb * mfii_get_ccb(struct mfii_softc *);
416 void mfii_put_ccb(struct mfii_softc *, struct mfii_ccb *);
417 int mfii_init_ccb(struct mfii_softc *);
418 void mfii_scrub_ccb(struct mfii_ccb *);
419
420 int mfii_transition_firmware(struct mfii_softc *);
421 int mfii_initialise_firmware(struct mfii_softc *);
422 int mfii_get_info(struct mfii_softc *);
423
424 void mfii_start(struct mfii_softc *, struct mfii_ccb *);
425 void mfii_done(struct mfii_softc *, struct mfii_ccb *);
426 int mfii_poll(struct mfii_softc *, struct mfii_ccb *);
427 void mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
428 int mfii_exec(struct mfii_softc *, struct mfii_ccb *);
429 void mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
430 int mfii_my_intr(struct mfii_softc *);
431 int mfii_intr(void *);
432 void mfii_postq(struct mfii_softc *);
433
434 int mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
435 void *, int);
436 int mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
437 void *, int);
438
439 int mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
440
441 int mfii_mgmt(struct mfii_softc *, uint32_t,
442 const union mfi_mbox *, void *, size_t,
443 mfii_direction_t, bool);
444 int mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
445 uint32_t, const union mfi_mbox *, void *, size_t,
446 mfii_direction_t, bool);
447 void mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
448
449 int mfii_scsi_cmd_io(struct mfii_softc *,
450 struct mfii_ccb *, struct scsipi_xfer *);
451 int mfii_scsi_cmd_cdb(struct mfii_softc *,
452 struct mfii_ccb *, struct scsipi_xfer *);
453 void mfii_scsi_cmd_tmo(void *);
454
455 int mfii_dev_handles_update(struct mfii_softc *sc);
456 void mfii_dev_handles_dtor(void *, void *);
457
458 void mfii_abort_task(struct work *, void *);
459 void mfii_abort(struct mfii_softc *, struct mfii_ccb *,
460 uint16_t, uint16_t, uint8_t, uint32_t);
461 void mfii_scsi_cmd_abort_done(struct mfii_softc *,
462 struct mfii_ccb *);
463
464 int mfii_aen_register(struct mfii_softc *);
465 void mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
466 struct mfii_dmamem *, uint32_t);
467 void mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
468 void mfii_aen(struct work *, void *);
469 void mfii_aen_unregister(struct mfii_softc *);
470
471 void mfii_aen_pd_insert(struct mfii_softc *,
472 const struct mfi_evtarg_pd_address *);
473 void mfii_aen_pd_remove(struct mfii_softc *,
474 const struct mfi_evtarg_pd_address *);
475 void mfii_aen_pd_state_change(struct mfii_softc *,
476 const struct mfi_evtarg_pd_state *);
477 void mfii_aen_ld_update(struct mfii_softc *);
478
479 #if NBIO > 0
480 int mfii_ioctl(device_t, u_long, void *);
481 int mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
482 int mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
483 int mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
484 int mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
485 int mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
486 int mfii_ioctl_setstate(struct mfii_softc *,
487 struct bioc_setstate *);
488 int mfii_bio_hs(struct mfii_softc *, int, int, void *);
489 int mfii_bio_getitall(struct mfii_softc *);
490 #endif /* NBIO > 0 */
491
492 #if 0
493 static const char *mfi_bbu_indicators[] = {
494 "pack missing",
495 "voltage low",
496 "temp high",
497 "charge active",
498 "discharge active",
499 "learn cycle req'd",
500 "learn cycle active",
501 "learn cycle failed",
502 "learn cycle timeout",
503 "I2C errors",
504 "replace pack",
505 "low capacity",
506 "periodic learn req'd"
507 };
508 #endif
509
510 void mfii_init_ld_sensor(struct mfii_softc *, envsys_data_t *, int);
511 void mfii_refresh_ld_sensor(struct mfii_softc *, envsys_data_t *);
512 static void mfii_attach_sensor(struct mfii_softc *, envsys_data_t *);
513 int mfii_create_sensors(struct mfii_softc *);
514 static int mfii_destroy_sensors(struct mfii_softc *);
515 void mfii_refresh_sensor(struct sysmon_envsys *, envsys_data_t *);
516 void mfii_bbu(struct mfii_softc *, envsys_data_t *);
517
518 /*
519 * mfii boards support asynchronous (and non-polled) completion of
520 * dcmds by proxying them through a passthru mpii command that points
521 * at a dcmd frame. since the passthru command is submitted like
522 * the scsi commands using an SMID in the request descriptor,
523 * ccb_request memory * must contain the passthru command because
524 * that is what the SMID refers to. this means ccb_request cannot
525 * contain the dcmd. rather than allocating separate dma memory to
526 * hold the dcmd, we reuse the sense memory buffer for it.
527 */
528
529 void mfii_dcmd_start(struct mfii_softc *,
530 struct mfii_ccb *);
531
/*
 * Zero the dcmd frame carried in this ccb's sense buffer (dcmds are
 * proxied through the sense memory; see the comment above).
 */
static inline void
mfii_dcmd_scrub(struct mfii_ccb *ccb)
{
	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
}
537
/* return the dcmd frame overlaid on the ccb's sense buffer */
static inline struct mfi_dcmd_frame *
mfii_dcmd_frame(struct mfii_ccb *ccb)
{
	/* the frame must fit within the sense buffer it reuses */
	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
}
544
/* sync just this ccb's slice of the shared sense DMA memory */
static inline void
mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
}
551
552 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
553
/* parameters for the Thunderbolt (SAS2208) generation */
const struct mfii_iop mfii_iop_thunderbolt = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	0,			/* ldio_ctx_reg_lock_flags */
	MFII_REQ_TYPE_LDIO,
	0,			/* ldio_ctx_type_nseg */
	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
	0			/* sge_flag_eol */
};
563
/*
 * a lot of these values depend on us not implementing fastpath yet.
 */
const struct mfii_iop mfii_iop_25 = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST
};
576
/* parameters for the gen-3.5 controllers (different BAR and SGE count) */
const struct mfii_iop mfii_iop_35 = {
	MFII_BAR_35,
	MFII_IOP_NUM_SGE_LOC_35,
	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST
};
586
/* PCI id -> iop parameter mapping for one supported device */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};
592
/* supported controllers, searched linearly by mfii_find_iop() */
const struct mfii_device mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 }
};
613
614 const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
615
616 const struct mfii_iop *
617 mfii_find_iop(struct pci_attach_args *pa)
618 {
619 const struct mfii_device *mpd;
620 int i;
621
622 for (i = 0; i < __arraycount(mfii_devices); i++) {
623 mpd = &mfii_devices[i];
624
625 if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
626 mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
627 return (mpd->mpd_iop);
628 }
629
630 return (NULL);
631 }
632
633 int
634 mfii_match(device_t parent, cfdata_t match, void *aux)
635 {
636 return ((mfii_find_iop(aux) != NULL) ? 2 : 0);
637 }
638
639 void
640 mfii_attach(device_t parent, device_t self, void *aux)
641 {
642 struct mfii_softc *sc = device_private(self);
643 struct pci_attach_args *pa = aux;
644 pcireg_t memtype;
645 pci_intr_handle_t ih;
646 char intrbuf[PCI_INTRSTR_LEN];
647 const char *intrstr;
648 u_int32_t status, scpad2, scpad3;
649 int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
650 struct scsipi_adapter *adapt = &sc->sc_adapt;
651 struct scsipi_channel *chan = &sc->sc_chan;
652
653 /* init sc */
654 sc->sc_dev = self;
655 sc->sc_iop = mfii_find_iop(aux);
656 sc->sc_dmat = pa->pa_dmat;
657 if (pci_dma64_available(pa)) {
658 sc->sc_dmat64 = pa->pa_dmat64;
659 sc->sc_64bit_dma = 1;
660 } else {
661 sc->sc_dmat64 = pa->pa_dmat;
662 sc->sc_64bit_dma = 0;
663 }
664 SIMPLEQ_INIT(&sc->sc_ccb_freeq);
665 mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
666 mutex_init(&sc->sc_post_mtx, MUTEX_DEFAULT, IPL_BIO);
667 mutex_init(&sc->sc_reply_postq_mtx, MUTEX_DEFAULT, IPL_BIO);
668
669 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
670
671 sc->sc_aen_ccb = NULL;
672 snprintf(intrbuf, sizeof(intrbuf) - 1, "%saen", device_xname(self));
673 workqueue_create(&sc->sc_aen_wq, intrbuf, mfii_aen, sc,
674 PRI_BIO, IPL_BIO, WQ_MPSAFE);
675
676 snprintf(intrbuf, sizeof(intrbuf) - 1, "%sabrt", device_xname(self));
677 workqueue_create(&sc->sc_abort_wq, intrbuf, mfii_abort_task,
678 sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
679
680 mutex_init(&sc->sc_abort_mtx, MUTEX_DEFAULT, IPL_BIO);
681 SIMPLEQ_INIT(&sc->sc_abort_list);
682
683 /* wire up the bus shizz */
684 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
685 memtype |= PCI_MAPREG_MEM_TYPE_32BIT;
686 if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
687 &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)) {
688 aprint_error(": unable to map registers\n");
689 return;
690 }
691
692 /* disable interrupts */
693 mfii_write(sc, MFI_OMSK, 0xffffffff);
694
695 if (pci_intr_map(pa, &ih) != 0) {
696 aprint_error(": unable to map interrupt\n");
697 goto pci_unmap;
698 }
699 intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf));
700 pci_intr_setattr(pa->pa_pc, &ih, PCI_INTR_MPSAFE, true);
701
702 /* lets get started */
703 if (mfii_transition_firmware(sc))
704 goto pci_unmap;
705 sc->sc_running = true;
706
707 /* determine max_cmds (refer to the Linux megaraid_sas driver) */
708 scpad3 = mfii_read(sc, MFII_OSP3);
709 status = mfii_fw_state(sc);
710 sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
711 if (sc->sc_max_fw_cmds == 0)
712 sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
713 /*
714 * reduce max_cmds by 1 to ensure that the reply queue depth does not
715 * exceed FW supplied max_fw_cmds.
716 */
717 sc->sc_max_cmds = min(sc->sc_max_fw_cmds, 1024) - 1;
718
719 /* determine max_sgl (refer to the Linux megaraid_sas driver) */
720 scpad2 = mfii_read(sc, MFII_OSP2);
721 chain_frame_sz =
722 ((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
723 ((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
724 if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
725 chain_frame_sz = MFII_CHAIN_FRAME_MIN;
726
727 nsge_in_io = (MFII_REQUEST_SIZE -
728 sizeof(struct mpii_msg_scsi_io) -
729 sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
730 nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
731
732 /* round down to nearest power of two */
733 sc->sc_max_sgl = 1;
734 while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
735 sc->sc_max_sgl <<= 1;
736
737 DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
738 DEVNAME(sc), status, scpad2, scpad3);
739 DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
740 DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
741 DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
742 "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
743 sc->sc_max_sgl);
744
745 /* sense memory */
746 CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
747 sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
748 if (sc->sc_sense == NULL) {
749 aprint_error(": unable to allocate sense memory\n");
750 goto pci_unmap;
751 }
752
753 /* reply post queue */
754 sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
755
756 sc->sc_reply_postq = mfii_dmamem_alloc(sc,
757 sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
758 if (sc->sc_reply_postq == NULL)
759 goto free_sense;
760
761 memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
762 MFII_DMA_LEN(sc->sc_reply_postq));
763
764 /* MPII request frame array */
765 sc->sc_requests = mfii_dmamem_alloc(sc,
766 MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
767 if (sc->sc_requests == NULL)
768 goto free_reply_postq;
769
770 /* MFI command frame array */
771 sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
772 if (sc->sc_mfi == NULL)
773 goto free_requests;
774
775 /* MPII SGL array */
776 sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
777 sizeof(struct mfii_sge) * sc->sc_max_sgl);
778 if (sc->sc_sgl == NULL)
779 goto free_mfi;
780
781 if (mfii_init_ccb(sc) != 0) {
782 aprint_error(": could not init ccb list\n");
783 goto free_sgl;
784 }
785
786 /* kickstart firmware with all addresses and pointers */
787 if (mfii_initialise_firmware(sc) != 0) {
788 aprint_error(": could not initialize firmware\n");
789 goto free_sgl;
790 }
791
792 mutex_enter(&sc->sc_lock);
793 if (mfii_get_info(sc) != 0) {
794 mutex_exit(&sc->sc_lock);
795 aprint_error(": could not retrieve controller information\n");
796 goto free_sgl;
797 }
798 mutex_exit(&sc->sc_lock);
799
800 aprint_normal(": \"%s\", firmware %s",
801 sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
802 if (le16toh(sc->sc_info.mci_memory_size) > 0) {
803 aprint_normal(", %uMB cache",
804 le16toh(sc->sc_info.mci_memory_size));
805 }
806 aprint_normal("\n");
807 aprint_naive("\n");
808
809 sc->sc_ih = pci_intr_establish_xname(sc->sc_pc, ih, IPL_BIO,
810 mfii_intr, sc, DEVNAME(sc));
811 if (sc->sc_ih == NULL) {
812 aprint_error_dev(self, "can't establish interrupt");
813 if (intrstr)
814 aprint_error(" at %s", intrstr);
815 aprint_error("\n");
816 goto free_sgl;
817 }
818 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
819
820 for (i = 0; i < sc->sc_info.mci_lds_present; i++)
821 sc->sc_ld[i].ld_present = 1;
822
823 memset(adapt, 0, sizeof(*adapt));
824 adapt->adapt_dev = sc->sc_dev;
825 adapt->adapt_nchannels = 1;
826 /* keep a few commands for management */
827 if (sc->sc_max_cmds > 4)
828 adapt->adapt_openings = sc->sc_max_cmds - 4;
829 else
830 adapt->adapt_openings = sc->sc_max_cmds;
831 adapt->adapt_max_periph = adapt->adapt_openings;
832 adapt->adapt_request = mfii_scsipi_request;
833 adapt->adapt_minphys = minphys;
834 adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
835
836 memset(chan, 0, sizeof(*chan));
837 chan->chan_adapter = adapt;
838 chan->chan_bustype = &scsi_sas_bustype;
839 chan->chan_channel = 0;
840 chan->chan_flags = 0;
841 chan->chan_nluns = 8;
842 chan->chan_ntargets = sc->sc_info.mci_max_lds;
843 chan->chan_id = sc->sc_info.mci_max_lds;
844
845 mfii_rescan(sc->sc_dev, "scsi", NULL);
846
847 if (mfii_aen_register(sc) != 0) {
848 /* error printed by mfii_aen_register */
849 goto intr_disestablish;
850 }
851
852 mutex_enter(&sc->sc_lock);
853 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
854 sizeof(sc->sc_ld_list), MFII_DATA_IN, true) != 0) {
855 mutex_exit(&sc->sc_lock);
856 aprint_error_dev(self,
857 "getting list of logical disks failed\n");
858 goto intr_disestablish;
859 }
860 mutex_exit(&sc->sc_lock);
861 memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
862 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
863 int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
864 sc->sc_target_lds[target] = i;
865 }
866
867 /* enable interrupts */
868 mfii_write(sc, MFI_OSTS, 0xffffffff);
869 mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
870
871 #if NBIO > 0
872 if (bio_register(sc->sc_dev, mfii_ioctl) != 0)
873 panic("%s: controller registration failed", DEVNAME(sc));
874 #endif /* NBIO > 0 */
875
876 if (mfii_create_sensors(sc) != 0)
877 aprint_error_dev(self, "unable to create sensors\n");
878
879 if (!pmf_device_register1(sc->sc_dev, mfii_suspend, mfii_resume,
880 mfii_shutdown))
881 aprint_error_dev(self, "couldn't establish power handler\n");
882 return;
883 intr_disestablish:
884 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
885 free_sgl:
886 mfii_dmamem_free(sc, sc->sc_sgl);
887 free_mfi:
888 mfii_dmamem_free(sc, sc->sc_mfi);
889 free_requests:
890 mfii_dmamem_free(sc, sc->sc_requests);
891 free_reply_postq:
892 mfii_dmamem_free(sc, sc->sc_reply_postq);
893 free_sense:
894 mfii_dmamem_free(sc, sc->sc_sense);
895 pci_unmap:
896 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
897 }
898
899 #if 0
900 struct srp_gc mfii_dev_handles_gc =
901 SRP_GC_INITIALIZER(mfii_dev_handles_dtor, NULL);
902
903 static inline uint16_t
904 mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
905 {
906 struct srp_ref sr;
907 uint16_t *map, handle;
908
909 map = srp_enter(&sr, &sc->sc_pd->pd_dev_handles);
910 handle = map[target];
911 srp_leave(&sr);
912
913 return (handle);
914 }
915
916 int
917 mfii_dev_handles_update(struct mfii_softc *sc)
918 {
919 struct mfii_ld_map *lm;
920 uint16_t *dev_handles = NULL;
921 int i;
922 int rv = 0;
923
924 lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);
925
926 rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
927 MFII_DATA_IN, false);
928
929 if (rv != 0) {
930 rv = EIO;
931 goto free_lm;
932 }
933
934 dev_handles = mallocarray(MFI_MAX_PD, sizeof(*dev_handles),
935 M_DEVBUF, M_WAITOK);
936
937 for (i = 0; i < MFI_MAX_PD; i++)
938 dev_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;
939
940 /* commit the updated info */
941 sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
942 srp_update_locked(&mfii_dev_handles_gc,
943 &sc->sc_pd->pd_dev_handles, dev_handles);
944
945 free_lm:
946 free(lm, M_TEMP, sizeof(*lm));
947
948 return (rv);
949 }
950
951 void
952 mfii_dev_handles_dtor(void *null, void *v)
953 {
954 uint16_t *dev_handles = v;
955
956 free(dev_handles, M_DEVBUF, sizeof(*dev_handles) * MFI_MAX_PD);
957 }
958 #endif /* 0 */
959
/*
 * Detach the controller: tear everything down in roughly the reverse
 * order of mfii_attach().  Returns 0, or the error from detaching our
 * children.
 */
int
mfii_detach(device_t self, int flags)
{
	struct mfii_softc *sc = device_private(self);
	int error;

	/* if attach never got as far as the interrupt, nothing to undo */
	if (sc->sc_ih == NULL)
		return (0);

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

	mfii_destroy_sensors(sc);
#if NBIO > 0
	bio_unregister(sc->sc_dev);
#endif
	/* tell the firmware to shut down, then mask its interrupts */
	mfii_shutdown(sc->sc_dev, 0);
	mfii_write(sc, MFI_OMSK, 0xffffffff);

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
990
991 int
992 mfii_rescan(device_t self, const char *ifattr, const int *locators)
993 {
994 struct mfii_softc *sc = device_private(self);
995 if (sc->sc_child != NULL)
996 return 0;
997
998 sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
999 scsiprint, NULL);
1000 return 0;
1001 }
1002
/*
 * Called when our scsibus child detaches; drop the cached pointer so
 * a later mfii_rescan() can attach a new one.
 */
void
mfii_childdetached(device_t self, device_t child)
{
	struct mfii_softc *sc = device_private(self);

	KASSERT(self == sc->sc_dev);
	KASSERT(child == sc->sc_child);

	if (child == sc->sc_child)
		sc->sc_child = NULL;
}
1014
/*
 * pmf suspend hook: not implemented, so refuse the suspend rather
 * than losing controller state.
 */
static bool
mfii_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1021
/* pmf resume hook: not implemented, report failure */
static bool
mfii_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1028
1029 static bool
1030 mfii_shutdown(device_t dev, int how)
1031 {
1032 struct mfii_softc *sc = device_private(dev);
1033 struct mfii_ccb *ccb;
1034 union mfi_mbox mbox;
1035 bool rv = true;;
1036
1037 memset(&mbox, 0, sizeof(mbox));
1038
1039 mutex_enter(&sc->sc_lock);
1040 DNPRINTF(MFI_D_MISC, "%s: mfii_shutdown\n", DEVNAME(sc));
1041 ccb = mfii_get_ccb(sc);
1042 if (ccb == NULL)
1043 return false;
1044 mutex_enter(&sc->sc_ccb_mtx);
1045 if (sc->sc_running) {
1046 sc->sc_running = 0; /* prevent new commands */
1047 mutex_exit(&sc->sc_ccb_mtx);
1048 #if 0 /* XXX why does this hang ? */
1049 mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1050 mfii_scrub_ccb(ccb);
1051 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
1052 NULL, 0, MFII_DATA_NONE, true)) {
1053 aprint_error_dev(dev, "shutdown: cache flush failed\n");
1054 rv = false;
1055 goto fail;
1056 }
1057 printf("ok1\n");
1058 #endif
1059 mbox.b[0] = 0;
1060 mfii_scrub_ccb(ccb);
1061 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, &mbox,
1062 NULL, 0, MFII_DATA_NONE, true)) {
1063 aprint_error_dev(dev, "shutdown: "
1064 "firmware shutdown failed\n");
1065 rv = false;
1066 goto fail;
1067 }
1068 } else {
1069 mutex_exit(&sc->sc_ccb_mtx);
1070 }
1071 fail:
1072 mfii_put_ccb(sc, ccb);
1073 mutex_exit(&sc->sc_lock);
1074 return rv;
1075 }
1076
1077 static u_int32_t
1078 mfii_read(struct mfii_softc *sc, bus_size_t r)
1079 {
1080 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1081 BUS_SPACE_BARRIER_READ);
1082 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
1083 }
1084
1085 static void
1086 mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
1087 {
1088 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1089 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1090 BUS_SPACE_BARRIER_WRITE);
1091 }
1092
/*
 * Allocate "size" bytes of DMA-able memory in a single segment:
 * create the map, allocate and kernel-map the backing pages, then load
 * the map.  The memory is zeroed.  Returns NULL on failure, with any
 * partially-acquired resources released (unwound in reverse order via
 * the goto labels below).
 */
struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(m->mdm_kva, 0, size);
	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF);

	return (NULL);
}
1135
/*
 * Release memory obtained from mfii_dmamem_alloc(), tearing the
 * mapping down in the exact reverse order of its construction.
 */
void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF);
}
1145
/*
 * Post a pass-through request whose single chain SGE points at the
 * ccb's sense area, which appears to hold the MFI DCMD frame for
 * pass-through commands (see mfii_dcmd_frame()/mfii_dcmd_sync()
 * usage in mfii_aen_start()).
 */
void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 is in 32-bit words; chain_offset in 16-byte units */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	sge->sg_addr = htole64(ccb->ccb_sense_dva);
	sge->sg_len = htole32(sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	/*
	 * NOTE(review): le16toh() looks like it was meant to be htole16(),
	 * but this matches mfii_do_mgmt() and is a no-op on LE hosts.
	 */
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1166
/*
 * Register for asynchronous event notifications: fetch the current
 * event-log info, then arm the first MR_DCMD_CTRL_EVENT_WAIT starting
 * at the boot sequence number.  On success the ccb and the
 * event-detail DMA buffer are handed over to the AEN machinery for the
 * lifetime of the driver; they are deliberately not released here.
 */
int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), MFII_DATA_IN, true);
	if (rv != 0) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to get event info\n");
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to allocate event data\n");
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, le32toh(mel.mel_boot_seq_num));

	return (0);
}
1204
/*
 * Build and post an MR_DCMD_CTRL_EVENT_WAIT DCMD: the firmware
 * completes it when an event with sequence number >= "seq" occurs,
 * DMAing the event detail into "mdm".  Completion is routed to
 * mfii_aen_done() via ccb_done; the ccb is recorded in sc_aen_ccb.
 */
void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	/* reset the ccb, the DCMD frame and the event-detail buffer */
	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* event filter: debug class, all locales */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	hdr->mfh_data_len = htole32(MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mdf_mbox.w[0] = htole32(seq);
	dcmd->mdf_mbox.w[1] = htole32(mec.mec_word);
	sgl->sg64[0].addr = htole64(MFII_DMA_DVA(mdm));
	sgl->sg64[0].len = htole32(MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1242
/*
 * Interrupt-side completion of the AEN DCMD: defer the actual event
 * processing to the workqueue thread (mfii_aen()).  Skipped once the
 * controller is no longer running (e.g. during detach/shutdown).
 */
void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/*
	 * defer to a thread with KERNEL_LOCK so we can run autoconf
	 * We shouldn't have more than one AEN command pending at a time,
	 * so no need to lock
	 */
	if (sc->sc_running)
		workqueue_enqueue(sc->sc_aen_wq, &sc->sc_aen_work, NULL);
}
1256
/*
 * Workqueue handler for a completed AEN: sync the buffers, dispatch on
 * the event code (PD insert/remove/state change, LD create/delete),
 * then re-arm the event wait at the next sequence number.
 */
void
mfii_aen(struct work *wk, void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);

	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	DNPRINTF(MFII_D_MISC, "%s: %u %08x %02x %s\n", DEVNAME(sc),
	    le32toh(med->med_seq_num), le32toh(med->med_code),
	    med->med_arg_type, med->med_description);

	switch (le32toh(med->med_code)) {
	case MR_EVT_PD_INSERTED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
	case MR_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MR_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MR_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MR_EVT_LD_CREATED:
	case MR_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	/* wait for the next event */
	mfii_aen_start(sc, ccb, mdm, le32toh(med->med_seq_num) + 1);
}
1306
1307 void
1308 mfii_aen_pd_insert(struct mfii_softc *sc,
1309 const struct mfi_evtarg_pd_address *pd)
1310 {
1311 printf("%s: physical disk inserted id %d enclosure %d\n", DEVNAME(sc),
1312 le16toh(pd->device_id), le16toh(pd->encl_id));
1313 }
1314
1315 void
1316 mfii_aen_pd_remove(struct mfii_softc *sc,
1317 const struct mfi_evtarg_pd_address *pd)
1318 {
1319 printf("%s: physical disk removed id %d enclosure %d\n", DEVNAME(sc),
1320 le16toh(pd->device_id), le16toh(pd->encl_id));
1321 }
1322
/* Physical-disk state-change events are not acted upon yet. */
void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
}
1329
/*
 * Re-fetch the logical-drive list after an LD created/deleted event
 * and reconcile it against the previous sc_target_lds snapshot:
 * attach a sensor for each new drive, detach the scsipi target and
 * sensor for each vanished one.
 */
void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	int i, target, old, nld;
	int newlds[MFI_MAX_LD];

	mutex_enter(&sc->sc_lock);
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false) != 0) {
		mutex_exit(&sc->sc_lock);
		DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
		    DEVNAME(sc));
		return;
	}
	mutex_exit(&sc->sc_lock);

	/* -1 == no logical drive at this target */
	memset(newlds, -1, sizeof(newlds));

	/* build target -> list-index map from the fresh list */
	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, sc->sc_ld_list.mll_list[i].mll_state);
		newlds[target] = i;
	}

	/* diff old vs. new; i is the target id, old/nld are list indices */
	for (i = 0; i < MFI_MAX_LD; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			/*
			 * NOTE(review): message prints (i, nld) as
			 * (drive, target) although i is the target id and
			 * nld a list index -- confirm intended wording.
			 */
			printf("%s: logical drive %d added (target %d)\n",
			    DEVNAME(sc), i, nld);

			// XXX scsi_probe_target(sc->sc_scsibus, i);

			mfii_init_ld_sensor(sc, &sc->sc_sensors[i], i);
			mfii_attach_sensor(sc, &sc->sc_sensors[i]);
		} else if (nld == -1 && old != -1) {
			printf("%s: logical drive %d removed (target %d)\n",
			    DEVNAME(sc), i, old);

			scsipi_target_detach(&sc->sc_chan, i, 0, DETACH_FORCE);
			sysmon_envsys_sensor_detach(sc->sc_sme,
			    &sc->sc_sensors[i]);
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1379
/*
 * Tear down asynchronous event notification.  Not implemented: the
 * in-flight AEN ccb and its DMA buffer are currently never reclaimed.
 */
void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1385
/*
 * Drive the controller firmware to MFI_STATE_READY, nudging it out of
 * intermediate states via doorbell writes where required and polling
 * (100 ms steps) up to a per-state limit.  Returns 0 when READY, 1 on
 * firmware fault, unknown state, or timeout.
 */
int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t fw_state, cur_state;
	int max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll for up to max_wait seconds in 100 ms increments */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1439
1440 int
1441 mfii_get_info(struct mfii_softc *sc)
1442 {
1443 int i, rv;
1444
1445 rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
1446 sizeof(sc->sc_info), MFII_DATA_IN, true);
1447
1448 if (rv != 0)
1449 return (rv);
1450
1451 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
1452 DPRINTF("%s: active FW %s Version %s date %s time %s\n",
1453 DEVNAME(sc),
1454 sc->sc_info.mci_image_component[i].mic_name,
1455 sc->sc_info.mci_image_component[i].mic_version,
1456 sc->sc_info.mci_image_component[i].mic_build_date,
1457 sc->sc_info.mci_image_component[i].mic_build_time);
1458 }
1459
1460 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
1461 DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
1462 DEVNAME(sc),
1463 sc->sc_info.mci_pending_image_component[i].mic_name,
1464 sc->sc_info.mci_pending_image_component[i].mic_version,
1465 sc->sc_info.mci_pending_image_component[i].mic_build_date,
1466 sc->sc_info.mci_pending_image_component[i].mic_build_time);
1467 }
1468
1469 DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
1470 DEVNAME(sc),
1471 sc->sc_info.mci_max_arms,
1472 sc->sc_info.mci_max_spans,
1473 sc->sc_info.mci_max_arrays,
1474 sc->sc_info.mci_max_lds,
1475 sc->sc_info.mci_product_name);
1476
1477 DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
1478 DEVNAME(sc),
1479 sc->sc_info.mci_serial_number,
1480 sc->sc_info.mci_hw_present,
1481 sc->sc_info.mci_current_fw_time,
1482 sc->sc_info.mci_max_cmds,
1483 sc->sc_info.mci_max_sg_elements);
1484
1485 DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
1486 DEVNAME(sc),
1487 sc->sc_info.mci_max_request_size,
1488 sc->sc_info.mci_lds_present,
1489 sc->sc_info.mci_lds_degraded,
1490 sc->sc_info.mci_lds_offline,
1491 sc->sc_info.mci_pd_present);
1492
1493 DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
1494 DEVNAME(sc),
1495 sc->sc_info.mci_pd_disks_present,
1496 sc->sc_info.mci_pd_disks_pred_failure,
1497 sc->sc_info.mci_pd_disks_failed);
1498
1499 DPRINTF("%s: nvram %d mem %d flash %d\n",
1500 DEVNAME(sc),
1501 sc->sc_info.mci_nvram_size,
1502 sc->sc_info.mci_memory_size,
1503 sc->sc_info.mci_flash_size);
1504
1505 DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
1506 DEVNAME(sc),
1507 sc->sc_info.mci_ram_correctable_errors,
1508 sc->sc_info.mci_ram_uncorrectable_errors,
1509 sc->sc_info.mci_cluster_allowed,
1510 sc->sc_info.mci_cluster_active);
1511
1512 DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
1513 DEVNAME(sc),
1514 sc->sc_info.mci_max_strips_per_io,
1515 sc->sc_info.mci_raid_levels,
1516 sc->sc_info.mci_adapter_ops,
1517 sc->sc_info.mci_ld_ops);
1518
1519 DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
1520 DEVNAME(sc),
1521 sc->sc_info.mci_stripe_sz_ops.min,
1522 sc->sc_info.mci_stripe_sz_ops.max,
1523 sc->sc_info.mci_pd_ops,
1524 sc->sc_info.mci_pd_mix_support);
1525
1526 DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
1527 DEVNAME(sc),
1528 sc->sc_info.mci_ecc_bucket_count,
1529 sc->sc_info.mci_package_version);
1530
1531 DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
1532 DEVNAME(sc),
1533 sc->sc_info.mci_properties.mcp_seq_num,
1534 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
1535 sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
1536 sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
1537
1538 DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
1539 DEVNAME(sc),
1540 sc->sc_info.mci_properties.mcp_rebuild_rate,
1541 sc->sc_info.mci_properties.mcp_patrol_read_rate,
1542 sc->sc_info.mci_properties.mcp_bgi_rate,
1543 sc->sc_info.mci_properties.mcp_cc_rate);
1544
1545 DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
1546 DEVNAME(sc),
1547 sc->sc_info.mci_properties.mcp_recon_rate,
1548 sc->sc_info.mci_properties.mcp_cache_flush_interval,
1549 sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
1550 sc->sc_info.mci_properties.mcp_spinup_delay,
1551 sc->sc_info.mci_properties.mcp_cluster_enable);
1552
1553 DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
1554 DEVNAME(sc),
1555 sc->sc_info.mci_properties.mcp_coercion_mode,
1556 sc->sc_info.mci_properties.mcp_alarm_enable,
1557 sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
1558 sc->sc_info.mci_properties.mcp_disable_battery_warn,
1559 sc->sc_info.mci_properties.mcp_ecc_bucket_size);
1560
1561 DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
1562 DEVNAME(sc),
1563 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
1564 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
1565 sc->sc_info.mci_properties.mcp_expose_encl_devices);
1566
1567 DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
1568 DEVNAME(sc),
1569 sc->sc_info.mci_pci.mip_vendor,
1570 sc->sc_info.mci_pci.mip_device,
1571 sc->sc_info.mci_pci.mip_subvendor,
1572 sc->sc_info.mci_pci.mip_subdevice);
1573
1574 DPRINTF("%s: type %#x port_count %d port_addr ",
1575 DEVNAME(sc),
1576 sc->sc_info.mci_host.mih_type,
1577 sc->sc_info.mci_host.mih_port_count);
1578
1579 for (i = 0; i < 8; i++)
1580 DPRINTF("%.0" PRIx64 " ", sc->sc_info.mci_host.mih_port_addr[i]);
1581 DPRINTF("\n");
1582
1583 DPRINTF("%s: type %.x port_count %d port_addr ",
1584 DEVNAME(sc),
1585 sc->sc_info.mci_device.mid_type,
1586 sc->sc_info.mci_device.mid_port_count);
1587
1588 for (i = 0; i < 8; i++)
1589 DPRINTF("%.0" PRIx64 " ", sc->sc_info.mci_device.mid_port_addr[i]);
1590 DPRINTF("\n");
1591
1592 return (0);
1593 }
1594
/*
 * Issue a legacy MFI frame (used e.g. for IOC INIT during attach) and
 * busy-wait for its status byte to leave MFI_STAT_INVALID_STATUS.
 * Returns 0 on completion, 1 on timeout (ccb flagged MFI_CCB_F_ERR).
 * Any loaded 32-bit data map is synced and unloaded before returning.
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header *hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* post the request by its MFA descriptor */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	return (rv);
}
1650
/*
 * Run a request to completion without interrupts: temporarily hijack
 * ccb_done/ccb_cookie so mfii_poll_done() can clear the local flag,
 * spin draining the reply post queue, then restore the cookie and
 * invoke the original completion handler.  Always returns 0.
 */
int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	/* save the caller's completion state before hijacking it */
	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	/* rv is cleared by mfii_poll_done() via mfii_postq() */
	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
1676
1677 void
1678 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1679 {
1680 int *rv = ccb->ccb_cookie;
1681
1682 *rv = 0;
1683 }
1684
/*
 * Issue a request and sleep until it completes.  ccb_cookie doubles as
 * the wait state: mfii_exec_done() clears it and signals ccb_cv.
 * Always returns 0.
 */
int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = ccb;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	return (0);
}
1705
/*
 * Completion handler for mfii_exec(): clear the cookie under the ccb
 * mutex and wake the sleeping thread.
 */
void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&ccb->ccb_mtx);
	ccb->ccb_cookie = NULL;
	cv_signal(&ccb->ccb_cv);
	mutex_exit(&ccb->ccb_mtx);
}
1714
1715 int
1716 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1717 void *buf, size_t len, mfii_direction_t dir, bool poll)
1718 {
1719 struct mfii_ccb *ccb;
1720 int rv;
1721
1722 KASSERT(mutex_owned(&sc->sc_lock));
1723 if (!sc->sc_running)
1724 return EAGAIN;
1725
1726 ccb = mfii_get_ccb(sc);
1727 if (ccb == NULL)
1728 return (ENOMEM);
1729
1730 mfii_scrub_ccb(ccb);
1731 rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, dir, poll);
1732 mfii_put_ccb(sc, ccb);
1733
1734 return (rv);
1735 }
1736
/*
 * Build and issue a DCMD management command on the given ccb.  The MFI
 * DCMD frame lives in the ccb's MFI area and is referenced from a
 * pass-through MPII request through a single chain SGE; buf/len/dir
 * describe the optional data transfer (32-bit DMA only).  Polls when
 * "poll" is set (forced while cold).  Returns 0 iff the firmware
 * reports MFI_STAT_OK, otherwise ENOMEM or EIO.
 */
int
mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
    const union mfi_mbox *mbox, void *buf, size_t len, mfii_direction_t dir,
    bool poll)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	int rv = EIO;

	/* no interrupts before autoconf completes; force polling */
	if (cold)
		poll = true;

	ccb->ccb_data = buf;
	ccb->ccb_len = len;
	ccb->ccb_direction = dir;
	switch (dir) {
	case MFII_DATA_IN:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case MFII_DATA_OUT:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		break;
	case MFII_DATA_NONE:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
		break;
	}

	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl, poll) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap32->dm_nsegs;
	KASSERT(!ccb->ccb_dma64);

	/*
	 * NOTE(review): opc is not htole32()ed here, unlike in
	 * mfii_aen_start() -- a no-op on LE hosts, verify for BE.
	 */
	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	/* chain SGE points at the MFI frame (offsets: dwords / 16 bytes) */
	io->function = MFII_FUNCTION_PASSTHRU_IO;
	io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
	io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;

	sge->sg_addr = htole64(ccb->ccb_mfi_dva);
	sge->sg_len = htole32(MFI_FRAME_SIZE);
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	if (poll) {
		ccb->ccb_done = mfii_empty_done;
		mfii_poll(sc, ccb);
	} else
		mfii_exec(sc, ccb);

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;
	}

done:
	return (rv);
}
1807
/* No-op completion handler used by polled management commands. */
void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
}
1813
/*
 * Load the ccb's 32-bit DMA map for the data buffer (if any) and fill
 * in the legacy MFI scatter/gather list pointed to by "sglp".  The map
 * is synced for the transfer direction.  Returns 0 on success, 1 if
 * the map could not be loaded.
 */
int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap32;
	int error;
	int i;

	KASSERT(!ccb->ccb_dma64);
	/* nothing to map for zero-length (MFII_DATA_NONE) commands */
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* 32-bit SG entries, one per DMA segment */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1846
1847 void
1848 mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
1849 {
1850 u_long *r = (u_long *)&ccb->ccb_req;
1851
1852 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
1853 ccb->ccb_request_offset, MFII_REQUEST_SIZE,
1854 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1855
1856 #if defined(__LP64__) && 0
1857 bus_space_write_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
1858 #else
1859 mutex_enter(&sc->sc_post_mtx);
1860 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
1861 bus_space_barrier(sc->sc_iot, sc->sc_ioh,
1862 MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);
1863
1864 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
1865 bus_space_barrier(sc->sc_iot, sc->sc_ioh,
1866 MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
1867 mutex_exit(&sc->sc_post_mtx);
1868 #endif
1869 }
1870
/*
 * Common completion path for a finished ccb: sync the request frame,
 * the SGL area if one was used, and the data map (64- or 32-bit,
 * whichever was loaded), unload the data map, then call the ccb's
 * completion handler.
 */
void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_dma64) {
		KASSERT(ccb->ccb_len > 0);
		bus_dmamap_sync(sc->sc_dmat64, ccb->ccb_dmamap64,
		    0, ccb->ccb_dmamap64->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat64, ccb->ccb_dmamap64);
	} else if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	ccb->ccb_done(sc, ccb);
}
1903
1904 int
1905 mfii_initialise_firmware(struct mfii_softc *sc)
1906 {
1907 struct mpii_msg_iocinit_request *iiq;
1908 struct mfii_dmamem *m;
1909 struct mfii_ccb *ccb;
1910 struct mfi_init_frame *init;
1911 int rv;
1912
1913 m = mfii_dmamem_alloc(sc, sizeof(*iiq));
1914 if (m == NULL)
1915 return (1);
1916
1917 iiq = MFII_DMA_KVA(m);
1918 memset(iiq, 0, sizeof(*iiq));
1919
1920 iiq->function = MPII_FUNCTION_IOC_INIT;
1921 iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
1922
1923 iiq->msg_version_maj = 0x02;
1924 iiq->msg_version_min = 0x00;
1925 iiq->hdr_version_unit = 0x10;
1926 iiq->hdr_version_dev = 0x0;
1927
1928 iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
1929
1930 iiq->reply_descriptor_post_queue_depth =
1931 htole16(sc->sc_reply_postq_depth);
1932 iiq->reply_free_queue_depth = htole16(0);
1933
1934 iiq->sense_buffer_address_high = htole32(
1935 MFII_DMA_DVA(sc->sc_sense) >> 32);
1936
1937 iiq->reply_descriptor_post_queue_address = htole64(
1938 MFII_DMA_DVA(sc->sc_reply_postq));
1939
1940 iiq->system_request_frame_base_address =
1941 htole64(MFII_DMA_DVA(sc->sc_requests));
1942
1943 iiq->timestamp = htole64(time_uptime);
1944
1945 ccb = mfii_get_ccb(sc);
1946 if (ccb == NULL) {
1947 /* shouldn't ever run out of ccbs during attach */
1948 return (1);
1949 }
1950 mfii_scrub_ccb(ccb);
1951 init = ccb->ccb_request;
1952
1953 init->mif_header.mfh_cmd = MFI_CMD_INIT;
1954 init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
1955 init->mif_qinfo_new_addr_lo = htole32(MFII_DMA_DVA(m));
1956 init->mif_qinfo_new_addr_hi = htole32((uint64_t)MFII_DMA_DVA(m) >> 32);
1957
1958 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
1959 0, MFII_DMA_LEN(sc->sc_reply_postq),
1960 BUS_DMASYNC_PREREAD);
1961
1962 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1963 0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
1964
1965 rv = mfii_mfa_poll(sc, ccb);
1966
1967 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1968 0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
1969
1970 mfii_put_ccb(sc, ccb);
1971 mfii_dmamem_free(sc, m);
1972
1973 return (rv);
1974 }
1975
1976 int
1977 mfii_my_intr(struct mfii_softc *sc)
1978 {
1979 u_int32_t status;
1980
1981 status = mfii_read(sc, MFI_OSTS);
1982
1983 DNPRINTF(MFII_D_INTR, "%s: intr status 0x%x\n", DEVNAME(sc), status);
1984 if (ISSET(status, 0x1)) {
1985 mfii_write(sc, MFI_OSTS, status);
1986 return (1);
1987 }
1988
1989 return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
1990 }
1991
/*
 * Interrupt handler: ignore interrupts that are not ours (shared
 * line), otherwise drain the reply post queue.
 */
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc) == 0)
		return (0);

	mfii_postq(sc);
	return (1);
}
2004
/*
 * Drain the reply post queue: under the postq mutex, collect the ccbs
 * named by each valid reply descriptor onto a local list and reset the
 * consumed descriptors to 0xff (the "unused" pattern), then update the
 * reply post index register and run the completions with the mutex
 * released.
 */
void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mutex_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		DNPRINTF(MFII_D_INTR, "%s: mfii_postq index %d flags 0x%x data 0x%x\n",
		    DEVNAME(sc), sc->sc_reply_postq_index, rdp->reply_flags,
		    rdp->data == 0xffffffff);
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1-based; map back to the ccb array */
		ccb = &sc->sc_ccb[le16toh(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* tell the controller how far we have consumed */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mutex_exit(&sc->sc_reply_postq_mtx);

	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2059
/*
 * scsipi adapter entry point.  For ADAPTER_REQ_RUN_XFER: validate the
 * target, short-circuit SYNCHRONIZE CACHE when the battery-backed
 * cache is healthy, then build either an LD read/write request or a
 * pass-through CDB request and start it (polled if XS_CTL_POLL).
 */
void
mfii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct mfii_softc *sc = device_private(adapt->adapt_dev);
	struct mfii_ccb *ccb;
	int timeout;
	int target;

	switch(req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		/* report tagged queueing; no sync/wide parameters */
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	/* only LUN 0 of present logical drives is addressable */
	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
	    periph->periph_lun != 0) {
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		return;
	}

	if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
	    xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
		/* the cache is stable storage, don't flush */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		scsipi_done(xs);
		return;
	}

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}
	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	/* arm the per-command timeout (minimum one tick) */
	timeout = mstohz(xs->timeout);
	if (timeout == 0)
		timeout = 1;
	callout_reset(&xs->xs_callout, timeout, mfii_scsi_cmd_tmo, ccb);

	switch (xs->cmd->opcode) {
	case SCSI_READ_6_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
	case SCSI_WRITE_6_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		/* fast path: native LD I/O request */
		if (mfii_scsi_cmd_io(sc, ccb, xs) != 0)
			goto stuffup;
		break;

	default:
		/* everything else goes out as a pass-through CDB */
		if (mfii_scsi_cmd_cdb(sc, ccb, xs) != 0)
			goto stuffup;
		break;
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	DNPRINTF(MFII_D_CMD, "%s: start io %d cmd %d\n", DEVNAME(sc), target,
	    xs->cmd->opcode);

	if (xs->xs_control & XS_CTL_POLL) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsipi_done(xs);
	mfii_put_ccb(sc, ccb);
}
2168
/*
 * Completion of a scsipi command: translate the RAID context status to
 * scsipi error codes.  The ccb refcount (one reference for the chip,
 * one for the timeout callout) decides who finally calls
 * scsipi_done() and releases the ccb.
 */
void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	u_int refs = 2;

	/*
	 * If callout_stop() reports the callout already fired, the
	 * timeout path holds the other reference; only drop our own.
	 */
	if (callout_stop(&xs->xs_callout) != 0)
		refs = 1;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* command completed at the device with sense data */
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* last reference finishes the xfer */
	if (atomic_add_int_nv(&ccb->ccb_refcnt, -refs) == 0) {
		scsipi_done(xs);
		mfii_put_ccb(sc, ccb);
	}
}
2205
/*
 * Build a fast-path LDIO (logical disk read/write) request in the
 * ccb's request frame.  Returns 0 on success, 1 if the data buffer
 * could not be mapped for DMA.
 */
int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context follows the IO request in the frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* offset of the SGL, counted in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);	/* CDB length */
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	/* LDIO context settings vary by IOP generation */
	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	/* newer IOPs moved the SGE count to a different context field */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2265
/*
 * Build a generic (non-LDIO) SCSI passthrough request for a logical
 * disk, used for everything that is not a plain read/write.  Returns
 * 0 on success, 1 if the data buffer could not be mapped for DMA.
 */
int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context follows the IO request in the frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* offset of the SGL, counted in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);	/* CDB length */
	io->lun[0] = htobe16(periph->periph_lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2313
#if 0
/*
 * Physical-disk (system PD / JBOD) passthrough support, currently
 * disabled in this port.  This code still uses OpenBSD's scsi_link /
 * scsi_done interfaces and would need conversion to scsipi before it
 * could be enabled.  NOTE(review): the call below to
 * mfii_pd_scsi_cmd_cdb(sc, xs) also no longer matches that function's
 * (sc, ccb, xs) prototype — fix when re-enabling.
 */
void
mfii_pd_scsi_cmd(struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	// XXX timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->xs_control, XS_CTL_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	// XXX timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}

/*
 * Only attach physical disks that the firmware exposes in the
 * MFI_PD_SYSTEM (JBOD) state.
 */
int
mfii_pd_scsi_probe(struct scsi_link *link)
{
	struct mfii_softc *sc = link->adapter_softc;
	struct mfi_pd_details mpd;
	union mfi_mbox mbox;
	int rv;

	if (link->lun > 0)
		return (0);

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = htole16(link->target);

	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
	    MFII_DATA_IN, true);
	if (rv != 0)
		return (EIO);

	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
		return (ENXIO);

	return (0);
}

/*
 * Build a SCSI passthrough request addressed at a physical disk's
 * device handle rather than a logical disk target.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
#endif
2435
/*
 * Map ccb_data for DMA and build the hardware scatter/gather list
 * starting at sglp.  As many SGEs as fit are placed directly in the
 * request frame; if the transfer needs more, the last in-frame slot
 * becomes a chain element pointing at the ccb's external SGL area.
 * Returns 0 on success, 1 if the dmamap load fails.
 */
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;	/* chain element, if one is needed */
	bus_dmamap_t dmap = ccb->ccb_dmamap64;
	u_int space;
	int i;

	int error;

	/* commands without data need no SGL */
	if (ccb->ccb_len == 0)
		return (0);

	ccb->ccb_dma64 = true;
	error = bus_dmamap_load(sc->sc_dmat64, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* SGE slots remaining in the request frame after sglp */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain_offset is expressed in 16-byte units */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* on reaching the chain element, continue in the
		 * external SGL */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the final SGE as end-of-list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat64, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	if (ccb->ccb_sgl_len > 0) {
		/* flush the external SGL so the chip sees it */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2503
2504 void
2505 mfii_scsi_cmd_tmo(void *p)
2506 {
2507 struct mfii_ccb *ccb = p;
2508 struct mfii_softc *sc = ccb->ccb_sc;
2509 bool start_abort;
2510
2511 printf("%s: cmd timeout ccb %p\n", DEVNAME(sc), p);
2512
2513 mutex_enter(&sc->sc_abort_mtx);
2514 start_abort = (SIMPLEQ_FIRST(&sc->sc_abort_list) == 0);
2515 SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2516 if (start_abort)
2517 workqueue_enqueue(sc->sc_abort_wq, &sc->sc_abort_work, NULL);
2518 mutex_exit(&sc->sc_abort_mtx);
2519 }
2520
/*
 * Workqueue task that handles command timeouts: drains the abort list
 * filled by mfii_scsi_cmd_tmo() and issues a task-management abort
 * for each timed-out ccb whose target is still present.
 */
void
mfii_abort_task(struct work *wk, void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* atomically take over the entire abort list */
	mutex_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mutex_exit(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsipi_xfer *xs = ccb->ccb_cookie;
		struct scsipi_periph *periph = xs->xs_periph;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		if (!sc->sc_ld[periph->periph_target].ld_present) {
			/* device is gone */
			if (atomic_dec_uint_nv(&ccb->ccb_refcnt) == 0) {
				scsipi_done(xs);
				mfii_put_ccb(sc, ccb);
			}
			continue;
		}

		/*
		 * NOTE(review): mfii_get_ccb() can return NULL (free
		 * queue empty, or sc_running cleared); that would make
		 * mfii_scrub_ccb() dereference NULL below — confirm
		 * and add a check.
		 */
		accb = mfii_get_ccb(sc);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, periph->periph_target, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2561
2562 void
2563 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2564 uint16_t smid, uint8_t type, uint32_t flags)
2565 {
2566 struct mfii_task_mgmt *msg;
2567 struct mpii_msg_scsi_task_request *req;
2568
2569 msg = accb->ccb_request;
2570 req = &msg->mpii_request;
2571 req->dev_handle = dev_handle;
2572 req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2573 req->task_type = type;
2574 req->task_mid = htole16( smid);
2575 msg->flags = flags;
2576
2577 accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2578 accb->ccb_req.smid = le16toh(accb->ccb_smid);
2579 }
2580
2581 void
2582 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2583 {
2584 struct mfii_ccb *ccb = accb->ccb_cookie;
2585 struct scsipi_xfer *xs = ccb->ccb_cookie;
2586
2587 /* XXX check accb completion? */
2588
2589 mfii_put_ccb(sc, accb);
2590
2591 if (atomic_dec_uint_nv(&ccb->ccb_refcnt) == 0) {
2592 xs->error = XS_TIMEOUT;
2593 scsipi_done(xs);
2594 mfii_put_ccb(sc, ccb);
2595 }
2596 }
2597
2598 struct mfii_ccb *
2599 mfii_get_ccb(struct mfii_softc *sc)
2600 {
2601 struct mfii_ccb *ccb;
2602
2603 mutex_enter(&sc->sc_ccb_mtx);
2604 if (!sc->sc_running) {
2605 ccb = NULL;
2606 } else {
2607 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2608 if (ccb != NULL)
2609 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2610 }
2611 mutex_exit(&sc->sc_ccb_mtx);
2612 return (ccb);
2613 }
2614
2615 void
2616 mfii_scrub_ccb(struct mfii_ccb *ccb)
2617 {
2618 ccb->ccb_cookie = NULL;
2619 ccb->ccb_done = NULL;
2620 ccb->ccb_flags = 0;
2621 ccb->ccb_data = NULL;
2622 ccb->ccb_direction = MFII_DATA_NONE;
2623 ccb->ccb_dma64 = false;
2624 ccb->ccb_len = 0;
2625 ccb->ccb_sgl_len = 0;
2626 ccb->ccb_refcnt = 1;
2627
2628 memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2629 memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2630 memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2631 }
2632
/*
 * Return a ccb to the head of the free queue.
 */
void
mfii_put_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
	mutex_exit(&sc->sc_ccb_mtx);
}
2640
2641 int
2642 mfii_init_ccb(struct mfii_softc *sc)
2643 {
2644 struct mfii_ccb *ccb;
2645 u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2646 u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2647 u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2648 u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2649 u_int i;
2650 int error;
2651
2652 sc->sc_ccb = malloc(sc->sc_max_cmds * sizeof(struct mfii_ccb),
2653 M_DEVBUF, M_WAITOK|M_ZERO);
2654
2655 for (i = 0; i < sc->sc_max_cmds; i++) {
2656 ccb = &sc->sc_ccb[i];
2657 ccb->ccb_sc = sc;
2658
2659 /* create a dma map for transfer */
2660 error = bus_dmamap_create(sc->sc_dmat,
2661 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2662 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap32);
2663 if (error) {
2664 printf("%s: cannot create ccb dmamap32 (%d)\n",
2665 DEVNAME(sc), error);
2666 goto destroy;
2667 }
2668 error = bus_dmamap_create(sc->sc_dmat64,
2669 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2670 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap64);
2671 if (error) {
2672 printf("%s: cannot create ccb dmamap64 (%d)\n",
2673 DEVNAME(sc), error);
2674 goto destroy32;
2675 }
2676
2677 /* select i + 1'th request. 0 is reserved for events */
2678 ccb->ccb_smid = i + 1;
2679 ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2680 ccb->ccb_request = request + ccb->ccb_request_offset;
2681 ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2682 ccb->ccb_request_offset;
2683
2684 /* select i'th MFI command frame */
2685 ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2686 ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2687 ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2688 ccb->ccb_mfi_offset;
2689
2690 /* select i'th sense */
2691 ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2692 ccb->ccb_sense = (struct mfi_sense *)(sense +
2693 ccb->ccb_sense_offset);
2694 ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2695 ccb->ccb_sense_offset;
2696
2697 /* select i'th sgl */
2698 ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2699 sc->sc_max_sgl * i;
2700 ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2701 ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2702 ccb->ccb_sgl_offset;
2703
2704 mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2705 cv_init(&ccb->ccb_cv, "mfiiexec");
2706
2707 /* add ccb to queue */
2708 mfii_put_ccb(sc, ccb);
2709 }
2710
2711 return (0);
2712
2713 destroy32:
2714 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2715 destroy:
2716 /* free dma maps and ccb memory */
2717 while ((ccb = mfii_get_ccb(sc)) != NULL) {
2718 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2719 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap64);
2720 }
2721
2722 free(sc->sc_ccb, M_DEVBUF);
2723
2724 return (1);
2725 }
2726
2727 #if NBIO > 0
2728 int
2729 mfii_ioctl(device_t dev, u_long cmd, void *addr)
2730 {
2731 struct mfii_softc *sc = device_private(dev);
2732 int error = 0;
2733
2734 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));
2735
2736 mutex_enter(&sc->sc_lock);
2737
2738 switch (cmd) {
2739 case BIOCINQ:
2740 DNPRINTF(MFII_D_IOCTL, "inq\n");
2741 error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
2742 break;
2743
2744 case BIOCVOL:
2745 DNPRINTF(MFII_D_IOCTL, "vol\n");
2746 error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
2747 break;
2748
2749 case BIOCDISK:
2750 DNPRINTF(MFII_D_IOCTL, "disk\n");
2751 error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
2752 break;
2753
2754 case BIOCALARM:
2755 DNPRINTF(MFII_D_IOCTL, "alarm\n");
2756 error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
2757 break;
2758
2759 case BIOCBLINK:
2760 DNPRINTF(MFII_D_IOCTL, "blink\n");
2761 error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
2762 break;
2763
2764 case BIOCSETSTATE:
2765 DNPRINTF(MFII_D_IOCTL, "setstate\n");
2766 error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
2767 break;
2768
2769 #if 0
2770 case BIOCPATROL:
2771 DNPRINTF(MFII_D_IOCTL, "patrol\n");
2772 error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
2773 break;
2774 #endif
2775
2776 default:
2777 DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
2778 error = ENOTTY;
2779 }
2780
2781 mutex_exit(&sc->sc_lock);
2782
2783 return (error);
2784 }
2785
/*
 * Refresh the cached controller view used by the bio(4) ioctls:
 * controller info, the full firmware configuration (sized by a probe
 * read first), the logical disk list and per-LD details, and a count
 * of physical disks in use.  Returns 0 on success, EINVAL otherwise.
 */
int
mfii_bio_getitall(struct mfii_softc *sc)
{
	int i, d, rv = EINVAL;
	size_t size;
	union mfi_mbox mbox;
	struct mfi_conf *cfg = NULL;
	struct mfi_ld_details *ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* the probe read reports the real config size */
	size = cfg->mfc_size;
	free(cfg, M_DEVBUF);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF);
	sc->sc_cfg = cfg;

	/* get all ld info */
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		/* LD count changed; reallocate the details array */
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		/* mbox byte 0 selects the LD to query */
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox,
		    &sc->sc_ld_details[i], size, MFII_DATA_IN, false))
			goto done;

		/* drives per LD = drives per span * number of spans */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
2866
2867 int
2868 mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
2869 {
2870 int rv = EINVAL;
2871 struct mfi_conf *cfg = NULL;
2872
2873 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));
2874
2875 if (mfii_bio_getitall(sc)) {
2876 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
2877 DEVNAME(sc));
2878 goto done;
2879 }
2880
2881 /* count unused disks as volumes */
2882 if (sc->sc_cfg == NULL)
2883 goto done;
2884 cfg = sc->sc_cfg;
2885
2886 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
2887 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
2888 #if notyet
2889 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
2890 (bi->bi_nodisk - sc->sc_no_pd);
2891 #endif
2892 /* tell bio who we are */
2893 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
2894
2895 rv = 0;
2896 done:
2897 return (rv);
2898 }
2899
/*
 * BIOCVOL handler: report name, state, progress, RAID level, disk
 * count and size for one volume.  Volume ids beyond the LD count are
 * handed to mfii_bio_hs() for hotspare/unused-disk reporting.
 */
int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int i, per, rv = EINVAL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	strlcpy(bv->bv_dev, sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_name,
	    sizeof(bv->bv_dev));

	/* map firmware LD state to bio volume status */
	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
	case MFI_LD_PROG_BGI:
		bv->bv_status = BIOC_SVSCRUB;
		/* firmware progress is a fraction of 0xffff */
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

#if 0
	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;
#endif

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	/* spanned sets are reported as RAID x0 (10, 50, 60) */
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
2987
/*
 * BIOCDISK handler: locate the physical disk backing a given
 * (volume, disk index) pair in the firmware configuration and report
 * its location, state, size and vendor string.  Missing disks are
 * reported as failed, with a best-effort search for an unconfigured
 * disk that could serve as a rebuild target.
 */
int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf *cfg;
	struct mfi_array *ar;
	struct mfi_ld_cfg *ld;
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	struct scsipi_inquiry_data *inqbuf;
	/* room for INQUIRY vendor+product+revision plus NUL */
	char vend[8+16+4+1], *vendp;
	int i, rv = EINVAL;
	int arr, vol, disk, span;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	/* 0xffff is the firmware's "no disk here" id */
	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    MFII_DATA_IN, false))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false))
				continue;

			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no candidate found; report the missing disk as-is */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    MFII_DATA_IN, false)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* copy the INQUIRY vendor/product/revision into a NUL-terminated
	 * buffer */
	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

#if 0
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}
#endif

	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);

	return (rv);
}
3141
3142 int
3143 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3144 {
3145 uint32_t opc;
3146 int rv = 0;
3147 int8_t ret;
3148 mfii_direction_t dir = MFII_DATA_NONE;
3149
3150 switch(ba->ba_opcode) {
3151 case BIOC_SADISABLE:
3152 opc = MR_DCMD_SPEAKER_DISABLE;
3153 break;
3154
3155 case BIOC_SAENABLE:
3156 opc = MR_DCMD_SPEAKER_ENABLE;
3157 break;
3158
3159 case BIOC_SASILENCE:
3160 opc = MR_DCMD_SPEAKER_SILENCE;
3161 break;
3162
3163 case BIOC_GASTATUS:
3164 opc = MR_DCMD_SPEAKER_GET;
3165 dir = MFII_DATA_IN;
3166 break;
3167
3168 case BIOC_SATEST:
3169 opc = MR_DCMD_SPEAKER_TEST;
3170 break;
3171
3172 default:
3173 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3174 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3175 return (EINVAL);
3176 }
3177
3178 if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), dir, false))
3179 rv = EINVAL;
3180 else
3181 if (ba->ba_opcode == BIOC_GASTATUS)
3182 ba->ba_status = ret;
3183 else
3184 ba->ba_status = 0;
3185
3186 return (rv);
3187 }
3188
/*
 * BIOCBLINK handler: look up the physical disk addressed by
 * enclosure/slot and issue a locate-LED blink or unblink DCMD for it.
 */
int
mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
{
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;
	uint32_t cmd;
	struct mfi_pd_list *pd;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
	    bb->bb_status);

	/* channel 0 means not in an enclosure so can't be blinked */
	if (bb->bb_channel == 0)
		return (EINVAL);

	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	/* match the requested enclosure/slot against the PD list */
	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;

	switch (bb->bb_status) {
	case BIOC_SBUNBLINK:
		cmd = MR_DCMD_PD_UNBLINK;
		break;

	case BIOC_SBBLINK:
		cmd = MR_DCMD_PD_BLINK;
		break;

	case BIOC_SBALARM:
	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
		goto done;
	}


	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, MFII_DATA_NONE, false))
		goto done;

	rv = 0;
done:
	free(pd, M_DEVBUF);
	return (rv);
}
3248
/*
 * Bring a physical disk to the UNCONFIG_GOOD state so it can be used:
 * clear UNCONFIG_BAD if set, clear any foreign (previously used)
 * configuration, then re-read the state to verify.  Returns 0 on
 * success, an errno otherwise.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details *pd;
	union mfi_mbox mbox;
	int rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		/* mbox: s[0] = pd id, s[1] = sequence, b[4] = new state */
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
		    MFII_DATA_NONE, false);
		if (rv != 0)
			goto done;
	}

	/* re-read the state after the (possible) state change */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		/* disk carries another controller's config; scan and clear */
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL,
		    fsi, sizeof(*fsi), MFII_DATA_IN, false);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL,
			    NULL, 0, MFII_DATA_NONE, false);
			if (rv != 0)
				goto done;
		}
	}

	/* verify the disk ended up unconfigured-good and non-foreign */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3315
/*
 * Turn a physical disk into a global hotspare.  The MAKE_SPARE DCMD
 * takes a variable-length structure with one uint16_t per array, so
 * the buffer is sized from the cached config's array count.  Returns
 * 0 on success, an errno otherwise.
 */
static int
mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfi_hotspare *hs;
	struct mfi_pd_details *pd;
	union mfi_mbox mbox;
	size_t size;
	int rv = EINVAL;

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;

	hs = malloc(size, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* fetch the disk's current id/sequence for the spare request */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	memset(hs, 0, size);
	hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
	hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
	rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size,
	    MFII_DATA_OUT, false);

done:
	free(hs, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3355
/*
 * BIOCSETSTATE handler: change the state of the physical drive addressed
 * by (bs_channel = enclosure index, bs_target = enclosure slot) to
 * online, offline, hotspare or rebuild.  Returns 0 on success, EINVAL
 * on lookup/command failure or an errno from the make-good/make-spare
 * steps of the rebuild path.
 */
int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	/* fetch the list of all physical drives on the controller */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
	    MFII_DATA_IN, false))
		goto done;

	/* find the drive matching the requested enclosure/slot address */
	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	/* need the drive details for its current sequence number */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	/* SET_STATE mailbox: s[0] = pd id, s[1] = seq, b[4] = new state */
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			/*
			 * A drive that is not offline must first be made
			 * unconfigured-good and then turned into a spare
			 * before it can become a rebuild target.
			 */
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* refresh the drive state after the transitions */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			/* rebuild the mailbox with the fresh seq number */
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
	    MFII_DATA_NONE, false);
done:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);
	return (rv);
}
3450
3451 #if 0
/*
 * BIOCPATROL handler (currently compiled out): start/stop a patrol read,
 * switch the patrol mode between manual/disabled/auto (with interval and
 * next-execution time for auto), or report the current patrol properties
 * and status.  Returns 0 on success, EINVAL on any firmware command
 * failure or bad argument.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t opc;
	int rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status status;
	uint32_t time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		/*
		 * NOTE(review): no payload is passed here, yet the
		 * direction is MFII_DATA_IN; MFII_DATA_NONE looks
		 * intended — confirm before enabling this code.
		 */
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, MFII_DATA_IN, false))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					/* schedule relative to device time */
					prop.next_exec = time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		/* write the modified properties back to the firmware */
		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_OUT, false))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* translate firmware patrol mode to bio(4) terms */
		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		/* translate firmware patrol state to bio(4) terms */
		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
3586 #endif
3587
3588 int
3589 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3590 {
3591 struct mfi_conf *cfg;
3592 struct mfi_hotspare *hs;
3593 struct mfi_pd_details *pd;
3594 struct bioc_disk *sdhs;
3595 struct bioc_vol *vdhs;
3596 struct scsipi_inquiry_data *inqbuf;
3597 char vend[8+16+4+1], *vendp;
3598 int i, rv = EINVAL;
3599 uint32_t size;
3600 union mfi_mbox mbox;
3601
3602 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3603
3604 if (!bio_hs)
3605 return (EINVAL);
3606
3607 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3608
3609 /* send single element command to retrieve size for full structure */
3610 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3611 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
3612 MFII_DATA_IN, false))
3613 goto freeme;
3614
3615 size = cfg->mfc_size;
3616 free(cfg, M_DEVBUF);
3617
3618 /* memory for read config */
3619 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3620 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
3621 MFII_DATA_IN, false))
3622 goto freeme;
3623
3624 /* calculate offset to hs structure */
3625 hs = (struct mfi_hotspare *)(
3626 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3627 cfg->mfc_array_size * cfg->mfc_no_array +
3628 cfg->mfc_ld_size * cfg->mfc_no_ld);
3629
3630 if (volid < cfg->mfc_no_ld)
3631 goto freeme; /* not a hotspare */
3632
3633 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3634 goto freeme; /* not a hotspare */
3635
3636 /* offset into hotspare structure */
3637 i = volid - cfg->mfc_no_ld;
3638
3639 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3640 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3641 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3642
3643 /* get pd fields */
3644 memset(&mbox, 0, sizeof(mbox));
3645 mbox.s[0] = hs[i].mhs_pd.mfp_id;
3646 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3647 MFII_DATA_IN, false)) {
3648 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3649 DEVNAME(sc));
3650 goto freeme;
3651 }
3652
3653 switch (type) {
3654 case MFI_MGMT_VD:
3655 vdhs = bio_hs;
3656 vdhs->bv_status = BIOC_SVONLINE;
3657 vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3658 vdhs->bv_level = -1; /* hotspare */
3659 vdhs->bv_nodisk = 1;
3660 break;
3661
3662 case MFI_MGMT_SD:
3663 sdhs = bio_hs;
3664 sdhs->bd_status = BIOC_SDHOTSPARE;
3665 sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3666 sdhs->bd_channel = pd->mpd_enc_idx;
3667 sdhs->bd_target = pd->mpd_enc_slot;
3668 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
3669 vendp = inqbuf->vendor;
3670 memcpy(vend, vendp, sizeof vend - 1);
3671 vend[sizeof vend - 1] = '\0';
3672 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3673 break;
3674
3675 default:
3676 goto freeme;
3677 }
3678
3679 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3680 rv = 0;
3681 freeme:
3682 free(pd, M_DEVBUF);
3683 free(cfg, M_DEVBUF);
3684
3685 return (rv);
3686 }
3687
3688 #endif /* NBIO > 0 */
3689
3690 #define MFI_BBU_SENSORS 4
3691
3692 void
3693 mfii_bbu(struct mfii_softc *sc, envsys_data_t *edata)
3694 {
3695 struct mfi_bbu_status bbu;
3696 u_int32_t status;
3697 u_int32_t mask;
3698 u_int32_t soh_bad;
3699 int rv;
3700
3701 mutex_enter(&sc->sc_lock);
3702 rv = mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
3703 sizeof(bbu), MFII_DATA_IN, false);
3704 mutex_exit(&sc->sc_lock);
3705 if (rv != 0) {
3706 edata->state = ENVSYS_SINVALID;
3707 edata->value_cur = 0;
3708 return;
3709 }
3710
3711 switch (bbu.battery_type) {
3712 case MFI_BBU_TYPE_IBBU:
3713 mask = MFI_BBU_STATE_BAD_IBBU;
3714 soh_bad = 0;
3715 break;
3716 case MFI_BBU_TYPE_BBU:
3717 mask = MFI_BBU_STATE_BAD_BBU;
3718 soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3719 break;
3720
3721 case MFI_BBU_TYPE_NONE:
3722 default:
3723 edata->state = ENVSYS_SCRITICAL;
3724 edata->value_cur = 0;
3725 return;
3726 }
3727
3728 status = le32toh(bbu.fw_status) & mask;
3729 switch(edata->sensor) {
3730 case 0:
3731 edata->value_cur = (status || soh_bad) ? 0 : 1;
3732 edata->state =
3733 edata->value_cur ? ENVSYS_SVALID : ENVSYS_SCRITICAL;
3734 return;
3735 case 1:
3736 edata->value_cur = le16toh(bbu.voltage) * 1000;
3737 edata->state = ENVSYS_SVALID;
3738 return;
3739 case 2:
3740 edata->value_cur = (int16_t)le16toh(bbu.current) * 1000;
3741 edata->state = ENVSYS_SVALID;
3742 return;
3743 case 3:
3744 edata->value_cur = le16toh(bbu.temperature) * 1000000 + 273150000;
3745 edata->state = ENVSYS_SVALID;
3746 return;
3747 }
3748 }
3749
3750 void
3751 mfii_refresh_ld_sensor(struct mfii_softc *sc, envsys_data_t *edata)
3752 {
3753 struct bioc_vol bv;
3754 int error;
3755
3756 memset(&bv, 0, sizeof(bv));
3757 bv.bv_volid = edata->sensor - MFI_BBU_SENSORS;
3758 mutex_enter(&sc->sc_lock);
3759 error = mfii_ioctl_vol(sc, &bv);
3760 mutex_exit(&sc->sc_lock);
3761 if (error)
3762 bv.bv_status = BIOC_SVINVALID;
3763 bio_vol_to_envsys(edata, &bv);
3764 }
3765
3766 void
3767 mfii_init_ld_sensor(struct mfii_softc *sc, envsys_data_t *sensor, int i)
3768 {
3769 sensor->units = ENVSYS_DRIVE;
3770 sensor->state = ENVSYS_SINVALID;
3771 sensor->value_cur = ENVSYS_DRIVE_EMPTY;
3772 /* Enable monitoring for drive state changes */
3773 sensor->flags |= ENVSYS_FMONSTCHANGED;
3774 snprintf(sensor->desc, sizeof(sensor->desc), "%s:%d", DEVNAME(sc), i);
3775 }
3776
3777 static void
3778 mfii_attach_sensor(struct mfii_softc *sc, envsys_data_t *s)
3779 {
3780 if (sysmon_envsys_sensor_attach(sc->sc_sme, s))
3781 aprint_error_dev(sc->sc_dev,
3782 "failed to attach sensor %s\n", s->desc);
3783 }
3784
3785 int
3786 mfii_create_sensors(struct mfii_softc *sc)
3787 {
3788 int i, rv;
3789 const int nsensors = MFI_BBU_SENSORS + MFI_MAX_LD;
3790
3791 sc->sc_sme = sysmon_envsys_create();
3792 sc->sc_sensors = malloc(sizeof(envsys_data_t) * nsensors,
3793 M_DEVBUF, M_NOWAIT | M_ZERO);
3794
3795 if (sc->sc_sensors == NULL) {
3796 aprint_error_dev(sc->sc_dev, "can't allocate envsys_data_t\n");
3797 return ENOMEM;
3798 }
3799 /* BBU */
3800 sc->sc_sensors[0].units = ENVSYS_INDICATOR;
3801 sc->sc_sensors[0].state = ENVSYS_SINVALID;
3802 sc->sc_sensors[0].value_cur = 0;
3803 sc->sc_sensors[1].units = ENVSYS_SVOLTS_DC;
3804 sc->sc_sensors[1].state = ENVSYS_SINVALID;
3805 sc->sc_sensors[1].value_cur = 0;
3806 sc->sc_sensors[2].units = ENVSYS_SAMPS;
3807 sc->sc_sensors[2].state = ENVSYS_SINVALID;
3808 sc->sc_sensors[2].value_cur = 0;
3809 sc->sc_sensors[3].units = ENVSYS_STEMP;
3810 sc->sc_sensors[3].state = ENVSYS_SINVALID;
3811 sc->sc_sensors[3].value_cur = 0;
3812
3813 if (ISSET(le32toh(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3814 sc->sc_bbuok = true;
3815 sc->sc_sensors[0].flags |= ENVSYS_FMONCRITICAL;
3816 snprintf(sc->sc_sensors[0].desc, sizeof(sc->sc_sensors[0].desc),
3817 "%s BBU state", DEVNAME(sc));
3818 snprintf(sc->sc_sensors[1].desc, sizeof(sc->sc_sensors[1].desc),
3819 "%s BBU voltage", DEVNAME(sc));
3820 snprintf(sc->sc_sensors[2].desc, sizeof(sc->sc_sensors[2].desc),
3821 "%s BBU current", DEVNAME(sc));
3822 snprintf(sc->sc_sensors[3].desc, sizeof(sc->sc_sensors[3].desc),
3823 "%s BBU temperature", DEVNAME(sc));
3824 for (i = 0; i < MFI_BBU_SENSORS; i++) {
3825 mfii_attach_sensor(sc, &sc->sc_sensors[i]);
3826 }
3827 }
3828
3829 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3830 mfii_init_ld_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS], i);
3831 mfii_attach_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS]);
3832 }
3833
3834 sc->sc_sme->sme_name = DEVNAME(sc);
3835 sc->sc_sme->sme_cookie = sc;
3836 sc->sc_sme->sme_refresh = mfii_refresh_sensor;
3837 rv = sysmon_envsys_register(sc->sc_sme);
3838 if (rv) {
3839 aprint_error_dev(sc->sc_dev,
3840 "unable to register with sysmon (rv = %d)\n", rv);
3841 }
3842 return rv;
3843
3844 }
3845
3846 static int
3847 mfii_destroy_sensors(struct mfii_softc *sc)
3848 {
3849 if (sc->sc_sme == NULL)
3850 return 0;
3851 sysmon_envsys_unregister(sc->sc_sme);
3852 sc->sc_sme = NULL;
3853 free(sc->sc_sensors, M_DEVBUF);
3854 return 0;
3855 }
3856
3857 void
3858 mfii_refresh_sensor(struct sysmon_envsys *sme, envsys_data_t *edata)
3859 {
3860 struct mfii_softc *sc = sme->sme_cookie;
3861
3862 if (edata->sensor >= MFI_BBU_SENSORS + MFI_MAX_LD)
3863 return;
3864
3865 if (edata->sensor < MFI_BBU_SENSORS) {
3866 if (sc->sc_bbuok)
3867 mfii_bbu(sc, edata);
3868 } else {
3869 mfii_refresh_ld_sensor(sc, edata);
3870 }
3871 }
3872