mfii.c revision 1.3 1 /* $NetBSD: mfii.c,v 1.3 2018/12/03 22:34:36 bouyer Exp $ */
2 /* $OpenBSD: mfii.c,v 1.58 2018/08/14 05:22:21 jmatthew Exp $ */
3
4 /*
5 * Copyright (c) 2018 Manuel Bouyer <Manuel.Bouyer (at) lip6.fr>
6 * Copyright (c) 2012 David Gwynne <dlg (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include <sys/cdefs.h>
22 __KERNEL_RCSID(0, "$NetBSD: mfii.c,v 1.3 2018/12/03 22:34:36 bouyer Exp $");
23
24 #include "bio.h"
25
26 #include <sys/atomic.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/ioctl.h>
31 #include <sys/device.h>
32 #include <sys/kernel.h>
33 #include <sys/proc.h>
34 #include <sys/cpu.h>
35 #include <sys/conf.h>
36 #include <sys/kauth.h>
37 #include <sys/workqueue.h>
38 #include <sys/malloc.h>
39
40 #include <uvm/uvm_param.h>
41
42 #include <dev/pci/pcidevs.h>
43 #include <dev/pci/pcivar.h>
44
45 #include <sys/bus.h>
46
47 #include <dev/sysmon/sysmonvar.h>
48 #include <sys/envsys.h>
49
50 #include <dev/scsipi/scsipi_all.h>
51 #include <dev/scsipi/scsi_all.h>
52 #include <dev/scsipi/scsi_spc.h>
53 #include <dev/scsipi/scsipi_disk.h>
54 #include <dev/scsipi/scsi_disk.h>
55 #include <dev/scsipi/scsiconf.h>
56
57 #if NBIO > 0
58 #include <dev/biovar.h>
59 #endif /* NBIO > 0 */
60
61 #include <dev/ic/mfireg.h>
62 #include <dev/pci/mpiireg.h>
63
64 #define MFII_BAR 0x14
65 #define MFII_BAR_35 0x10
66 #define MFII_PCI_MEMSIZE 0x2000 /* 8k */
67
68 #define MFII_OSTS_INTR_VALID 0x00000009
69 #define MFII_RPI 0x6c /* reply post host index */
70 #define MFII_OSP2 0xb4 /* outbound scratch pad 2 */
71 #define MFII_OSP3 0xb8 /* outbound scratch pad 3 */
72
73 #define MFII_REQ_TYPE_SCSI MPII_REQ_DESCR_SCSI_IO
74 #define MFII_REQ_TYPE_LDIO (0x7 << 1)
75 #define MFII_REQ_TYPE_MFA (0x1 << 1)
76 #define MFII_REQ_TYPE_NO_LOCK (0x2 << 1)
77 #define MFII_REQ_TYPE_HI_PRI (0x6 << 1)
78
79 #define MFII_REQ_MFA(_a) htole64((_a) | MFII_REQ_TYPE_MFA)
80
81 #define MFII_FUNCTION_PASSTHRU_IO (0xf0)
82 #define MFII_FUNCTION_LDIO_REQUEST (0xf1)
83
84 #define MFII_MAX_CHAIN_UNIT 0x00400000
85 #define MFII_MAX_CHAIN_MASK 0x000003E0
86 #define MFII_MAX_CHAIN_SHIFT 5
87
88 #define MFII_256K_IO 128
89 #define MFII_1MB_IO (MFII_256K_IO * 4)
90
91 #define MFII_CHAIN_FRAME_MIN 1024
92
/*
 * Request descriptor written to the hardware to post a command; the
 * 64-bit value (see MFII_REQ_MFA()) encodes the request type in
 * "flags" (MFII_REQ_TYPE_*) and the command slot in "smid".
 */
struct mfii_request_descr {
	u_int8_t	flags;		/* MFII_REQ_TYPE_* */
	u_int8_t	msix_index;
	u_int16_t	smid;		/* system message id (ccb_smid) */

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
101
102 #define MFII_RAID_CTX_IO_TYPE_SYSPD (0x1 << 4)
103 #define MFII_RAID_CTX_TYPE_CUDA (0x2 << 4)
104
/*
 * MegaRAID-specific context appended to the MPII SCSI IO request frame.
 * Layout must match the firmware; do not reorder fields.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;	/* filled from iop->ldio_ctx_type_nseg */
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;	/* flags below; per-iop (see mfii_iop) */
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

/* SAS3.5 parts reinterpret this field as routing flags */
#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;	/* extended status from firmware */
	u_int8_t	status;		/* command completion status */

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
135
/*
 * IEEE scatter-gather element as consumed by the MPII frontend.
 * sg_flags carries MFII_SGE_* bits (chain/end-of-list/address type).
 */
struct mfii_sge {
	u_int64_t	sg_addr;		/* bus address of segment */
	u_int32_t	sg_len;			/* segment length in bytes */
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;		/* MFII_SGE_* */
} __packed;
143
144 #define MFII_SGE_ADDR_MASK (0x03)
145 #define MFII_SGE_ADDR_SYSTEM (0x00)
146 #define MFII_SGE_ADDR_IOCDDR (0x01)
147 #define MFII_SGE_ADDR_IOCPLB (0x02)
148 #define MFII_SGE_ADDR_IOCPLBNTA (0x03)
149 #define MFII_SGE_END_OF_LIST (0x40)
150 #define MFII_SGE_CHAIN_ELEMENT (0x80)
151
152 #define MFII_REQUEST_SIZE 256
153
154 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
155
156 #define MFII_MAX_ROW 32
157 #define MFII_MAX_ARRAY 128
158
/* One row of the array map: physical-disk ids for each row of an array. */
struct mfii_array_map {
	uint16_t mam_pd[MFII_MAX_ROW];
} __packed;
162
/* Per-physical-disk device handle entry from the firmware LD map. */
struct mfii_dev_handle {
	uint16_t	mdh_cur_handle;	/* handle currently in use */
	uint8_t		mdh_valid;
	uint8_t		mdh_reserved;
	uint16_t	mdh_handle[2];
} __packed;
169
/*
 * Logical-disk map returned by the MR_DCMD_LD_MAP_GET_INFO dcmd;
 * maps target ids to LDs and carries per-PD device handles.
 */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
181
/*
 * Task management request/reply pair used for command aborts; the
 * request and reply views overlay fixed 128-byte frames.
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;	/* MFII_TASK_MGMT_FLAGS_* */
#define MFII_TASK_MGMT_FLAGS_LD			(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD			(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
198
/* We currently don't know the full details of the following struct */
struct mfii_foreign_scan_cfg {
	char data[24];
} __packed;

/* Result of a foreign-configuration scan. */
struct mfii_foreign_scan_info {
	uint32_t count;		/* Number of foreign configs found */
	struct mfii_foreign_scan_cfg cfgs[8];
} __packed;
208
/*
 * A single-segment piece of DMA-safe memory with both kernel mapping
 * and bus mapping; allocated by mfii_dmamem_alloc().
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;	/* loaded dma map */
	bus_dma_segment_t	mdm_seg;	/* backing segment */
	size_t			mdm_size;	/* size in bytes */
	void *			mdm_kva;	/* kernel mapping */
};
/* Accessors for the map, length, device address and kernel address. */
#define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
219
220 struct mfii_softc;
221
/* Direction of the data phase of a command, as seen from the host. */
typedef enum mfii_direction {
	MFII_DATA_NONE = 0,	/* no data transfer */
	MFII_DATA_IN,		/* device -> host */
	MFII_DATA_OUT		/* host -> device */
} mfii_direction_t;
227
/*
 * Per-command control block.  Each ccb owns a slice of the shared
 * request/mfi/sense/sgl DMA areas (kva pointer + device address +
 * offset into the parent map for partial syncs).
 */
struct mfii_ccb {
	struct mfii_softc	*ccb_sc;
	/* slice of sc_requests (MPII request frame) */
	void			*ccb_request;
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	/* slice of sc_mfi (MFI command frame) */
	void			*ccb_mfi;
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	/* slice of sc_sense (also reused for dcmd frames, see below) */
	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	/* slice of sc_sgl (chain of mfii_sge) */
	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;	/* descriptor posted to hw */

	bus_dmamap_t		ccb_dmamap64;
	bus_dmamap_t		ccb_dmamap32;
	bool			ccb_dma64;	/* which map is loaded */

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	mfii_direction_t	ccb_direction;

	void			*ccb_cookie;	/* owner context (e.g. xfer) */
	kmutex_t		ccb_mtx;
	kcondvar_t		ccb_cv;		/* wait point for mfii_exec */
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;	/* slot id posted to hw */
	u_int			ccb_refcnt;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;	/* freeq / abort list linkage */
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
272
/*
 * Per-controller-generation parameters (Thunderbolt / Invader / Ventura);
 * selected at match time via the mfii_devices table.
 */
struct mfii_iop {
	int bar;			/* which PCI BAR holds the registers */
	int num_sge_loc;		/* where nseg lives in the io frame */
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;		/* MFII_REQ_TYPE_* for LD io */
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;	/* sg_flags for chain elements */
	u_int8_t sge_flag_eol;		/* sg_flags for the last element */
};
284
/* Per-device driver state. */
struct mfii_softc {
	device_t		sc_dev;
	struct scsipi_channel	sc_chan;
	struct scsipi_adapter	sc_adapt;

	const struct mfii_iop	*sc_iop;	/* controller generation */

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;
	bus_dma_tag_t		sc_dmat64;	/* == sc_dmat if no 64bit dma */
	bool			sc_64bit_dma;

	void			*sc_ih;		/* interrupt handle */

	kmutex_t		sc_ccb_mtx;	/* protects sc_ccb_freeq */
	kmutex_t		sc_post_mtx;	/* serializes hw posting */

	u_int			sc_max_fw_cmds;	/* from firmware */
	u_int			sc_max_cmds;	/* what we actually use */
	u_int			sc_max_sgl;

	/* reply post queue (completion ring) */
	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	kmutex_t		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* shared DMA areas sliced up between ccbs */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_mfi;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;	/* ccb array */
	struct mfii_ccb_list	sc_ccb_freeq;

	/* asynchronous event notification */
	struct mfii_ccb		*sc_aen_ccb;
	struct workqueue	*sc_aen_wq;
	struct work		sc_aen_work;

	/* command abort machinery */
	kmutex_t		sc_abort_mtx;
	struct mfii_ccb_list	sc_abort_list;
	struct workqueue	*sc_abort_wq;
	struct work		sc_abort_work;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		bool		ld_present;
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFI_MAX_LD];
	int			sc_target_lds[MFI_MAX_LD]; /* target -> ld idx, -1 if none */

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfi_ld_list	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details;	/* array to all logical disks */
	int			sc_no_pd;	/* used physical disks */
	int			sc_ld_sz;	/* sizeof sc_ld_details */

	/* mgmt lock */
	kmutex_t		sc_lock;
	bool			sc_running;	/* false once shutdown starts */

	/* sensors */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
	bool			sc_bbuok;	/* battery backup unit ok */

	device_t		sc_child;	/* attached scsibus */
};
361
362 // #define MFII_DEBUG
363 #ifdef MFII_DEBUG
364 #define DPRINTF(x...) do { if (mfii_debug) printf(x); } while(0)
365 #define DNPRINTF(n,x...) do { if (mfii_debug & n) printf(x); } while(0)
366 #define MFII_D_CMD 0x0001
367 #define MFII_D_INTR 0x0002
368 #define MFII_D_MISC 0x0004
369 #define MFII_D_DMA 0x0008
370 #define MFII_D_IOCTL 0x0010
371 #define MFII_D_RW 0x0020
372 #define MFII_D_MEM 0x0040
373 #define MFII_D_CCB 0x0080
374 uint32_t mfii_debug = 0
375 /* | MFII_D_CMD */
376 /* | MFII_D_INTR */
377 | MFII_D_MISC
378 /* | MFII_D_DMA */
379 /* | MFII_D_IOCTL */
380 /* | MFII_D_RW */
381 /* | MFII_D_MEM */
382 /* | MFII_D_CCB */
383 ;
384 #else
385 #define DPRINTF(x...)
386 #define DNPRINTF(n,x...)
387 #endif
388
389 int mfii_match(device_t, cfdata_t, void *);
390 void mfii_attach(device_t, device_t, void *);
391 int mfii_detach(device_t, int);
392 int mfii_rescan(device_t, const char *, const int *);
393 void mfii_childdetached(device_t, device_t);
394 static bool mfii_suspend(device_t, const pmf_qual_t *);
395 static bool mfii_resume(device_t, const pmf_qual_t *);
396 static bool mfii_shutdown(device_t, int);
397
398
399 CFATTACH_DECL3_NEW(mfii, sizeof(struct mfii_softc),
400 mfii_match, mfii_attach, mfii_detach, NULL, mfii_rescan,
401 mfii_childdetached, DVF_DETACH_SHUTDOWN);
402
403 void mfii_scsipi_request(struct scsipi_channel *,
404 scsipi_adapter_req_t, void *);
405 void mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
406
407 #define DEVNAME(_sc) (device_xname((_sc)->sc_dev))
408
409 static u_int32_t mfii_read(struct mfii_softc *, bus_size_t);
410 static void mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
411
412 struct mfii_dmamem * mfii_dmamem_alloc(struct mfii_softc *, size_t);
413 void mfii_dmamem_free(struct mfii_softc *,
414 struct mfii_dmamem *);
415
416 struct mfii_ccb * mfii_get_ccb(struct mfii_softc *);
417 void mfii_put_ccb(struct mfii_softc *, struct mfii_ccb *);
418 int mfii_init_ccb(struct mfii_softc *);
419 void mfii_scrub_ccb(struct mfii_ccb *);
420
421 int mfii_transition_firmware(struct mfii_softc *);
422 int mfii_initialise_firmware(struct mfii_softc *);
423 int mfii_get_info(struct mfii_softc *);
424
425 void mfii_start(struct mfii_softc *, struct mfii_ccb *);
426 void mfii_done(struct mfii_softc *, struct mfii_ccb *);
427 int mfii_poll(struct mfii_softc *, struct mfii_ccb *);
428 void mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
429 int mfii_exec(struct mfii_softc *, struct mfii_ccb *);
430 void mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
431 int mfii_my_intr(struct mfii_softc *);
432 int mfii_intr(void *);
433 void mfii_postq(struct mfii_softc *);
434
435 int mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
436 void *, int);
437 int mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
438 void *, int);
439
440 int mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
441
442 int mfii_mgmt(struct mfii_softc *, uint32_t,
443 const union mfi_mbox *, void *, size_t,
444 mfii_direction_t, bool);
445 int mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
446 uint32_t, const union mfi_mbox *, void *, size_t,
447 mfii_direction_t, bool);
448 void mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
449
450 int mfii_scsi_cmd_io(struct mfii_softc *,
451 struct mfii_ccb *, struct scsipi_xfer *);
452 int mfii_scsi_cmd_cdb(struct mfii_softc *,
453 struct mfii_ccb *, struct scsipi_xfer *);
454 void mfii_scsi_cmd_tmo(void *);
455
456 int mfii_dev_handles_update(struct mfii_softc *sc);
457 void mfii_dev_handles_dtor(void *, void *);
458
459 void mfii_abort_task(struct work *, void *);
460 void mfii_abort(struct mfii_softc *, struct mfii_ccb *,
461 uint16_t, uint16_t, uint8_t, uint32_t);
462 void mfii_scsi_cmd_abort_done(struct mfii_softc *,
463 struct mfii_ccb *);
464
465 int mfii_aen_register(struct mfii_softc *);
466 void mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
467 struct mfii_dmamem *, uint32_t);
468 void mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
469 void mfii_aen(struct work *, void *);
470 void mfii_aen_unregister(struct mfii_softc *);
471
472 void mfii_aen_pd_insert(struct mfii_softc *,
473 const struct mfi_evtarg_pd_address *);
474 void mfii_aen_pd_remove(struct mfii_softc *,
475 const struct mfi_evtarg_pd_address *);
476 void mfii_aen_pd_state_change(struct mfii_softc *,
477 const struct mfi_evtarg_pd_state *);
478 void mfii_aen_ld_update(struct mfii_softc *);
479
480 #if NBIO > 0
481 int mfii_ioctl(device_t, u_long, void *);
482 int mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
483 int mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
484 int mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
485 int mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
486 int mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
487 int mfii_ioctl_setstate(struct mfii_softc *,
488 struct bioc_setstate *);
489 int mfii_bio_hs(struct mfii_softc *, int, int, void *);
490 int mfii_bio_getitall(struct mfii_softc *);
491 #endif /* NBIO > 0 */
492
#if 0
/* NOTE(review): currently unused (disabled); kept for bbu sensor work. */
static const char *mfi_bbu_indicators[] = {
	"pack missing",
	"voltage low",
	"temp high",
	"charge active",
	"discharge active",
	"learn cycle req'd",
	"learn cycle active",
	"learn cycle failed",
	"learn cycle timeout",
	"I2C errors",
	"replace pack",
	"low capacity",
	"periodic learn req'd"
};
#endif
510
511 void mfii_init_ld_sensor(struct mfii_softc *, envsys_data_t *, int);
512 void mfii_refresh_ld_sensor(struct mfii_softc *, envsys_data_t *);
513 static void mfii_attach_sensor(struct mfii_softc *, envsys_data_t *);
514 int mfii_create_sensors(struct mfii_softc *);
515 static int mfii_destroy_sensors(struct mfii_softc *);
516 void mfii_refresh_sensor(struct sysmon_envsys *, envsys_data_t *);
517 void mfii_bbu(struct mfii_softc *, envsys_data_t *);
518
519 /*
520 * mfii boards support asynchronous (and non-polled) completion of
521 * dcmds by proxying them through a passthru mpii command that points
522 * at a dcmd frame. since the passthru command is submitted like
523 * the scsi commands using an SMID in the request descriptor,
524 * ccb_request memory * must contain the passthru command because
525 * that is what the SMID refers to. this means ccb_request cannot
526 * contain the dcmd. rather than allocating separate dma memory to
527 * hold the dcmd, we reuse the sense memory buffer for it.
528 */
529
530 void mfii_dcmd_start(struct mfii_softc *,
531 struct mfii_ccb *);
532
533 static inline void
534 mfii_dcmd_scrub(struct mfii_ccb *ccb)
535 {
536 memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
537 }
538
539 static inline struct mfi_dcmd_frame *
540 mfii_dcmd_frame(struct mfii_ccb *ccb)
541 {
542 CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
543 return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
544 }
545
546 static inline void
547 mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
548 {
549 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
550 ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
551 }
552
553 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
554
555 const struct mfii_iop mfii_iop_thunderbolt = {
556 MFII_BAR,
557 MFII_IOP_NUM_SGE_LOC_ORIG,
558 0,
559 MFII_REQ_TYPE_LDIO,
560 0,
561 MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
562 0
563 };
564
565 /*
566 * a lot of these values depend on us not implementing fastpath yet.
567 */
568 const struct mfii_iop mfii_iop_25 = {
569 MFII_BAR,
570 MFII_IOP_NUM_SGE_LOC_ORIG,
571 MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
572 MFII_REQ_TYPE_NO_LOCK,
573 MFII_RAID_CTX_TYPE_CUDA | 0x1,
574 MFII_SGE_CHAIN_ELEMENT,
575 MFII_SGE_END_OF_LIST
576 };
577
578 const struct mfii_iop mfii_iop_35 = {
579 MFII_BAR_35,
580 MFII_IOP_NUM_SGE_LOC_35,
581 MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
582 MFII_REQ_TYPE_NO_LOCK,
583 MFII_RAID_CTX_TYPE_CUDA | 0x1,
584 MFII_SGE_CHAIN_ELEMENT,
585 MFII_SGE_END_OF_LIST
586 };
587
/* PCI id -> iop parameter mapping entry. */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};
593
/* Supported controllers; searched linearly by mfii_find_iop(). */
const struct mfii_device mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 }
};
614
615 const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
616
617 const struct mfii_iop *
618 mfii_find_iop(struct pci_attach_args *pa)
619 {
620 const struct mfii_device *mpd;
621 int i;
622
623 for (i = 0; i < __arraycount(mfii_devices); i++) {
624 mpd = &mfii_devices[i];
625
626 if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
627 mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
628 return (mpd->mpd_iop);
629 }
630
631 return (NULL);
632 }
633
634 int
635 mfii_match(device_t parent, cfdata_t match, void *aux)
636 {
637 return ((mfii_find_iop(aux) != NULL) ? 2 : 0);
638 }
639
640 void
641 mfii_attach(device_t parent, device_t self, void *aux)
642 {
643 struct mfii_softc *sc = device_private(self);
644 struct pci_attach_args *pa = aux;
645 pcireg_t memtype;
646 pci_intr_handle_t ih;
647 char intrbuf[PCI_INTRSTR_LEN];
648 const char *intrstr;
649 u_int32_t status, scpad2, scpad3;
650 int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
651 struct scsipi_adapter *adapt = &sc->sc_adapt;
652 struct scsipi_channel *chan = &sc->sc_chan;
653
654 /* init sc */
655 sc->sc_dev = self;
656 sc->sc_iop = mfii_find_iop(aux);
657 sc->sc_dmat = pa->pa_dmat;
658 if (pci_dma64_available(pa)) {
659 sc->sc_dmat64 = pa->pa_dmat64;
660 sc->sc_64bit_dma = 1;
661 } else {
662 sc->sc_dmat64 = pa->pa_dmat;
663 sc->sc_64bit_dma = 0;
664 }
665 SIMPLEQ_INIT(&sc->sc_ccb_freeq);
666 mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
667 mutex_init(&sc->sc_post_mtx, MUTEX_DEFAULT, IPL_BIO);
668 mutex_init(&sc->sc_reply_postq_mtx, MUTEX_DEFAULT, IPL_BIO);
669
670 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
671
672 sc->sc_aen_ccb = NULL;
673 snprintf(intrbuf, sizeof(intrbuf) - 1, "%saen", device_xname(self));
674 workqueue_create(&sc->sc_aen_wq, intrbuf, mfii_aen, sc,
675 PRI_BIO, IPL_BIO, WQ_MPSAFE);
676
677 snprintf(intrbuf, sizeof(intrbuf) - 1, "%sabrt", device_xname(self));
678 workqueue_create(&sc->sc_abort_wq, intrbuf, mfii_abort_task,
679 sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
680
681 mutex_init(&sc->sc_abort_mtx, MUTEX_DEFAULT, IPL_BIO);
682 SIMPLEQ_INIT(&sc->sc_abort_list);
683
684 /* wire up the bus shizz */
685 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
686 memtype |= PCI_MAPREG_MEM_TYPE_32BIT;
687 if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
688 &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)) {
689 aprint_error(": unable to map registers\n");
690 return;
691 }
692
693 /* disable interrupts */
694 mfii_write(sc, MFI_OMSK, 0xffffffff);
695
696 if (pci_intr_map(pa, &ih) != 0) {
697 aprint_error(": unable to map interrupt\n");
698 goto pci_unmap;
699 }
700 intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf));
701 pci_intr_setattr(pa->pa_pc, &ih, PCI_INTR_MPSAFE, true);
702
703 /* lets get started */
704 if (mfii_transition_firmware(sc))
705 goto pci_unmap;
706 sc->sc_running = true;
707
708 /* determine max_cmds (refer to the Linux megaraid_sas driver) */
709 scpad3 = mfii_read(sc, MFII_OSP3);
710 status = mfii_fw_state(sc);
711 sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
712 if (sc->sc_max_fw_cmds == 0)
713 sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
714 /*
715 * reduce max_cmds by 1 to ensure that the reply queue depth does not
716 * exceed FW supplied max_fw_cmds.
717 */
718 sc->sc_max_cmds = uimin(sc->sc_max_fw_cmds, 1024) - 1;
719
720 /* determine max_sgl (refer to the Linux megaraid_sas driver) */
721 scpad2 = mfii_read(sc, MFII_OSP2);
722 chain_frame_sz =
723 ((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
724 ((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
725 if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
726 chain_frame_sz = MFII_CHAIN_FRAME_MIN;
727
728 nsge_in_io = (MFII_REQUEST_SIZE -
729 sizeof(struct mpii_msg_scsi_io) -
730 sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
731 nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
732
733 /* round down to nearest power of two */
734 sc->sc_max_sgl = 1;
735 while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
736 sc->sc_max_sgl <<= 1;
737
738 DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
739 DEVNAME(sc), status, scpad2, scpad3);
740 DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
741 DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
742 DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
743 "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
744 sc->sc_max_sgl);
745
746 /* sense memory */
747 CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
748 sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
749 if (sc->sc_sense == NULL) {
750 aprint_error(": unable to allocate sense memory\n");
751 goto pci_unmap;
752 }
753
754 /* reply post queue */
755 sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
756
757 sc->sc_reply_postq = mfii_dmamem_alloc(sc,
758 sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
759 if (sc->sc_reply_postq == NULL)
760 goto free_sense;
761
762 memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
763 MFII_DMA_LEN(sc->sc_reply_postq));
764
765 /* MPII request frame array */
766 sc->sc_requests = mfii_dmamem_alloc(sc,
767 MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
768 if (sc->sc_requests == NULL)
769 goto free_reply_postq;
770
771 /* MFI command frame array */
772 sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
773 if (sc->sc_mfi == NULL)
774 goto free_requests;
775
776 /* MPII SGL array */
777 sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
778 sizeof(struct mfii_sge) * sc->sc_max_sgl);
779 if (sc->sc_sgl == NULL)
780 goto free_mfi;
781
782 if (mfii_init_ccb(sc) != 0) {
783 aprint_error(": could not init ccb list\n");
784 goto free_sgl;
785 }
786
787 /* kickstart firmware with all addresses and pointers */
788 if (mfii_initialise_firmware(sc) != 0) {
789 aprint_error(": could not initialize firmware\n");
790 goto free_sgl;
791 }
792
793 mutex_enter(&sc->sc_lock);
794 if (mfii_get_info(sc) != 0) {
795 mutex_exit(&sc->sc_lock);
796 aprint_error(": could not retrieve controller information\n");
797 goto free_sgl;
798 }
799 mutex_exit(&sc->sc_lock);
800
801 aprint_normal(": \"%s\", firmware %s",
802 sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
803 if (le16toh(sc->sc_info.mci_memory_size) > 0) {
804 aprint_normal(", %uMB cache",
805 le16toh(sc->sc_info.mci_memory_size));
806 }
807 aprint_normal("\n");
808 aprint_naive("\n");
809
810 sc->sc_ih = pci_intr_establish_xname(sc->sc_pc, ih, IPL_BIO,
811 mfii_intr, sc, DEVNAME(sc));
812 if (sc->sc_ih == NULL) {
813 aprint_error_dev(self, "can't establish interrupt");
814 if (intrstr)
815 aprint_error(" at %s", intrstr);
816 aprint_error("\n");
817 goto free_sgl;
818 }
819 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
820
821 for (i = 0; i < sc->sc_info.mci_lds_present; i++)
822 sc->sc_ld[i].ld_present = 1;
823
824 memset(adapt, 0, sizeof(*adapt));
825 adapt->adapt_dev = sc->sc_dev;
826 adapt->adapt_nchannels = 1;
827 /* keep a few commands for management */
828 if (sc->sc_max_cmds > 4)
829 adapt->adapt_openings = sc->sc_max_cmds - 4;
830 else
831 adapt->adapt_openings = sc->sc_max_cmds;
832 adapt->adapt_max_periph = adapt->adapt_openings;
833 adapt->adapt_request = mfii_scsipi_request;
834 adapt->adapt_minphys = minphys;
835 adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
836
837 memset(chan, 0, sizeof(*chan));
838 chan->chan_adapter = adapt;
839 chan->chan_bustype = &scsi_sas_bustype;
840 chan->chan_channel = 0;
841 chan->chan_flags = 0;
842 chan->chan_nluns = 8;
843 chan->chan_ntargets = sc->sc_info.mci_max_lds;
844 chan->chan_id = sc->sc_info.mci_max_lds;
845
846 mfii_rescan(sc->sc_dev, "scsi", NULL);
847
848 if (mfii_aen_register(sc) != 0) {
849 /* error printed by mfii_aen_register */
850 goto intr_disestablish;
851 }
852
853 mutex_enter(&sc->sc_lock);
854 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
855 sizeof(sc->sc_ld_list), MFII_DATA_IN, true) != 0) {
856 mutex_exit(&sc->sc_lock);
857 aprint_error_dev(self,
858 "getting list of logical disks failed\n");
859 goto intr_disestablish;
860 }
861 mutex_exit(&sc->sc_lock);
862 memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
863 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
864 int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
865 sc->sc_target_lds[target] = i;
866 }
867
868 /* enable interrupts */
869 mfii_write(sc, MFI_OSTS, 0xffffffff);
870 mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
871
872 #if NBIO > 0
873 if (bio_register(sc->sc_dev, mfii_ioctl) != 0)
874 panic("%s: controller registration failed", DEVNAME(sc));
875 #endif /* NBIO > 0 */
876
877 if (mfii_create_sensors(sc) != 0)
878 aprint_error_dev(self, "unable to create sensors\n");
879
880 if (!pmf_device_register1(sc->sc_dev, mfii_suspend, mfii_resume,
881 mfii_shutdown))
882 aprint_error_dev(self, "couldn't establish power handler\n");
883 return;
884 intr_disestablish:
885 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
886 free_sgl:
887 mfii_dmamem_free(sc, sc->sc_sgl);
888 free_mfi:
889 mfii_dmamem_free(sc, sc->sc_mfi);
890 free_requests:
891 mfii_dmamem_free(sc, sc->sc_requests);
892 free_reply_postq:
893 mfii_dmamem_free(sc, sc->sc_reply_postq);
894 free_sense:
895 mfii_dmamem_free(sc, sc->sc_sense);
896 pci_unmap:
897 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
898 }
899
#if 0
/*
 * NOTE(review): disabled OpenBSD srp(9)-based dev-handle code carried
 * over from the original driver; NetBSD has no srp, so this does not
 * compile here and is kept only as a reference for a future port.
 */
struct srp_gc mfii_dev_handles_gc =
    SRP_GC_INITIALIZER(mfii_dev_handles_dtor, NULL);

static inline uint16_t
mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
{
	struct srp_ref sr;
	uint16_t *map, handle;

	map = srp_enter(&sr, &sc->sc_pd->pd_dev_handles);
	handle = map[target];
	srp_leave(&sr);

	return (handle);
}

int
mfii_dev_handles_update(struct mfii_softc *sc)
{
	struct mfii_ld_map *lm;
	uint16_t *dev_handles = NULL;
	int i;
	int rv = 0;

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);

	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
	    MFII_DATA_IN, false);

	if (rv != 0) {
		rv = EIO;
		goto free_lm;
	}

	dev_handles = mallocarray(MFI_MAX_PD, sizeof(*dev_handles),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MFI_MAX_PD; i++)
		dev_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;

	/* commit the updated info */
	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
	srp_update_locked(&mfii_dev_handles_gc,
	    &sc->sc_pd->pd_dev_handles, dev_handles);

free_lm:
	free(lm, M_TEMP, sizeof(*lm));

	return (rv);
}

void
mfii_dev_handles_dtor(void *null, void *v)
{
	uint16_t *dev_handles = v;

	free(dev_handles, M_DEVBUF, sizeof(*dev_handles) * MFI_MAX_PD);
}
#endif /* 0 */
960
/*
 * Detach: tear down in reverse attach order — children first, then
 * sensors/bio, firmware shutdown, interrupt, DMA memory, registers.
 * Returns 0 on success or the error from detaching children.
 */
int
mfii_detach(device_t self, int flags)
{
	struct mfii_softc *sc = device_private(self);
	int error;

	/* never fully attached (no interrupt established): nothing to undo */
	if (sc->sc_ih == NULL)
		return (0);

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

	mfii_destroy_sensors(sc);
#if NBIO > 0
	bio_unregister(sc->sc_dev);
#endif
	/* flush/stop the firmware before pulling resources */
	mfii_shutdown(sc->sc_dev, 0);
	/* mask all interrupts */
	mfii_write(sc, MFI_OMSK, 0xffffffff);

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	/* NOTE(review): ccb array, workqueues and mutexes from attach are
	 * not released here — TODO confirm whether that is intentional */
	return (0);
}
991
992 int
993 mfii_rescan(device_t self, const char *ifattr, const int *locators)
994 {
995 struct mfii_softc *sc = device_private(self);
996 if (sc->sc_child != NULL)
997 return 0;
998
999 sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
1000 scsiprint, NULL);
1001 return 0;
1002 }
1003
1004 void
1005 mfii_childdetached(device_t self, device_t child)
1006 {
1007 struct mfii_softc *sc = device_private(self);
1008
1009 KASSERT(self == sc->sc_dev);
1010 KASSERT(child == sc->sc_child);
1011
1012 if (child == sc->sc_child)
1013 sc->sc_child = NULL;
1014 }
1015
/* pmf suspend hook; not implemented, so refuse to suspend. */
static bool
mfii_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1022
/* pmf resume hook; not implemented, so report failure. */
static bool
mfii_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1029
1030 static bool
1031 mfii_shutdown(device_t dev, int how)
1032 {
1033 struct mfii_softc *sc = device_private(dev);
1034 struct mfii_ccb *ccb;
1035 union mfi_mbox mbox;
1036 bool rv = true;;
1037
1038 memset(&mbox, 0, sizeof(mbox));
1039
1040 mutex_enter(&sc->sc_lock);
1041 DNPRINTF(MFI_D_MISC, "%s: mfii_shutdown\n", DEVNAME(sc));
1042 ccb = mfii_get_ccb(sc);
1043 if (ccb == NULL)
1044 return false;
1045 mutex_enter(&sc->sc_ccb_mtx);
1046 if (sc->sc_running) {
1047 sc->sc_running = 0; /* prevent new commands */
1048 mutex_exit(&sc->sc_ccb_mtx);
1049 #if 0 /* XXX why does this hang ? */
1050 mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1051 mfii_scrub_ccb(ccb);
1052 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
1053 NULL, 0, MFII_DATA_NONE, true)) {
1054 aprint_error_dev(dev, "shutdown: cache flush failed\n");
1055 rv = false;
1056 goto fail;
1057 }
1058 printf("ok1\n");
1059 #endif
1060 mbox.b[0] = 0;
1061 mfii_scrub_ccb(ccb);
1062 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, &mbox,
1063 NULL, 0, MFII_DATA_NONE, true)) {
1064 aprint_error_dev(dev, "shutdown: "
1065 "firmware shutdown failed\n");
1066 rv = false;
1067 goto fail;
1068 }
1069 } else {
1070 mutex_exit(&sc->sc_ccb_mtx);
1071 }
1072 fail:
1073 mfii_put_ccb(sc, ccb);
1074 mutex_exit(&sc->sc_lock);
1075 return rv;
1076 }
1077
1078 static u_int32_t
1079 mfii_read(struct mfii_softc *sc, bus_size_t r)
1080 {
1081 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1082 BUS_SPACE_BARRIER_READ);
1083 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
1084 }
1085
1086 static void
1087 mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
1088 {
1089 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1090 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1091 BUS_SPACE_BARRIER_WRITE);
1092 }
1093
1094 struct mfii_dmamem *
1095 mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
1096 {
1097 struct mfii_dmamem *m;
1098 int nsegs;
1099
1100 m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1101 if (m == NULL)
1102 return (NULL);
1103
1104 m->mdm_size = size;
1105
1106 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1107 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
1108 goto mdmfree;
1109
1110 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
1111 &nsegs, BUS_DMA_NOWAIT) != 0)
1112 goto destroy;
1113
1114 if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
1115 BUS_DMA_NOWAIT) != 0)
1116 goto free;
1117
1118 if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
1119 BUS_DMA_NOWAIT) != 0)
1120 goto unmap;
1121
1122 memset(m->mdm_kva, 0, size);
1123 return (m);
1124
1125 unmap:
1126 bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
1127 free:
1128 bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
1129 destroy:
1130 bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
1131 mdmfree:
1132 free(m, M_DEVBUF);
1133
1134 return (NULL);
1135 }
1136
/*
 * Release DMA memory obtained from mfii_dmamem_alloc(), tearing down
 * the map/mapping/segment in the reverse order they were set up.
 */
void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF);
}
1146
/*
 * Submit the MFI DCMD frame attached to this ccb as an MPII
 * pass-through request.  The request buffer holds the scsi_io header,
 * the RAID context, then a single chain SGE pointing at the per-ccb
 * sense/DCMD area.
 */
void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 is in 32-bit words, chain_offset in 16-byte units */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	sge->sg_addr = htole64(ccb->ccb_sense_dva);
	sge->sg_len = htole32(sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	/*
	 * NOTE(review): le16toh() on a value handed to the hardware looks
	 * like it was meant to be htole16(); both are identical byte swaps
	 * so the result is the same — confirm intent before changing.
	 */
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1167
/*
 * Set up asynchronous event notification (AEN): query the firmware's
 * event-log sequence numbers, allocate the DMA buffer event details are
 * written to, and post the first AEN wait command.  On success the ccb
 * and DMA memory are deliberately NOT released — they are reused for
 * every subsequent AEN (see mfii_aen_start()/mfii_aen()).
 * Returns 0 on success or ENOMEM/EIO.
 */
int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	/* ask the firmware where its event log currently stands */
	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), MFII_DATA_IN, true);
	if (rv != 0) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to get event info\n");
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to allocate event data\n");
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, le32toh(mel.mel_boot_seq_num));

	return (0);
}
1205
/*
 * Post (or re-post) the MR_DCMD_CTRL_EVENT_WAIT command asking the
 * firmware to report the next event with sequence number >= seq into
 * the mdm DMA buffer.  Completion is delivered via mfii_aen_done().
 */
void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	/* start from a clean ccb, DCMD frame and event buffer */
	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* subscribe to every locale at debug class and above */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	hdr->mfh_data_len = htole32(MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mdf_mbox.w[0] = htole32(seq);
	dcmd->mdf_mbox.w[1] = htole32(mec.mec_word);
	sgl->sg64[0].addr = htole64(MFII_DMA_DVA(mdm));
	sgl->sg64[0].len = htole32(MFII_DMA_LEN(mdm));

	/* make the event buffer and the frame visible to the device */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1243
1244 void
1245 mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1246 {
1247 KASSERT(sc->sc_aen_ccb == ccb);
1248
1249 /*
1250 * defer to a thread with KERNEL_LOCK so we can run autoconf
1251 * We shouldn't have more than one AEN command pending at a time,
1252 * so no need to lock
1253 */
1254 if (sc->sc_running)
1255 workqueue_enqueue(sc->sc_aen_wq, &sc->sc_aen_work, NULL);
1256 }
1257
/*
 * Workqueue handler for a completed AEN: sync the buffers, dispatch on
 * the event code, then immediately re-arm the AEN for the next sequence
 * number.
 */
void
mfii_aen(struct work *wk, void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);

	/* pull the completed frame and event detail back from the device */
	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	DNPRINTF(MFII_D_MISC, "%s: %u %08x %02x %s\n", DEVNAME(sc),
	    le32toh(med->med_seq_num), le32toh(med->med_code),
	    med->med_arg_type, med->med_description);

	switch (le32toh(med->med_code)) {
	case MR_EVT_PD_INSERTED_EXT:
		/* only handle events carrying a PD address argument */
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
	case MR_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MR_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MR_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MR_EVT_LD_CREATED:
	case MR_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	/* re-arm: wait for the event after the one just processed */
	mfii_aen_start(sc, ccb, mdm, le32toh(med->med_seq_num) + 1);
}
1307
1308 void
1309 mfii_aen_pd_insert(struct mfii_softc *sc,
1310 const struct mfi_evtarg_pd_address *pd)
1311 {
1312 printf("%s: physical disk inserted id %d enclosure %d\n", DEVNAME(sc),
1313 le16toh(pd->device_id), le16toh(pd->encl_id));
1314 }
1315
1316 void
1317 mfii_aen_pd_remove(struct mfii_softc *sc,
1318 const struct mfi_evtarg_pd_address *pd)
1319 {
1320 printf("%s: physical disk removed id %d enclosure %d\n", DEVNAME(sc),
1321 le16toh(pd->device_id), le16toh(pd->encl_id));
1322 }
1323
void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
	/* not implemented yet; physical-disk state changes are ignored */
}
1330
1331 void
1332 mfii_aen_ld_update(struct mfii_softc *sc)
1333 {
1334 int i, target, old, nld;
1335 int newlds[MFI_MAX_LD];
1336
1337 mutex_enter(&sc->sc_lock);
1338 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
1339 sizeof(sc->sc_ld_list), MFII_DATA_IN, false) != 0) {
1340 mutex_exit(&sc->sc_lock);
1341 DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
1342 DEVNAME(sc));
1343 return;
1344 }
1345 mutex_exit(&sc->sc_lock);
1346
1347 memset(newlds, -1, sizeof(newlds));
1348
1349 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
1350 target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1351 DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
1352 DEVNAME(sc), target, sc->sc_ld_list.mll_list[i].mll_state);
1353 newlds[target] = i;
1354 }
1355
1356 for (i = 0; i < MFI_MAX_LD; i++) {
1357 old = sc->sc_target_lds[i];
1358 nld = newlds[i];
1359
1360 if (old == -1 && nld != -1) {
1361 printf("%s: logical drive %d added (target %d)\n",
1362 DEVNAME(sc), i, nld);
1363
1364 // XXX scsi_probe_target(sc->sc_scsibus, i);
1365
1366 mfii_init_ld_sensor(sc, &sc->sc_sensors[i], i);
1367 mfii_attach_sensor(sc, &sc->sc_sensors[i]);
1368 } else if (nld == -1 && old != -1) {
1369 printf("%s: logical drive %d removed (target %d)\n",
1370 DEVNAME(sc), i, old);
1371
1372 scsipi_target_detach(&sc->sc_chan, i, 0, DETACH_FORCE);
1373 sysmon_envsys_sensor_detach(sc->sc_sme,
1374 &sc->sc_sensors[i]);
1375 }
1376 }
1377
1378 memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
1379 }
1380
void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX nothing tears down the AEN ccb/DMA buffer yet */
}
1386
1387 int
1388 mfii_transition_firmware(struct mfii_softc *sc)
1389 {
1390 int32_t fw_state, cur_state;
1391 int max_wait, i;
1392
1393 fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
1394
1395 while (fw_state != MFI_STATE_READY) {
1396 cur_state = fw_state;
1397 switch (fw_state) {
1398 case MFI_STATE_FAULT:
1399 printf("%s: firmware fault\n", DEVNAME(sc));
1400 return (1);
1401 case MFI_STATE_WAIT_HANDSHAKE:
1402 mfii_write(sc, MFI_SKINNY_IDB,
1403 MFI_INIT_CLEAR_HANDSHAKE);
1404 max_wait = 2;
1405 break;
1406 case MFI_STATE_OPERATIONAL:
1407 mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
1408 max_wait = 10;
1409 break;
1410 case MFI_STATE_UNDEFINED:
1411 case MFI_STATE_BB_INIT:
1412 max_wait = 2;
1413 break;
1414 case MFI_STATE_FW_INIT:
1415 case MFI_STATE_DEVICE_SCAN:
1416 case MFI_STATE_FLUSH_CACHE:
1417 max_wait = 20;
1418 break;
1419 default:
1420 printf("%s: unknown firmware state %d\n",
1421 DEVNAME(sc), fw_state);
1422 return (1);
1423 }
1424 for (i = 0; i < (max_wait * 10); i++) {
1425 fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
1426 if (fw_state == cur_state)
1427 DELAY(100000);
1428 else
1429 break;
1430 }
1431 if (fw_state == cur_state) {
1432 printf("%s: firmware stuck in state %#x\n",
1433 DEVNAME(sc), fw_state);
1434 return (1);
1435 }
1436 }
1437
1438 return (0);
1439 }
1440
1441 int
1442 mfii_get_info(struct mfii_softc *sc)
1443 {
1444 int i, rv;
1445
1446 rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
1447 sizeof(sc->sc_info), MFII_DATA_IN, true);
1448
1449 if (rv != 0)
1450 return (rv);
1451
1452 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
1453 DPRINTF("%s: active FW %s Version %s date %s time %s\n",
1454 DEVNAME(sc),
1455 sc->sc_info.mci_image_component[i].mic_name,
1456 sc->sc_info.mci_image_component[i].mic_version,
1457 sc->sc_info.mci_image_component[i].mic_build_date,
1458 sc->sc_info.mci_image_component[i].mic_build_time);
1459 }
1460
1461 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
1462 DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
1463 DEVNAME(sc),
1464 sc->sc_info.mci_pending_image_component[i].mic_name,
1465 sc->sc_info.mci_pending_image_component[i].mic_version,
1466 sc->sc_info.mci_pending_image_component[i].mic_build_date,
1467 sc->sc_info.mci_pending_image_component[i].mic_build_time);
1468 }
1469
1470 DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
1471 DEVNAME(sc),
1472 sc->sc_info.mci_max_arms,
1473 sc->sc_info.mci_max_spans,
1474 sc->sc_info.mci_max_arrays,
1475 sc->sc_info.mci_max_lds,
1476 sc->sc_info.mci_product_name);
1477
1478 DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
1479 DEVNAME(sc),
1480 sc->sc_info.mci_serial_number,
1481 sc->sc_info.mci_hw_present,
1482 sc->sc_info.mci_current_fw_time,
1483 sc->sc_info.mci_max_cmds,
1484 sc->sc_info.mci_max_sg_elements);
1485
1486 DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
1487 DEVNAME(sc),
1488 sc->sc_info.mci_max_request_size,
1489 sc->sc_info.mci_lds_present,
1490 sc->sc_info.mci_lds_degraded,
1491 sc->sc_info.mci_lds_offline,
1492 sc->sc_info.mci_pd_present);
1493
1494 DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
1495 DEVNAME(sc),
1496 sc->sc_info.mci_pd_disks_present,
1497 sc->sc_info.mci_pd_disks_pred_failure,
1498 sc->sc_info.mci_pd_disks_failed);
1499
1500 DPRINTF("%s: nvram %d mem %d flash %d\n",
1501 DEVNAME(sc),
1502 sc->sc_info.mci_nvram_size,
1503 sc->sc_info.mci_memory_size,
1504 sc->sc_info.mci_flash_size);
1505
1506 DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
1507 DEVNAME(sc),
1508 sc->sc_info.mci_ram_correctable_errors,
1509 sc->sc_info.mci_ram_uncorrectable_errors,
1510 sc->sc_info.mci_cluster_allowed,
1511 sc->sc_info.mci_cluster_active);
1512
1513 DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
1514 DEVNAME(sc),
1515 sc->sc_info.mci_max_strips_per_io,
1516 sc->sc_info.mci_raid_levels,
1517 sc->sc_info.mci_adapter_ops,
1518 sc->sc_info.mci_ld_ops);
1519
1520 DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
1521 DEVNAME(sc),
1522 sc->sc_info.mci_stripe_sz_ops.min,
1523 sc->sc_info.mci_stripe_sz_ops.max,
1524 sc->sc_info.mci_pd_ops,
1525 sc->sc_info.mci_pd_mix_support);
1526
1527 DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
1528 DEVNAME(sc),
1529 sc->sc_info.mci_ecc_bucket_count,
1530 sc->sc_info.mci_package_version);
1531
1532 DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
1533 DEVNAME(sc),
1534 sc->sc_info.mci_properties.mcp_seq_num,
1535 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
1536 sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
1537 sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
1538
1539 DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
1540 DEVNAME(sc),
1541 sc->sc_info.mci_properties.mcp_rebuild_rate,
1542 sc->sc_info.mci_properties.mcp_patrol_read_rate,
1543 sc->sc_info.mci_properties.mcp_bgi_rate,
1544 sc->sc_info.mci_properties.mcp_cc_rate);
1545
1546 DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
1547 DEVNAME(sc),
1548 sc->sc_info.mci_properties.mcp_recon_rate,
1549 sc->sc_info.mci_properties.mcp_cache_flush_interval,
1550 sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
1551 sc->sc_info.mci_properties.mcp_spinup_delay,
1552 sc->sc_info.mci_properties.mcp_cluster_enable);
1553
1554 DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
1555 DEVNAME(sc),
1556 sc->sc_info.mci_properties.mcp_coercion_mode,
1557 sc->sc_info.mci_properties.mcp_alarm_enable,
1558 sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
1559 sc->sc_info.mci_properties.mcp_disable_battery_warn,
1560 sc->sc_info.mci_properties.mcp_ecc_bucket_size);
1561
1562 DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
1563 DEVNAME(sc),
1564 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
1565 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
1566 sc->sc_info.mci_properties.mcp_expose_encl_devices);
1567
1568 DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
1569 DEVNAME(sc),
1570 sc->sc_info.mci_pci.mip_vendor,
1571 sc->sc_info.mci_pci.mip_device,
1572 sc->sc_info.mci_pci.mip_subvendor,
1573 sc->sc_info.mci_pci.mip_subdevice);
1574
1575 DPRINTF("%s: type %#x port_count %d port_addr ",
1576 DEVNAME(sc),
1577 sc->sc_info.mci_host.mih_type,
1578 sc->sc_info.mci_host.mih_port_count);
1579
1580 for (i = 0; i < 8; i++)
1581 DPRINTF("%.0" PRIx64 " ", sc->sc_info.mci_host.mih_port_addr[i]);
1582 DPRINTF("\n");
1583
1584 DPRINTF("%s: type %.x port_count %d port_addr ",
1585 DEVNAME(sc),
1586 sc->sc_info.mci_device.mid_type,
1587 sc->sc_info.mci_device.mid_port_count);
1588
1589 for (i = 0; i < 8; i++)
1590 DPRINTF("%.0" PRIx64 " ", sc->sc_info.mci_device.mid_port_addr[i]);
1591 DPRINTF("\n");
1592
1593 return (0);
1594 }
1595
/*
 * Submit an MFI frame through the MFA (legacy) doorbell and busy-wait
 * for the firmware to update its status field.  Used before interrupts
 * are usable (e.g. IOC init).  Returns 0 on completion, 1 on timeout.
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header *hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	/* completion is detected by the status changing from this value */
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		/* pull the frame back from the device to inspect the status */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		/* hand the frame back to the device before the next poll */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	return (rv);
}
1651
1652 int
1653 mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
1654 {
1655 void (*done)(struct mfii_softc *, struct mfii_ccb *);
1656 void *cookie;
1657 int rv = 1;
1658
1659 done = ccb->ccb_done;
1660 cookie = ccb->ccb_cookie;
1661
1662 ccb->ccb_done = mfii_poll_done;
1663 ccb->ccb_cookie = &rv;
1664
1665 mfii_start(sc, ccb);
1666
1667 do {
1668 delay(10);
1669 mfii_postq(sc);
1670 } while (rv == 1);
1671
1672 ccb->ccb_cookie = cookie;
1673 done(sc, ccb);
1674
1675 return (0);
1676 }
1677
1678 void
1679 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1680 {
1681 int *rv = ccb->ccb_cookie;
1682
1683 *rv = 0;
1684 }
1685
/*
 * Submit a command and sleep until it completes.  The ccb cookie
 * doubles as the "still in flight" marker: mfii_exec_done() clears it
 * and signals the per-ccb condvar.
 */
int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = ccb;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	/* wait for mfii_exec_done() to clear the cookie */
	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	return (0);
}
1706
/* Completion hook for mfii_exec(): clear the cookie and wake the waiter. */
void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&ccb->ccb_mtx);
	ccb->ccb_cookie = NULL;
	cv_signal(&ccb->ccb_cv);
	mutex_exit(&ccb->ccb_mtx);
}
1715
1716 int
1717 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1718 void *buf, size_t len, mfii_direction_t dir, bool poll)
1719 {
1720 struct mfii_ccb *ccb;
1721 int rv;
1722
1723 KASSERT(mutex_owned(&sc->sc_lock));
1724 if (!sc->sc_running)
1725 return EAGAIN;
1726
1727 ccb = mfii_get_ccb(sc);
1728 if (ccb == NULL)
1729 return (ENOMEM);
1730
1731 mfii_scrub_ccb(ccb);
1732 rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, dir, poll);
1733 mfii_put_ccb(sc, ccb);
1734
1735 return (rv);
1736 }
1737
/*
 * Build and issue an MFI DCMD management command on the supplied ccb.
 * The MFI frame lives in the per-ccb MFI area and is pointed at by a
 * single chain SGE inside an MPII pass-through request.  Optional data
 * (buf/len) is mapped through 32-bit SG entries inside the MFI frame.
 * Returns 0 on MFI_STAT_OK, ENOMEM on DMA load failure, EIO otherwise.
 */
int
mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
    const union mfi_mbox *mbox, void *buf, size_t len, mfii_direction_t dir,
    bool poll)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	int rv = EIO;

	/* interrupts are not available during early boot; force polling */
	if (cold)
		poll = true;

	ccb->ccb_data = buf;
	ccb->ccb_len = len;
	ccb->ccb_direction = dir;
	switch (dir) {
	case MFII_DATA_IN:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case MFII_DATA_OUT:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		break;
	case MFII_DATA_NONE:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
		break;
	}

	/* map the data buffer into the MFI frame's 32-bit SG list */
	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl, poll) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap32->dm_nsegs;
	KASSERT(!ccb->ccb_dma64);

	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 is in 32-bit words, chain_offset in 16-byte units */
	io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
	io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;

	/* chain SGE points at the MFI frame itself */
	sge->sg_addr = htole64(ccb->ccb_mfi_dva);
	sge->sg_len = htole32(MFI_FRAME_SIZE);
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	if (poll) {
		ccb->ccb_done = mfii_empty_done;
		mfii_poll(sc, ccb);
	} else
		mfii_exec(sc, ccb);

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;
	}

done:
	return (rv);
}
1808
/* No-op completion hook: status is read directly from the MFI frame. */
void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
}
1814
1815 int
1816 mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
1817 void *sglp, int nosleep)
1818 {
1819 union mfi_sgl *sgl = sglp;
1820 bus_dmamap_t dmap = ccb->ccb_dmamap32;
1821 int error;
1822 int i;
1823
1824 KASSERT(!ccb->ccb_dma64);
1825 if (ccb->ccb_len == 0)
1826 return (0);
1827
1828 error = bus_dmamap_load(sc->sc_dmat, dmap,
1829 ccb->ccb_data, ccb->ccb_len, NULL,
1830 nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
1831 if (error) {
1832 printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
1833 return (1);
1834 }
1835
1836 for (i = 0; i < dmap->dm_nsegs; i++) {
1837 sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
1838 sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
1839 }
1840
1841 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1842 ccb->ccb_direction == MFII_DATA_OUT ?
1843 BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
1844
1845 return (0);
1846 }
1847
/*
 * Hand a request descriptor to the controller by writing it to the
 * inbound queue port.  The 64-bit descriptor must appear atomic to the
 * hardware, so the split 32-bit writes are serialized with sc_post_mtx
 * and ordered low word before high word.
 */
void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	u_long *r = (u_long *)&ccb->ccb_req;

	/* flush the request frame to the device before posting it */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#if defined(__LP64__) && 0
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
#else
	mutex_enter(&sc->sc_post_mtx);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);

	/* the high-word write triggers consumption of the descriptor */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
	mutex_exit(&sc->sc_post_mtx);
#endif
}
1871
/*
 * Common completion path: pull the request frame, SGL area and data
 * buffer back from the device, unload the data map, then invoke the
 * ccb's done hook.
 */
void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	/* data was mapped through either the 64-bit or the 32-bit map */
	if (ccb->ccb_dma64) {
		KASSERT(ccb->ccb_len > 0);
		bus_dmamap_sync(sc->sc_dmat64, ccb->ccb_dmamap64,
		    0, ccb->ccb_dmamap64->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat64, ccb->ccb_dmamap64);
	} else if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	ccb->ccb_done(sc, ccb);
}
1904
1905 int
1906 mfii_initialise_firmware(struct mfii_softc *sc)
1907 {
1908 struct mpii_msg_iocinit_request *iiq;
1909 struct mfii_dmamem *m;
1910 struct mfii_ccb *ccb;
1911 struct mfi_init_frame *init;
1912 int rv;
1913
1914 m = mfii_dmamem_alloc(sc, sizeof(*iiq));
1915 if (m == NULL)
1916 return (1);
1917
1918 iiq = MFII_DMA_KVA(m);
1919 memset(iiq, 0, sizeof(*iiq));
1920
1921 iiq->function = MPII_FUNCTION_IOC_INIT;
1922 iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
1923
1924 iiq->msg_version_maj = 0x02;
1925 iiq->msg_version_min = 0x00;
1926 iiq->hdr_version_unit = 0x10;
1927 iiq->hdr_version_dev = 0x0;
1928
1929 iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
1930
1931 iiq->reply_descriptor_post_queue_depth =
1932 htole16(sc->sc_reply_postq_depth);
1933 iiq->reply_free_queue_depth = htole16(0);
1934
1935 iiq->sense_buffer_address_high = htole32(
1936 MFII_DMA_DVA(sc->sc_sense) >> 32);
1937
1938 iiq->reply_descriptor_post_queue_address_lo =
1939 htole32(MFII_DMA_DVA(sc->sc_reply_postq));
1940 iiq->reply_descriptor_post_queue_address_hi =
1941 htole32(MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
1942
1943 iiq->system_request_frame_base_address_lo =
1944 htole32(MFII_DMA_DVA(sc->sc_requests));
1945 iiq->system_request_frame_base_address_hi =
1946 htole32(MFII_DMA_DVA(sc->sc_requests) >> 32);
1947
1948 iiq->timestamp = htole64(time_uptime);
1949
1950 ccb = mfii_get_ccb(sc);
1951 if (ccb == NULL) {
1952 /* shouldn't ever run out of ccbs during attach */
1953 return (1);
1954 }
1955 mfii_scrub_ccb(ccb);
1956 init = ccb->ccb_request;
1957
1958 init->mif_header.mfh_cmd = MFI_CMD_INIT;
1959 init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
1960 init->mif_qinfo_new_addr_lo = htole32(MFII_DMA_DVA(m));
1961 init->mif_qinfo_new_addr_hi = htole32((uint64_t)MFII_DMA_DVA(m) >> 32);
1962
1963 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
1964 0, MFII_DMA_LEN(sc->sc_reply_postq),
1965 BUS_DMASYNC_PREREAD);
1966
1967 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1968 0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
1969
1970 rv = mfii_mfa_poll(sc, ccb);
1971
1972 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1973 0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
1974
1975 mfii_put_ccb(sc, ccb);
1976 mfii_dmamem_free(sc, m);
1977
1978 return (rv);
1979 }
1980
1981 int
1982 mfii_my_intr(struct mfii_softc *sc)
1983 {
1984 u_int32_t status;
1985
1986 status = mfii_read(sc, MFI_OSTS);
1987
1988 DNPRINTF(MFII_D_INTR, "%s: intr status 0x%x\n", DEVNAME(sc), status);
1989 if (ISSET(status, 0x1)) {
1990 mfii_write(sc, MFI_OSTS, status);
1991 return (1);
1992 }
1993
1994 return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
1995 }
1996
/* Interrupt handler: claim the interrupt and drain the reply queue. */
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (!mfii_my_intr(sc))
		return (0);

	mfii_postq(sc);
	return (1);
}
2009
2010 void
2011 mfii_postq(struct mfii_softc *sc)
2012 {
2013 struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
2014 struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
2015 struct mpii_reply_descr *rdp;
2016 struct mfii_ccb *ccb;
2017 int rpi = 0;
2018
2019 mutex_enter(&sc->sc_reply_postq_mtx);
2020
2021 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2022 0, MFII_DMA_LEN(sc->sc_reply_postq),
2023 BUS_DMASYNC_POSTREAD);
2024
2025 for (;;) {
2026 rdp = &postq[sc->sc_reply_postq_index];
2027 DNPRINTF(MFII_D_INTR, "%s: mfii_postq index %d flags 0x%x data 0x%x\n",
2028 DEVNAME(sc), sc->sc_reply_postq_index, rdp->reply_flags,
2029 rdp->data == 0xffffffff);
2030 if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
2031 MPII_REPLY_DESCR_UNUSED)
2032 break;
2033 if (rdp->data == 0xffffffff) {
2034 /*
2035 * ioc is still writing to the reply post queue
2036 * race condition - bail!
2037 */
2038 break;
2039 }
2040
2041 ccb = &sc->sc_ccb[le16toh(rdp->smid) - 1];
2042 SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
2043 memset(rdp, 0xff, sizeof(*rdp));
2044
2045 sc->sc_reply_postq_index++;
2046 sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
2047 rpi = 1;
2048 }
2049
2050 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2051 0, MFII_DMA_LEN(sc->sc_reply_postq),
2052 BUS_DMASYNC_PREREAD);
2053
2054 if (rpi)
2055 mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);
2056
2057 mutex_exit(&sc->sc_reply_postq_mtx);
2058
2059 while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
2060 SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
2061 mfii_done(sc, ccb);
2062 }
2063 }
2064
2065 void
2066 mfii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
2067 void *arg)
2068 {
2069 struct scsipi_periph *periph;
2070 struct scsipi_xfer *xs;
2071 struct scsipi_adapter *adapt = chan->chan_adapter;
2072 struct mfii_softc *sc = device_private(adapt->adapt_dev);
2073 struct mfii_ccb *ccb;
2074 int timeout;
2075 int target;
2076
2077 switch(req) {
2078 case ADAPTER_REQ_GROW_RESOURCES:
2079 /* Not supported. */
2080 return;
2081 case ADAPTER_REQ_SET_XFER_MODE:
2082 {
2083 struct scsipi_xfer_mode *xm = arg;
2084 xm->xm_mode = PERIPH_CAP_TQING;
2085 xm->xm_period = 0;
2086 xm->xm_offset = 0;
2087 scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
2088 return;
2089 }
2090 case ADAPTER_REQ_RUN_XFER:
2091 break;
2092 }
2093
2094 xs = arg;
2095 periph = xs->xs_periph;
2096 target = periph->periph_target;
2097
2098 if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
2099 periph->periph_lun != 0) {
2100 xs->error = XS_SELTIMEOUT;
2101 scsipi_done(xs);
2102 return;
2103 }
2104
2105 if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
2106 xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
2107 /* the cache is stable storage, don't flush */
2108 xs->error = XS_NOERROR;
2109 xs->status = SCSI_OK;
2110 xs->resid = 0;
2111 scsipi_done(xs);
2112 return;
2113 }
2114
2115 ccb = mfii_get_ccb(sc);
2116 if (ccb == NULL) {
2117 xs->error = XS_RESOURCE_SHORTAGE;
2118 scsipi_done(xs);
2119 return;
2120 }
2121 mfii_scrub_ccb(ccb);
2122 ccb->ccb_cookie = xs;
2123 ccb->ccb_done = mfii_scsi_cmd_done;
2124 ccb->ccb_data = xs->data;
2125 ccb->ccb_len = xs->datalen;
2126
2127 timeout = mstohz(xs->timeout);
2128 if (timeout == 0)
2129 timeout = 1;
2130 callout_reset(&xs->xs_callout, timeout, mfii_scsi_cmd_tmo, ccb);
2131
2132 switch (xs->cmd->opcode) {
2133 case SCSI_READ_6_COMMAND:
2134 case READ_10:
2135 case READ_12:
2136 case READ_16:
2137 case SCSI_WRITE_6_COMMAND:
2138 case WRITE_10:
2139 case WRITE_12:
2140 case WRITE_16:
2141 if (mfii_scsi_cmd_io(sc, ccb, xs) != 0)
2142 goto stuffup;
2143 break;
2144
2145 default:
2146 if (mfii_scsi_cmd_cdb(sc, ccb, xs) != 0)
2147 goto stuffup;
2148 break;
2149 }
2150
2151 xs->error = XS_NOERROR;
2152 xs->resid = 0;
2153
2154 DNPRINTF(MFII_D_CMD, "%s: start io %d cmd %d\n", DEVNAME(sc), target,
2155 xs->cmd->opcode);
2156
2157 if (xs->xs_control & XS_CTL_POLL) {
2158 if (mfii_poll(sc, ccb) != 0)
2159 goto stuffup;
2160 return;
2161 }
2162
2163 ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
2164 mfii_start(sc, ccb);
2165
2166 return;
2167
2168 stuffup:
2169 xs->error = XS_DRIVER_STUFFUP;
2170 scsipi_done(xs);
2171 mfii_put_ccb(sc, ccb);
2172 }
2173
/*
 * Completion handler for logical-disk commands issued via
 * mfii_scsi_cmd_io()/mfii_scsi_cmd_cdb().  Maps the firmware RAID
 * context status to a scsipi error code and completes the transfer
 * once the last reference on the ccb is dropped.
 */
void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	u_int refs = 2;

	/*
	 * The ccb carries two references: one for the chip completion
	 * (this path) and one for the timeout callout.  If callout_stop()
	 * reports that the callout already fired, mfii_scsi_cmd_tmo()
	 * owns the second reference, so only drop one here.
	 */
	if (callout_stop(&xs->xs_callout) != 0)
		refs = 1;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* firmware deposited sense data in the per-ccb sense buffer */
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* only the holder of the last reference completes the transfer */
	if (atomic_add_int_nv(&ccb->ccb_refcnt, -refs) == 0) {
		scsipi_done(xs);
		mfii_put_ccb(sc, ccb);
	}
}
2210
/*
 * Build and queue setup for a read/write command on a logical disk,
 * using the fast-path LDIO request format.  Returns 0 on success,
 * non-zero if the data buffer could not be DMA-mapped; does not
 * start the command (the caller calls mfii_start()/mfii_poll()).
 */
int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context immediately follows the MPII I/O request */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* offset of the first SGE, in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	/* map the data buffer and build the SGL after the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	/* the segment count lives in different context fields per IOP gen */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2270
/*
 * Build a non-read/write CDB (inquiry, mode sense, etc.) for a logical
 * disk, framed as a generic SCSI request through the LDIO path.
 * Returns 0 on success, non-zero if DMA mapping failed; the caller
 * starts the command.
 */
int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context immediately follows the MPII I/O request */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(periph->periph_target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* offset of the first SGE, in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(periph->periph_lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(periph->periph_target);

	/* map the data buffer and build the SGL after the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2318
2319 #if 0
/*
 * Physical-disk ("syspd") passthrough command entry point, carried
 * over from the OpenBSD driver.  Compiled out (#if 0): it still uses
 * OpenBSD's scsi_link/xs->io interfaces which have no NetBSD
 * equivalent here.  Kept as a reference for a future port.
 */
void
mfii_pd_scsi_cmd(struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	// XXX timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->xs_control, XS_CTL_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	// XXX timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}
2358
/*
 * Probe for a "system" (JBOD/passthrough) physical disk at the given
 * target.  Compiled out (#if 0) along with the rest of the OpenBSD
 * syspd passthrough support.
 */
int
mfii_pd_scsi_probe(struct scsi_link *link)
{
	struct mfii_softc *sc = link->adapter_softc;
	struct mfi_pd_details mpd;
	union mfi_mbox mbox;
	int rv;

	/* physical disks only exist at LUN 0 */
	if (link->lun > 0)
		return (0);

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = htole16(link->target);

	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
	    MFII_DATA_IN, true);
	if (rv != 0)
		return (EIO);

	/* only disks in the SYSTEM state are exposed as passthrough */
	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
		return (ENXIO);

	return (0);
}
2383
/*
 * Build a passthrough CDB targeting a physical disk via its MPII
 * device handle.  Compiled out (#if 0) with the rest of the syspd
 * support.  Returns a scsipi XS_* status code rather than 0/1.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	dev_handle = mfii_dev_handle(sc, link->target);
	/* 0xffff means no mapped device handle for this target */
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	/* mark the request as a system-PD (passthrough) I/O */
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
2439 #endif
2440
/*
 * DMA-map the ccb's data buffer and build the scatter/gather list at
 * "sglp" (inside the request frame).  If the segments do not all fit
 * in the frame, the last in-frame slot becomes a chain element that
 * points at the ccb's external SGL area; the remaining SGEs are
 * written there.  Returns 0 on success, 1 if the map load failed.
 */
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;	/* chain element, if one is needed */
	bus_dmamap_t dmap = ccb->ccb_dmamap64;
	u_int space;
	int i;

	int error;

	/* nothing to map for dataless commands */
	if (ccb->ccb_len == 0)
		return (0);

	ccb->ccb_dma64 = true;
	error = bus_dmamap_load(sc->sc_dmat64, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* number of SGE slots left in the request frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is expressed in 16-byte units */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* on reaching the chain slot, continue in the external SGL */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the last SGE written as end-of-list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat64, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* flush the external SGL so the chip sees the chained entries */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2508
2509 void
2510 mfii_scsi_cmd_tmo(void *p)
2511 {
2512 struct mfii_ccb *ccb = p;
2513 struct mfii_softc *sc = ccb->ccb_sc;
2514 bool start_abort;
2515
2516 printf("%s: cmd timeout ccb %p\n", DEVNAME(sc), p);
2517
2518 mutex_enter(&sc->sc_abort_mtx);
2519 start_abort = (SIMPLEQ_FIRST(&sc->sc_abort_list) == 0);
2520 SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2521 if (start_abort)
2522 workqueue_enqueue(sc->sc_abort_wq, &sc->sc_abort_work, NULL);
2523 mutex_exit(&sc->sc_abort_mtx);
2524 }
2525
/*
 * Workqueue handler: for every ccb queued by mfii_scsi_cmd_tmo(),
 * issue a SCSI task-abort to the firmware (or complete the transfer
 * immediately if the logical disk has disappeared).
 */
void
mfii_abort_task(struct work *wk, void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* atomically steal the whole abort list */
	mutex_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mutex_exit(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsipi_xfer *xs = ccb->ccb_cookie;
		struct scsipi_periph *periph = xs->xs_periph;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		if (!sc->sc_ld[periph->periph_target].ld_present) {
			/* device is gone */
			if (atomic_dec_uint_nv(&ccb->ccb_refcnt) == 0) {
				scsipi_done(xs);
				mfii_put_ccb(sc, ccb);
			}
			continue;
		}

		/*
		 * NOTE(review): mfii_get_ccb() can return NULL (free list
		 * exhausted, or sc_running cleared during shutdown), which
		 * would be dereferenced in mfii_scrub_ccb() below — confirm
		 * whether a NULL check or a reserved abort ccb is needed.
		 */
		accb = mfii_get_ccb(sc);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, periph->periph_target, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		/* abort completion drops the timed-out ccb's last reference */
		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2566
2567 void
2568 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2569 uint16_t smid, uint8_t type, uint32_t flags)
2570 {
2571 struct mfii_task_mgmt *msg;
2572 struct mpii_msg_scsi_task_request *req;
2573
2574 msg = accb->ccb_request;
2575 req = &msg->mpii_request;
2576 req->dev_handle = dev_handle;
2577 req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2578 req->task_type = type;
2579 req->task_mid = htole16( smid);
2580 msg->flags = flags;
2581
2582 accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2583 accb->ccb_req.smid = le16toh(accb->ccb_smid);
2584 }
2585
2586 void
2587 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2588 {
2589 struct mfii_ccb *ccb = accb->ccb_cookie;
2590 struct scsipi_xfer *xs = ccb->ccb_cookie;
2591
2592 /* XXX check accb completion? */
2593
2594 mfii_put_ccb(sc, accb);
2595
2596 if (atomic_dec_uint_nv(&ccb->ccb_refcnt) == 0) {
2597 xs->error = XS_TIMEOUT;
2598 scsipi_done(xs);
2599 mfii_put_ccb(sc, ccb);
2600 }
2601 }
2602
2603 struct mfii_ccb *
2604 mfii_get_ccb(struct mfii_softc *sc)
2605 {
2606 struct mfii_ccb *ccb;
2607
2608 mutex_enter(&sc->sc_ccb_mtx);
2609 if (!sc->sc_running) {
2610 ccb = NULL;
2611 } else {
2612 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2613 if (ccb != NULL)
2614 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2615 }
2616 mutex_exit(&sc->sc_ccb_mtx);
2617 return (ccb);
2618 }
2619
2620 void
2621 mfii_scrub_ccb(struct mfii_ccb *ccb)
2622 {
2623 ccb->ccb_cookie = NULL;
2624 ccb->ccb_done = NULL;
2625 ccb->ccb_flags = 0;
2626 ccb->ccb_data = NULL;
2627 ccb->ccb_direction = MFII_DATA_NONE;
2628 ccb->ccb_dma64 = false;
2629 ccb->ccb_len = 0;
2630 ccb->ccb_sgl_len = 0;
2631 ccb->ccb_refcnt = 1;
2632
2633 memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2634 memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2635 memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2636 }
2637
/*
 * Return a ccb to the free queue.  Serialized against mfii_get_ccb()
 * by sc_ccb_mtx.
 */
void
mfii_put_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
	mutex_exit(&sc->sc_ccb_mtx);
}
2645
2646 int
2647 mfii_init_ccb(struct mfii_softc *sc)
2648 {
2649 struct mfii_ccb *ccb;
2650 u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2651 u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2652 u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2653 u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2654 u_int i;
2655 int error;
2656
2657 sc->sc_ccb = malloc(sc->sc_max_cmds * sizeof(struct mfii_ccb),
2658 M_DEVBUF, M_WAITOK|M_ZERO);
2659
2660 for (i = 0; i < sc->sc_max_cmds; i++) {
2661 ccb = &sc->sc_ccb[i];
2662 ccb->ccb_sc = sc;
2663
2664 /* create a dma map for transfer */
2665 error = bus_dmamap_create(sc->sc_dmat,
2666 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2667 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap32);
2668 if (error) {
2669 printf("%s: cannot create ccb dmamap32 (%d)\n",
2670 DEVNAME(sc), error);
2671 goto destroy;
2672 }
2673 error = bus_dmamap_create(sc->sc_dmat64,
2674 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2675 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap64);
2676 if (error) {
2677 printf("%s: cannot create ccb dmamap64 (%d)\n",
2678 DEVNAME(sc), error);
2679 goto destroy32;
2680 }
2681
2682 /* select i + 1'th request. 0 is reserved for events */
2683 ccb->ccb_smid = i + 1;
2684 ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2685 ccb->ccb_request = request + ccb->ccb_request_offset;
2686 ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2687 ccb->ccb_request_offset;
2688
2689 /* select i'th MFI command frame */
2690 ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2691 ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2692 ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2693 ccb->ccb_mfi_offset;
2694
2695 /* select i'th sense */
2696 ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2697 ccb->ccb_sense = (struct mfi_sense *)(sense +
2698 ccb->ccb_sense_offset);
2699 ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2700 ccb->ccb_sense_offset;
2701
2702 /* select i'th sgl */
2703 ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2704 sc->sc_max_sgl * i;
2705 ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2706 ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2707 ccb->ccb_sgl_offset;
2708
2709 mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2710 cv_init(&ccb->ccb_cv, "mfiiexec");
2711
2712 /* add ccb to queue */
2713 mfii_put_ccb(sc, ccb);
2714 }
2715
2716 return (0);
2717
2718 destroy32:
2719 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2720 destroy:
2721 /* free dma maps and ccb memory */
2722 while ((ccb = mfii_get_ccb(sc)) != NULL) {
2723 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2724 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap64);
2725 }
2726
2727 free(sc->sc_ccb, M_DEVBUF);
2728
2729 return (1);
2730 }
2731
2732 #if NBIO > 0
/*
 * bio(4) ioctl entry point: dispatch to the per-command handlers
 * under the softc lock.  Returns 0 or an errno.
 */
int
mfii_ioctl(device_t dev, u_long cmd, void *addr)
{
	struct mfii_softc *sc = device_private(dev);
	int error = 0;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));

	/* serialize all bio operations against each other */
	mutex_enter(&sc->sc_lock);

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MFII_D_IOCTL, "inq\n");
		error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MFII_D_IOCTL, "vol\n");
		error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MFII_D_IOCTL, "disk\n");
		error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MFII_D_IOCTL, "alarm\n");
		error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCBLINK:
		DNPRINTF(MFII_D_IOCTL, "blink\n");
		error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
		break;

	case BIOCSETSTATE:
		DNPRINTF(MFII_D_IOCTL, "setstate\n");
		error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

#if 0
	case BIOCPATROL:
		DNPRINTF(MFII_D_IOCTL, "patrol\n");
		error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
		break;
#endif

	default:
		DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
		error = ENOTTY;
	}

	mutex_exit(&sc->sc_lock);

	return (error);
}
2790
2791 int
2792 mfii_bio_getitall(struct mfii_softc *sc)
2793 {
2794 int i, d, rv = EINVAL;
2795 size_t size;
2796 union mfi_mbox mbox;
2797 struct mfi_conf *cfg = NULL;
2798 struct mfi_ld_details *ld_det = NULL;
2799
2800 /* get info */
2801 if (mfii_get_info(sc)) {
2802 DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
2803 DEVNAME(sc));
2804 goto done;
2805 }
2806
2807 /* send single element command to retrieve size for full structure */
2808 cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
2809 if (cfg == NULL)
2810 goto done;
2811 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
2812 MFII_DATA_IN, false)) {
2813 free(cfg, M_DEVBUF);
2814 goto done;
2815 }
2816
2817 size = cfg->mfc_size;
2818 free(cfg, M_DEVBUF);
2819
2820 /* memory for read config */
2821 cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
2822 if (cfg == NULL)
2823 goto done;
2824 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
2825 MFII_DATA_IN, false)) {
2826 free(cfg, M_DEVBUF);
2827 goto done;
2828 }
2829
2830 /* replace current pointer with new one */
2831 if (sc->sc_cfg)
2832 free(sc->sc_cfg, M_DEVBUF);
2833 sc->sc_cfg = cfg;
2834
2835 /* get all ld info */
2836 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
2837 sizeof(sc->sc_ld_list), MFII_DATA_IN, false))
2838 goto done;
2839
2840 /* get memory for all ld structures */
2841 size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
2842 if (sc->sc_ld_sz != size) {
2843 if (sc->sc_ld_details)
2844 free(sc->sc_ld_details, M_DEVBUF);
2845
2846 ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
2847 if (ld_det == NULL)
2848 goto done;
2849 sc->sc_ld_sz = size;
2850 sc->sc_ld_details = ld_det;
2851 }
2852
2853 /* find used physical disks */
2854 size = sizeof(struct mfi_ld_details);
2855 for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
2856 memset(&mbox, 0, sizeof(mbox));
2857 mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
2858 if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox,
2859 &sc->sc_ld_details[i], size, MFII_DATA_IN, false))
2860 goto done;
2861
2862 d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
2863 sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
2864 }
2865 sc->sc_no_pd = d;
2866
2867 rv = 0;
2868 done:
2869 return (rv);
2870 }
2871
2872 int
2873 mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
2874 {
2875 int rv = EINVAL;
2876 struct mfi_conf *cfg = NULL;
2877
2878 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));
2879
2880 if (mfii_bio_getitall(sc)) {
2881 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
2882 DEVNAME(sc));
2883 goto done;
2884 }
2885
2886 /* count unused disks as volumes */
2887 if (sc->sc_cfg == NULL)
2888 goto done;
2889 cfg = sc->sc_cfg;
2890
2891 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
2892 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
2893 #if notyet
2894 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
2895 (bi->bi_nodisk - sc->sc_no_pd);
2896 #endif
2897 /* tell bio who we are */
2898 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
2899
2900 rv = 0;
2901 done:
2902 return (rv);
2903 }
2904
/*
 * BIOCVOL handler: report the name, state, progress, RAID level, disk
 * count and size of one volume.  Volume ids past the logical disk
 * count are handled as hotspares/unused disks by mfii_bio_hs().
 */
int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int i, per, rv = EINVAL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	strlcpy(bv->bv_dev, sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_name,
	    sizeof(bv->bv_dev));

	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
	case MFI_LD_PROG_BGI:
		bv->bv_status = BIOC_SVSCRUB;
		/* firmware reports progress as a fraction of 0xffff */
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

#if 0
	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;
#endif

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	/* spanned volumes are reported as RAID x0 (10, 50, 60) */
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
2992
/*
 * BIOCDISK handler: report the enclosure/slot, state, size and vendor
 * of one member disk of a volume.  For a missing member, an unused
 * disk (a candidate rebuild target) is reported instead.  Disk ids
 * past the logical disk count are handled by mfii_bio_hs().
 */
int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf *cfg;
	struct mfi_array *ar;
	struct mfi_ld_cfg *ld;
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	struct scsipi_inquiry_data *inqbuf;
	char vend[8+16+4+1], *vendp;
	int i, rv = EINVAL;
	int arr, vol, disk, span;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	/* 0xffff is the firmware's "no disk in this slot" marker */
	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    MFII_DATA_IN, false))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false))
				continue;

			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no rebuild candidate found; report just the failure */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    MFII_DATA_IN, false)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* copy the full inquiry vendor/product/revision string (28 bytes) */
	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

#if 0
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}
#endif

	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);

	return (rv);
}
3146
3147 int
3148 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3149 {
3150 uint32_t opc;
3151 int rv = 0;
3152 int8_t ret;
3153 mfii_direction_t dir = MFII_DATA_NONE;
3154
3155 switch(ba->ba_opcode) {
3156 case BIOC_SADISABLE:
3157 opc = MR_DCMD_SPEAKER_DISABLE;
3158 break;
3159
3160 case BIOC_SAENABLE:
3161 opc = MR_DCMD_SPEAKER_ENABLE;
3162 break;
3163
3164 case BIOC_SASILENCE:
3165 opc = MR_DCMD_SPEAKER_SILENCE;
3166 break;
3167
3168 case BIOC_GASTATUS:
3169 opc = MR_DCMD_SPEAKER_GET;
3170 dir = MFII_DATA_IN;
3171 break;
3172
3173 case BIOC_SATEST:
3174 opc = MR_DCMD_SPEAKER_TEST;
3175 break;
3176
3177 default:
3178 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3179 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3180 return (EINVAL);
3181 }
3182
3183 if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), dir, false))
3184 rv = EINVAL;
3185 else
3186 if (ba->ba_opcode == BIOC_GASTATUS)
3187 ba->ba_status = ret;
3188 else
3189 ba->ba_status = 0;
3190
3191 return (rv);
3192 }
3193
/*
 * BIOCBLINK handler: locate the physical disk by enclosure index and
 * slot, then issue a blink/unblink locate command to the firmware.
 */
int
mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
{
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;
	uint32_t cmd;
	struct mfi_pd_list *pd;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
	    bb->bb_status);

	/* channel 0 means not in an enclosure so can't be blinked */
	if (bb->bb_channel == 0)
		return (EINVAL);

	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	/* match the requested enclosure/slot to a firmware pd id */
	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;

	switch (bb->bb_status) {
	case BIOC_SBUNBLINK:
		cmd = MR_DCMD_PD_UNBLINK;
		break;

	case BIOC_SBBLINK:
		cmd = MR_DCMD_PD_BLINK;
		break;

	case BIOC_SBALARM:
	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
		goto done;
	}


	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, MFII_DATA_NONE, false))
		goto done;

	rv = 0;
done:
	free(pd, M_DEVBUF);
	return (rv);
}
3253
/*
 * Bring a physical disk into the UNCONFIG_GOOD state so it can be
 * used (e.g. as a hotspare): clear an UNCONFIG_BAD state, then clear
 * any foreign (imported) configuration on it, re-reading the disk
 * state between steps.  Returns 0 on success, an error from
 * mfii_mgmt(), or ENXIO if the disk still isn't clean at the end.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details *pd;
	union mfi_mbox mbox;
	int rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	/* step 1: flip UNCONFIG_BAD disks to UNCONFIG_GOOD */
	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
		    MFII_DATA_NONE, false);
		if (rv != 0)
			goto done;
	}

	/* re-read the disk state after the state change */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	/* step 2: clear any foreign configuration found on the disk */
	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL,
		    fsi, sizeof(*fsi), MFII_DATA_IN, false);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL,
			    NULL, 0, MFII_DATA_NONE, false);
			if (rv != 0)
				goto done;
		}
	}

	/* verify that both steps actually took effect */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3320
3321 static int
3322 mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
3323 {
3324 struct mfi_hotspare *hs;
3325 struct mfi_pd_details *pd;
3326 union mfi_mbox mbox;
3327 size_t size;
3328 int rv = EINVAL;
3329
3330 /* we really could skip and expect that inq took care of it */
3331 if (mfii_bio_getitall(sc)) {
3332 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3333 DEVNAME(sc));
3334 return (rv);
3335 }
3336 size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;
3337
3338 hs = malloc(size, M_DEVBUF, M_WAITOK);
3339 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3340
3341 memset(&mbox, 0, sizeof mbox);
3342 mbox.s[0] = pd_id;
3343 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3344 MFII_DATA_IN, false);
3345 if (rv != 0)
3346 goto done;
3347
3348 memset(hs, 0, size);
3349 hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
3350 hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
3351 rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size,
3352 MFII_DATA_OUT, false);
3353
3354 done:
3355 free(hs, M_DEVBUF);
3356 free(pd, M_DEVBUF);
3357
3358 return (rv);
3359 }
3360
/*
 * BIOCSETSTATE ioctl backend: change a physical drive's firmware
 * state (online / offline / hotspare / rebuild).  The drive is
 * located by enclosure index and slot from the PD list, and the
 * SET_STATE command quotes the drive's current sequence number
 * (mfp_seq) from PD_GET_INFO.
 *
 * Returns 0 on success, EINVAL (or the mfii_mgmt()/helper error) on
 * failure.
 */
int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
	    MFII_DATA_IN, false))
		goto done;

	/* map the (channel, target) pair to a physical drive id */
	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	/* SET_STATE wants the drive id and its current sequence number */
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		/*
		 * A drive can only be moved to REBUILD from OFFLINE.
		 * Otherwise first make it UNCONFIG_GOOD and a spare,
		 * then re-read its info for a fresh mfp_seq.
		 */
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
	    MFII_DATA_NONE, false);
done:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);
	return (rv);
}
3455
#if 0
/*
 * BIOCPATROL ioctl backend (currently compiled out): start/stop
 * patrol reads, configure the patrol-read schedule, or report the
 * current mode and state.
 *
 * Returns 0 on success, EINVAL on any firmware command failure or
 * invalid opcode.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t opc;
	int rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status status;
	uint32_t time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		/*
		 * No payload is transferred here, so pass MFII_DATA_NONE
		 * like the other buffer-less mfii_mgmt() calls in this
		 * file (the original passed MFII_DATA_IN with a NULL
		 * buffer and zero length).
		 */
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, MFII_DATA_NONE, false))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					/* schedule relative to device time */
					prop.next_exec = time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_OUT, false))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
#endif
3592
3593 int
3594 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3595 {
3596 struct mfi_conf *cfg;
3597 struct mfi_hotspare *hs;
3598 struct mfi_pd_details *pd;
3599 struct bioc_disk *sdhs;
3600 struct bioc_vol *vdhs;
3601 struct scsipi_inquiry_data *inqbuf;
3602 char vend[8+16+4+1], *vendp;
3603 int i, rv = EINVAL;
3604 uint32_t size;
3605 union mfi_mbox mbox;
3606
3607 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3608
3609 if (!bio_hs)
3610 return (EINVAL);
3611
3612 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3613
3614 /* send single element command to retrieve size for full structure */
3615 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3616 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
3617 MFII_DATA_IN, false))
3618 goto freeme;
3619
3620 size = cfg->mfc_size;
3621 free(cfg, M_DEVBUF);
3622
3623 /* memory for read config */
3624 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3625 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
3626 MFII_DATA_IN, false))
3627 goto freeme;
3628
3629 /* calculate offset to hs structure */
3630 hs = (struct mfi_hotspare *)(
3631 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3632 cfg->mfc_array_size * cfg->mfc_no_array +
3633 cfg->mfc_ld_size * cfg->mfc_no_ld);
3634
3635 if (volid < cfg->mfc_no_ld)
3636 goto freeme; /* not a hotspare */
3637
3638 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3639 goto freeme; /* not a hotspare */
3640
3641 /* offset into hotspare structure */
3642 i = volid - cfg->mfc_no_ld;
3643
3644 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3645 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3646 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3647
3648 /* get pd fields */
3649 memset(&mbox, 0, sizeof(mbox));
3650 mbox.s[0] = hs[i].mhs_pd.mfp_id;
3651 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3652 MFII_DATA_IN, false)) {
3653 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3654 DEVNAME(sc));
3655 goto freeme;
3656 }
3657
3658 switch (type) {
3659 case MFI_MGMT_VD:
3660 vdhs = bio_hs;
3661 vdhs->bv_status = BIOC_SVONLINE;
3662 vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3663 vdhs->bv_level = -1; /* hotspare */
3664 vdhs->bv_nodisk = 1;
3665 break;
3666
3667 case MFI_MGMT_SD:
3668 sdhs = bio_hs;
3669 sdhs->bd_status = BIOC_SDHOTSPARE;
3670 sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3671 sdhs->bd_channel = pd->mpd_enc_idx;
3672 sdhs->bd_target = pd->mpd_enc_slot;
3673 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
3674 vendp = inqbuf->vendor;
3675 memcpy(vend, vendp, sizeof vend - 1);
3676 vend[sizeof vend - 1] = '\0';
3677 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3678 break;
3679
3680 default:
3681 goto freeme;
3682 }
3683
3684 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3685 rv = 0;
3686 freeme:
3687 free(pd, M_DEVBUF);
3688 free(cfg, M_DEVBUF);
3689
3690 return (rv);
3691 }
3692
3693 #endif /* NBIO > 0 */
3694
3695 #define MFI_BBU_SENSORS 4
3696
3697 void
3698 mfii_bbu(struct mfii_softc *sc, envsys_data_t *edata)
3699 {
3700 struct mfi_bbu_status bbu;
3701 u_int32_t status;
3702 u_int32_t mask;
3703 u_int32_t soh_bad;
3704 int rv;
3705
3706 mutex_enter(&sc->sc_lock);
3707 rv = mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
3708 sizeof(bbu), MFII_DATA_IN, false);
3709 mutex_exit(&sc->sc_lock);
3710 if (rv != 0) {
3711 edata->state = ENVSYS_SINVALID;
3712 edata->value_cur = 0;
3713 return;
3714 }
3715
3716 switch (bbu.battery_type) {
3717 case MFI_BBU_TYPE_IBBU:
3718 mask = MFI_BBU_STATE_BAD_IBBU;
3719 soh_bad = 0;
3720 break;
3721 case MFI_BBU_TYPE_BBU:
3722 mask = MFI_BBU_STATE_BAD_BBU;
3723 soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3724 break;
3725
3726 case MFI_BBU_TYPE_NONE:
3727 default:
3728 edata->state = ENVSYS_SCRITICAL;
3729 edata->value_cur = 0;
3730 return;
3731 }
3732
3733 status = le32toh(bbu.fw_status) & mask;
3734 switch(edata->sensor) {
3735 case 0:
3736 edata->value_cur = (status || soh_bad) ? 0 : 1;
3737 edata->state =
3738 edata->value_cur ? ENVSYS_SVALID : ENVSYS_SCRITICAL;
3739 return;
3740 case 1:
3741 edata->value_cur = le16toh(bbu.voltage) * 1000;
3742 edata->state = ENVSYS_SVALID;
3743 return;
3744 case 2:
3745 edata->value_cur = (int16_t)le16toh(bbu.current) * 1000;
3746 edata->state = ENVSYS_SVALID;
3747 return;
3748 case 3:
3749 edata->value_cur = le16toh(bbu.temperature) * 1000000 + 273150000;
3750 edata->state = ENVSYS_SVALID;
3751 return;
3752 }
3753 }
3754
3755 void
3756 mfii_refresh_ld_sensor(struct mfii_softc *sc, envsys_data_t *edata)
3757 {
3758 struct bioc_vol bv;
3759 int error;
3760
3761 memset(&bv, 0, sizeof(bv));
3762 bv.bv_volid = edata->sensor - MFI_BBU_SENSORS;
3763 mutex_enter(&sc->sc_lock);
3764 error = mfii_ioctl_vol(sc, &bv);
3765 mutex_exit(&sc->sc_lock);
3766 if (error)
3767 bv.bv_status = BIOC_SVINVALID;
3768 bio_vol_to_envsys(edata, &bv);
3769 }
3770
3771 void
3772 mfii_init_ld_sensor(struct mfii_softc *sc, envsys_data_t *sensor, int i)
3773 {
3774 sensor->units = ENVSYS_DRIVE;
3775 sensor->state = ENVSYS_SINVALID;
3776 sensor->value_cur = ENVSYS_DRIVE_EMPTY;
3777 /* Enable monitoring for drive state changes */
3778 sensor->flags |= ENVSYS_FMONSTCHANGED;
3779 snprintf(sensor->desc, sizeof(sensor->desc), "%s:%d", DEVNAME(sc), i);
3780 }
3781
3782 static void
3783 mfii_attach_sensor(struct mfii_softc *sc, envsys_data_t *s)
3784 {
3785 if (sysmon_envsys_sensor_attach(sc->sc_sme, s))
3786 aprint_error_dev(sc->sc_dev,
3787 "failed to attach sensor %s\n", s->desc);
3788 }
3789
3790 int
3791 mfii_create_sensors(struct mfii_softc *sc)
3792 {
3793 int i, rv;
3794 const int nsensors = MFI_BBU_SENSORS + MFI_MAX_LD;
3795
3796 sc->sc_sme = sysmon_envsys_create();
3797 sc->sc_sensors = malloc(sizeof(envsys_data_t) * nsensors,
3798 M_DEVBUF, M_NOWAIT | M_ZERO);
3799
3800 if (sc->sc_sensors == NULL) {
3801 aprint_error_dev(sc->sc_dev, "can't allocate envsys_data_t\n");
3802 return ENOMEM;
3803 }
3804 /* BBU */
3805 sc->sc_sensors[0].units = ENVSYS_INDICATOR;
3806 sc->sc_sensors[0].state = ENVSYS_SINVALID;
3807 sc->sc_sensors[0].value_cur = 0;
3808 sc->sc_sensors[1].units = ENVSYS_SVOLTS_DC;
3809 sc->sc_sensors[1].state = ENVSYS_SINVALID;
3810 sc->sc_sensors[1].value_cur = 0;
3811 sc->sc_sensors[2].units = ENVSYS_SAMPS;
3812 sc->sc_sensors[2].state = ENVSYS_SINVALID;
3813 sc->sc_sensors[2].value_cur = 0;
3814 sc->sc_sensors[3].units = ENVSYS_STEMP;
3815 sc->sc_sensors[3].state = ENVSYS_SINVALID;
3816 sc->sc_sensors[3].value_cur = 0;
3817
3818 if (ISSET(le32toh(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3819 sc->sc_bbuok = true;
3820 sc->sc_sensors[0].flags |= ENVSYS_FMONCRITICAL;
3821 snprintf(sc->sc_sensors[0].desc, sizeof(sc->sc_sensors[0].desc),
3822 "%s BBU state", DEVNAME(sc));
3823 snprintf(sc->sc_sensors[1].desc, sizeof(sc->sc_sensors[1].desc),
3824 "%s BBU voltage", DEVNAME(sc));
3825 snprintf(sc->sc_sensors[2].desc, sizeof(sc->sc_sensors[2].desc),
3826 "%s BBU current", DEVNAME(sc));
3827 snprintf(sc->sc_sensors[3].desc, sizeof(sc->sc_sensors[3].desc),
3828 "%s BBU temperature", DEVNAME(sc));
3829 for (i = 0; i < MFI_BBU_SENSORS; i++) {
3830 mfii_attach_sensor(sc, &sc->sc_sensors[i]);
3831 }
3832 }
3833
3834 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3835 mfii_init_ld_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS], i);
3836 mfii_attach_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS]);
3837 }
3838
3839 sc->sc_sme->sme_name = DEVNAME(sc);
3840 sc->sc_sme->sme_cookie = sc;
3841 sc->sc_sme->sme_refresh = mfii_refresh_sensor;
3842 rv = sysmon_envsys_register(sc->sc_sme);
3843 if (rv) {
3844 aprint_error_dev(sc->sc_dev,
3845 "unable to register with sysmon (rv = %d)\n", rv);
3846 }
3847 return rv;
3848
3849 }
3850
3851 static int
3852 mfii_destroy_sensors(struct mfii_softc *sc)
3853 {
3854 if (sc->sc_sme == NULL)
3855 return 0;
3856 sysmon_envsys_unregister(sc->sc_sme);
3857 sc->sc_sme = NULL;
3858 free(sc->sc_sensors, M_DEVBUF);
3859 return 0;
3860 }
3861
3862 void
3863 mfii_refresh_sensor(struct sysmon_envsys *sme, envsys_data_t *edata)
3864 {
3865 struct mfii_softc *sc = sme->sme_cookie;
3866
3867 if (edata->sensor >= MFI_BBU_SENSORS + MFI_MAX_LD)
3868 return;
3869
3870 if (edata->sensor < MFI_BBU_SENSORS) {
3871 if (sc->sc_bbuok)
3872 mfii_bbu(sc, edata);
3873 } else {
3874 mfii_refresh_ld_sensor(sc, edata);
3875 }
3876 }
3877