mpii.c revision 1.26 1 /* $NetBSD: mpii.c,v 1.26 2021/02/23 07:15:30 skrll Exp $ */
2 /* $OpenBSD: mpii.c,v 1.115 2018/08/14 05:22:21 jmatthew Exp $ */
3 /*
4 * Copyright (c) 2010, 2012 Mike Belopuhov
5 * Copyright (c) 2009 James Giannoules
6 * Copyright (c) 2005 - 2010 David Gwynne <dlg (at) openbsd.org>
7 * Copyright (c) 2005 - 2010 Marco Peereboom <marco (at) openbsd.org>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22 #include <sys/cdefs.h>
23 __KERNEL_RCSID(0, "$NetBSD: mpii.c,v 1.26 2021/02/23 07:15:30 skrll Exp $");
24
25 #include "bio.h"
26
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/device.h>
31 #include <sys/ioctl.h>
32 #include <sys/malloc.h>
33 #include <sys/kernel.h>
34 #include <sys/mutex.h>
35 #include <sys/condvar.h>
36 #include <sys/dkio.h>
37 #include <sys/tree.h>
38
39 #include <dev/pci/pcireg.h>
40 #include <dev/pci/pcivar.h>
41 #include <dev/pci/pcidevs.h>
42
43 #include <dev/scsipi/scsipi_all.h>
44 #include <dev/scsipi/scsi_all.h>
45 #include <dev/scsipi/scsiconf.h>
46
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #include <dev/sysmon/sysmonvar.h>
50 #include <sys/envsys.h>
51 #endif
52
53 #include <dev/pci/mpiireg.h>
54
55 // #define MPII_DEBUG
56 #ifdef MPII_DEBUG
57 #define DPRINTF(x...) do { if (mpii_debug) printf(x); } while(0)
58 #define DNPRINTF(n,x...) do { if (mpii_debug & (n)) printf(x); } while(0)
59 #define MPII_D_CMD (0x0001)
60 #define MPII_D_INTR (0x0002)
61 #define MPII_D_MISC (0x0004)
62 #define MPII_D_DMA (0x0008)
63 #define MPII_D_IOCTL (0x0010)
64 #define MPII_D_RW (0x0020)
65 #define MPII_D_MEM (0x0040)
66 #define MPII_D_CCB (0x0080)
67 #define MPII_D_PPR (0x0100)
68 #define MPII_D_RAID (0x0200)
69 #define MPII_D_EVT (0x0400)
70 #define MPII_D_CFG (0x0800)
71 #define MPII_D_MAP (0x1000)
72
73 u_int32_t mpii_debug = 0
74 // | MPII_D_CMD
75 // | MPII_D_INTR
76 // | MPII_D_MISC
77 // | MPII_D_DMA
78 // | MPII_D_IOCTL
79 // | MPII_D_RW
80 // | MPII_D_MEM
81 // | MPII_D_CCB
82 // | MPII_D_PPR
83 // | MPII_D_RAID
84 // | MPII_D_EVT
85 // | MPII_D_CFG
86 // | MPII_D_MAP
87 ;
88 #else
89 #define DPRINTF(x...)
90 #define DNPRINTF(n,x...)
91 #endif
92
93 #define MPII_REQUEST_SIZE (512)
94 #define MPII_REQUEST_CREDIT (128)
95
/*
 * A single contiguous DMA-safe allocation: the dmamap, its (single)
 * segment, the allocation size and the kernel virtual mapping.
 */
struct mpii_dmamem {
	bus_dmamap_t		mdm_map;	/* load/sync handle */
	bus_dma_segment_t	mdm_seg;	/* backing segment */
	size_t			mdm_size;	/* bytes allocated */
	void			*mdm_kva;	/* kernel virtual address */
};
/* Convenience accessors for a struct mpii_dmamem. */
#define MPII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
/* Device-visible address of the first (only) segment. */
#define MPII_DMA_DVA(_mdm) ((uint64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MPII_DMA_KVA(_mdm) ((_mdm)->mdm_kva)
105
struct mpii_softc;

/*
 * Reply control block: tracks one reply frame, giving both the kernel
 * virtual address of the frame and its device-visible (DMA) address.
 */
struct mpii_rcb {
	SIMPLEQ_ENTRY(mpii_rcb)	rcb_link;	/* queue linkage */
	void			*rcb_reply;	/* KVA of reply frame */
	u_int32_t		rcb_reply_dva;	/* DVA of reply frame */
};

SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb);
115
/*
 * Per-target bookkeeping for a device (or RAID volume) known to the IOC.
 */
struct mpii_device {
	int			flags;
#define MPII_DF_ATTACH		(0x0001)
#define MPII_DF_DETACH		(0x0002)
#define MPII_DF_HIDDEN		(0x0004)
#define MPII_DF_UNUSED		(0x0008)
#define MPII_DF_VOLUME		(0x0010)
#define MPII_DF_VOLUME_DISK	(0x0020)
#define MPII_DF_HOT_SPARE	(0x0040)
	short			slot;		/* target/mapping slot */
	short			percent;	/* e.g. rebuild progress */
	u_int16_t		dev_handle;	/* IOC device handle */
	u_int16_t		enclosure;	/* enclosure handle */
	u_int16_t		expander;	/* expander handle */
	u_int8_t		phy_num;
	u_int8_t		physical_port;
};
133
/*
 * Command control block: one outstanding request to the IOC.  The
 * request frame lives inside sc_requests at ccb_offset; ccb_smid is
 * the system message id (1-based index) the IOC echoes back in reply
 * descriptors.
 */
struct mpii_ccb {
	struct mpii_softc	*ccb_sc;	/* owning controller */

	void *			ccb_cookie;	/* caller context (e.g. xs) */
	kmutex_t		ccb_mtx;	/* protects cv wakeup */
	kcondvar_t		ccb_cv;		/* for synchronous waits */

	bus_dmamap_t		ccb_dmamap;	/* data transfer map */

	bus_addr_t		ccb_offset;	/* offset into request pool */
	void			*ccb_cmd;	/* KVA of request frame */
	bus_addr_t		ccb_cmd_dva;	/* DVA of request frame */
	u_int16_t		ccb_dev_handle;
	u_int16_t		ccb_smid;	/* system message id */

	volatile enum {
		MPII_CCB_FREE,
		MPII_CCB_READY,
		MPII_CCB_QUEUED,
		MPII_CCB_TIMEOUT
	}			ccb_state;

	void			(*ccb_done)(struct mpii_ccb *);
	struct mpii_rcb		*ccb_rcb;	/* reply, if any */

	SIMPLEQ_ENTRY(mpii_ccb)	ccb_link;
};

SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb);
163
/*
 * Per-controller state.  Mutex members sit next to the data they are
 * presumably intended to protect (named *_mtx accordingly); verify
 * coverage against the functions that take them.
 */
struct mpii_softc {
	device_t		sc_dev;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	void			*sc_ih;		/* interrupt handle */
	pci_intr_handle_t	*sc_pihp;

	struct scsipi_adapter	sc_adapt;
	struct scsipi_channel	sc_chan;
	device_t		sc_child;	/* our scsibus */

	int			sc_flags;
#define MPII_F_RAID		(1<<1)	/* integrated RAID present */
#define MPII_F_SAS3		(1<<2)	/* SAS3 IOC: IEEE SGLs */

	struct mpii_device	**sc_devs;	/* slot -> device map */
	kmutex_t		sc_devs_mtx;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	kmutex_t		sc_req_mtx;
	kmutex_t		sc_rep_mtx;

	ushort			sc_reply_size;
	ushort			sc_request_size;

	ushort			sc_max_cmds;
	ushort			sc_num_reply_frames;
	u_int			sc_reply_free_qdepth;
	u_int			sc_reply_post_qdepth;

	ushort			sc_chain_sge;	/* index of chain SGE slot */
	ushort			sc_max_sgl;

	u_int8_t		sc_ioc_event_replay;

	u_int8_t		sc_porttype;
	u_int8_t		sc_max_volumes;
	u_int16_t		sc_max_devices;
	u_int16_t		sc_vd_count;
	u_int16_t		sc_vd_id_low;
	u_int16_t		sc_pd_id_start;
	int			sc_ioc_number;
	u_int8_t		sc_vf_id;

	struct mpii_ccb		*sc_ccbs;	/* ccb array, smid - 1 */
	struct mpii_ccb_list	sc_ccb_free;
	kmutex_t		sc_ccb_free_mtx;
	kcondvar_t		sc_ccb_free_cv;

	struct mpii_ccb_list	sc_ccb_tmos;	/* timed-out ccbs */
	kmutex_t		sc_ssb_tmomtx;
	struct workqueue	*sc_ssb_tmowk;
	struct work		sc_ssb_tmowork;

	struct mpii_dmamem	*sc_requests;	/* request frame pool */

	struct mpii_dmamem	*sc_replies;	/* reply frame pool */
	struct mpii_rcb		*sc_rcbs;

	struct mpii_dmamem	*sc_reply_postq;
	struct mpii_reply_descr	*sc_reply_postq_kva;
	u_int			sc_reply_post_host_index;

	struct mpii_dmamem	*sc_reply_freeq;
	u_int			sc_reply_free_host_index;
	kmutex_t		sc_reply_free_mtx;

	/* deferred SAS topology events, processed from a workqueue */
	struct mpii_rcb_list	sc_evt_sas_queue;
	kmutex_t		sc_evt_sas_mtx;
	struct workqueue	*sc_evt_sas_wq;
	struct work		sc_evt_sas_work;

	/* events awaiting acknowledgement to the IOC */
	struct mpii_rcb_list	sc_evt_ack_queue;
	kmutex_t		sc_evt_ack_mtx;
	struct workqueue	*sc_evt_ack_wq;
	struct work		sc_evt_ack_work;

#if NBIO > 0
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
#endif
};
252
253 static int mpii_match(device_t, cfdata_t, void *);
254 static void mpii_attach(device_t, device_t, void *);
255 static int mpii_detach(device_t, int);
256 static void mpii_childdetached(device_t, device_t);
257 static int mpii_rescan(device_t, const char *, const int *);
258
259 static int mpii_intr(void *);
260
261 CFATTACH_DECL3_NEW(mpii, sizeof(struct mpii_softc),
262 mpii_match, mpii_attach, mpii_detach, NULL, mpii_rescan,
263 mpii_childdetached, DVF_DETACH_SHUTDOWN);
264
265 static void mpii_scsipi_request(struct scsipi_channel *,
266 scsipi_adapter_req_t, void *);
267 static void mpii_scsi_cmd_done(struct mpii_ccb *);
268
269 static struct mpii_dmamem *
270 mpii_dmamem_alloc(struct mpii_softc *, size_t);
271 static void mpii_dmamem_free(struct mpii_softc *,
272 struct mpii_dmamem *);
273 static int mpii_alloc_ccbs(struct mpii_softc *);
274 static struct mpii_ccb *mpii_get_ccb(struct mpii_softc *);
275 static void mpii_put_ccb(struct mpii_softc *, struct mpii_ccb *);
276 static int mpii_alloc_replies(struct mpii_softc *);
277 static int mpii_alloc_queues(struct mpii_softc *);
278 static void mpii_push_reply(struct mpii_softc *, struct mpii_rcb *);
279 static void mpii_push_replies(struct mpii_softc *);
280
281 static void mpii_scsi_cmd_tmo(void *);
282 static void mpii_scsi_cmd_tmo_handler(struct work *, void *);
283 static void mpii_scsi_cmd_tmo_done(struct mpii_ccb *);
284
285 static int mpii_insert_dev(struct mpii_softc *, struct mpii_device *);
286 static int mpii_remove_dev(struct mpii_softc *, struct mpii_device *);
287 static struct mpii_device *
288 mpii_find_dev(struct mpii_softc *, u_int16_t);
289
290 static void mpii_start(struct mpii_softc *, struct mpii_ccb *);
291 static int mpii_poll(struct mpii_softc *, struct mpii_ccb *);
292 static void mpii_poll_done(struct mpii_ccb *);
293 static struct mpii_rcb *
294 mpii_reply(struct mpii_softc *, struct mpii_reply_descr *);
295
296 static void mpii_wait(struct mpii_softc *, struct mpii_ccb *);
297 static void mpii_wait_done(struct mpii_ccb *);
298
299 static void mpii_init_queues(struct mpii_softc *);
300
301 static int mpii_load_xs(struct mpii_ccb *);
302 static int mpii_load_xs_sas3(struct mpii_ccb *);
303
304 static u_int32_t mpii_read(struct mpii_softc *, bus_size_t);
305 static void mpii_write(struct mpii_softc *, bus_size_t, u_int32_t);
306 static int mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t,
307 u_int32_t);
308 static int mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t,
309 u_int32_t);
310
311 static int mpii_init(struct mpii_softc *);
312 static int mpii_reset_soft(struct mpii_softc *);
313 static int mpii_reset_hard(struct mpii_softc *);
314
315 static int mpii_handshake_send(struct mpii_softc *, void *, size_t);
316 static int mpii_handshake_recv_dword(struct mpii_softc *,
317 u_int32_t *);
318 static int mpii_handshake_recv(struct mpii_softc *, void *, size_t);
319
320 static void mpii_empty_done(struct mpii_ccb *);
321
322 static int mpii_iocinit(struct mpii_softc *);
323 static int mpii_iocfacts(struct mpii_softc *);
324 static int mpii_portfacts(struct mpii_softc *);
325 static int mpii_portenable(struct mpii_softc *);
326 static int mpii_cfg_coalescing(struct mpii_softc *);
327 static int mpii_board_info(struct mpii_softc *);
328 static int mpii_target_map(struct mpii_softc *);
329
330 static int mpii_eventnotify(struct mpii_softc *);
331 static void mpii_eventnotify_done(struct mpii_ccb *);
332 static void mpii_eventack(struct work *, void *);
333 static void mpii_eventack_done(struct mpii_ccb *);
334 static void mpii_event_process(struct mpii_softc *, struct mpii_rcb *);
335 static void mpii_event_done(struct mpii_softc *, struct mpii_rcb *);
336 static void mpii_event_sas(struct mpii_softc *, struct mpii_rcb *);
337 static void mpii_event_sas_work(struct work *, void *);
338 static void mpii_event_raid(struct mpii_softc *,
339 struct mpii_msg_event_reply *);
340 static void mpii_event_discovery(struct mpii_softc *,
341 struct mpii_msg_event_reply *);
342
343 static void mpii_sas_remove_device(struct mpii_softc *, u_int16_t);
344
345 static int mpii_req_cfg_header(struct mpii_softc *, u_int8_t,
346 u_int8_t, u_int32_t, int, void *);
347 static int mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int,
348 void *, int, void *, size_t);
349
350 #if 0
351 int mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
352 #endif
353
354 #if NBIO > 0
355 static int mpii_ioctl(device_t, u_long, void *);
356 static int mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *);
357 static int mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *);
358 static int mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *);
359 static int mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int,
360 int, int *);
361 static int mpii_bio_disk(struct mpii_softc *, struct bioc_disk *,
362 u_int8_t);
363 static struct mpii_device *
364 mpii_find_vol(struct mpii_softc *, int);
365 #ifndef SMALL_KERNEL
366 static int mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *);
367 static int mpii_create_sensors(struct mpii_softc *);
368 static void mpii_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
369 static int mpii_destroy_sensors(struct mpii_softc *);
370 #endif /* SMALL_KERNEL */
371 #endif /* NBIO > 0 */
372
373 #define DEVNAME(s) (device_xname((s)->sc_dev))
374
375 #define dwordsof(s) (sizeof(s) / sizeof(u_int32_t))
376
377 #define mpii_read_db(s) mpii_read((s), MPII_DOORBELL)
378 #define mpii_write_db(s, v) mpii_write((s), MPII_DOORBELL, (v))
379 #define mpii_read_intr(s) mpii_read((s), MPII_INTR_STATUS)
380 #define mpii_write_intr(s, v) mpii_write((s), MPII_INTR_STATUS, (v))
381 #define mpii_reply_waiting(s) ((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\
382 == MPII_INTR_STATUS_REPLY)
383
384 #define mpii_write_reply_free(s, v) \
385 bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
386 MPII_REPLY_FREE_HOST_INDEX, (v))
387 #define mpii_write_reply_post(s, v) \
388 bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
389 MPII_REPLY_POST_HOST_INDEX, (v))
390
391 #define mpii_wait_db_int(s) mpii_wait_ne((s), MPII_INTR_STATUS, \
392 MPII_INTR_STATUS_IOC2SYSDB, 0)
393 #define mpii_wait_db_ack(s) mpii_wait_eq((s), MPII_INTR_STATUS, \
394 MPII_INTR_STATUS_SYS2IOCDB, 0)
395
396 static inline void
397 mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva)
398 {
399 sge->sg_addr_lo = htole32(dva);
400 sge->sg_addr_hi = htole32(dva >> 32);
401 }
402
403 #define MPII_PG_EXTENDED (1<<0)
404 #define MPII_PG_POLL (1<<1)
405 #define MPII_PG_FMT "\020" "\002POLL" "\001EXTENDED"
406
/*
 * PCI match table for supported LSI/Broadcom SAS2/SAS3 controllers.
 * Terminated by an all-zero sentinel entry.
 */
static const struct mpii_pci_product {
	pci_vendor_id_t         mpii_vendor;
	pci_product_id_t        mpii_product;
} mpii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_6 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3408 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3416 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516_1 },
	{ 0,	0}	/* sentinel */
};
441
442 static int
443 mpii_match(device_t parent, cfdata_t match, void *aux)
444 {
445 struct pci_attach_args *pa = aux;
446 const struct mpii_pci_product *mpii;
447
448 for (mpii = mpii_devices; mpii->mpii_vendor != 0; mpii++) {
449 if (PCI_VENDOR(pa->pa_id) == mpii->mpii_vendor &&
450 PCI_PRODUCT(pa->pa_id) == mpii->mpii_product)
451 return (1);
452 }
453 return (0);
454 }
455
/*
 * Autoconf attach: map the IOC's system interface registers, bring the
 * IOC to the operational state, allocate the request/reply/queue DMA
 * resources, discover targets, and attach the scsibus child.  On
 * failure, the goto-ladder below unwinds exactly the resources set up
 * so far, in reverse order of allocation.
 */
static void
mpii_attach(device_t parent, device_t self, void *aux)
{
	struct mpii_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pcireg_t memtype;
	int r;
	struct mpii_ccb *ccb;
	struct scsipi_adapter *adapt = &sc->sc_adapt;
	struct scsipi_channel *chan = &sc->sc_chan;
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr;

	pci_aprint_devinfo(pa, NULL);

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_dev = self;

	mutex_init(&sc->sc_req_mtx, MUTEX_DEFAULT, IPL_BIO);
	mutex_init(&sc->sc_rep_mtx, MUTEX_DEFAULT, IPL_BIO);

	/* find the appropriate memory base: first memory-type BAR */
	for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
		memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
		if (PCI_MAPREG_TYPE(memtype) == PCI_MAPREG_TYPE_MEM)
			break;
	}
	if (r >= PCI_MAPREG_END) {
		aprint_error_dev(self,
		    "unable to locate system interface registers\n");
		return;
	}

	if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
	    NULL, &sc->sc_ios) != 0) {
		aprint_error_dev(self,
		    "unable to map system interface registers\n");
		return;
	}

	/* disable the expansion rom */
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_MAPREG_ROM,
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_MAPREG_ROM) &
	    ~PCI_MAPREG_ROM_ENABLE);

	/* disable interrupts until the queues are set up */
	mpii_write(sc, MPII_INTR_MASK,
	    MPII_INTR_MASK_RESET | MPII_INTR_MASK_REPLY |
	    MPII_INTR_MASK_DOORBELL);

	/* hook up the interrupt */
	if (pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0)) {
		aprint_error_dev(self, "unable to map interrupt\n");
		goto unmap;
	}
	intrstr = pci_intr_string(pa->pa_pc, sc->sc_pihp[0],
	    intrbuf, sizeof(intrbuf));
	pci_intr_setattr(pa->pa_pc, &sc->sc_pihp[0], PCI_INTR_MPSAFE, true);
	sc->sc_ih = pci_intr_establish_xname(pa->pa_pc, sc->sc_pihp[0], IPL_BIO,
	    mpii_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		/*
		 * NOTE(review): returns without unmapping the registers
		 * (no "goto unmap") - looks like a leak on this path.
		 */
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
	aprint_naive("\n");

	if (mpii_iocfacts(sc) != 0) {
		aprint_error_dev(self, "unable to get iocfacts\n");
		goto unmap;
	}

	if (mpii_init(sc) != 0) {
		aprint_error_dev(self, "unable to initialize ioc\n");
		goto unmap;
	}

	if (mpii_alloc_ccbs(sc) != 0) {
		/* error already printed */
		goto unmap;
	}

	if (mpii_alloc_replies(sc) != 0) {
		aprint_error_dev(self, "unable to allocated reply space\n");
		goto free_ccbs;
	}

	if (mpii_alloc_queues(sc) != 0) {
		aprint_error_dev(self, "unable to allocate reply queues\n");
		goto free_replies;
	}

	if (mpii_iocinit(sc) != 0) {
		aprint_error_dev(self, "unable to send iocinit\n");
		goto free_queues;
	}

	/* the IOC must reach the operational state before we can use it */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_OPER) != 0) {
		aprint_error_dev(self, "state: 0x%08x\n",
		    mpii_read_db(sc) & MPII_DOORBELL_STATE);
		aprint_error_dev(self, "operational state timeout\n");
		goto free_queues;
	}

	mpii_push_replies(sc);
	mpii_init_queues(sc);

	if (mpii_board_info(sc) != 0) {
		aprint_error_dev(self, "unable to get manufacturing page 0\n");
		goto free_queues;
	}

	if (mpii_portfacts(sc) != 0) {
		aprint_error_dev(self, "unable to get portfacts\n");
		goto free_queues;
	}

	if (mpii_target_map(sc) != 0) {
		aprint_error_dev(self, "unable to setup target mappings\n");
		goto free_queues;
	}

	if (mpii_cfg_coalescing(sc) != 0) {
		aprint_error_dev(self, "unable to configure coalescing\n");
		goto free_queues;
	}

	/* XXX bail on unsupported porttype? */
	if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) ||
	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) ||
	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_TRI_MODE)) {
		if (mpii_eventnotify(sc) != 0) {
			aprint_error_dev(self, "unable to enable events\n");
			goto free_queues;
		}
	}

	mutex_init(&sc->sc_devs_mtx, MUTEX_DEFAULT, IPL_BIO);
	sc->sc_devs = malloc(sc->sc_max_devices * sizeof(struct mpii_device *),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	if (mpii_portenable(sc) != 0) {
		aprint_error_dev(self, "unable to enable port\n");
		goto free_devs;
	}

	/* we should be good to go now, attach scsibus */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* reserve a few commands for internal (non-I/O) use */
	adapt->adapt_openings = sc->sc_max_cmds - 4;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = mpii_scsipi_request;
	adapt->adapt_minphys = minphys;
	adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_sas_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = sc->sc_max_devices;
	chan->chan_id = -1;

	mpii_rescan(self, "scsi", NULL);

	/* enable interrupts (reply interrupt stays unmasked) */
	mpii_write(sc, MPII_INTR_MASK, MPII_INTR_MASK_DOORBELL
	    | MPII_INTR_MASK_RESET);

#if NBIO > 0
	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
		if (bio_register(sc->sc_dev, mpii_ioctl) != 0)
			panic("%s: controller registration failed",
			    DEVNAME(sc));
		if (mpii_create_sensors(sc) != 0)
			aprint_error_dev(self, "unable to create sensors\n");
	}
#endif

	return;

	/* unwind in reverse order of allocation */
free_devs:
	free(sc->sc_devs, M_DEVBUF);
	sc->sc_devs = NULL;

free_queues:
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
	    0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
	mpii_dmamem_free(sc, sc->sc_reply_freeq);

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
	mpii_dmamem_free(sc, sc->sc_reply_postq);

free_replies:
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
	    0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
	mpii_dmamem_free(sc, sc->sc_replies);

free_ccbs:
	while ((ccb = mpii_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	mpii_dmamem_free(sc, sc->sc_requests);
	free(sc->sc_ccbs, M_DEVBUF);

unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}
673
/*
 * Autoconf detach: tear down children first, then release interrupt,
 * DMA queues, reply/request pools and ccbs, mirroring the error-unwind
 * order in mpii_attach().  sc_ios != 0 is used as "attach completed"
 * marker.
 */
static int
mpii_detach(device_t self, int flags)
{
	struct mpii_softc *sc = device_private(self);
	int error;
	struct mpii_ccb *ccb;

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

#if NBIO > 0
	/*
	 * NOTE(review): attach only registers bio/sensors when
	 * MPII_F_RAID is set, but these are called unconditionally
	 * here - confirm they tolerate not being registered.
	 */
	mpii_destroy_sensors(sc);
	bio_unregister(sc->sc_dev);
#endif /* NBIO > 0 */

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_ios != 0) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
		free(sc->sc_devs, M_DEVBUF);
		sc->sc_devs = NULL;

		bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
		    0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
		mpii_dmamem_free(sc, sc->sc_reply_freeq);

		bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
		    0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
		mpii_dmamem_free(sc, sc->sc_reply_postq);

		bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
		    0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
		mpii_dmamem_free(sc, sc->sc_replies);

		/* drain the free list, destroying each ccb's data dmamap */
		while ((ccb = mpii_get_ccb(sc)) != NULL)
			bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
		mpii_dmamem_free(sc, sc->sc_requests);
		free(sc->sc_ccbs, M_DEVBUF);

		sc->sc_ios = 0;
	}

	return (0);
}
720
721 static int
722 mpii_rescan(device_t self, const char *ifattr, const int *locators)
723 {
724 struct mpii_softc *sc = device_private(self);
725
726 if (sc->sc_child != NULL)
727 return 0;
728
729 sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
730 scsiprint, NULL);
731
732 return 0;
733 }
734
735 static void
736 mpii_childdetached(device_t self, device_t child)
737 {
738 struct mpii_softc *sc = device_private(self);
739
740 KASSERT(self == sc->sc_dev);
741 KASSERT(child == sc->sc_child);
742
743 if (child == sc->sc_child)
744 sc->sc_child = NULL;
745 }
746
747
/*
 * Interrupt handler: scan the reply post queue from the last host
 * index, collecting completed commands (nonzero smid) and async events
 * (smid 0) onto local lists.  Completions and events are run after
 * sc_rep_mtx is dropped so their handlers can sleep/queue freely.
 * Returns nonzero iff at least one descriptor was consumed.
 */
static int
mpii_intr(void *arg)
{
	struct mpii_rcb_list evts = SIMPLEQ_HEAD_INITIALIZER(evts);
	struct mpii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_softc *sc = arg;
	struct mpii_reply_descr *postq = sc->sc_reply_postq_kva, *rdp;
	struct mpii_ccb *ccb;
	struct mpii_rcb *rcb;
	int smid;
	u_int idx;
	int rv = 0;

	mutex_enter(&sc->sc_rep_mtx);
	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	idx = sc->sc_reply_post_host_index;
	for (;;) {
		rdp = &postq[idx];
		/* an UNUSED descriptor means we have caught up */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		smid = le16toh(rdp->smid);
		rcb = mpii_reply(sc, rdp);

		if (smid) {
			/* smid is 1-based; map back to the ccb array */
			ccb = &sc->sc_ccbs[smid - 1];
			ccb->ccb_state = MPII_CCB_READY;
			ccb->ccb_rcb = rcb;
			SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		} else
			SIMPLEQ_INSERT_TAIL(&evts, rcb, rcb_link);

		/* ring buffer wrap */
		if (++idx >= sc->sc_reply_post_qdepth)
			idx = 0;

		rv = 1;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* tell the IOC how far we have consumed */
	if (rv)
		mpii_write_reply_post(sc, sc->sc_reply_post_host_index = idx);

	mutex_exit(&sc->sc_rep_mtx);

	if (rv == 0)
		return (0);

	/* run completions and events without holding sc_rep_mtx */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		ccb->ccb_done(ccb);
	}
	while ((rcb = SIMPLEQ_FIRST(&evts)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&evts, rcb_link);
		mpii_event_process(sc, rcb);
	}

	return (1);
}
822
/*
 * Build the IEEE (SAS3) scatter/gather list for a SCSI transfer inside
 * the request frame.  The frame holds sc_chain_sge inline SGEs; if the
 * transfer needs more segments, the slot at csge becomes a chain
 * element pointing at the SGEs that follow it in the same frame.
 * Returns 0 on success, 1 if the dmamap load fails.
 */
static int
mpii_load_xs_sas3(struct mpii_ccb *ccb)
{
	struct mpii_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_cmd;
	struct mpii_ieee_sge *csge, *nsge, *sge;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_ieee_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_flags = MPII_IEEE_SGE_END_OF_LIST;
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	sge = nsge;
	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			/* skip the chain slot; data SGEs continue after it */
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((uintptr_t)csge - (uintptr_t)io) / 4;
			csge->sg_flags = MPII_IEEE_SGE_CHAIN_ELEMENT |
			    MPII_IEEE_SGE_ADDR_SYSTEM;
			/* address of the next sge */
			csge->sg_addr = htole64(ccb->ccb_cmd_dva +
			    ((uintptr_t)nsge - (uintptr_t)io));
			csge->sg_len = htole32((dmap->dm_nsegs - i) *
			    sizeof(*sge));
		}

		sge = nsge;
		sge->sg_flags = MPII_IEEE_SGE_ADDR_SYSTEM;
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_flags |= MPII_IEEE_SGE_END_OF_LIST;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
880
/*
 * Build the MPI (SAS2) scatter/gather list for a SCSI transfer inside
 * the request frame.  Same chain-SGE scheme as mpii_load_xs_sas3(),
 * but using 64-bit "simple" SGEs with flags packed into sg_hdr.
 * Returns 0 on success, 1 if the dmamap load fails.
 */
static int
mpii_load_xs(struct mpii_ccb *ccb)
{
	struct mpii_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_cmd;
	struct mpii_sge *csge, *nsge, *sge;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int32_t flags;
	u_int16_t len;
	int i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
		    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* safe default starting flags */
	flags = MPII_SGE_FL_TYPE_SIMPLE | MPII_SGE_FL_SIZE_64;
	if (xs->xs_control & XS_CTL_DATA_OUT)
		flags |= MPII_SGE_FL_DIR_OUT;

	sge = nsge;
	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			/* skip the chain slot; data SGEs continue after it */
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((uintptr_t)csge - (uintptr_t)io) / 4;
			/* length of the sgl segment we're pointing to */
			len = (dmap->dm_nsegs - i) * sizeof(*sge);
			csge->sg_hdr = htole32(MPII_SGE_FL_TYPE_CHAIN |
			    MPII_SGE_FL_SIZE_64 | len);
			/* address of the next sge */
			mpii_dvatosge(csge, ccb->ccb_cmd_dva +
			    ((uintptr_t)nsge - (uintptr_t)io));
		}

		sge = nsge;
		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
		mpii_dvatosge(sge, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB |
	    MPII_SGE_FL_EOL);

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
946
947 static u_int32_t
948 mpii_read(struct mpii_softc *sc, bus_size_t r)
949 {
950 u_int32_t rv;
951
952 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
953 BUS_SPACE_BARRIER_READ);
954 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
955
956 DNPRINTF(MPII_D_RW, "%s: mpii_read %#lx %#x\n", DEVNAME(sc), r, rv);
957
958 return (rv);
959 }
960
961 static void
962 mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v)
963 {
964 DNPRINTF(MPII_D_RW, "%s: mpii_write %#lx %#x\n", DEVNAME(sc), r, v);
965
966 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
967 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
968 BUS_SPACE_BARRIER_WRITE);
969 }
970
971
972 static int
973 mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
974 u_int32_t target)
975 {
976 int i;
977
978 DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#lx %#x %#x\n", DEVNAME(sc), r,
979 mask, target);
980
981 for (i = 0; i < 15000; i++) {
982 if ((mpii_read(sc, r) & mask) == target)
983 return (0);
984 delay(1000);
985 }
986
987 return (1);
988 }
989
990 static int
991 mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
992 u_int32_t target)
993 {
994 int i;
995
996 DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#lx %#x %#x\n", DEVNAME(sc), r,
997 mask, target);
998
999 for (i = 0; i < 15000; i++) {
1000 if ((mpii_read(sc, r) & mask) != target)
1001 return (0);
1002 delay(1000);
1003 }
1004
1005 return (1);
1006 }
1007
/*
 * Drive the IOC to the READY state via the doorbell state machine.
 * Waits out a pending reset, defers to a PCI peer that already owns
 * the IOC, and otherwise resets (soft if the IOC supports event
 * replay, hard otherwise) until READY is observed or the retry
 * budget (5 iterations) is exhausted.  Returns 0 on success.
 */
static int
mpii_init(struct mpii_softc *sc)
{
	u_int32_t db;
	int i;

	/* spin until the ioc leaves the reset state */
	if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpii_read_db(sc);
	if ((db & MPII_DOORBELL_WHOINIT) == MPII_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	for (i = 0; i < 5; i++) {
		switch (db & MPII_DOORBELL_STATE) {
		case MPII_DOORBELL_STATE_READY:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPII_DOORBELL_STATE_OPER:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n",
			    DEVNAME(sc));
			/* soft reset only if the IOC can replay events */
			if (sc->sc_ioc_event_replay)
				mpii_reset_soft(sc);
			else
				mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_FAULT:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being "
			    "reset hard\n" , DEVNAME(sc));
			mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_RESET:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
			    MPII_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		db = mpii_read_db(sc);
	}

	return (1);
}
1065
/*
 * Request a message-unit (soft) reset through the doorbell.  Fails if
 * the doorbell is currently in use or the IOC does not acknowledge and
 * return to the ready state.  Returns 0 on success, 1 on failure.
 */
static int
mpii_reset_soft(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc));

	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) {
		return (1);
	}

	mpii_write_db(sc,
	    MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET));

	/* XXX LSI waits 15 sec */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* XXX LSI waits 15 sec */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_READY) != 0)
		return (1);

	/* XXX wait for Sys2IOCDB bit to clear in HIS?? */

	return (0);
}
1091
/*
 * Perform a hard (diagnostic) reset of the IOC.  Unlocks the host
 * diagnostic register with the magic write sequence, pulses the reset
 * adapter bit, then waits up to ~5 minutes for the bit to clear.
 * Returns 0 on success, 1 if the diagnostic register could not be
 * unlocked.  The exact write sequence and delays follow the vendor
 * reference driver and must not be reordered.
 */
static int
mpii_reset_hard(struct mpii_softc *sc)
{
	u_int16_t i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc));

	mpii_write_intr(sc, 0);

	/* enable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);

	delay(100);

	if ((mpii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable "
		    "diagnostic read/write\n", DEVNAME(sc));
		return(1);
	}

	/* reset ioc */
	mpii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);

	/* 240 milliseconds */
	delay(240000);


	/* XXX this whole function should be more robust */

	/* XXX read the host diagnostic reg until reset adapter bit clears ? */
	for (i = 0; i < 30000; i++) {
		if ((mpii_read(sc, MPII_HOSTDIAG) &
		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
			break;
		delay(10000);
	}

	/* disable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, 0xff);

	/* XXX what else? */

	DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc));

	return(0);
}
1144
/*
 * Send a request to the IOC via the doorbell handshake protocol.
 * 'dwords' is the request length in 32-bit words; each dword is written
 * through the doorbell and must be individually acknowledged by the
 * IOC.  Returns 0 on success, 1 if the doorbell is busy or an
 * acknowledgement times out.
 */
static int
mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords)
{
	u_int32_t *query = buf;
	int i;

	/* make sure the doorbell is not in use. */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpii_read_intr(sc) & MPII_INTR_STATUS_IOC2SYSDB)
		mpii_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) |
	    MPII_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. wait for the interrupt and then ack it.
	 */
	if (mpii_wait_db_int(sc) != 0)
		return (1);
	mpii_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell, one acked dword at a time. */
	for (i = 0; i < dwords; i++) {
		mpii_write_db(sc, htole32(query[i]));
		if (mpii_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}
1187
1188 static int
1189 mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword)
1190 {
1191 u_int16_t *words = (u_int16_t *)dword;
1192 int i;
1193
1194 for (i = 0; i < 2; i++) {
1195 if (mpii_wait_db_int(sc) != 0)
1196 return (1);
1197 words[i] = le16toh(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK);
1198 mpii_write_intr(sc, 0);
1199 }
1200
1201 return (0);
1202 }
1203
/*
 * Receive a handshake reply from the IOC into 'buf', which can hold
 * 'dwords' 32-bit words.  The actual reply length is taken from the
 * msg_length field in the first dword; any excess beyond the caller's
 * buffer is read and discarded so the doorbell protocol completes.
 * Returns 0 on success, 1 on timeout.
 */
static int
mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords)
{
	struct mpii_msg_reply *reply = buf;
	u_int32_t *dbuf = buf, dummy;
	int i;

	/* get the first dword so we can read the length out of the header. */
	if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %lu reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpii_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpii_wait_db_int(sc) != 0)
		return (1);

	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_INUSE, 0) != 0)
		return (1);

	mpii_write_intr(sc, 0);

	return (0);
}
1246
/*
 * No-op completion callback for commands whose reply is consumed
 * synchronously by the submitter (e.g. polled requests).
 */
static void
mpii_empty_done(struct mpii_ccb *ccb)
{
	/* nothing to do */
}
1252
/*
 * Fetch the IOC Facts via the doorbell handshake and derive the driver's
 * sizing parameters from them: command/reply queue depths, request and
 * reply frame sizes, and scatter-gather limits.  Returns 0 on success,
 * 1 on handshake failure or if the reply post queue is too shallow.
 */
static int
mpii_iocfacts(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request ifq;
	struct mpii_msg_iocfacts_reply ifp;
	int irs;
	int sge_size;
	u_int qdepth;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc));

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	sc->sc_ioc_number = ifp.ioc_number;
	sc->sc_vf_id = ifp.vf_id;

	/* device table covers RAID volumes plus physical targets */
	sc->sc_max_volumes = ifp.max_volumes;
	sc->sc_max_devices = ifp.max_volumes + le16toh(ifp.max_targets);

	if (ISSET(le32toh(ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		SET(sc->sc_flags, MPII_F_RAID);
	if (ISSET(le32toh(ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		sc->sc_ioc_event_replay = 1;

	sc->sc_max_cmds = MIN(le16toh(ifp.request_credit),
	    MPII_REQUEST_CREDIT);

	/* SAS3 and 3.5 controllers have different sgl layouts */
	if (ifp.msg_version_maj == 2 && ((ifp.msg_version_min == 5)
	    || (ifp.msg_version_min == 6)))
		SET(sc->sc_flags, MPII_F_SAS3);

	/*
	 * The host driver must ensure that there is at least one
	 * unused entry in the Reply Free Queue. One way to ensure
	 * that this requirement is met is to never allocate a number
	 * of reply frames that is a multiple of 16.
	 */
	sc->sc_num_reply_frames = sc->sc_max_cmds + 32;
	if (!(sc->sc_num_reply_frames % 16))
		sc->sc_num_reply_frames--;

	/* must be multiple of 16 */
	sc->sc_reply_post_qdepth = sc->sc_max_cmds +
	    sc->sc_num_reply_frames;
	sc->sc_reply_post_qdepth += 16 - (sc->sc_reply_post_qdepth % 16);

	/* clamp to the depth the IOC actually supports and rescale */
	qdepth = le16toh(ifp.max_reply_descriptor_post_queue_depth);
	if (sc->sc_reply_post_qdepth > qdepth) {
		sc->sc_reply_post_qdepth = qdepth;
		if (sc->sc_reply_post_qdepth < 16) {
			printf("%s: RDPQ is too shallow\n", DEVNAME(sc));
			return (1);
		}
		sc->sc_max_cmds = sc->sc_reply_post_qdepth / 2 - 4;
		sc->sc_num_reply_frames = sc->sc_max_cmds + 4;
	}

	sc->sc_reply_free_qdepth = sc->sc_num_reply_frames +
	    16 - (sc->sc_num_reply_frames % 16);

	/*
	 * Our request frame for an I/O operation looks like this:
	 *
	 * +-------------------+ -.
	 * | mpii_msg_scsi_io  |  |
	 * +-------------------+  |
	 * | mpii_sge          |  |
	 * + - - - - - - - - - +  |
	 * | ...               |  > ioc_request_frame_size
	 * + - - - - - - - - - +  |
	 * | mpii_sge (tail)   |  |
	 * + - - - - - - - - - +  |
	 * | mpii_sge (csge)   |  | --.
	 * + - - - - - - - - - + -'   | chain sge points to the next sge
	 * | mpii_sge          |<-----'
	 * + - - - - - - - - - +
	 * | ...               |
	 * + - - - - - - - - - +
	 * | mpii_sge (tail)   |
	 * +-------------------+
	 * |                   |
	 * ~~~~~~~~~~~~~~~~~~~~~
	 * |                   |
	 * +-------------------+ <- sc_request_size - sizeof(scsi_sense_data)
	 * | scsi_sense_data   |
	 * +-------------------+
	 */

	/* both sizes are in 32-bit words */
	sc->sc_reply_size = ifp.reply_frame_size * 4;
	irs = le16toh(ifp.ioc_request_frame_size) * 4;
	sc->sc_request_size = MPII_REQUEST_SIZE;
	/* make sure we have enough space for scsi sense data */
	if (irs > sc->sc_request_size) {
		sc->sc_request_size = irs + sizeof(struct scsi_sense_data);
		sc->sc_request_size += 16 - (sc->sc_request_size % 16);
	}

	if (ISSET(sc->sc_flags, MPII_F_SAS3)) {
		sge_size = sizeof(struct mpii_ieee_sge);
	} else {
		sge_size = sizeof(struct mpii_sge);
	}

	/* offset to the chain sge */
	sc->sc_chain_sge = (irs - sizeof(struct mpii_msg_scsi_io)) /
	    sge_size - 1;

	/*
	 * A number of simple scatter-gather elements we can fit into the
	 * request buffer after the I/O command minus the chain element.
	 */
	sc->sc_max_sgl = (sc->sc_request_size -
	    sizeof(struct mpii_msg_scsi_io) - sizeof(struct scsi_sense_data)) /
	    sge_size - 1;

	return (0);
}
1389
/*
 * Send the IOC Init request via the doorbell handshake.  Hands the IOC
 * the frame sizes and queue depths derived in mpii_iocfacts() plus the
 * DMA addresses of the request, reply, reply-post and reply-free
 * regions.  Returns 0 on success, 1 on handshake failure or if the IOC
 * reports a non-success status or a loginfo code.
 */
static int
mpii_iocinit(struct mpii_softc *sc)
{
	struct mpii_msg_iocinit_request iiq;
	struct mpii_msg_iocinit_reply iip;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc));

	memset(&iiq, 0, sizeof(iiq));
	memset(&iip, 0, sizeof(iip));

	iiq.function = MPII_FUNCTION_IOC_INIT;
	iiq.whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* XXX JPG do something about vf_id */
	iiq.vf_id = 0;

	iiq.msg_version_maj = 0x02;
	iiq.msg_version_min = 0x00;

	/* XXX JPG ensure compliance with some level and hard-code? */
	iiq.hdr_version_unit = 0x00;
	iiq.hdr_version_dev = 0x00;

	/* frame size is given to the IOC in 32-bit words */
	iiq.system_request_frame_size = htole16(sc->sc_request_size / 4);

	iiq.reply_descriptor_post_queue_depth =
	    htole16(sc->sc_reply_post_qdepth);

	iiq.reply_free_queue_depth = htole16(sc->sc_reply_free_qdepth);

	/*
	 * Sense buffers live inside the request frames, so they share the
	 * upper 32 address bits with the request region.
	 */
	iiq.sense_buffer_address_high =
	    htole32(MPII_DMA_DVA(sc->sc_requests) >> 32);

	iiq.system_reply_address_high =
	    htole32(MPII_DMA_DVA(sc->sc_replies) >> 32);

	iiq.system_request_frame_base_address_lo =
	    htole32(MPII_DMA_DVA(sc->sc_requests));
	iiq.system_request_frame_base_address_hi =
	    htole32(MPII_DMA_DVA(sc->sc_requests) >> 32);

	iiq.reply_descriptor_post_queue_address_lo =
	    htole32(MPII_DMA_DVA(sc->sc_reply_postq));
	iiq.reply_descriptor_post_queue_address_hi =
	    htole32(MPII_DMA_DVA(sc->sc_reply_postq) >> 32);

	iiq.reply_free_queue_address_lo =
	    htole32(MPII_DMA_DVA(sc->sc_reply_freeq));
	iiq.reply_free_queue_address_hi =
	    htole32(MPII_DMA_DVA(sc->sc_reply_freeq) >> 32);

	if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPII_D_MISC, "%s: function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPII_D_MISC, "%s: msg_flags: 0x%02x\n", DEVNAME(sc),
	    iip.msg_flags);
	DNPRINTF(MPII_D_MISC, "%s: vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc),
	    iip.vf_id, iip.vp_id);
	DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    le16toh(iip.ioc_status));
	DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    le32toh(iip.ioc_loginfo));

	if (le16toh(iip.ioc_status) != MPII_IOCSTATUS_SUCCESS ||
	    le32toh(iip.ioc_loginfo))
		return (1);

	return (0);
}
1472
1473 static void
1474 mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb)
1475 {
1476 u_int32_t *rfp;
1477 u_int idx;
1478
1479 if (rcb == NULL)
1480 return;
1481
1482 mutex_enter(&sc->sc_reply_free_mtx);
1483 idx = sc->sc_reply_free_host_index;
1484
1485 rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
1486 rfp[idx] = htole32(rcb->rcb_reply_dva);
1487
1488 if (++idx >= sc->sc_reply_free_qdepth)
1489 idx = 0;
1490
1491 mpii_write_reply_free(sc, sc->sc_reply_free_host_index = idx);
1492 mutex_exit(&sc->sc_reply_free_mtx);
1493 }
1494
1495 static int
1496 mpii_portfacts(struct mpii_softc *sc)
1497 {
1498 struct mpii_msg_portfacts_request *pfq;
1499 struct mpii_msg_portfacts_reply *pfp;
1500 struct mpii_ccb *ccb;
1501 int rv = 1;
1502
1503 DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc));
1504
1505 ccb = mpii_get_ccb(sc);
1506 if (ccb == NULL) {
1507 DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n",
1508 DEVNAME(sc));
1509 return (rv);
1510 }
1511
1512 ccb->ccb_done = mpii_empty_done;
1513 pfq = ccb->ccb_cmd;
1514
1515 memset(pfq, 0, sizeof(*pfq));
1516
1517 pfq->function = MPII_FUNCTION_PORT_FACTS;
1518 pfq->chain_offset = 0;
1519 pfq->msg_flags = 0;
1520 pfq->port_number = 0;
1521 pfq->vp_id = 0;
1522 pfq->vf_id = 0;
1523
1524 if (mpii_poll(sc, ccb) != 0) {
1525 DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n",
1526 DEVNAME(sc));
1527 goto err;
1528 }
1529
1530 if (ccb->ccb_rcb == NULL) {
1531 DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n",
1532 DEVNAME(sc));
1533 goto err;
1534 }
1535
1536 pfp = ccb->ccb_rcb->rcb_reply;
1537 sc->sc_porttype = pfp->port_type;
1538
1539 mpii_push_reply(sc, ccb->ccb_rcb);
1540 rv = 0;
1541 err:
1542 mpii_put_ccb(sc, ccb);
1543
1544 return (rv);
1545 }
1546
1547 static void
1548 mpii_eventack(struct work *wk, void * cookie)
1549 {
1550 struct mpii_softc *sc = cookie;
1551 struct mpii_ccb *ccb;
1552 struct mpii_rcb *rcb, *next;
1553 struct mpii_msg_event_reply *enp;
1554 struct mpii_msg_eventack_request *eaq;
1555
1556 mutex_enter(&sc->sc_evt_ack_mtx);
1557 next = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
1558 SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
1559 mutex_exit(&sc->sc_evt_ack_mtx);
1560
1561 while (next != NULL) {
1562 rcb = next;
1563 next = SIMPLEQ_NEXT(rcb, rcb_link);
1564
1565 enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
1566
1567 ccb = mpii_get_ccb(sc);
1568 ccb->ccb_done = mpii_eventack_done;
1569 eaq = ccb->ccb_cmd;
1570
1571 eaq->function = MPII_FUNCTION_EVENT_ACK;
1572
1573 eaq->event = enp->event;
1574 eaq->event_context = enp->event_context;
1575
1576 mpii_push_reply(sc, rcb);
1577
1578 mpii_start(sc, ccb);
1579 }
1580 }
1581
/*
 * Completion callback for an EVENT_ACK request: recycle the ack's own
 * reply frame (if any) and release the ccb.
 */
static void
mpii_eventack_done(struct mpii_ccb *ccb)
{
	struct mpii_softc *sc = ccb->ccb_sc;

	DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc));

	mpii_push_reply(sc, ccb->ccb_rcb);
	mpii_put_ccb(sc, ccb);
}
1592
1593 static int
1594 mpii_portenable(struct mpii_softc *sc)
1595 {
1596 struct mpii_msg_portenable_request *peq;
1597 struct mpii_ccb *ccb;
1598
1599 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc));
1600
1601 ccb = mpii_get_ccb(sc);
1602 if (ccb == NULL) {
1603 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n",
1604 DEVNAME(sc));
1605 return (1);
1606 }
1607
1608 ccb->ccb_done = mpii_empty_done;
1609 peq = ccb->ccb_cmd;
1610
1611 peq->function = MPII_FUNCTION_PORT_ENABLE;
1612 peq->vf_id = sc->sc_vf_id;
1613
1614 if (mpii_poll(sc, ccb) != 0) {
1615 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n",
1616 DEVNAME(sc));
1617 return (1);
1618 }
1619
1620 if (ccb->ccb_rcb == NULL) {
1621 DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n",
1622 DEVNAME(sc));
1623 return (1);
1624 }
1625
1626 mpii_push_reply(sc, ccb->ccb_rcb);
1627 mpii_put_ccb(sc, ccb);
1628
1629 return (0);
1630 }
1631
1632 static int
1633 mpii_cfg_coalescing(struct mpii_softc *sc)
1634 {
1635 struct mpii_cfg_hdr hdr;
1636 struct mpii_cfg_ioc_pg1 ipg;
1637
1638 hdr.page_version = 0;
1639 hdr.page_length = sizeof(ipg) / 4;
1640 hdr.page_number = 1;
1641 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
1642 memset(&ipg, 0, sizeof(ipg));
1643 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
1644 sizeof(ipg)) != 0) {
1645 DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1\n"
1646 "page 1\n", DEVNAME(sc));
1647 return (1);
1648 }
1649
1650 if (!ISSET(le32toh(ipg.flags), MPII_CFG_IOC_1_REPLY_COALESCING))
1651 return (0);
1652
1653 /* Disable coalescing */
1654 CLR(ipg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING));
1655 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 0, &ipg,
1656 sizeof(ipg)) != 0) {
1657 DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n",
1658 DEVNAME(sc));
1659 return (1);
1660 }
1661
1662 return (0);
1663 }
1664
/*
 * Event mask helpers.  The event notification request carries a 128-bit
 * mask split over four 32-bit words; a set bit suppresses delivery of
 * the corresponding event.  All-ones is endian-invariant, so no
 * byte-swapping is needed in MASKALL.
 */
#define MPII_EVENT_MASKALL(enq)		do {				\
		(enq)->event_masks[0] = 0xffffffff;			\
		(enq)->event_masks[1] = 0xffffffff;			\
		(enq)->event_masks[2] = 0xffffffff;			\
		(enq)->event_masks[3] = 0xffffffff;			\
	} while (0)

/*
 * Clear the mask bit for event 'evt' so the IOC will report it.
 * Fix: shift an unsigned constant -- (1 << 31) on a signed int is
 * undefined behaviour.  Arguments are now parenthesized for hygiene.
 */
#define MPII_EVENT_UNMASK(enq, evt)	do {				\
		(enq)->event_masks[(evt) / 32] &=			\
		    htole32(~(1U << ((evt) % 32)));			\
	} while (0)
1676
1677 static int
1678 mpii_eventnotify(struct mpii_softc *sc)
1679 {
1680 struct mpii_msg_event_request *enq;
1681 struct mpii_ccb *ccb;
1682 char wkname[15];
1683
1684 ccb = mpii_get_ccb(sc);
1685 if (ccb == NULL) {
1686 DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n",
1687 DEVNAME(sc));
1688 return (1);
1689 }
1690
1691 SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
1692 mutex_init(&sc->sc_evt_sas_mtx, MUTEX_DEFAULT, IPL_BIO);
1693 snprintf(wkname, sizeof(wkname), "%ssas", DEVNAME(sc));
1694 if (workqueue_create(&sc->sc_evt_sas_wq, wkname,
1695 mpii_event_sas_work, sc, PRI_NONE, IPL_BIO, WQ_MPSAFE) != 0) {
1696 mpii_put_ccb(sc, ccb);
1697 aprint_error_dev(sc->sc_dev,
1698 "can't create %s workqueue\n", wkname);
1699 return 1;
1700 }
1701
1702 SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
1703 mutex_init(&sc->sc_evt_ack_mtx, MUTEX_DEFAULT, IPL_BIO);
1704 snprintf(wkname, sizeof(wkname), "%sevt", DEVNAME(sc));
1705 if (workqueue_create(&sc->sc_evt_ack_wq, wkname,
1706 mpii_eventack, sc, PRI_NONE, IPL_BIO, WQ_MPSAFE) != 0) {
1707 mpii_put_ccb(sc, ccb);
1708 aprint_error_dev(sc->sc_dev,
1709 "can't create %s workqueue\n", wkname);
1710 return 1;
1711 }
1712
1713 ccb->ccb_done = mpii_eventnotify_done;
1714 enq = ccb->ccb_cmd;
1715
1716 enq->function = MPII_FUNCTION_EVENT_NOTIFICATION;
1717
1718 /*
1719 * Enable reporting of the following events:
1720 *
1721 * MPII_EVENT_SAS_DISCOVERY
1722 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST
1723 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE
1724 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
1725 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST
1726 * MPII_EVENT_IR_VOLUME
1727 * MPII_EVENT_IR_PHYSICAL_DISK
1728 * MPII_EVENT_IR_OPERATION_STATUS
1729 */
1730
1731 MPII_EVENT_MASKALL(enq);
1732 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY);
1733 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
1734 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE);
1735 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
1736 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST);
1737 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME);
1738 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK);
1739 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS);
1740
1741 mpii_start(sc, ccb);
1742
1743 return (0);
1744 }
1745
/*
 * Completion callback for the EVENT_NOTIFICATION request: release the
 * ccb and feed the event reply into the normal event processing path.
 * Ownership of the rcb passes to mpii_event_process().
 */
static void
mpii_eventnotify_done(struct mpii_ccb *ccb)
{
	struct mpii_softc *sc = ccb->ccb_sc;
	struct mpii_rcb *rcb = ccb->ccb_rcb;

	DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc));

	mpii_put_ccb(sc, ccb);
	mpii_event_process(sc, rcb);
}
1757
/*
 * Handle an IR (integrated RAID) configuration change list event.
 * Walks the element list appended to the event reply and keeps the
 * driver's device table in sync: volumes are added/removed, and
 * underlying physical disks are flagged hidden when consumed by a
 * volume or hot spare.  Foreign configurations are ignored.
 */
static void
mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_ir_cfg_change_list *ccl;
	struct mpii_evt_ir_cfg_element *ce;
	struct mpii_device *dev;
	u_int16_t type;
	int i;

	/* the change list immediately follows the event reply header */
	ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1);
	if (ccl->num_elements == 0)
		return;

	if (ISSET(le32toh(ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN)) {
		/* bail on foreign configurations */
		return;
	}

	ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);

	for (i = 0; i < ccl->num_elements; i++, ce++) {
		type = (le16toh(ce->element_flags) &
		    MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK);

		switch (type) {
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME:
			switch (ce->reason_code) {
			case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED:
				/* allocate before taking the lock (M_WAITOK) */
				dev = malloc(sizeof(*dev), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				mutex_enter(&sc->sc_devs_mtx);
				if (mpii_find_dev(sc,
				    le16toh(ce->vol_dev_handle))) {
					mutex_exit(&sc->sc_devs_mtx);
					free(dev, M_DEVBUF);
					printf("%s: device %#x is already "
					    "configured\n", DEVNAME(sc),
					    le16toh(ce->vol_dev_handle));
					break;
				}
				SET(dev->flags, MPII_DF_VOLUME);
				dev->slot = sc->sc_vd_id_low;
				dev->dev_handle = le16toh(ce->vol_dev_handle);
				if (mpii_insert_dev(sc, dev)) {
					mutex_exit(&sc->sc_devs_mtx);
					free(dev, M_DEVBUF);
					break;
				}
				sc->sc_vd_count++;
				mutex_exit(&sc->sc_devs_mtx);
				break;
			case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED:
				mutex_enter(&sc->sc_devs_mtx);
				if (!(dev = mpii_find_dev(sc,
				    le16toh(ce->vol_dev_handle)))) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}
				mpii_remove_dev(sc, dev);
				sc->sc_vd_count--;
				mutex_exit(&sc->sc_devs_mtx);
				break;
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED ||
			    ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				mutex_enter(&sc->sc_devs_mtx);
				if (!(dev = mpii_find_dev(sc,
				    le16toh(ce->phys_disk_dev_handle)))) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}
				/* promoted from a hot spare? */
				CLR(dev->flags, MPII_DF_HOT_SPARE);
				SET(dev->flags, MPII_DF_VOLUME_DISK |
				    MPII_DF_HIDDEN);
				mutex_exit(&sc->sc_devs_mtx);
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				mutex_enter(&sc->sc_devs_mtx);
				if (!(dev = mpii_find_dev(sc,
				    le16toh(ce->phys_disk_dev_handle)))) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}
				SET(dev->flags, MPII_DF_HOT_SPARE |
				    MPII_DF_HIDDEN);
				mutex_exit(&sc->sc_devs_mtx);
			}
			break;
		}
	}
}
1861
/*
 * Handle a SAS topology change list event (interrupt-path side).
 * Newly added devices are entered into the device table immediately;
 * removals are deferred to the sas workqueue (mpii_event_sas_work),
 * which may need to sleep.  Takes ownership of the rcb: it is either
 * queued for the workqueue or released via mpii_event_done().
 */
static void
mpii_event_sas(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply *enp;
	struct mpii_evt_sas_tcl *tcl;
	struct mpii_evt_phy_entry *pe;
	struct mpii_device *dev;
	int i;
	u_int16_t handle;
	int need_queue = 0;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
	DNPRINTF(MPII_D_EVT, "%s: mpii_event_sas 0x%x\n",
	    DEVNAME(sc), le16toh(enp->event));
	KASSERT(le16toh(enp->event) == MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);

	/* change list and phy entries follow the event reply header */
	tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
	pe = (struct mpii_evt_phy_entry *)(tcl + 1);

	for (i = 0; i < tcl->num_entries; i++, pe++) {
		DNPRINTF(MPII_D_EVT, "%s: sas change %d stat %d h %d slot %d phy %d enc %d expand %d\n",
		    DEVNAME(sc), i, pe->phy_status,
		    le16toh(pe->dev_handle),
		    sc->sc_pd_id_start + tcl->start_phy_num + i,
		    tcl->start_phy_num + i, le16toh(tcl->enclosure_handle), le16toh(tcl->expander_handle));

		switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
		case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
			handle = le16toh(pe->dev_handle);
			DNPRINTF(MPII_D_EVT, "%s: sas add handle %d\n",
			    DEVNAME(sc), handle);
			/* allocate before taking the lock (M_WAITOK) */
			dev = malloc(sizeof(*dev), M_DEVBUF, M_WAITOK | M_ZERO);
			mutex_enter(&sc->sc_devs_mtx);
			if (mpii_find_dev(sc, handle)) {
				mutex_exit(&sc->sc_devs_mtx);
				free(dev, M_DEVBUF);
				printf("%s: device %#x is already "
				    "configured\n", DEVNAME(sc), handle);
				break;
			}

			dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i;
			dev->dev_handle = handle;
			dev->phy_num = tcl->start_phy_num + i;
			if (tcl->enclosure_handle)
				dev->physical_port = tcl->physical_port;
			dev->enclosure = le16toh(tcl->enclosure_handle);
			dev->expander = le16toh(tcl->expander_handle);

			if (mpii_insert_dev(sc, dev)) {
				mutex_exit(&sc->sc_devs_mtx);
				free(dev, M_DEVBUF);
				break;
			}
			printf("%s: physical device inserted in slot %d\n",
			    DEVNAME(sc), dev->slot);
			mutex_exit(&sc->sc_devs_mtx);
			break;

		case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
			/* defer to workqueue thread */
			need_queue++;
			break;
		}
	}

	if (need_queue) {
		bool start_wk;
		mutex_enter(&sc->sc_evt_sas_mtx);
		/* only kick the workqueue when the queue was empty */
		start_wk = (SIMPLEQ_FIRST(&sc->sc_evt_sas_queue) == 0);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_sas_queue, rcb, rcb_link);
		if (start_wk) {
			workqueue_enqueue(sc->sc_evt_sas_wq,
			    &sc->sc_evt_sas_work, NULL);
		}
		mutex_exit(&sc->sc_evt_sas_mtx);
	} else
		mpii_event_done(sc, rcb);
}
1941
/*
 * Workqueue handler for deferred SAS topology changes.  Drains the
 * queue of topology-change replies and processes device removals,
 * which require sleeping operations (target reset/removal commands and
 * scsipi detach) that cannot run from the interrupt path.
 */
static void
mpii_event_sas_work(struct work *wq, void *xsc)
{
	struct mpii_softc *sc = xsc;
	struct mpii_rcb *rcb, *next;
	struct mpii_msg_event_reply *enp;
	struct mpii_evt_sas_tcl *tcl;
	struct mpii_evt_phy_entry *pe;
	struct mpii_device *dev;
	int i;

	/* detach the whole pending list under the lock, then drain it */
	mutex_enter(&sc->sc_evt_sas_mtx);
	next = SIMPLEQ_FIRST(&sc->sc_evt_sas_queue);
	SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
	mutex_exit(&sc->sc_evt_sas_mtx);

	while (next != NULL) {
		rcb = next;
		next = SIMPLEQ_NEXT(rcb, rcb_link);

		enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
		DNPRINTF(MPII_D_EVT, "%s: mpii_event_sas_work 0x%x\n",
		    DEVNAME(sc), le16toh(enp->event));
		KASSERT(le16toh(enp->event) == MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
		tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
		pe = (struct mpii_evt_phy_entry *)(tcl + 1);

		for (i = 0; i < tcl->num_entries; i++, pe++) {
			DNPRINTF(MPII_D_EVT, "%s: sas change %d stat %d h %d slot %d phy %d enc %d expand %d\n",
			    DEVNAME(sc), i, pe->phy_status,
			    le16toh(pe->dev_handle),
			    sc->sc_pd_id_start + tcl->start_phy_num + i,
			    tcl->start_phy_num + i, le16toh(tcl->enclosure_handle), le16toh(tcl->expander_handle));

			switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
			case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
				/* already handled in mpii_event_sas() */
				break;

			case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
				mutex_enter(&sc->sc_devs_mtx);
				dev = mpii_find_dev(sc, le16toh(pe->dev_handle));
				if (dev == NULL) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}

				printf(
				    "%s: physical device removed from slot %d\n",
				    DEVNAME(sc), dev->slot);
				/*
				 * unhook from the table first; dev is still
				 * ours to use until the free() below.
				 */
				mpii_remove_dev(sc, dev);
				mutex_exit(&sc->sc_devs_mtx);
				mpii_sas_remove_device(sc, dev->dev_handle);
				if (!ISSET(dev->flags, MPII_DF_HIDDEN)) {
					scsipi_target_detach(&sc->sc_chan,
					    dev->slot, 0, DETACH_FORCE);
				}

				free(dev, M_DEVBUF);
				break;
			}
		}
		mpii_event_done(sc, rcb);
	}
}
2007
2008 static void
2009 mpii_event_discovery(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
2010 {
2011 struct mpii_evt_sas_discovery *esd =
2012 (struct mpii_evt_sas_discovery *)(enp + 1);
2013
2014 if (esd->reason_code == MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED) {
2015 if (esd->discovery_status != 0) {
2016 printf("%s: sas discovery completed with status %#x\n",
2017 DEVNAME(sc), esd->discovery_status);
2018 }
2019
2020 }
2021 }
2022
2023 static void
2024 mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb)
2025 {
2026 struct mpii_msg_event_reply *enp;
2027
2028 enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
2029
2030 DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc),
2031 le16toh(enp->event));
2032
2033 switch (le16toh(enp->event)) {
2034 case MPII_EVENT_EVENT_CHANGE:
2035 /* should be properly ignored */
2036 break;
2037 case MPII_EVENT_SAS_DISCOVERY:
2038 mpii_event_discovery(sc, enp);
2039 break;
2040 case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
2041 mpii_event_sas(sc, rcb);
2042 return;
2043 case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE:
2044 break;
2045 case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
2046 break;
2047 case MPII_EVENT_IR_VOLUME: {
2048 struct mpii_evt_ir_volume *evd =
2049 (struct mpii_evt_ir_volume *)(enp + 1);
2050 struct mpii_device *dev;
2051 #if NBIO > 0
2052 const char *vol_states[] = {
2053 BIOC_SVINVALID_S,
2054 BIOC_SVOFFLINE_S,
2055 BIOC_SVBUILDING_S,
2056 BIOC_SVONLINE_S,
2057 BIOC_SVDEGRADED_S,
2058 BIOC_SVONLINE_S,
2059 };
2060 #endif
2061
2062 if (cold)
2063 break;
2064 mutex_enter(&sc->sc_devs_mtx);
2065 dev = mpii_find_dev(sc, le16toh(evd->vol_dev_handle));
2066 if (dev == NULL) {
2067 mutex_exit(&sc->sc_devs_mtx);
2068 break;
2069 }
2070 #if NBIO > 0
2071 if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED)
2072 printf("%s: volume %d state changed from %s to %s\n",
2073 DEVNAME(sc), dev->slot - sc->sc_vd_id_low,
2074 vol_states[evd->prev_value],
2075 vol_states[evd->new_value]);
2076 #endif
2077 if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED &&
2078 ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC) &&
2079 !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
2080 printf("%s: started resync on a volume %d\n",
2081 DEVNAME(sc), dev->slot - sc->sc_vd_id_low);
2082 }
2083 mutex_exit(&sc->sc_devs_mtx);
2084 break;
2085 case MPII_EVENT_IR_PHYSICAL_DISK:
2086 break;
2087 case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST:
2088 mpii_event_raid(sc, enp);
2089 break;
2090 case MPII_EVENT_IR_OPERATION_STATUS: {
2091 struct mpii_evt_ir_status *evs =
2092 (struct mpii_evt_ir_status *)(enp + 1);
2093 struct mpii_device *dev;
2094
2095 mutex_enter(&sc->sc_devs_mtx);
2096 dev = mpii_find_dev(sc, le16toh(evs->vol_dev_handle));
2097 if (dev != NULL &&
2098 evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC)
2099 dev->percent = evs->percent;
2100 mutex_exit(&sc->sc_devs_mtx);
2101 break;
2102 }
2103 default:
2104 DNPRINTF(MPII_D_EVT, "%s: unhandled event 0x%02x\n",
2105 DEVNAME(sc), le16toh(enp->event));
2106 }
2107
2108 mpii_event_done(sc, rcb);
2109 }
2110
2111 static void
2112 mpii_event_done(struct mpii_softc *sc, struct mpii_rcb *rcb)
2113 {
2114 struct mpii_msg_event_reply *enp = rcb->rcb_reply;
2115 bool need_start;
2116
2117 if (enp->ack_required) {
2118 mutex_enter(&sc->sc_evt_ack_mtx);
2119 need_start = (SIMPLEQ_FIRST(&sc->sc_evt_ack_queue) == 0);
2120 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
2121 if (need_start)
2122 workqueue_enqueue(sc->sc_evt_ack_wq,
2123 &sc->sc_evt_ack_work, NULL);
2124 mutex_exit(&sc->sc_evt_ack_mtx);
2125 } else
2126 mpii_push_reply(sc, rcb);
2127 }
2128
/*
 * Tell the IOC to drop a SAS device: first issue a target reset for the
 * device handle (aborting any commands the firmware still holds), then a
 * SAS I/O unit control REMOVE_DEVICE operation.  Both commands run
 * synchronously on a single ccb, which is reused between the two steps.
 * Best effort: returns silently if no ccb is available.
 */
static void
mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle)
{
	struct mpii_msg_scsi_task_request *stq;
	struct mpii_msg_sas_oper_request *soq;
	struct mpii_ccb *ccb;

	ccb = mpii_get_ccb(sc);
	if (ccb == NULL)
		return;

	/* step 1: target reset (request frame is zeroed by mpii_put_ccb) */
	stq = ccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	stq->dev_handle = htole16(handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);

	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	/* reuse a ccb */
	ccb->ccb_state = MPII_CCB_READY;
	ccb->ccb_rcb = NULL;

	/* step 2: ask the firmware to remove the device handle */
	soq = ccb->ccb_cmd;
	memset(soq, 0, sizeof(*soq));
	soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL;
	soq->operation = MPII_SAS_OP_REMOVE_DEVICE;
	soq->dev_handle = htole16(handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);
	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	mpii_put_ccb(sc, ccb);
}
2168
/*
 * Query IOC facts via the doorbell handshake and read manufacturing
 * page 0, then print the board name plus firmware and MPI message
 * versions.  Uses polled/handshake I/O only.  Returns 0 on success,
 * non-zero on failure (NB: the config-page path returns EINVAL while
 * the handshake paths return 1; callers should only test != 0).
 */
static int
mpii_board_info(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request ifq;
	struct mpii_msg_iocfacts_reply ifp;
	struct mpii_cfg_manufacturing_pg0 mpg;
	struct mpii_cfg_hdr hdr;

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to request ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to receive ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	/* manufacturing page 0 carries the human-readable board name */
	hdr.page_version = 0;
	hdr.page_length = sizeof(mpg) / 4;
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_MANUFACTURING;
	memset(&mpg, 0, sizeof(mpg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &mpg,
	    sizeof(mpg)) != 0) {
		printf("%s: unable to fetch manufacturing page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	printf("%s: %s, firmware %u.%u.%u.%u%s, MPI %u.%u\n", DEVNAME(sc),
	    mpg.board_name, ifp.fw_version_maj, ifp.fw_version_min,
	    ifp.fw_version_unit, ifp.fw_version_dev,
	    ISSET(sc->sc_flags, MPII_F_RAID) ? " IR" : "",
	    ifp.msg_version_maj, ifp.msg_version_min);

	return (0);
}
2214
/*
 * Read IOC page 8 to work out how targets are mapped: whether target
 * id 0 is reserved (pad), and where RAID volumes sit relative to
 * physical devices.  Adjusts sc_vd_id_low (first volume target id)
 * and sc_pd_id_start (first physical device target id).  Returns 0 on
 * success, non-zero on failure.
 */
static int
mpii_target_map(struct mpii_softc *sc)
{
	struct mpii_cfg_hdr hdr;
	struct mpii_cfg_ioc_pg8 ipg;
	int flags, pad = 0;

	hdr.page_version = 0;
	hdr.page_length = sizeof(ipg) / 4;
	hdr.page_number = 8;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
	memset(&ipg, 0, sizeof(ipg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
	    sizeof(ipg)) != 0) {
		printf("%s: unable to fetch ioc page 8\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* firmware may reserve target id 0; shift everything up by one */
	if (le16toh(ipg.flags) & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0)
		pad = 1;

	flags = le16toh(ipg.ir_volume_mapping_flags) &
	    MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK;
	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
		if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING) {
			/* volumes first, then physical devices */
			sc->sc_vd_id_low += pad;
			pad = sc->sc_max_volumes; /* for sc_pd_id_start */
		} else
			/* volumes mapped at the top of the id space */
			sc->sc_vd_id_low = sc->sc_max_devices -
			    sc->sc_max_volumes;
	}

	sc->sc_pd_id_start += pad;

	return (0);
}
2252
2253 static int
2254 mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number,
2255 u_int32_t address, int flags, void *p)
2256 {
2257 struct mpii_msg_config_request *cq;
2258 struct mpii_msg_config_reply *cp;
2259 struct mpii_ccb *ccb;
2260 struct mpii_cfg_hdr *hdr = p;
2261 struct mpii_ecfg_hdr *ehdr = p;
2262 int etype = 0;
2263 int rv = 0;
2264
2265 DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x "
2266 "address: 0x%08x flags: 0x%x\n", DEVNAME(sc), type, number,
2267 address, flags);
2268
2269 ccb = mpii_get_ccb(sc);
2270 if (ccb == NULL) {
2271 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n",
2272 DEVNAME(sc));
2273 return (1);
2274 }
2275
2276 if (ISSET(flags, MPII_PG_EXTENDED)) {
2277 etype = type;
2278 type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2279 }
2280
2281 cq = ccb->ccb_cmd;
2282
2283 cq->function = MPII_FUNCTION_CONFIG;
2284
2285 cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER;
2286
2287 cq->config_header.page_number = number;
2288 cq->config_header.page_type = type;
2289 cq->ext_page_type = etype;
2290 cq->page_address = htole32(address);
2291 cq->page_buffer.sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
2292 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
2293
2294 ccb->ccb_done = mpii_empty_done;
2295 if (ISSET(flags, MPII_PG_POLL)) {
2296 if (mpii_poll(sc, ccb) != 0) {
2297 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2298 DEVNAME(sc));
2299 return (1);
2300 }
2301 } else
2302 mpii_wait(sc, ccb);
2303
2304 if (ccb->ccb_rcb == NULL) {
2305 mpii_put_ccb(sc, ccb);
2306 return (1);
2307 }
2308 cp = ccb->ccb_rcb->rcb_reply;
2309
2310 DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x sgl_flags: 0x%02x "
2311 "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
2312 cp->sgl_flags, cp->msg_length, cp->function);
2313 DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2314 "msg_flags: 0x%02x\n", DEVNAME(sc),
2315 le16toh(cp->ext_page_length), cp->ext_page_type,
2316 cp->msg_flags);
2317 DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2318 cp->vp_id, cp->vf_id);
2319 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2320 le16toh(cp->ioc_status));
2321 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2322 le32toh(cp->ioc_loginfo));
2323 DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2324 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2325 cp->config_header.page_version,
2326 cp->config_header.page_length,
2327 cp->config_header.page_number,
2328 cp->config_header.page_type);
2329
2330 if (le16toh(cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2331 rv = 1;
2332 else if (ISSET(flags, MPII_PG_EXTENDED)) {
2333 memset(ehdr, 0, sizeof(*ehdr));
2334 ehdr->page_version = cp->config_header.page_version;
2335 ehdr->page_number = cp->config_header.page_number;
2336 ehdr->page_type = cp->config_header.page_type;
2337 ehdr->ext_page_length = cp->ext_page_length;
2338 ehdr->ext_page_type = cp->ext_page_type;
2339 } else
2340 *hdr = cp->config_header;
2341
2342 mpii_push_reply(sc, ccb->ccb_rcb);
2343 mpii_put_ccb(sc, ccb);
2344
2345 return (rv);
2346 }
2347
2348 static int
2349 mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags,
2350 void *p, int read, void *page, size_t len)
2351 {
2352 struct mpii_msg_config_request *cq;
2353 struct mpii_msg_config_reply *cp;
2354 struct mpii_ccb *ccb;
2355 struct mpii_cfg_hdr *hdr = p;
2356 struct mpii_ecfg_hdr *ehdr = p;
2357 uintptr_t kva;
2358 int page_length;
2359 int rv = 0;
2360
2361 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d "
2362 "type: %x\n", DEVNAME(sc), address, read, hdr->page_type);
2363
2364 page_length = ISSET(flags, MPII_PG_EXTENDED) ?
2365 le16toh(ehdr->ext_page_length) : hdr->page_length;
2366
2367 if (len > sc->sc_request_size - sizeof(*cq) || len < page_length * 4)
2368 return (1);
2369
2370 ccb = mpii_get_ccb(sc);
2371 if (ccb == NULL) {
2372 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n",
2373 DEVNAME(sc));
2374 return (1);
2375 }
2376
2377 cq = ccb->ccb_cmd;
2378
2379 cq->function = MPII_FUNCTION_CONFIG;
2380
2381 cq->action = (read ? MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2382 MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2383
2384 if (ISSET(flags, MPII_PG_EXTENDED)) {
2385 cq->config_header.page_version = ehdr->page_version;
2386 cq->config_header.page_number = ehdr->page_number;
2387 cq->config_header.page_type = ehdr->page_type;
2388 cq->ext_page_len = ehdr->ext_page_length;
2389 cq->ext_page_type = ehdr->ext_page_type;
2390 } else
2391 cq->config_header = *hdr;
2392 cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK;
2393 cq->page_address = htole32(address);
2394 cq->page_buffer.sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
2395 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL |
2396 MPII_SGE_FL_SIZE_64 | (page_length * 4) |
2397 (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT));
2398
2399 /* bounce the page via the request space to avoid more bus_dma games */
2400 mpii_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2401 sizeof(struct mpii_msg_config_request));
2402
2403 kva = (uintptr_t)ccb->ccb_cmd;
2404 kva += sizeof(struct mpii_msg_config_request);
2405
2406 if (!read)
2407 memcpy((void *)kva, page, len);
2408
2409 ccb->ccb_done = mpii_empty_done;
2410 if (ISSET(flags, MPII_PG_POLL)) {
2411 if (mpii_poll(sc, ccb) != 0) {
2412 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2413 DEVNAME(sc));
2414 return (1);
2415 }
2416 } else
2417 mpii_wait(sc, ccb);
2418
2419 if (ccb->ccb_rcb == NULL) {
2420 mpii_put_ccb(sc, ccb);
2421 return (1);
2422 }
2423 cp = ccb->ccb_rcb->rcb_reply;
2424
2425 DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x msg_length: %d "
2426 "function: 0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length,
2427 cp->function);
2428 DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2429 "msg_flags: 0x%02x\n", DEVNAME(sc),
2430 le16toh(cp->ext_page_length), cp->ext_page_type,
2431 cp->msg_flags);
2432 DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2433 cp->vp_id, cp->vf_id);
2434 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2435 le16toh(cp->ioc_status));
2436 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2437 le32toh(cp->ioc_loginfo));
2438 DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2439 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2440 cp->config_header.page_version,
2441 cp->config_header.page_length,
2442 cp->config_header.page_number,
2443 cp->config_header.page_type);
2444
2445 if (le16toh(cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2446 rv = 1;
2447 else if (read)
2448 memcpy(page, (void *)kva, len);
2449
2450 mpii_push_reply(sc, ccb->ccb_rcb);
2451 mpii_put_ccb(sc, ccb);
2452
2453 return (rv);
2454 }
2455
/*
 * Consume one reply descriptor from the post queue.  For address
 * replies the frame's DMA address is translated back into its rcb
 * index; other descriptor types carry no reply frame and yield NULL.
 * The descriptor slot is reset to all-ones, which is the "empty"
 * pattern the post-queue scan looks for.  Must be called with
 * sc_rep_mtx held.
 */
static struct mpii_rcb *
mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp)
{
	struct mpii_rcb *rcb = NULL;
	u_int32_t rfid;

	KASSERT(mutex_owned(&sc->sc_rep_mtx));
	DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc));

	if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
	    MPII_REPLY_DESCR_ADDRESS_REPLY) {
		/* frame address -> index into the reply frame array */
		rfid = (le32toh(rdp->frame_addr) -
		    (u_int32_t)MPII_DMA_DVA(sc->sc_replies)) /
		    sc->sc_reply_size;

		bus_dmamap_sync(sc->sc_dmat,
		    MPII_DMA_MAP(sc->sc_replies), sc->sc_reply_size * rfid,
		    sc->sc_reply_size, BUS_DMASYNC_POSTREAD);

		rcb = &sc->sc_rcbs[rfid];
	}

	/* mark the descriptor slot unused again */
	memset(rdp, 0xff, sizeof(*rdp));

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    8 * sc->sc_reply_post_host_index, 8,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return (rcb);
}
2486
2487 static struct mpii_dmamem *
2488 mpii_dmamem_alloc(struct mpii_softc *sc, size_t size)
2489 {
2490 struct mpii_dmamem *mdm;
2491 int nsegs;
2492
2493 mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
2494 mdm->mdm_size = size;
2495
2496 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2497 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
2498 goto mdmfree;
2499
2500 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
2501 1, &nsegs, BUS_DMA_NOWAIT) != 0)
2502 goto destroy;
2503
2504 if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
2505 &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
2506 goto free;
2507
2508 if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
2509 NULL, BUS_DMA_NOWAIT) != 0)
2510 goto unmap;
2511
2512 memset(mdm->mdm_kva, 0, size);
2513
2514 return (mdm);
2515
2516 unmap:
2517 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
2518 free:
2519 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
2520 destroy:
2521 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
2522 mdmfree:
2523 free(mdm, M_DEVBUF);
2524
2525 return (NULL);
2526 }
2527
/*
 * Release a buffer obtained from mpii_dmamem_alloc().  Teardown runs
 * in the reverse order of construction: unload, unmap, free the
 * segment, destroy the map, then free the bookkeeping structure.
 */
static void
mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm)
{
	DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %p\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF);
}
2539
2540 static int
2541 mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev)
2542 {
2543 int slot; /* initial hint */
2544
2545 KASSERT(mutex_owned(&sc->sc_devs_mtx));
2546 DNPRINTF(MPII_D_EVT, "%s: mpii_insert_dev wants slot %d\n",
2547 DEVNAME(sc), dev->slot);
2548 if (dev == NULL || dev->slot < 0)
2549 return (1);
2550 slot = dev->slot;
2551
2552 while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL)
2553 slot++;
2554
2555 if (slot >= sc->sc_max_devices)
2556 return (1);
2557
2558 DNPRINTF(MPII_D_EVT, "%s: mpii_insert_dev alloc slot %d\n",
2559 DEVNAME(sc), slot);
2560
2561 dev->slot = slot;
2562 sc->sc_devs[slot] = dev;
2563
2564 return (0);
2565 }
2566
2567 static int
2568 mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev)
2569 {
2570 int i;
2571
2572 KASSERT(mutex_owned(&sc->sc_devs_mtx));
2573 if (dev == NULL)
2574 return (1);
2575
2576 for (i = 0; i < sc->sc_max_devices; i++) {
2577 if (sc->sc_devs[i] == NULL)
2578 continue;
2579
2580 if (sc->sc_devs[i]->dev_handle == dev->dev_handle) {
2581 sc->sc_devs[i] = NULL;
2582 return (0);
2583 }
2584 }
2585
2586 return (1);
2587 }
2588
2589 static struct mpii_device *
2590 mpii_find_dev(struct mpii_softc *sc, u_int16_t handle)
2591 {
2592 int i;
2593 KASSERT(mutex_owned(&sc->sc_devs_mtx));
2594
2595 for (i = 0; i < sc->sc_max_devices; i++) {
2596 if (sc->sc_devs[i] == NULL)
2597 continue;
2598
2599 if (sc->sc_devs[i]->dev_handle == handle)
2600 return (sc->sc_devs[i]);
2601 }
2602
2603 return (NULL);
2604 }
2605
2606 static int
2607 mpii_alloc_ccbs(struct mpii_softc *sc)
2608 {
2609 struct mpii_ccb *ccb;
2610 u_int8_t *cmd;
2611 int i;
2612 char wqname[16];
2613
2614 SIMPLEQ_INIT(&sc->sc_ccb_free);
2615 SIMPLEQ_INIT(&sc->sc_ccb_tmos);
2616 mutex_init(&sc->sc_ccb_free_mtx, MUTEX_DEFAULT, IPL_BIO);
2617 cv_init(&sc->sc_ccb_free_cv, "mpii_ccbs");
2618 mutex_init(&sc->sc_ssb_tmomtx, MUTEX_DEFAULT, IPL_BIO);
2619 snprintf(wqname, sizeof(wqname) - 1, "%sabrt", DEVNAME(sc));
2620 workqueue_create(&sc->sc_ssb_tmowk, wqname, mpii_scsi_cmd_tmo_handler,
2621 sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
2622 if (sc->sc_ssb_tmowk == NULL)
2623 return 1;
2624
2625 sc->sc_ccbs = malloc((sc->sc_max_cmds-1) * sizeof(*ccb),
2626 M_DEVBUF, M_WAITOK | M_ZERO);
2627 sc->sc_requests = mpii_dmamem_alloc(sc,
2628 sc->sc_request_size * sc->sc_max_cmds);
2629 if (sc->sc_requests == NULL) {
2630 printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
2631 goto free_ccbs;
2632 }
2633 cmd = MPII_DMA_KVA(sc->sc_requests);
2634
2635 /*
2636 * we have sc->sc_max_cmds system request message
2637 * frames, but smid zero cannot be used. so we then
2638 * have (sc->sc_max_cmds - 1) number of ccbs
2639 */
2640 for (i = 1; i < sc->sc_max_cmds; i++) {
2641 ccb = &sc->sc_ccbs[i - 1];
2642
2643 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, sc->sc_max_sgl,
2644 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2645 &ccb->ccb_dmamap) != 0) {
2646 printf("%s: unable to create dma map\n", DEVNAME(sc));
2647 goto free_maps;
2648 }
2649
2650 ccb->ccb_sc = sc;
2651 mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2652 cv_init(&ccb->ccb_cv, "mpiiexec");
2653
2654 ccb->ccb_smid = htole16(i);
2655 ccb->ccb_offset = sc->sc_request_size * i;
2656
2657 ccb->ccb_cmd = &cmd[ccb->ccb_offset];
2658 ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests) +
2659 ccb->ccb_offset;
2660
2661 DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %p map: %p "
2662 "sc: %p smid: %#x offs: %#lx cmd: %p dva: %#lx\n",
2663 DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
2664 ccb->ccb_smid, ccb->ccb_offset, ccb->ccb_cmd,
2665 ccb->ccb_cmd_dva);
2666
2667 mpii_put_ccb(sc, ccb);
2668 }
2669
2670 return (0);
2671
2672 free_maps:
2673 while ((ccb = mpii_get_ccb(sc)) != NULL)
2674 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2675
2676 mpii_dmamem_free(sc, sc->sc_requests);
2677 free_ccbs:
2678 free(sc->sc_ccbs, M_DEVBUF);
2679
2680 return (1);
2681 }
2682
2683 static void
2684 mpii_put_ccb(struct mpii_softc *sc, struct mpii_ccb *ccb)
2685 {
2686 DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %p\n", DEVNAME(sc), ccb);
2687
2688 ccb->ccb_state = MPII_CCB_FREE;
2689 ccb->ccb_cookie = NULL;
2690 ccb->ccb_done = NULL;
2691 ccb->ccb_rcb = NULL;
2692 memset(ccb->ccb_cmd, 0, sc->sc_request_size);
2693
2694 mutex_enter(&sc->sc_ccb_free_mtx);
2695 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
2696 mutex_exit(&sc->sc_ccb_free_mtx);
2697 }
2698
2699 static struct mpii_ccb *
2700 mpii_get_ccb(struct mpii_softc *sc)
2701 {
2702 struct mpii_ccb *ccb;
2703
2704 mutex_enter(&sc->sc_ccb_free_mtx);
2705 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
2706 if (ccb != NULL) {
2707 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
2708 ccb->ccb_state = MPII_CCB_READY;
2709 KASSERT(ccb->ccb_sc == sc);
2710 }
2711 mutex_exit(&sc->sc_ccb_free_mtx);
2712
2713 DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %p\n", DEVNAME(sc), ccb);
2714
2715 return (ccb);
2716 }
2717
2718 static int
2719 mpii_alloc_replies(struct mpii_softc *sc)
2720 {
2721 DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc));
2722
2723 sc->sc_rcbs = malloc(sc->sc_num_reply_frames * sizeof(struct mpii_rcb),
2724 M_DEVBUF, M_WAITOK);
2725
2726 sc->sc_replies = mpii_dmamem_alloc(sc, sc->sc_reply_size *
2727 sc->sc_num_reply_frames);
2728 if (sc->sc_replies == NULL) {
2729 free(sc->sc_rcbs, M_DEVBUF);
2730 return (1);
2731 }
2732
2733 return (0);
2734 }
2735
2736 static void
2737 mpii_push_replies(struct mpii_softc *sc)
2738 {
2739 struct mpii_rcb *rcb;
2740 uintptr_t kva = (uintptr_t)MPII_DMA_KVA(sc->sc_replies);
2741 int i;
2742
2743 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
2744 0, sc->sc_reply_size * sc->sc_num_reply_frames,
2745 BUS_DMASYNC_PREREAD);
2746
2747 for (i = 0; i < sc->sc_num_reply_frames; i++) {
2748 rcb = &sc->sc_rcbs[i];
2749
2750 rcb->rcb_reply = (void *)(kva + sc->sc_reply_size * i);
2751 rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
2752 sc->sc_reply_size * i;
2753 mpii_push_reply(sc, rcb);
2754 }
2755 }
2756
/*
 * Post a request descriptor for the ccb to the IOC.  The request frame
 * is synced for device access, a descriptor naming the frame's smid is
 * built, and the 64-bit descriptor is written to the request post
 * registers as two 32-bit writes under sc_req_mtx (the single 64-bit
 * write path is compiled out via "&& 0" below).
 */
static void
mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mpii_request_header *rhp;
	struct mpii_request_descr descr;
#if defined(__LP64__) && 0
	u_long *rdp = (u_long *)&descr;
#else
	u_int32_t *rdp = (u_int32_t *)&descr;
#endif

	DNPRINTF(MPII_D_RW, "%s: mpii_start %#lx\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, sc->sc_request_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPII_CCB_QUEUED;

	rhp = ccb->ccb_cmd;

	memset(&descr, 0, sizeof(descr));

	/* descriptor type depends on the request's function code */
	switch (rhp->function) {
	case MPII_FUNCTION_SCSI_IO_REQUEST:
		descr.request_flags = MPII_REQ_DESCR_SCSI_IO;
		descr.dev_handle = htole16(ccb->ccb_dev_handle);
		break;
	case MPII_FUNCTION_SCSI_TASK_MGMT:
		descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY;
		break;
	default:
		descr.request_flags = MPII_REQ_DESCR_DEFAULT;
	}

	descr.vf_id = sc->sc_vf_id;
	descr.smid = ccb->ccb_smid;

#if defined(__LP64__) && 0
	DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, *rdp);
#else
	DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%04x\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);

	DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_HIGH (0x%08x) write "
	    "0x%04x\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1));

	/*
	 * The two halves must land atomically with respect to other
	 * posters, hence the mutex; barriers keep the writes ordered.
	 */
	mutex_enter(&sc->sc_req_mtx);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, rdp[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_HIGH, rdp[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);
	mutex_exit(&sc->sc_req_mtx);
#endif
}
2821
/*
 * Run a ccb to completion by polling instead of sleeping: the ccb's
 * done hook is temporarily swapped for mpii_poll_done, which clears
 * the on-stack "rv" flag when the interrupt path completes the ccb.
 * The original done callback is invoked before returning.  Always
 * returns 0.
 */
static int
mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	void (*done)(struct mpii_ccb *);
	void *cookie;
	int rv = 1;

	DNPRINTF(MPII_D_INTR, "%s: mpii_poll\n", DEVNAME(sc));

	/* stash the caller's completion hook and cookie */
	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_poll_done;
	ccb->ccb_cookie = &rv;

	mpii_start(sc, ccb);

	/* rv is cleared by mpii_poll_done when the ccb completes */
	while (rv == 1) {
		/* avoid excessive polling */
		if (mpii_reply_waiting(sc))
			mpii_intr(sc);
		else
			delay(10);
	}

	ccb->ccb_cookie = cookie;
	done(ccb);

	return (0);
}
2852
/*
 * Completion hook for mpii_poll(): clears the poller's on-stack flag
 * (passed via ccb_cookie) to break its busy-wait loop.
 */
static void
mpii_poll_done(struct mpii_ccb *ccb)
{
	int *rv = ccb->ccb_cookie;

	*rv = 0;
}
2860
/*
 * Allocate the reply free queue (seeded with the DMA address of every
 * reply frame) and the reply post queue (filled with the all-ones
 * "empty" pattern).  Returns 0 on success, 1 on failure.
 */
static int
mpii_alloc_queues(struct mpii_softc *sc)
{
	u_int32_t *rfp;
	int i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc));

	mutex_init(&sc->sc_reply_free_mtx, MUTEX_DEFAULT, IPL_BIO);
	sc->sc_reply_freeq = mpii_dmamem_alloc(sc,
	    sc->sc_reply_free_qdepth * sizeof(*rfp));
	if (sc->sc_reply_freeq == NULL)
		return (1);
	/* hand every reply frame's DMA address to the free queue */
	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
	for (i = 0; i < sc->sc_num_reply_frames; i++) {
		rfp[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
		    sc->sc_reply_size * i;
	}

	sc->sc_reply_postq = mpii_dmamem_alloc(sc,
	    sc->sc_reply_post_qdepth * sizeof(struct mpii_reply_descr));
	if (sc->sc_reply_postq == NULL)
		goto free_reply_freeq;
	sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq);
	/* 0xff marks unused descriptor slots (see mpii_reply()) */
	memset(sc->sc_reply_postq_kva, 0xff, sc->sc_reply_post_qdepth *
	    sizeof(struct mpii_reply_descr));

	return (0);

free_reply_freeq:
	mpii_dmamem_free(sc, sc->sc_reply_freeq);
	return (1);
}
2894
/*
 * Publish the initial queue indices to the IOC: the free queue starts
 * completely full (index qdepth - 1) and the post queue starts empty
 * (index 0).
 */
static void
mpii_init_queues(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_init_queues\n", DEVNAME(sc));

	sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1;
	sc->sc_reply_post_host_index = 0;
	mpii_write_reply_free(sc, sc->sc_reply_free_host_index);
	mpii_write_reply_post(sc, sc->sc_reply_post_host_index);
}
2905
/*
 * Run a ccb to completion, sleeping on the ccb's condvar.  The done
 * hook is swapped for mpii_wait_done, which clears ccb_cookie and
 * signals completion; ccb_cookie == NULL is the "done" condition.
 * The caller's done callback is invoked before returning.
 */
static void
mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	void (*done)(struct mpii_ccb *);
	void *cookie;

	/* stash the caller's completion hook and cookie */
	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_wait_done;
	ccb->ccb_cookie = ccb;

	/* XXX this will wait forever for the ccb to complete */

	mpii_start(sc, ccb);

	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	ccb->ccb_cookie = cookie;
	done(ccb);
}
2930
/*
 * Completion hook for mpii_wait(): clear ccb_cookie (the sleeper's
 * wait condition) under ccb_mtx and wake the waiter.
 */
static void
mpii_wait_done(struct mpii_ccb *ccb)
{
	mutex_enter(&ccb->ccb_mtx);
	ccb->ccb_cookie = NULL;
	cv_signal(&ccb->ccb_cv);
	mutex_exit(&ccb->ccb_mtx);
}
2939
/*
 * scsipi adapter entry point.  Handles xfer-mode queries inline and
 * translates ADAPTER_REQ_RUN_XFER requests into MPII SCSI IO request
 * frames: validates the CDB length, resolves the target to an IOC
 * device handle, fills in the IO frame (direction, CDB, sense buffer
 * at the tail of the request), builds the SGL, and either polls
 * (XS_CTL_POLL) or queues the command with a timeout callout.
 */
static void
mpii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct mpii_softc *sc = device_private(adapt->adapt_dev);
	struct mpii_ccb *ccb;
	struct mpii_msg_scsi_io *io;
	struct mpii_device *dev;
	int target, timeout, ret;
	u_int16_t dev_handle;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsipi_request\n", DEVNAME(sc));

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	/* fail oversized CDBs with ILLEGAL REQUEST sense data */
	if (xs->cmdlen > MPII_CDB_LEN) {
		DNPRINTF(MPII_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.scsi_sense.response_code =
		    SSD_RCODE_VALID | SSD_RCODE_CURRENT;
		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.scsi_sense.asc = 0x20;
		xs->error = XS_SENSE;
		scsipi_done(xs);
		return;
	}

	/* snapshot the device handle under the lock; dev may vanish */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = sc->sc_devs[target]) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		/* device no longer exists */
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		return;
	}
	dev_handle = dev->dev_handle;
	mutex_exit(&sc->sc_devs_mtx);

	ccb = mpii_get_ccb(sc);
	if (ccb == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}
	DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->cmd->opcode: 0x%02x xs->xs_control: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_smid, xs->cmd->opcode, xs->xs_control);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpii_scsi_cmd_done;
	ccb->ccb_dev_handle = dev_handle;

	io = ccb->ccb_cmd;
	memset(io, 0, sizeof(*io));
	io->function = MPII_FUNCTION_SCSI_IO_REQUEST;
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = sizeof(struct mpii_msg_scsi_io) / 4;
	io->io_flags = htole16(xs->cmdlen);
	io->dev_handle = htole16(ccb->ccb_dev_handle);
	io->lun[0] = htobe16(periph->periph_lun);

	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}

	io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q;

	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	io->data_length = htole32(xs->datalen);

	/* sense data is at the end of a request */
	io->sense_buffer_low_address = htole32(ccb->ccb_cmd_dva +
	    sc->sc_request_size - sizeof(struct scsi_sense_data));

	/* build the SGL; SAS3 controllers use the IEEE SGL format */
	if (ISSET(sc->sc_flags, MPII_F_SAS3))
		ret = mpii_load_xs_sas3(ccb);
	else
		ret = mpii_load_xs(ccb);

	if (ret != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		goto done;
	}

	if (xs->xs_control & XS_CTL_POLL) {
		if (mpii_poll(sc, ccb) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}
		return;
	}
	/* arm the command timeout before posting the request */
	timeout = mstohz(xs->timeout);
	if (timeout == 0)
		timeout = 1;
	callout_reset(&xs->xs_callout, timeout, mpii_scsi_cmd_tmo, ccb);
	mpii_start(sc, ccb);
	return;
done:
	mpii_put_ccb(sc, ccb);
	scsipi_done(xs);
}
3072
3073 static void
3074 mpii_scsi_cmd_tmo(void *xccb)
3075 {
3076 struct mpii_ccb *ccb = xccb;
3077 struct mpii_softc *sc = ccb->ccb_sc;
3078 bool start_work;
3079
3080 printf("%s: mpii_scsi_cmd_tmo\n", DEVNAME(sc));
3081
3082 if (ccb->ccb_state == MPII_CCB_QUEUED) {
3083 mutex_enter(&sc->sc_ssb_tmomtx);
3084 start_work = (SIMPLEQ_FIRST(&sc->sc_ccb_tmos) == 0);
3085 ccb->ccb_state = MPII_CCB_TIMEOUT;
3086 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_tmos, ccb, ccb_link);
3087 if (start_work) {
3088 workqueue_enqueue(sc->sc_ssb_tmowk,
3089 &sc->sc_ssb_tmowork, NULL);
3090 }
3091 mutex_exit(&sc->sc_ssb_tmomtx);
3092 }
3093 }
3094
3095 static void
3096 mpii_scsi_cmd_tmo_handler(struct work *wk, void *cookie)
3097 {
3098 struct mpii_softc *sc = cookie;
3099 struct mpii_ccb *next;
3100 struct mpii_ccb *ccb;
3101 struct mpii_ccb *tccb;
3102 struct mpii_msg_scsi_task_request *stq;
3103
3104 mutex_enter(&sc->sc_ssb_tmomtx);
3105 next = SIMPLEQ_FIRST(&sc->sc_ccb_tmos);
3106 SIMPLEQ_INIT(&sc->sc_ccb_tmos);
3107 mutex_exit(&sc->sc_ssb_tmomtx);
3108
3109 while (next != NULL) {
3110 ccb = next;
3111 next = SIMPLEQ_NEXT(ccb, ccb_link);
3112 if (ccb->ccb_state != MPII_CCB_TIMEOUT)
3113 continue;
3114 tccb = mpii_get_ccb(sc);
3115 stq = tccb->ccb_cmd;
3116 stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
3117 stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
3118 stq->dev_handle = htole16(ccb->ccb_dev_handle);
3119
3120 tccb->ccb_done = mpii_scsi_cmd_tmo_done;
3121 mpii_wait(sc, tccb);
3122 }
3123 }
3124
/*
 * Completion hook for the target reset issued by the timeout handler:
 * just recycle the task-management ccb.
 */
static void
mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb)
{
	mpii_put_ccb(tccb->ccb_sc, tccb);
}
3130
3131 static u_int8_t
3132 map_scsi_status(u_int8_t mpii_scsi_status)
3133 {
3134 u_int8_t scsi_status;
3135
3136 switch (mpii_scsi_status)
3137 {
3138 case MPII_SCSIIO_STATUS_GOOD:
3139 scsi_status = SCSI_OK;
3140 break;
3141
3142 case MPII_SCSIIO_STATUS_CHECK_COND:
3143 scsi_status = SCSI_CHECK;
3144 break;
3145
3146 case MPII_SCSIIO_STATUS_BUSY:
3147 scsi_status = SCSI_BUSY;
3148 break;
3149
3150 case MPII_SCSIIO_STATUS_INTERMEDIATE:
3151 scsi_status = SCSI_INTERM;
3152 break;
3153
3154 case MPII_SCSIIO_STATUS_INTERMEDIATE_CONDMET:
3155 scsi_status = SCSI_INTERM;
3156 break;
3157
3158 case MPII_SCSIIO_STATUS_RESERVATION_CONFLICT:
3159 scsi_status = SCSI_RESV_CONFLICT;
3160 break;
3161
3162 case MPII_SCSIIO_STATUS_CMD_TERM:
3163 case MPII_SCSIIO_STATUS_TASK_ABORTED:
3164 scsi_status = SCSI_TERMINATED;
3165 break;
3166
3167 case MPII_SCSIIO_STATUS_TASK_SET_FULL:
3168 scsi_status = SCSI_QUEUE_FULL;
3169 break;
3170
3171 case MPII_SCSIIO_STATUS_ACA_ACTIVE:
3172 scsi_status = SCSI_ACA_ACTIVE;
3173 break;
3174
3175 default:
3176 /* XXX: for the lack of anything better and other than OK */
3177 scsi_status = 0xFF;
3178 break;
3179 }
3180
3181 return scsi_status;
3182 }
3183
3184 static void
3185 mpii_scsi_cmd_done(struct mpii_ccb *ccb)
3186 {
3187 struct mpii_msg_scsi_io_error *sie;
3188 struct mpii_softc *sc = ccb->ccb_sc;
3189 struct scsipi_xfer *xs = ccb->ccb_cookie;
3190 struct scsi_sense_data *sense;
3191 bus_dmamap_t dmap = ccb->ccb_dmamap;
3192 bool timeout = 1;
3193
3194 callout_stop(&xs->xs_callout);
3195 if (ccb->ccb_state == MPII_CCB_TIMEOUT)
3196 timeout = 1;
3197 ccb->ccb_state = MPII_CCB_READY;
3198
3199 if (xs->datalen != 0) {
3200 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
3201 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
3202 BUS_DMASYNC_POSTWRITE);
3203
3204 bus_dmamap_unload(sc->sc_dmat, dmap);
3205 }
3206
3207 KASSERT(xs->error == XS_NOERROR);
3208 KASSERT(xs->resid == xs->datalen);
3209 KASSERT(xs->status == SCSI_OK);
3210
3211 if (ccb->ccb_rcb == NULL) {
3212 /* no scsi error, we're ok so drop out early */
3213 xs->resid = 0;
3214 goto done;
3215 }
3216
3217 sie = ccb->ccb_rcb->rcb_reply;
3218
3219 DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d "
3220 "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
3221 xs->xs_control);
3222 DNPRINTF(MPII_D_CMD, "%s: dev_handle: %d msg_length: %d "
3223 "function: 0x%02x\n", DEVNAME(sc), le16toh(sie->dev_handle),
3224 sie->msg_length, sie->function);
3225 DNPRINTF(MPII_D_CMD, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
3226 sie->vp_id, sie->vf_id);
3227 DNPRINTF(MPII_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x "
3228 "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
3229 sie->scsi_state, le16toh(sie->ioc_status));
3230 DNPRINTF(MPII_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
3231 le32toh(sie->ioc_loginfo));
3232 DNPRINTF(MPII_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc),
3233 le32toh(sie->transfer_count));
3234 DNPRINTF(MPII_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc),
3235 le32toh(sie->sense_count));
3236 DNPRINTF(MPII_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc),
3237 le32toh(sie->response_info));
3238 DNPRINTF(MPII_D_CMD, "%s: task_tag: 0x%04x\n", DEVNAME(sc),
3239 le16toh(sie->task_tag));
3240 DNPRINTF(MPII_D_CMD, "%s: bidirectional_transfer_count: 0x%08x\n",
3241 DEVNAME(sc), le32toh(sie->bidirectional_transfer_count));
3242
3243 xs->status = map_scsi_status(sie->scsi_status);
3244
3245 switch (le16toh(sie->ioc_status) & MPII_IOCSTATUS_MASK) {
3246 case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN:
3247 switch(sie->scsi_status) {
3248 case MPII_SCSIIO_STATUS_CHECK_COND:
3249 xs->error = XS_SENSE;
3250 /* FALLTHROUGH */
3251 case MPII_SCSIIO_STATUS_GOOD:
3252 xs->resid = xs->datalen - le32toh(sie->transfer_count);
3253 break;
3254 default:
3255 xs->error = XS_DRIVER_STUFFUP;
3256 break;
3257 }
3258 break;
3259
3260 case MPII_IOCSTATUS_SUCCESS:
3261 case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR:
3262 switch (sie->scsi_status) {
3263 case MPII_SCSIIO_STATUS_GOOD:
3264 xs->resid = 0;
3265 break;
3266
3267 case MPII_SCSIIO_STATUS_CHECK_COND:
3268 xs->resid = 0;
3269 xs->error = XS_SENSE;
3270 break;
3271
3272 case MPII_SCSIIO_STATUS_BUSY:
3273 case MPII_SCSIIO_STATUS_TASK_SET_FULL:
3274 xs->error = XS_BUSY;
3275 break;
3276
3277 default:
3278 xs->error = XS_DRIVER_STUFFUP;
3279 }
3280 break;
3281
3282 case MPII_IOCSTATUS_BUSY:
3283 case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES:
3284 xs->error = XS_BUSY;
3285 break;
3286
3287 case MPII_IOCSTATUS_SCSI_IOC_TERMINATED:
3288 case MPII_IOCSTATUS_SCSI_TASK_TERMINATED:
3289 xs->error = timeout ? XS_TIMEOUT : XS_RESET;
3290 break;
3291
3292 case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
3293 case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3294 xs->error = XS_SELTIMEOUT;
3295 break;
3296
3297 default:
3298 xs->error = XS_DRIVER_STUFFUP;
3299 break;
3300 }
3301
3302 sense = (struct scsi_sense_data *)((uintptr_t)ccb->ccb_cmd +
3303 sc->sc_request_size - sizeof(*sense));
3304 if (sie->scsi_state & MPII_SCSIIO_STATE_AUTOSENSE_VALID)
3305 memcpy(&xs->sense, sense, sizeof(xs->sense));
3306
3307 mpii_push_reply(sc, ccb->ccb_rcb);
3308
3309 done:
3310 mpii_put_ccb(sc, ccb);
3311
3312 DNPRINTF(MPII_D_CMD, "%s: xs err: %d status: %#x len: %d resid: %d\n",
3313 DEVNAME(sc), xs->error, xs->status, xs->datalen, xs->resid);
3314
3315 scsipi_done(xs);
3316 }
3317
3318 #if 0
/*
 * Per-target ioctl entry point (disabled: this still uses the OpenBSD
 * scsi_link interface and sits under #if 0 for reference).  Routes
 * cache get/set ioctls on RAID volumes to mpii_ioctl_cache() and hands
 * everything else to the controller-level handler, if one is set.
 */
int
mpii_scsi_ioctl(struct scsi_link *link, u_long cmd, void *addr, int flag)
{
	struct mpii_softc	*sc = (struct mpii_softc *)link->adapter_softc;
	struct mpii_device	*dev = sc->sc_devs[link->target];

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_scsi_ioctl\n", DEVNAME(sc));

	switch (cmd) {
	case DIOCGCACHE:
	case DIOCSCACHE:
		/* cache control only makes sense on RAID volumes */
		if (dev != NULL && ISSET(dev->flags, MPII_DF_VOLUME)) {
			return (mpii_ioctl_cache(link, cmd,
			    (struct dk_cache *)addr));
		}
		break;

	default:
		if (sc->sc_ioctl)
			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));

		break;
	}

	return (ENOTTY);
}
3345
/*
 * DIOCGCACHE/DIOCSCACHE handler for RAID volumes (disabled together
 * with mpii_scsi_ioctl above).  Reads the cache setting from RAID
 * volume page 0; for DIOCSCACHE it issues a RAID action request to
 * toggle the volume write cache and verifies the result.
 */
int
mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
{
	struct mpii_softc *sc = (struct mpii_softc *)link->adapter_softc;
	struct mpii_device *dev = sc->sc_devs[link->target];
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_msg_raid_action_request *req;
	struct mpii_msg_raid_action_reply *rep;
	struct mpii_cfg_hdr hdr;
	struct mpii_ccb	*ccb;
	u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;
	size_t pagelen;
	int rv = 0;
	int enabled;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    addr, MPII_PG_POLL, &hdr) != 0)
		return (EINVAL);

	/* page_length is in units of 32-bit words */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (vpg == NULL)
		return (ENOMEM);

	if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1,
	    vpg, pagelen) != 0) {
		rv = EINVAL;
		goto done;
	}

	/* current write-cache state as reported by the firmware */
	enabled = ((le16toh(vpg->volume_settings) &
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) ==
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0;

	if (cmd == DIOCGCACHE) {
		dc->wrcache = enabled;
		dc->rdcache = 0;
		goto done;
	} /* else DIOCSCACHE */

	/* the firmware exposes no read-cache knob */
	if (dc->rdcache) {
		rv = EOPNOTSUPP;
		goto done;
	}

	/* nothing to do if the requested state is the current one */
	if (((dc->wrcache) ? 1 : 0) == enabled)
		goto done;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_POLL);
	if (ccb == NULL) {
		rv = ENOMEM;
		goto done;
	}

	ccb->ccb_done = mpii_empty_done;

	req = ccb->ccb_cmd;
	memset(req, 0, sizeof(*req));
	req->function = MPII_FUNCTION_RAID_ACTION;
	req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE;
	req->vol_dev_handle = htole16(dev->dev_handle);
	req->action_data = htole32(dc->wrcache ?
	    MPII_RAID_VOL_WRITE_CACHE_ENABLE :
	    MPII_RAID_VOL_WRITE_CACHE_DISABLE);

	if (mpii_poll(sc, ccb) != 0) {
		/*
		 * NOTE(review): the ccb is not returned to the iopool on
		 * this error path — looks like a leak; confirm before
		 * this code is ever enabled.
		 */
		rv = EIO;
		goto done;
	}

	if (ccb->ccb_rcb != NULL) {
		rep = ccb->ccb_rcb->rcb_reply;
		/*
		 * NOTE(review): ioc_status is compared without le16toh
		 * here, unlike elsewhere in the file — verify endianness
		 * handling if this code is ever enabled.
		 */
		if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) ||
		    ((rep->action_data[0] &
		     MPII_RAID_VOL_WRITE_CACHE_MASK) !=
		    (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE :
		     MPII_RAID_VOL_WRITE_CACHE_DISABLE)))
			rv = EINVAL;
		mpii_push_reply(sc, ccb->ccb_rcb);
	}

	scsi_io_put(&sc->sc_iopool, ccb);

done:
	free(vpg, M_TEMP);
	return (rv);
}
3433 #endif /* 0 */
3434
3435 #if NBIO > 0
3436 static int
3437 mpii_ioctl(device_t dev, u_long cmd, void *addr)
3438 {
3439 struct mpii_softc *sc = device_private(dev);
3440 int error = 0;
3441
3442 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc));
3443
3444 switch (cmd) {
3445 case BIOCINQ:
3446 DNPRINTF(MPII_D_IOCTL, "inq\n");
3447 error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr);
3448 break;
3449 case BIOCVOL:
3450 DNPRINTF(MPII_D_IOCTL, "vol\n");
3451 error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr);
3452 break;
3453 case BIOCDISK:
3454 DNPRINTF(MPII_D_IOCTL, "disk\n");
3455 error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr);
3456 break;
3457 default:
3458 DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n");
3459 error = ENOTTY;
3460 }
3461
3462 return (error);
3463 }
3464
3465 static int
3466 mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi)
3467 {
3468 int i;
3469
3470 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc));
3471
3472 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3473 mutex_enter(&sc->sc_devs_mtx);
3474 for (i = 0; i < sc->sc_max_devices; i++)
3475 if (sc->sc_devs[i] &&
3476 ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME))
3477 bi->bi_novol++;
3478 mutex_exit(&sc->sc_devs_mtx);
3479 return (0);
3480 }
3481
/*
 * BIOCVOL handler: translate RAID volume page 0 for volume bv->bv_volid
 * into bioc_vol fields (status, RAID level, disk count including hot
 * spares, and size).
 */
static int
mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0	*vpg;
	struct mpii_cfg_hdr		hdr;
	struct mpii_device		*dev;
	size_t				pagelen;
	u_int16_t			volh;
	int				rv, hcnt = 0;
	int				percent;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n",
	    DEVNAME(sc), bv->bv_volid);

	/* snapshot handle and resync progress under the devs lock */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		return (ENODEV);
	}
	volh = dev->dev_handle;
	percent = dev->percent;
	mutex_exit(&sc->sc_devs_mtx);

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in units of 32-bit words */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP);
		return (EINVAL);
	}

	/* map the firmware volume state onto bio(4) status codes */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		if (ISSET(le32toh(vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) {
			/* rebuilding: report the cached resync progress */
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = percent;
		} else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	/* map the firmware volume type onto a numeric RAID level */
	switch (vpg->volume_type) {
	case MPII_CFG_RAID_VOL_0_TYPE_RAID0:
		bv->bv_level = 0;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1:
		bv->bv_level = 1;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1E:
	case MPII_CFG_RAID_VOL_0_TYPE_RAID10:
		bv->bv_level = 10;
		break;
	default:
		bv->bv_level = -1;
	}

	/* count hot spares in this volume's pool (bd == NULL: count only) */
	if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) {
		free(vpg, M_TEMP);
		return (rv);
	}

	bv->bv_nodisk = vpg->num_phys_disks + hcnt;

	bv->bv_size = le64toh(vpg->max_lba) * le16toh(vpg->block_size);

	free(vpg, M_TEMP);
	return (0);
}
3580
3581 static int
3582 mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd)
3583 {
3584 struct mpii_cfg_raid_vol_pg0 *vpg;
3585 struct mpii_cfg_raid_vol_pg0_physdisk *pd;
3586 struct mpii_cfg_hdr hdr;
3587 struct mpii_device *dev;
3588 size_t pagelen;
3589 u_int16_t volh;
3590 u_int8_t dn;
3591
3592 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n",
3593 DEVNAME(sc), bd->bd_volid, bd->bd_diskid);
3594
3595 mutex_enter(&sc->sc_devs_mtx);
3596 if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL) {
3597 mutex_exit(&sc->sc_devs_mtx);
3598 return (ENODEV);
3599 }
3600 volh = dev->dev_handle;
3601 mutex_exit(&sc->sc_devs_mtx);
3602
3603 if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3604 MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
3605 printf("%s: unable to fetch header for raid volume page 0\n",
3606 DEVNAME(sc));
3607 return (EINVAL);
3608 }
3609
3610 pagelen = hdr.page_length * 4;
3611 vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
3612 if (vpg == NULL) {
3613 printf("%s: unable to allocate space for raid "
3614 "volume page 0\n", DEVNAME(sc));
3615 return (ENOMEM);
3616 }
3617
3618 if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
3619 &hdr, 1, vpg, pagelen) != 0) {
3620 printf("%s: unable to fetch raid volume page 0\n",
3621 DEVNAME(sc));
3622 free(vpg, M_TEMP);
3623 return (EINVAL);
3624 }
3625
3626 if (bd->bd_diskid >= vpg->num_phys_disks) {
3627 int nvdsk = vpg->num_phys_disks;
3628 int hsmap = vpg->hot_spare_pool;
3629
3630 free(vpg, M_TEMP);
3631 return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL));
3632 }
3633
3634 pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) +
3635 bd->bd_diskid;
3636 dn = pd->phys_disk_num;
3637
3638 free(vpg, M_TEMP);
3639 return (mpii_bio_disk(sc, bd, dn));
3640 }
3641
3642 static int
3643 mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk,
3644 int hsmap, int *hscnt)
3645 {
3646 struct mpii_cfg_raid_config_pg0 *cpg;
3647 struct mpii_raid_config_element *el;
3648 struct mpii_ecfg_hdr ehdr;
3649 size_t pagelen;
3650 int i, nhs = 0;
3651
3652 if (bd) {
3653 DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc),
3654 bd->bd_diskid - nvdsk);
3655 } else {
3656 DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc));
3657 }
3658
3659 if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG,
3660 0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED,
3661 &ehdr) != 0) {
3662 printf("%s: unable to fetch header for raid config page 0\n",
3663 DEVNAME(sc));
3664 return (EINVAL);
3665 }
3666
3667 pagelen = le16toh(ehdr.ext_page_length) * 4;
3668 cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
3669 if (cpg == NULL) {
3670 printf("%s: unable to allocate space for raid config page 0\n",
3671 DEVNAME(sc));
3672 return (ENOMEM);
3673 }
3674
3675 if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG,
3676 MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) {
3677 printf("%s: unable to fetch raid config page 0\n",
3678 DEVNAME(sc));
3679 free(cpg, M_TEMP);
3680 return (EINVAL);
3681 }
3682
3683 el = (struct mpii_raid_config_element *)(cpg + 1);
3684 for (i = 0; i < cpg->num_elements; i++, el++) {
3685 if (ISSET(le16toh(el->element_flags),
3686 MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) &&
3687 el->hot_spare_pool == hsmap) {
3688 /*
3689 * diskid comparison is based on the idea that all
3690 * disks are counted by the bio(4) in sequence, thus
3691 * substracting the number of disks in the volume
3692 * from the diskid yields us a "relative" hotspare
3693 * number, which is good enough for us.
3694 */
3695 if (bd != NULL && bd->bd_diskid == nhs + nvdsk) {
3696 u_int8_t dn = el->phys_disk_num;
3697
3698 free(cpg, M_TEMP);
3699 return (mpii_bio_disk(sc, bd, dn));
3700 }
3701 nhs++;
3702 }
3703 }
3704
3705 if (hscnt)
3706 *hscnt = nhs;
3707
3708 free(cpg, M_TEMP);
3709 return (0);
3710 }
3711
3712 static int
3713 mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn)
3714 {
3715 struct mpii_cfg_raid_physdisk_pg0 *ppg;
3716 struct mpii_cfg_hdr hdr;
3717 struct mpii_device *dev;
3718 int len;
3719
3720 DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc),
3721 bd->bd_diskid);
3722
3723 ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_ZERO);
3724 if (ppg == NULL) {
3725 printf("%s: unable to allocate space for raid physical disk "
3726 "page 0\n", DEVNAME(sc));
3727 return (ENOMEM);
3728 }
3729
3730 hdr.page_version = 0;
3731 hdr.page_length = sizeof(*ppg) / 4;
3732 hdr.page_number = 0;
3733 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD;
3734
3735 if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0,
3736 &hdr, 1, ppg, sizeof(*ppg)) != 0) {
3737 printf("%s: unable to fetch raid drive page 0\n",
3738 DEVNAME(sc));
3739 free(ppg, M_TEMP);
3740 return (EINVAL);
3741 }
3742
3743 bd->bd_target = ppg->phys_disk_num;
3744
3745 mutex_enter(&sc->sc_devs_mtx);
3746 if ((dev = mpii_find_dev(sc, le16toh(ppg->dev_handle))) == NULL) {
3747 mutex_exit(&sc->sc_devs_mtx);
3748 bd->bd_status = BIOC_SDINVALID;
3749 free(ppg, M_TEMP);
3750 return (0);
3751 }
3752 mutex_exit(&sc->sc_devs_mtx);
3753
3754 switch (ppg->phys_disk_state) {
3755 case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE:
3756 case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL:
3757 bd->bd_status = BIOC_SDONLINE;
3758 break;
3759 case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
3760 if (ppg->offline_reason ==
3761 MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED ||
3762 ppg->offline_reason ==
3763 MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ)
3764 bd->bd_status = BIOC_SDFAILED;
3765 else
3766 bd->bd_status = BIOC_SDOFFLINE;
3767 break;
3768 case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED:
3769 bd->bd_status = BIOC_SDFAILED;
3770 break;
3771 case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING:
3772 bd->bd_status = BIOC_SDREBUILD;
3773 break;
3774 case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE:
3775 bd->bd_status = BIOC_SDHOTSPARE;
3776 break;
3777 case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED:
3778 bd->bd_status = BIOC_SDUNUSED;
3779 break;
3780 case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE:
3781 default:
3782 bd->bd_status = BIOC_SDINVALID;
3783 break;
3784 }
3785
3786 bd->bd_size = le64toh(ppg->dev_max_lba) * le16toh(ppg->block_size);
3787
3788 strnvisx(bd->bd_vendor, sizeof(bd->bd_vendor),
3789 ppg->vendor_id, sizeof(ppg->vendor_id),
3790 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
3791 len = strlen(bd->bd_vendor);
3792 bd->bd_vendor[len] = ' ';
3793 strnvisx(&bd->bd_vendor[len + 1], sizeof(ppg->vendor_id) - len - 1,
3794 ppg->product_id, sizeof(ppg->product_id),
3795 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
3796 strnvisx(bd->bd_serial, sizeof(bd->bd_serial),
3797 ppg->serial, sizeof(ppg->serial), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
3798
3799 free(ppg, M_TEMP);
3800 return (0);
3801 }
3802
3803 static struct mpii_device *
3804 mpii_find_vol(struct mpii_softc *sc, int volid)
3805 {
3806 struct mpii_device *dev = NULL;
3807
3808 KASSERT(mutex_owned(&sc->sc_devs_mtx));
3809 if (sc->sc_vd_id_low + volid >= sc->sc_max_devices)
3810 return (NULL);
3811 dev = sc->sc_devs[sc->sc_vd_id_low + volid];
3812 if (dev && ISSET(dev->flags, MPII_DF_VOLUME))
3813 return (dev);
3814 return (NULL);
3815 }
3816
/*
 * Lightweight version of mpii_ioctl_vol(): reports only the volume
 * state, using polled configuration requests.
 */
static int
mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0	*vpg;
	struct mpii_cfg_hdr		hdr;
	struct mpii_device		*dev = NULL;
	size_t				pagelen;
	u_int16_t			volh;

	/* translate the bio volume id into a device handle */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		return (ENODEV);
	}
	volh = dev->dev_handle;
	mutex_exit(&sc->sc_devs_mtx);

	/* fetch the volume page header using polled config requests */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, MPII_PG_POLL, &hdr) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in units of 32-bit words */
	pagelen = hdr.page_length * 4;
	/*
	 * NOTE(review): despite the "non-sleeping" billing in the comment
	 * above, this allocates with M_WAITOK (which may sleep) — confirm
	 * the sysmon refresh path can tolerate that.
	 */
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh,
	    MPII_PG_POLL, &hdr, 1, vpg, pagelen) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume "
		    "page 0\n", DEVNAME(sc));
		free(vpg, M_TEMP);
		return (EINVAL);
	}

	/* map the firmware volume state onto bio(4) status codes */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		if (ISSET(le32toh(vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			bv->bv_status = BIOC_SVREBUILD;
		else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	free(vpg, M_TEMP);
	return (0);
}
3881
3882 static int
3883 mpii_create_sensors(struct mpii_softc *sc)
3884 {
3885 int i, rv;
3886
3887 DNPRINTF(MPII_D_MISC, "%s: mpii_create_sensors(%d)\n",
3888 DEVNAME(sc), sc->sc_max_volumes);
3889 sc->sc_sme = sysmon_envsys_create();
3890 sc->sc_sensors = malloc(sizeof(envsys_data_t) * sc->sc_max_volumes,
3891 M_DEVBUF, M_WAITOK | M_ZERO);
3892
3893 for (i = 0; i < sc->sc_max_volumes; i++) {
3894 sc->sc_sensors[i].units = ENVSYS_DRIVE;
3895 sc->sc_sensors[i].state = ENVSYS_SINVALID;
3896 sc->sc_sensors[i].value_cur = ENVSYS_DRIVE_EMPTY;
3897 sc->sc_sensors[i].flags |= ENVSYS_FMONSTCHANGED;
3898
3899 /* logical drives */
3900 snprintf(sc->sc_sensors[i].desc,
3901 sizeof(sc->sc_sensors[i].desc), "%s:%d",
3902 DEVNAME(sc), i);
3903 if ((rv = sysmon_envsys_sensor_attach(sc->sc_sme,
3904 &sc->sc_sensors[i])) != 0) {
3905 aprint_error_dev(sc->sc_dev,
3906 "unable to attach sensor (rv = %d)\n", rv);
3907 goto out;
3908 }
3909 }
3910 sc->sc_sme->sme_name = DEVNAME(sc);
3911 sc->sc_sme->sme_cookie = sc;
3912 sc->sc_sme->sme_refresh = mpii_refresh_sensors;
3913
3914 rv = sysmon_envsys_register(sc->sc_sme);
3915 if (rv != 0) {
3916 aprint_error_dev(sc->sc_dev,
3917 "unable to register with sysmon (rv = %d)\n", rv);
3918 goto out;
3919 }
3920 return 0;
3921
3922 out:
3923 free(sc->sc_sensors, M_DEVBUF);
3924 sysmon_envsys_destroy(sc->sc_sme);
3925 sc->sc_sme = NULL;
3926 return 1;
3927 }
3928
3929 static int
3930 mpii_destroy_sensors(struct mpii_softc *sc)
3931 {
3932 if (sc->sc_sme == NULL)
3933 return 0;
3934 sysmon_envsys_unregister(sc->sc_sme);
3935 sc->sc_sme = NULL;
3936 free(sc->sc_sensors, M_DEVBUF);
3937 return 0;
3938
3939 }
3940
3941 static void
3942 mpii_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
3943 {
3944 struct mpii_softc *sc = sme->sme_cookie;
3945 struct bioc_vol bv;
3946
3947 memset(&bv, 0, sizeof(bv));
3948 bv.bv_volid = edata->sensor;
3949 if (mpii_bio_volstate(sc, &bv))
3950 bv.bv_status = BIOC_SVINVALID;
3951 bio_vol_to_envsys(edata, &bv);
3952 }
3953 #endif /* NBIO > 0 */
3954