mpii.c revision 1.24 1 /* $NetBSD: mpii.c,v 1.24 2019/11/28 17:09:10 maxv Exp $ */
2 /* $OpenBSD: mpii.c,v 1.115 2018/08/14 05:22:21 jmatthew Exp $ */
3 /*
4 * Copyright (c) 2010, 2012 Mike Belopuhov
5 * Copyright (c) 2009 James Giannoules
6 * Copyright (c) 2005 - 2010 David Gwynne <dlg (at) openbsd.org>
7 * Copyright (c) 2005 - 2010 Marco Peereboom <marco (at) openbsd.org>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22 #include <sys/cdefs.h>
23 __KERNEL_RCSID(0, "$NetBSD: mpii.c,v 1.24 2019/11/28 17:09:10 maxv Exp $");
24
25 #include "bio.h"
26
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/device.h>
31 #include <sys/ioctl.h>
32 #include <sys/malloc.h>
33 #include <sys/kernel.h>
34 #include <sys/mutex.h>
35 #include <sys/condvar.h>
36 #include <sys/dkio.h>
37 #include <sys/tree.h>
38
39 #include <dev/pci/pcireg.h>
40 #include <dev/pci/pcivar.h>
41 #include <dev/pci/pcidevs.h>
42
43 #include <dev/scsipi/scsipi_all.h>
44 #include <dev/scsipi/scsi_all.h>
45 #include <dev/scsipi/scsiconf.h>
46
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #include <dev/sysmon/sysmonvar.h>
50 #include <sys/envsys.h>
51 #endif
52
53 #include <dev/pci/mpiireg.h>
54
/*
 * Debug support: define MPII_DEBUG and set bits in mpii_debug (or uncomment
 * the desired MPII_D_* lines below) to enable DNPRINTF output per subsystem.
 */
// #define MPII_DEBUG
#ifdef MPII_DEBUG
#define DPRINTF(x...)		do { if (mpii_debug) printf(x); } while(0)
#define DNPRINTF(n,x...)	do { if (mpii_debug & (n)) printf(x); } while(0)
#define MPII_D_CMD		(0x0001)
#define MPII_D_INTR		(0x0002)
#define MPII_D_MISC		(0x0004)
#define MPII_D_DMA		(0x0008)
#define MPII_D_IOCTL		(0x0010)
#define MPII_D_RW		(0x0020)
#define MPII_D_MEM		(0x0040)
#define MPII_D_CCB		(0x0080)
#define MPII_D_PPR		(0x0100)
#define MPII_D_RAID		(0x0200)
#define MPII_D_EVT		(0x0400)
#define MPII_D_CFG		(0x0800)
#define MPII_D_MAP		(0x1000)

u_int32_t  mpii_debug = 0
//		| MPII_D_CMD
//		| MPII_D_INTR
//		| MPII_D_MISC
//		| MPII_D_DMA
//		| MPII_D_IOCTL
//		| MPII_D_RW
//		| MPII_D_MEM
//		| MPII_D_CCB
//		| MPII_D_PPR
//		| MPII_D_RAID
//		| MPII_D_EVT
//		| MPII_D_CFG
//		| MPII_D_MAP
	;
#else
#define DPRINTF(x...)
#define DNPRINTF(n,x...)
#endif

/* size of one request frame and number of request credits (ccbs) */
#define MPII_REQUEST_SIZE	(512)
#define MPII_REQUEST_CREDIT	(128)
95
/*
 * One bus_dma(9) allocation: the DMA map, its single backing segment,
 * the allocation size and the kernel virtual address of the memory.
 */
struct mpii_dmamem {
	bus_dmamap_t		mdm_map;	/* DMA map for the memory */
	bus_dma_segment_t	mdm_seg;	/* backing physical segment */
	size_t			mdm_size;	/* size in bytes */
	void			*mdm_kva;	/* kernel virtual address */
};
/* accessors: DMA map, device (bus) address of segment 0, and kva */
#define MPII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
#define MPII_DMA_DVA(_mdm) ((uint64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MPII_DMA_KVA(_mdm) ((_mdm)->mdm_kva)
105
struct mpii_softc;

/*
 * Reply control block: tracks one reply frame shared with the IOC,
 * by both its kernel virtual address and its device (DMA) address.
 */
struct mpii_rcb {
	SIMPLEQ_ENTRY(mpii_rcb)	rcb_link;	/* linkage on reply lists */
	void			*rcb_reply;	/* kva of the reply frame */
	u_int32_t		rcb_reply_dva;	/* device address of frame */
};

SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb);
115
/*
 * One target known to the controller (physical disk, volume, etc.).
 * Indexed by slot in sc_devs and looked up by dev_handle.
 */
struct mpii_device {
	int			flags;		/* MPII_DF_* state bits */
#define MPII_DF_ATTACH		(0x0001)
#define MPII_DF_DETACH		(0x0002)
#define MPII_DF_HIDDEN		(0x0004)
#define MPII_DF_UNUSED		(0x0008)
#define MPII_DF_VOLUME		(0x0010)
#define MPII_DF_VOLUME_DISK	(0x0020)
#define MPII_DF_HOT_SPARE	(0x0040)
	short			slot;		/* index into sc_devs */
	short			percent;	/* resync progress (volumes) */
	u_int16_t		dev_handle;	/* IOC device handle */
	u_int16_t		enclosure;	/* enclosure handle */
	u_int16_t		expander;	/* expander handle */
	u_int8_t		phy_num;	/* attached phy number */
	u_int8_t		physical_port;	/* physical port number */
};
133
/*
 * Command control block: one outstanding request to the IOC.  The request
 * frame lives in the sc_requests DMA memory at ccb_offset; ccb_smid is the
 * system message id the IOC uses to refer back to this command.
 */
struct mpii_ccb {
	struct mpii_softc	*ccb_sc;	/* owning softc */

	void *			ccb_cookie;	/* caller context (e.g. xs) */
	kmutex_t		ccb_mtx;	/* protects ccb_cv wait */
	kcondvar_t		ccb_cv;		/* signalled on completion */

	bus_dmamap_t		ccb_dmamap;	/* data transfer map */

	bus_addr_t		ccb_offset;	/* offset into sc_requests */
	void			*ccb_cmd;	/* kva of request frame */
	bus_addr_t		ccb_cmd_dva;	/* device addr of frame */
	u_int16_t		ccb_dev_handle;	/* target device handle */
	u_int16_t		ccb_smid;	/* system message id */

	/* lifecycle state, advanced by submit/interrupt/timeout paths */
	volatile enum {
		MPII_CCB_FREE,
		MPII_CCB_READY,
		MPII_CCB_QUEUED,
		MPII_CCB_TIMEOUT
	}			ccb_state;

	void			(*ccb_done)(struct mpii_ccb *);	/* completion */
	struct mpii_rcb		*ccb_rcb;	/* reply frame, if any */

	SIMPLEQ_ENTRY(mpii_ccb)	ccb_link;	/* free/timeout list linkage */
};

SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb);
163
/*
 * Per-controller state.  Covers PCI/interrupt glue, scsipi attachment,
 * IOC limits discovered via iocfacts, the ccb pool, the request/reply
 * DMA areas and the event handling workqueues.
 */
struct mpii_softc {
	device_t		sc_dev;

	/* PCI glue */
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	void			*sc_ih;		/* interrupt handle */
	pci_intr_handle_t	*sc_pihp;

	/* scsipi attachment */
	struct scsipi_adapter	sc_adapt;
	struct scsipi_channel	sc_chan;
	device_t		sc_child;	/* our scsibus */

	int			sc_flags;
#define MPII_F_RAID		(1<<1)		/* IR firmware */
#define MPII_F_SAS3		(1<<2)		/* SAS3: IEEE SGEs */

	/* target map, indexed by slot; protected by sc_devs_mtx */
	struct mpii_device	**sc_devs;
	kmutex_t		sc_devs_mtx;

	/* register access and DMA */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	kmutex_t		sc_req_mtx;	/* request submission */
	kmutex_t		sc_rep_mtx;	/* reply post queue */

	/* sizes/limits negotiated with the IOC (iocfacts) */
	ushort			sc_reply_size;
	ushort			sc_request_size;

	ushort			sc_max_cmds;
	ushort			sc_num_reply_frames;
	u_int			sc_reply_free_qdepth;
	u_int			sc_reply_post_qdepth;

	ushort			sc_chain_sge;	/* index of chain SGE in frame */
	ushort			sc_max_sgl;

	u_int8_t		sc_ioc_event_replay;

	u_int8_t		sc_porttype;
	u_int8_t		sc_max_volumes;
	u_int16_t		sc_max_devices;
	u_int16_t		sc_vd_count;	/* number of RAID volumes */
	u_int16_t		sc_vd_id_low;	/* lowest volume target id */
	u_int16_t		sc_pd_id_start;	/* first physical disk id */
	int			sc_ioc_number;
	u_int8_t		sc_vf_id;

	/* ccb pool; free list protected by sc_ccb_free_mtx */
	struct mpii_ccb		*sc_ccbs;
	struct mpii_ccb_list	sc_ccb_free;
	kmutex_t		sc_ccb_free_mtx;
	kcondvar_t		sc_ccb_free_cv;

	/* timed-out commands, handled from a workqueue */
	struct mpii_ccb_list	sc_ccb_tmos;
	kmutex_t		sc_ssb_tmomtx;
	struct workqueue	*sc_ssb_tmowk;
	struct work		sc_ssb_tmowork;

	struct mpii_dmamem	*sc_requests;	/* request frames */

	struct mpii_dmamem	*sc_replies;	/* reply frames */
	struct mpii_rcb		*sc_rcbs;

	/* reply post queue: IOC writes completions here */
	struct mpii_dmamem	*sc_reply_postq;
	struct mpii_reply_descr	*sc_reply_postq_kva;
	u_int			sc_reply_post_host_index;

	/* reply free queue: host returns consumed reply frames here */
	struct mpii_dmamem	*sc_reply_freeq;
	u_int			sc_reply_free_host_index;
	kmutex_t		sc_reply_free_mtx;

	/* SAS topology events, processed from a workqueue */
	struct mpii_rcb_list	sc_evt_sas_queue;
	kmutex_t		sc_evt_sas_mtx;
	struct workqueue	*sc_evt_sas_wq;
	struct work		sc_evt_sas_work;

	/* events awaiting acknowledgement to the IOC */
	struct mpii_rcb_list	sc_evt_ack_queue;
	kmutex_t		sc_evt_ack_mtx;
	struct workqueue	*sc_evt_ack_wq;
	struct work		sc_evt_ack_work;

	/* envsys sensors for RAID volume status (NBIO > 0) */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
};
250
251 static int mpii_match(device_t, cfdata_t, void *);
252 static void mpii_attach(device_t, device_t, void *);
253 static int mpii_detach(device_t, int);
254 static void mpii_childdetached(device_t, device_t);
255 static int mpii_rescan(device_t, const char *, const int *);
256
257 static int mpii_intr(void *);
258
259 CFATTACH_DECL3_NEW(mpii, sizeof(struct mpii_softc),
260 mpii_match, mpii_attach, mpii_detach, NULL, mpii_rescan,
261 mpii_childdetached, DVF_DETACH_SHUTDOWN);
262
263 static void mpii_scsipi_request(struct scsipi_channel *,
264 scsipi_adapter_req_t, void *);
265 static void mpii_scsi_cmd_done(struct mpii_ccb *);
266
267 static struct mpii_dmamem *
268 mpii_dmamem_alloc(struct mpii_softc *, size_t);
269 static void mpii_dmamem_free(struct mpii_softc *,
270 struct mpii_dmamem *);
271 static int mpii_alloc_ccbs(struct mpii_softc *);
272 static struct mpii_ccb *mpii_get_ccb(struct mpii_softc *);
273 static void mpii_put_ccb(struct mpii_softc *, struct mpii_ccb *);
274 static int mpii_alloc_replies(struct mpii_softc *);
275 static int mpii_alloc_queues(struct mpii_softc *);
276 static void mpii_push_reply(struct mpii_softc *, struct mpii_rcb *);
277 static void mpii_push_replies(struct mpii_softc *);
278
279 static void mpii_scsi_cmd_tmo(void *);
280 static void mpii_scsi_cmd_tmo_handler(struct work *, void *);
281 static void mpii_scsi_cmd_tmo_done(struct mpii_ccb *);
282
283 static int mpii_insert_dev(struct mpii_softc *, struct mpii_device *);
284 static int mpii_remove_dev(struct mpii_softc *, struct mpii_device *);
285 static struct mpii_device *
286 mpii_find_dev(struct mpii_softc *, u_int16_t);
287
288 static void mpii_start(struct mpii_softc *, struct mpii_ccb *);
289 static int mpii_poll(struct mpii_softc *, struct mpii_ccb *);
290 static void mpii_poll_done(struct mpii_ccb *);
291 static struct mpii_rcb *
292 mpii_reply(struct mpii_softc *, struct mpii_reply_descr *);
293
294 static void mpii_wait(struct mpii_softc *, struct mpii_ccb *);
295 static void mpii_wait_done(struct mpii_ccb *);
296
297 static void mpii_init_queues(struct mpii_softc *);
298
299 static int mpii_load_xs(struct mpii_ccb *);
300 static int mpii_load_xs_sas3(struct mpii_ccb *);
301
302 static u_int32_t mpii_read(struct mpii_softc *, bus_size_t);
303 static void mpii_write(struct mpii_softc *, bus_size_t, u_int32_t);
304 static int mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t,
305 u_int32_t);
306 static int mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t,
307 u_int32_t);
308
309 static int mpii_init(struct mpii_softc *);
310 static int mpii_reset_soft(struct mpii_softc *);
311 static int mpii_reset_hard(struct mpii_softc *);
312
313 static int mpii_handshake_send(struct mpii_softc *, void *, size_t);
314 static int mpii_handshake_recv_dword(struct mpii_softc *,
315 u_int32_t *);
316 static int mpii_handshake_recv(struct mpii_softc *, void *, size_t);
317
318 static void mpii_empty_done(struct mpii_ccb *);
319
320 static int mpii_iocinit(struct mpii_softc *);
321 static int mpii_iocfacts(struct mpii_softc *);
322 static int mpii_portfacts(struct mpii_softc *);
323 static int mpii_portenable(struct mpii_softc *);
324 static int mpii_cfg_coalescing(struct mpii_softc *);
325 static int mpii_board_info(struct mpii_softc *);
326 static int mpii_target_map(struct mpii_softc *);
327
328 static int mpii_eventnotify(struct mpii_softc *);
329 static void mpii_eventnotify_done(struct mpii_ccb *);
330 static void mpii_eventack(struct work *, void *);
331 static void mpii_eventack_done(struct mpii_ccb *);
332 static void mpii_event_process(struct mpii_softc *, struct mpii_rcb *);
333 static void mpii_event_done(struct mpii_softc *, struct mpii_rcb *);
334 static void mpii_event_sas(struct mpii_softc *, struct mpii_rcb *);
335 static void mpii_event_sas_work(struct work *, void *);
336 static void mpii_event_raid(struct mpii_softc *,
337 struct mpii_msg_event_reply *);
338 static void mpii_event_discovery(struct mpii_softc *,
339 struct mpii_msg_event_reply *);
340
341 static void mpii_sas_remove_device(struct mpii_softc *, u_int16_t);
342
343 static int mpii_req_cfg_header(struct mpii_softc *, u_int8_t,
344 u_int8_t, u_int32_t, int, void *);
345 static int mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int,
346 void *, int, void *, size_t);
347
348 #if 0
349 int mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
350 #endif
351
352 #if NBIO > 0
353 static int mpii_ioctl(device_t, u_long, void *);
354 static int mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *);
355 static int mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *);
356 static int mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *);
357 static int mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int,
358 int, int *);
359 static int mpii_bio_disk(struct mpii_softc *, struct bioc_disk *,
360 u_int8_t);
361 static struct mpii_device *
362 mpii_find_vol(struct mpii_softc *, int);
363 #ifndef SMALL_KERNEL
364 static int mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *);
365 static int mpii_create_sensors(struct mpii_softc *);
366 static void mpii_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
367 static int mpii_destroy_sensors(struct mpii_softc *);
368 #endif /* SMALL_KERNEL */
369 #endif /* NBIO > 0 */
370
/* device name for diagnostic messages */
#define DEVNAME(s) (device_xname((s)->sc_dev))

/* size of an object in 32-bit dwords */
#define dwordsof(s) (sizeof(s) / sizeof(u_int32_t))

/* doorbell and interrupt-status register accessors */
#define mpii_read_db(s) mpii_read((s), MPII_DOORBELL)
#define mpii_write_db(s, v) mpii_write((s), MPII_DOORBELL, (v))
#define mpii_read_intr(s) mpii_read((s), MPII_INTR_STATUS)
#define mpii_write_intr(s, v) mpii_write((s), MPII_INTR_STATUS, (v))
/* true when the IOC has posted at least one reply */
#define mpii_reply_waiting(s) ((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\
    == MPII_INTR_STATUS_REPLY)

/* update the host indexes of the reply free/post queues */
#define mpii_write_reply_free(s, v) \
    bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
    MPII_REPLY_FREE_HOST_INDEX, (v))
#define mpii_write_reply_post(s, v) \
    bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
    MPII_REPLY_POST_HOST_INDEX, (v))

/* wait for a doorbell interrupt / for the IOC to ack a doorbell write */
#define mpii_wait_db_int(s) mpii_wait_ne((s), MPII_INTR_STATUS, \
    MPII_INTR_STATUS_IOC2SYSDB, 0)
#define mpii_wait_db_ack(s) mpii_wait_eq((s), MPII_INTR_STATUS, \
    MPII_INTR_STATUS_SYS2IOCDB, 0)
393
394 static inline void
395 mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva)
396 {
397 sge->sg_addr_lo = htole32(dva);
398 sge->sg_addr_hi = htole32(dva >> 32);
399 }
400
401 #define MPII_PG_EXTENDED (1<<0)
402 #define MPII_PG_POLL (1<<1)
403 #define MPII_PG_FMT "\020" "\002POLL" "\001EXTENDED"
404
/*
 * PCI IDs of supported LSI/Broadcom SAS2 and SAS3 controllers.
 * The table is terminated by an all-zero entry.
 */
static const struct mpii_pci_product {
	pci_vendor_id_t         mpii_vendor;
	pci_product_id_t        mpii_product;
} mpii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_6 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3408 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3416 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516_1 },
	{ 0,	0}
};
439
440 static int
441 mpii_match(device_t parent, cfdata_t match, void *aux)
442 {
443 struct pci_attach_args *pa = aux;
444 const struct mpii_pci_product *mpii;
445
446 for (mpii = mpii_devices; mpii->mpii_vendor != 0; mpii++) {
447 if (PCI_VENDOR(pa->pa_id) == mpii->mpii_vendor &&
448 PCI_PRODUCT(pa->pa_id) == mpii->mpii_product)
449 return (1);
450 }
451 return (0);
452 }
453
454 static void
455 mpii_attach(device_t parent, device_t self, void *aux)
456 {
457 struct mpii_softc *sc = device_private(self);
458 struct pci_attach_args *pa = aux;
459 pcireg_t memtype;
460 int r;
461 struct mpii_ccb *ccb;
462 struct scsipi_adapter *adapt = &sc->sc_adapt;
463 struct scsipi_channel *chan = &sc->sc_chan;
464 char intrbuf[PCI_INTRSTR_LEN];
465 const char *intrstr;
466
467 pci_aprint_devinfo(pa, NULL);
468
469 sc->sc_pc = pa->pa_pc;
470 sc->sc_tag = pa->pa_tag;
471 sc->sc_dmat = pa->pa_dmat;
472 sc->sc_dev = self;
473
474 mutex_init(&sc->sc_req_mtx, MUTEX_DEFAULT, IPL_BIO);
475 mutex_init(&sc->sc_rep_mtx, MUTEX_DEFAULT, IPL_BIO);
476
477 /* find the appropriate memory base */
478 for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
479 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
480 if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM)
481 break;
482 }
483 if (r >= PCI_MAPREG_END) {
484 aprint_error_dev(self,
485 "unable to locate system interface registers\n");
486 return;
487 }
488
489 if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
490 NULL, &sc->sc_ios) != 0) {
491 aprint_error_dev(self,
492 "unable to map system interface registers\n");
493 return;
494 }
495
496 /* disable the expansion rom */
497 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_MAPREG_ROM,
498 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_MAPREG_ROM) &
499 ~PCI_MAPREG_ROM_ENABLE);
500
501 /* disable interrupts */
502 mpii_write(sc, MPII_INTR_MASK,
503 MPII_INTR_MASK_RESET | MPII_INTR_MASK_REPLY |
504 MPII_INTR_MASK_DOORBELL);
505
506 /* hook up the interrupt */
507 if (pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0)) {
508 aprint_error_dev(self, "unable to map interrupt\n");
509 goto unmap;
510 }
511 intrstr = pci_intr_string(pa->pa_pc, sc->sc_pihp[0],
512 intrbuf, sizeof(intrbuf));
513 pci_intr_setattr(pa->pa_pc, &sc->sc_pihp[0], PCI_INTR_MPSAFE, true);
514 sc->sc_ih = pci_intr_establish_xname(pa->pa_pc, sc->sc_pihp[0], IPL_BIO,
515 mpii_intr, sc, device_xname(self));
516 if (sc->sc_ih == NULL) {
517 aprint_error_dev(self, "couldn't establish interrupt");
518 if (intrstr != NULL)
519 aprint_error(" at %s", intrstr);
520 aprint_error("\n");
521 return;
522 }
523 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
524 aprint_naive("\n");
525
526 if (mpii_iocfacts(sc) != 0) {
527 aprint_error_dev(self, "unable to get iocfacts\n");
528 goto unmap;
529 }
530
531 if (mpii_init(sc) != 0) {
532 aprint_error_dev(self, "unable to initialize ioc\n");
533 goto unmap;
534 }
535
536 if (mpii_alloc_ccbs(sc) != 0) {
537 /* error already printed */
538 goto unmap;
539 }
540
541 if (mpii_alloc_replies(sc) != 0) {
542 aprint_error_dev(self, "unable to allocated reply space\n");
543 goto free_ccbs;
544 }
545
546 if (mpii_alloc_queues(sc) != 0) {
547 aprint_error_dev(self, "unable to allocate reply queues\n");
548 goto free_replies;
549 }
550
551 if (mpii_iocinit(sc) != 0) {
552 aprint_error_dev(self, "unable to send iocinit\n");
553 goto free_queues;
554 }
555
556 if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
557 MPII_DOORBELL_STATE_OPER) != 0) {
558 aprint_error_dev(self, "state: 0x%08x\n",
559 mpii_read_db(sc) & MPII_DOORBELL_STATE);
560 aprint_error_dev(self, "operational state timeout\n");
561 goto free_queues;
562 }
563
564 mpii_push_replies(sc);
565 mpii_init_queues(sc);
566
567 if (mpii_board_info(sc) != 0) {
568 aprint_error_dev(self, "unable to get manufacturing page 0\n");
569 goto free_queues;
570 }
571
572 if (mpii_portfacts(sc) != 0) {
573 aprint_error_dev(self, "unable to get portfacts\n");
574 goto free_queues;
575 }
576
577 if (mpii_target_map(sc) != 0) {
578 aprint_error_dev(self, "unable to setup target mappings\n");
579 goto free_queues;
580 }
581
582 if (mpii_cfg_coalescing(sc) != 0) {
583 aprint_error_dev(self, "unable to configure coalescing\n");
584 goto free_queues;
585 }
586
587 /* XXX bail on unsupported porttype? */
588 if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) ||
589 (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) ||
590 (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_TRI_MODE)) {
591 if (mpii_eventnotify(sc) != 0) {
592 aprint_error_dev(self, "unable to enable events\n");
593 goto free_queues;
594 }
595 }
596
597 mutex_init(&sc->sc_devs_mtx, MUTEX_DEFAULT, IPL_BIO);
598 sc->sc_devs = malloc(sc->sc_max_devices * sizeof(struct mpii_device *),
599 M_DEVBUF, M_WAITOK | M_ZERO);
600
601 if (mpii_portenable(sc) != 0) {
602 aprint_error_dev(self, "unable to enable port\n");
603 goto free_devs;
604 }
605
606 /* we should be good to go now, attach scsibus */
607 memset(adapt, 0, sizeof(*adapt));
608 adapt->adapt_dev = sc->sc_dev;
609 adapt->adapt_nchannels = 1;
610 adapt->adapt_openings = sc->sc_max_cmds - 4;
611 adapt->adapt_max_periph = adapt->adapt_openings;
612 adapt->adapt_request = mpii_scsipi_request;
613 adapt->adapt_minphys = minphys;
614 adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
615
616 memset(chan, 0, sizeof(*chan));
617 chan->chan_adapter = adapt;
618 chan->chan_bustype = &scsi_sas_bustype;
619 chan->chan_channel = 0;
620 chan->chan_flags = 0;
621 chan->chan_nluns = 8;
622 chan->chan_ntargets = sc->sc_max_devices;
623 chan->chan_id = -1;
624
625 mpii_rescan(self, "scsi", NULL);
626
627 /* enable interrupts */
628 mpii_write(sc, MPII_INTR_MASK, MPII_INTR_MASK_DOORBELL
629 | MPII_INTR_MASK_RESET);
630
631 #if NBIO > 0
632 if (ISSET(sc->sc_flags, MPII_F_RAID)) {
633 if (bio_register(sc->sc_dev, mpii_ioctl) != 0)
634 panic("%s: controller registration failed",
635 DEVNAME(sc));
636 if (mpii_create_sensors(sc) != 0)
637 aprint_error_dev(self, "unable to create sensors\n");
638 }
639 #endif
640
641 return;
642
643 free_devs:
644 free(sc->sc_devs, M_DEVBUF);
645 sc->sc_devs = NULL;
646
647 free_queues:
648 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
649 0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
650 mpii_dmamem_free(sc, sc->sc_reply_freeq);
651
652 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
653 0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
654 mpii_dmamem_free(sc, sc->sc_reply_postq);
655
656 free_replies:
657 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
658 0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
659 mpii_dmamem_free(sc, sc->sc_replies);
660
661 free_ccbs:
662 while ((ccb = mpii_get_ccb(sc)) != NULL)
663 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
664 mpii_dmamem_free(sc, sc->sc_requests);
665 free(sc->sc_ccbs, M_DEVBUF);
666
667 unmap:
668 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
669 sc->sc_ios = 0;
670 }
671
/*
 * Detach: tear down children, sensors/bio registration, the interrupt
 * handler, and finally the DMA resources and register mapping allocated
 * in mpii_attach() (in reverse order of allocation).
 */
static int
mpii_detach(device_t self, int flags)
{
	struct mpii_softc *sc = device_private(self);
	int error;
	struct mpii_ccb *ccb;

	/* detach the scsibus first; bail if any child refuses */
	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

#if NBIO > 0
	/*
	 * NOTE(review): these are called unconditionally, although attach
	 * only registers bio/sensors when MPII_F_RAID is set — presumably
	 * both tolerate being called without a prior registration; confirm.
	 */
	mpii_destroy_sensors(sc);
	bio_unregister(sc->sc_dev);
#endif /* NBIO > 0 */

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	/* sc_ios != 0 means attach completed the register mapping */
	if (sc->sc_ios != 0) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
		free(sc->sc_devs, M_DEVBUF);
		sc->sc_devs = NULL;

		/* sync before freeing so no DMA writes are outstanding */
		bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
		    0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
		mpii_dmamem_free(sc, sc->sc_reply_freeq);

		bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
		    0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
		mpii_dmamem_free(sc, sc->sc_reply_postq);

		bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
		    0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
		mpii_dmamem_free(sc, sc->sc_replies);

		/* drain the free list, destroying each ccb's data map */
		while ((ccb = mpii_get_ccb(sc)) != NULL)
			bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
		mpii_dmamem_free(sc, sc->sc_requests);
		free(sc->sc_ccbs, M_DEVBUF);

		sc->sc_ios = 0;
	}

	return (0);
}
718
719 static int
720 mpii_rescan(device_t self, const char *ifattr, const int *locators)
721 {
722 struct mpii_softc *sc = device_private(self);
723
724 if (sc->sc_child != NULL)
725 return 0;
726
727 sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
728 scsiprint, NULL);
729
730 return 0;
731 }
732
733 static void
734 mpii_childdetached(device_t self, device_t child)
735 {
736 struct mpii_softc *sc = device_private(self);
737
738 KASSERT(self == sc->sc_dev);
739 KASSERT(child == sc->sc_child);
740
741 if (child == sc->sc_child)
742 sc->sc_child = NULL;
743 }
744
745
/*
 * Interrupt handler.  Walks the reply post queue under sc_rep_mtx,
 * collecting completed commands and unsolicited events onto local lists,
 * then runs the completion/event handlers after dropping the mutex so
 * they may sleep or take other locks.  Returns 1 if any reply was
 * consumed, 0 otherwise (shared interrupt line).
 */
static int
mpii_intr(void *arg)
{
	struct mpii_rcb_list evts = SIMPLEQ_HEAD_INITIALIZER(evts);
	struct mpii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_softc *sc = arg;
	struct mpii_reply_descr *postq = sc->sc_reply_postq_kva, *rdp;
	struct mpii_ccb *ccb;
	struct mpii_rcb *rcb;
	int smid;
	u_int idx;
	int rv = 0;

	mutex_enter(&sc->sc_rep_mtx);
	/* pick up the IOC's writes to the post queue */
	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	idx = sc->sc_reply_post_host_index;
	for (;;) {
		rdp = &postq[idx];
		/* an unused descriptor marks the end of posted replies */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		smid = le16toh(rdp->smid);
		rcb = mpii_reply(sc, rdp);

		/* smid != 0: completion of one of our requests */
		if (smid) {
			ccb = &sc->sc_ccbs[smid - 1];
			ccb->ccb_state = MPII_CCB_READY;
			ccb->ccb_rcb = rcb;
			SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		} else
			/* smid == 0: unsolicited event notification */
			SIMPLEQ_INSERT_TAIL(&evts, rcb, rcb_link);

		/* the post queue is a ring */
		if (++idx >= sc->sc_reply_post_qdepth)
			idx = 0;

		rv = 1;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* tell the IOC how far we got, only if we consumed anything */
	if (rv)
		mpii_write_reply_post(sc, sc->sc_reply_post_host_index = idx);

	mutex_exit(&sc->sc_rep_mtx);

	if (rv == 0)
		return (0);

	/* run completions and events outside the reply mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		ccb->ccb_done(ccb);
	}
	while ((rcb = SIMPLEQ_FIRST(&evts)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&evts, rcb_link);
		mpii_event_process(sc, rcb);
	}

	return (1);
}
820
/*
 * Build the IEEE SGE list (SAS3 controllers) for a scsipi transfer:
 * load the data buffer into the ccb's DMA map and write one simple SGE
 * per segment after the SCSI IO request frame.  When the list would
 * overflow the request frame, a chain SGE at sc_chain_sge points the IOC
 * at the remaining SGEs, which continue in the same frame allocation.
 * Returns 0 on success, 1 if the DMA map could not be loaded.
 */
static int
mpii_load_xs_sas3(struct mpii_ccb *ccb)
{
	struct mpii_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_cmd;
	struct mpii_ieee_sge *csge, *nsge, *sge;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_ieee_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_flags = MPII_IEEE_SGE_END_OF_LIST;
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	sge = nsge;
	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		/* about to write where the chain SGE must live? chain first */
		if (nsge == csge) {
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((uintptr_t)csge - (uintptr_t)io) / 4;
			csge->sg_flags = MPII_IEEE_SGE_CHAIN_ELEMENT |
			    MPII_IEEE_SGE_ADDR_SYSTEM;
			/* address of the next sge */
			csge->sg_addr = htole64(ccb->ccb_cmd_dva +
			    ((uintptr_t)nsge - (uintptr_t)io));
			csge->sg_len = htole32((dmap->dm_nsegs - i) *
			    sizeof(*sge));
		}

		sge = nsge;
		sge->sg_flags = MPII_IEEE_SGE_ADDR_SYSTEM;
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_flags |= MPII_IEEE_SGE_END_OF_LIST;

	/* prepare the buffer for the device's DMA before submission */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
878
/*
 * Build the classic MPI SGE list (SAS2 controllers) for a scsipi
 * transfer.  Same layout strategy as mpii_load_xs_sas3(): simple 64-bit
 * SGEs follow the request frame, with a chain SGE at sc_chain_sge when
 * the segment list spills past it.  Returns 0 on success, 1 if the DMA
 * map could not be loaded.
 */
static int
mpii_load_xs(struct mpii_ccb *ccb)
{
	struct mpii_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_cmd;
	struct mpii_sge *csge, *nsge, *sge;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int32_t flags;
	u_int16_t len;
	int i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
		    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* safe default starting flags */
	flags = MPII_SGE_FL_TYPE_SIMPLE | MPII_SGE_FL_SIZE_64;
	if (xs->xs_control & XS_CTL_DATA_OUT)
		flags |= MPII_SGE_FL_DIR_OUT;

	sge = nsge;
	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		/* about to write where the chain SGE must live? chain first */
		if (nsge == csge) {
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((uintptr_t)csge - (uintptr_t)io) / 4;
			/* length of the sgl segment we're pointing to */
			len = (dmap->dm_nsegs - i) * sizeof(*sge);
			csge->sg_hdr = htole32(MPII_SGE_FL_TYPE_CHAIN |
			    MPII_SGE_FL_SIZE_64 | len);
			/* address of the next sge */
			mpii_dvatosge(csge, ccb->ccb_cmd_dva +
			    ((uintptr_t)nsge - (uintptr_t)io));
		}

		sge = nsge;
		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
		mpii_dvatosge(sge, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB |
	    MPII_SGE_FL_EOL);

	/* prepare the buffer for the device's DMA before submission */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
944
945 static u_int32_t
946 mpii_read(struct mpii_softc *sc, bus_size_t r)
947 {
948 u_int32_t rv;
949
950 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
951 BUS_SPACE_BARRIER_READ);
952 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
953
954 DNPRINTF(MPII_D_RW, "%s: mpii_read %#lx %#x\n", DEVNAME(sc), r, rv);
955
956 return (rv);
957 }
958
959 static void
960 mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v)
961 {
962 DNPRINTF(MPII_D_RW, "%s: mpii_write %#lx %#x\n", DEVNAME(sc), r, v);
963
964 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
965 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
966 BUS_SPACE_BARRIER_WRITE);
967 }
968
969
970 static int
971 mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
972 u_int32_t target)
973 {
974 int i;
975
976 DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#lx %#x %#x\n", DEVNAME(sc), r,
977 mask, target);
978
979 for (i = 0; i < 15000; i++) {
980 if ((mpii_read(sc, r) & mask) == target)
981 return (0);
982 delay(1000);
983 }
984
985 return (1);
986 }
987
988 static int
989 mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
990 u_int32_t target)
991 {
992 int i;
993
994 DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#lx %#x %#x\n", DEVNAME(sc), r,
995 mask, target);
996
997 for (i = 0; i < 15000; i++) {
998 if ((mpii_read(sc, r) & mask) != target)
999 return (0);
1000 delay(1000);
1001 }
1002
1003 return (1);
1004 }
1005
/*
 * Drive the IOC to the READY state: wait for any in-progress reset to
 * finish, then resolve the current doorbell state, resetting the chip
 * (soft or hard) as needed.  Makes up to five attempts.  Returns 0 once
 * the IOC is ready (or owned by a PCI peer), 1 on failure.
 */
static int
mpii_init(struct mpii_softc *sc)
{
	u_int32_t db;
	int i;

	/* spin until the ioc leaves the reset state */
	if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpii_read_db(sc);
	if ((db & MPII_DOORBELL_WHOINIT) == MPII_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	for (i = 0; i < 5; i++) {
		switch (db & MPII_DOORBELL_STATE) {
		case MPII_DOORBELL_STATE_READY:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPII_DOORBELL_STATE_OPER:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n",
			    DEVNAME(sc));
			/* soft reset preserves less state; prefer it when
			 * the firmware can replay events for us */
			if (sc->sc_ioc_event_replay)
				mpii_reset_soft(sc);
			else
				mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_FAULT:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being "
			    "reset hard\n" , DEVNAME(sc));
			mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_RESET:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
			    MPII_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		/* re-read the state for the next attempt */
		db = mpii_read_db(sc);
	}

	return (1);
}
1063
/*
 * Soft-reset the IOC via the MESSAGE_UNIT_RESET doorbell function.
 * Fails immediately if the doorbell is busy.  Returns 0 on success,
 * 1 on failure/timeout.
 */
static int
mpii_reset_soft(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc));

	/* the doorbell must be free before a new function can be issued */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) {
		return (1);
	}

	mpii_write_db(sc,
	    MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET));

	/* XXX LSI waits 15 sec */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* XXX LSI waits 15 sec */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_READY) != 0)
		return (1);

	/* XXX wait for Sys2IOCDB bit to clear in HIS?? */

	return (0);
}
1089
/*
 * Hard-reset the IOC through the host diagnostic register.  Unlocks
 * the register with the magic write sequence, asserts the adapter
 * reset bit and waits for it to self-clear.  Returns 0 on success,
 * 1 if the diagnostic register could not be enabled.
 */
static int
mpii_reset_hard(struct mpii_softc *sc)
{
	u_int16_t i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc));

	mpii_write_intr(sc, 0);

	/* enable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);

	delay(100);

	if ((mpii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable "
		    "diagnostic read/write\n", DEVNAME(sc));
		return(1);
	}

	/* reset ioc */
	mpii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);

	/* 240 milliseconds */
	delay(240000);


	/* XXX this whole function should be more robust */

	/* XXX read the host diagnostic reg until reset adapter bit clears ? */
	/* up to 300 seconds in 10 ms steps */
	for (i = 0; i < 30000; i++) {
		if ((mpii_read(sc, MPII_HOSTDIAG) &
		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
			break;
		delay(10000);
	}

	/* disable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, 0xff);

	/* XXX what else? */

	DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc));

	return(0);
}
1142
/*
 * Send a request of `dwords' 32-bit words through the doorbell
 * handshake protocol.  The doorbell must be free on entry; each dword
 * written must be acknowledged by the IOC before the next one goes
 * out.  Returns 0 on success, 1 on a busy doorbell or timeout.
 */
static int
mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords)
{
	u_int32_t *query = buf;
	int i;

	/* make sure the doorbell is not in use. */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpii_read_intr(sc) & MPII_INTR_STATUS_IOC2SYSDB)
		mpii_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) |
	    MPII_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. wait for the interrupt and then ack it.
	 */
	if (mpii_wait_db_int(sc) != 0)
		return (1);
	mpii_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell. */
	for (i = 0; i < dwords; i++) {
		mpii_write_db(sc, htole32(query[i]));
		if (mpii_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}
1185
/*
 * Read one dword of a handshake reply from the doorbell.  The IOC
 * hands the data over 16 bits at a time; each half is stored into the
 * destination dword in sequence (assumed to assemble a little-endian
 * value for the caller -- NOTE(review): verify on big-endian hosts)
 * and its interrupt acknowledged.  Returns 0 on success, 1 on timeout.
 */
static int
mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword)
{
	u_int16_t *words = (u_int16_t *)dword;
	int i;

	for (i = 0; i < 2; i++) {
		if (mpii_wait_db_int(sc) != 0)
			return (1);
		words[i] = le16toh(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK);
		mpii_write_intr(sc, 0);
	}

	return (0);
}
1201
/*
 * Receive a handshake reply of up to `dwords' 32-bit words into `buf'.
 * The actual reply length comes from the message header; anything the
 * IOC sends beyond the caller's buffer is drained and discarded.
 * Returns 0 on success, 1 on timeout.
 */
static int
mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords)
{
	struct mpii_msg_reply *reply = buf;
	u_int32_t *dbuf = buf, dummy;
	int i;

	/* get the first dword so we can read the length out of the header. */
	if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %lu reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpii_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpii_wait_db_int(sc) != 0)
		return (1);

	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_INUSE, 0) != 0)
		return (1);

	mpii_write_intr(sc, 0);

	return (0);
}
1244
/*
 * No-op completion callback, used for commands whose reply is consumed
 * synchronously by the issuer (polled/waited requests).
 */
static void
mpii_empty_done(struct mpii_ccb *ccb)
{
	/* nothing to do */
}
1250
/*
 * Issue an IOC_FACTS handshake request and derive the driver's queue
 * depths, request frame size and scatter-gather limits from the reply.
 * Returns 0 on success, 1 on handshake failure or if the reply post
 * queue supported by the IOC is too shallow to be usable.
 */
static int
mpii_iocfacts(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request ifq;
	struct mpii_msg_iocfacts_reply ifp;
	int irs;
	int sge_size;
	u_int qdepth;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc));

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	sc->sc_ioc_number = ifp.ioc_number;
	sc->sc_vf_id = ifp.vf_id;

	sc->sc_max_volumes = ifp.max_volumes;
	sc->sc_max_devices = ifp.max_volumes + le16toh(ifp.max_targets);

	if (ISSET(le32toh(ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		SET(sc->sc_flags, MPII_F_RAID);
	if (ISSET(le32toh(ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		sc->sc_ioc_event_replay = 1;

	/* never ask for more concurrent commands than the IOC credits */
	sc->sc_max_cmds = MIN(le16toh(ifp.request_credit),
	    MPII_REQUEST_CREDIT);

	/* SAS3 and 3.5 controllers have different sgl layouts */
	if (ifp.msg_version_maj == 2 && ((ifp.msg_version_min == 5)
	    || (ifp.msg_version_min == 6)))
		SET(sc->sc_flags, MPII_F_SAS3);

	/*
	 * The host driver must ensure that there is at least one
	 * unused entry in the Reply Free Queue. One way to ensure
	 * that this requirement is met is to never allocate a number
	 * of reply frames that is a multiple of 16.
	 */
	sc->sc_num_reply_frames = sc->sc_max_cmds + 32;
	if (!(sc->sc_num_reply_frames % 16))
		sc->sc_num_reply_frames--;

	/* must be multiple of 16 */
	sc->sc_reply_post_qdepth = sc->sc_max_cmds +
	    sc->sc_num_reply_frames;
	sc->sc_reply_post_qdepth += 16 - (sc->sc_reply_post_qdepth % 16);

	/* clamp to the IOC's limit, scaling commands back down to fit */
	qdepth = le16toh(ifp.max_reply_descriptor_post_queue_depth);
	if (sc->sc_reply_post_qdepth > qdepth) {
		sc->sc_reply_post_qdepth = qdepth;
		if (sc->sc_reply_post_qdepth < 16) {
			printf("%s: RDPQ is too shallow\n", DEVNAME(sc));
			return (1);
		}
		sc->sc_max_cmds = sc->sc_reply_post_qdepth / 2 - 4;
		sc->sc_num_reply_frames = sc->sc_max_cmds + 4;
	}

	/* reply free queue depth rounded up to a multiple of 16 */
	sc->sc_reply_free_qdepth = sc->sc_num_reply_frames +
	    16 - (sc->sc_num_reply_frames % 16);

	/*
	 * Our request frame for an I/O operation looks like this:
	 *
	 * +-------------------+ -.
	 * | mpii_msg_scsi_io  |  |
	 * +-------------------|  |
	 * | mpii_sge          |  |
	 * + - - - - - - - - - +  |
	 * | ...               |  > ioc_request_frame_size
	 * + - - - - - - - - - +  |
	 * | mpii_sge (tail)   |  |
	 * + - - - - - - - - - +  |
	 * | mpii_sge (csge)   |  | --.
	 * + - - - - - - - - - + -'   | chain sge points to the next sge
	 * | mpii_sge          |<-----'
	 * + - - - - - - - - - +
	 * | ...               |
	 * + - - - - - - - - - +
	 * | mpii_sge (tail)   |
	 * +-------------------+
	 * |                   |
	 * ~~~~~~~~~~~~~~~~~~~~~
	 * |                   |
	 * +-------------------+ <- sc_request_size - sizeof(scsi_sense_data)
	 * | scsi_sense_data   |
	 * +-------------------+
	 */

	/* both sizes are in 32-bit words */
	sc->sc_reply_size = ifp.reply_frame_size * 4;
	irs = le16toh(ifp.ioc_request_frame_size) * 4;
	sc->sc_request_size = MPII_REQUEST_SIZE;
	/* make sure we have enough space for scsi sense data */
	if (irs > sc->sc_request_size) {
		sc->sc_request_size = irs + sizeof(struct scsi_sense_data);
		sc->sc_request_size += 16 - (sc->sc_request_size % 16);
	}

	/* SGE layout depends on the controller generation (see above) */
	if (ISSET(sc->sc_flags, MPII_F_SAS3)) {
		sge_size = sizeof(struct mpii_ieee_sge);
	} else {
		sge_size = sizeof(struct mpii_sge);
	}

	/* offset to the chain sge */
	sc->sc_chain_sge = (irs - sizeof(struct mpii_msg_scsi_io)) /
	    sge_size - 1;

	/*
	 * A number of simple scatter-gather elements we can fit into the
	 * request buffer after the I/O command minus the chain element.
	 */
	sc->sc_max_sgl = (sc->sc_request_size -
	    sizeof(struct mpii_msg_scsi_io) - sizeof(struct scsi_sense_data)) /
	    sge_size - 1;

	return (0);
}
1387
/*
 * Issue an IOC_INIT handshake request describing the host's request,
 * reply post and reply free queues (all previously DMA-allocated) to
 * the IOC.  Returns 0 on success, 1 on handshake failure or a
 * non-successful IOC status.
 */
static int
mpii_iocinit(struct mpii_softc *sc)
{
	struct mpii_msg_iocinit_request iiq;
	struct mpii_msg_iocinit_reply iip;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc));

	memset(&iiq, 0, sizeof(iiq));
	memset(&iip, 0, sizeof(iip));

	iiq.function = MPII_FUNCTION_IOC_INIT;
	iiq.whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* XXX JPG do something about vf_id */
	iiq.vf_id = 0;

	/* advertise MPI 2.0 message support */
	iiq.msg_version_maj = 0x02;
	iiq.msg_version_min = 0x00;

	/* XXX JPG ensure compliance with some level and hard-code? */
	iiq.hdr_version_unit = 0x00;
	iiq.hdr_version_dev = 0x00;

	/* frame size is expressed in 32-bit words */
	iiq.system_request_frame_size = htole16(sc->sc_request_size / 4);

	iiq.reply_descriptor_post_queue_depth =
	    htole16(sc->sc_reply_post_qdepth);

	iiq.reply_free_queue_depth = htole16(sc->sc_reply_free_qdepth);

	/* sense buffers live in the same DMA region as the request frames */
	iiq.sense_buffer_address_high =
	    htole32(MPII_DMA_DVA(sc->sc_requests) >> 32);

	iiq.system_reply_address_high =
	    htole32(MPII_DMA_DVA(sc->sc_replies) >> 32);

	iiq.system_request_frame_base_address_lo =
	    htole32(MPII_DMA_DVA(sc->sc_requests));
	iiq.system_request_frame_base_address_hi =
	    htole32(MPII_DMA_DVA(sc->sc_requests) >> 32);

	iiq.reply_descriptor_post_queue_address_lo =
	    htole32(MPII_DMA_DVA(sc->sc_reply_postq));
	iiq.reply_descriptor_post_queue_address_hi =
	    htole32(MPII_DMA_DVA(sc->sc_reply_postq) >> 32);

	iiq.reply_free_queue_address_lo =
	    htole32(MPII_DMA_DVA(sc->sc_reply_freeq));
	iiq.reply_free_queue_address_hi =
	    htole32(MPII_DMA_DVA(sc->sc_reply_freeq) >> 32);

	if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPII_D_MISC, "%s: function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPII_D_MISC, "%s: msg_flags: 0x%02x\n", DEVNAME(sc),
	    iip.msg_flags);
	DNPRINTF(MPII_D_MISC, "%s: vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc),
	    iip.vf_id, iip.vp_id);
	DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    le16toh(iip.ioc_status));
	DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    le32toh(iip.ioc_loginfo));

	/* any loginfo at all is treated as a failure here */
	if (le16toh(iip.ioc_status) != MPII_IOCSTATUS_SUCCESS ||
	    le32toh(iip.ioc_loginfo))
		return (1);

	return (0);
}
1470
1471 static void
1472 mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb)
1473 {
1474 u_int32_t *rfp;
1475 u_int idx;
1476
1477 if (rcb == NULL)
1478 return;
1479
1480 mutex_enter(&sc->sc_reply_free_mtx);
1481 idx = sc->sc_reply_free_host_index;
1482
1483 rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
1484 rfp[idx] = htole32(rcb->rcb_reply_dva);
1485
1486 if (++idx >= sc->sc_reply_free_qdepth)
1487 idx = 0;
1488
1489 mpii_write_reply_free(sc, sc->sc_reply_free_host_index = idx);
1490 mutex_exit(&sc->sc_reply_free_mtx);
1491 }
1492
/*
 * Issue a PORT_FACTS request (polled) and record the port type.
 * Returns 0 on success, 1 on failure; the ccb is always released.
 */
static int
mpii_portfacts(struct mpii_softc *sc)
{
	struct mpii_msg_portfacts_request *pfq;
	struct mpii_msg_portfacts_reply *pfp;
	struct mpii_ccb *ccb;
	int rv = 1;

	DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc));

	ccb = mpii_get_ccb(sc);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n",
		    DEVNAME(sc));
		return (rv);
	}

	ccb->ccb_done = mpii_empty_done;
	pfq = ccb->ccb_cmd;

	memset(pfq, 0, sizeof(*pfq));

	pfq->function = MPII_FUNCTION_PORT_FACTS;
	pfq->chain_offset = 0;
	pfq->msg_flags = 0;
	pfq->port_number = 0;
	pfq->vp_id = 0;
	pfq->vf_id = 0;

	if (mpii_poll(sc, ccb) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n",
		    DEVNAME(sc));
		goto err;
	}

	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n",
		    DEVNAME(sc));
		goto err;
	}

	pfp = ccb->ccb_rcb->rcb_reply;
	sc->sc_porttype = pfp->port_type;

	/* hand the reply frame back to the IOC */
	mpii_push_reply(sc, ccb->ccb_rcb);
	rv = 0;
err:
	mpii_put_ccb(sc, ccb);

	return (rv);
}
1544
/*
 * Workqueue handler: send an EVENT_ACK request for every event reply
 * the IOC flagged as requiring acknowledgement.  Drains the
 * sc_evt_ack_queue that mpii_event_done() fills.
 */
static void
mpii_eventack(struct work *wk, void * cookie)
{
	struct mpii_softc *sc = cookie;
	struct mpii_ccb *ccb;
	struct mpii_rcb *rcb, *next;
	struct mpii_msg_event_reply *enp;
	struct mpii_msg_eventack_request *eaq;

	/* detach the whole queue at once to keep the lock hold time short */
	mutex_enter(&sc->sc_evt_ack_mtx);
	next = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
	mutex_exit(&sc->sc_evt_ack_mtx);

	while (next != NULL) {
		rcb = next;
		next = SIMPLEQ_NEXT(rcb, rcb_link);

		enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

		/*
		 * NOTE(review): mpii_get_ccb() may return NULL and is
		 * dereferenced unconditionally below -- confirm the ccb
		 * pool cannot run dry on this path.
		 */
		ccb = mpii_get_ccb(sc);
		ccb->ccb_done = mpii_eventack_done;
		eaq = ccb->ccb_cmd;

		eaq->function = MPII_FUNCTION_EVENT_ACK;

		/* echo the event and its context back to the IOC */
		eaq->event = enp->event;
		eaq->event_context = enp->event_context;

		/* the reply frame can be recycled before the ack is sent */
		mpii_push_reply(sc, rcb);

		mpii_start(sc, ccb);
	}
}
1579
1580 static void
1581 mpii_eventack_done(struct mpii_ccb *ccb)
1582 {
1583 struct mpii_softc *sc = ccb->ccb_sc;
1584
1585 DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc));
1586
1587 mpii_push_reply(sc, ccb->ccb_rcb);
1588 mpii_put_ccb(sc, ccb);
1589 }
1590
1591 static int
1592 mpii_portenable(struct mpii_softc *sc)
1593 {
1594 struct mpii_msg_portenable_request *peq;
1595 struct mpii_ccb *ccb;
1596
1597 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc));
1598
1599 ccb = mpii_get_ccb(sc);
1600 if (ccb == NULL) {
1601 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n",
1602 DEVNAME(sc));
1603 return (1);
1604 }
1605
1606 ccb->ccb_done = mpii_empty_done;
1607 peq = ccb->ccb_cmd;
1608
1609 peq->function = MPII_FUNCTION_PORT_ENABLE;
1610 peq->vf_id = sc->sc_vf_id;
1611
1612 if (mpii_poll(sc, ccb) != 0) {
1613 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n",
1614 DEVNAME(sc));
1615 return (1);
1616 }
1617
1618 if (ccb->ccb_rcb == NULL) {
1619 DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n",
1620 DEVNAME(sc));
1621 return (1);
1622 }
1623
1624 mpii_push_reply(sc, ccb->ccb_rcb);
1625 mpii_put_ccb(sc, ccb);
1626
1627 return (0);
1628 }
1629
1630 static int
1631 mpii_cfg_coalescing(struct mpii_softc *sc)
1632 {
1633 struct mpii_cfg_hdr hdr;
1634 struct mpii_cfg_ioc_pg1 ipg;
1635
1636 hdr.page_version = 0;
1637 hdr.page_length = sizeof(ipg) / 4;
1638 hdr.page_number = 1;
1639 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
1640 memset(&ipg, 0, sizeof(ipg));
1641 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
1642 sizeof(ipg)) != 0) {
1643 DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1\n"
1644 "page 1\n", DEVNAME(sc));
1645 return (1);
1646 }
1647
1648 if (!ISSET(le32toh(ipg.flags), MPII_CFG_IOC_1_REPLY_COALESCING))
1649 return (0);
1650
1651 /* Disable coalescing */
1652 CLR(ipg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING));
1653 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 0, &ipg,
1654 sizeof(ipg)) != 0) {
1655 DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n",
1656 DEVNAME(sc));
1657 return (1);
1658 }
1659
1660 return (0);
1661 }
1662
/* Mask out (disable reporting of) every event in an event request. */
#define MPII_EVENT_MASKALL(enq) do { \
	enq->event_masks[0] = 0xffffffff; \
	enq->event_masks[1] = 0xffffffff; \
	enq->event_masks[2] = 0xffffffff; \
	enq->event_masks[3] = 0xffffffff; \
} while (0)

/*
 * Clear the mask bit for event `evt' so the IOC will report it.
 * The mask words go to the IOC little-endian, hence the htole32.
 */
#define MPII_EVENT_UNMASK(enq, evt) do { \
	enq->event_masks[evt / 32] &= \
	    htole32(~(1 << (evt % 32))); \
} while (0)
1674
1675 static int
1676 mpii_eventnotify(struct mpii_softc *sc)
1677 {
1678 struct mpii_msg_event_request *enq;
1679 struct mpii_ccb *ccb;
1680 char wkname[15];
1681
1682 ccb = mpii_get_ccb(sc);
1683 if (ccb == NULL) {
1684 DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n",
1685 DEVNAME(sc));
1686 return (1);
1687 }
1688
1689 SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
1690 mutex_init(&sc->sc_evt_sas_mtx, MUTEX_DEFAULT, IPL_BIO);
1691 snprintf(wkname, sizeof(wkname), "%ssas", DEVNAME(sc));
1692 if (workqueue_create(&sc->sc_evt_sas_wq, wkname,
1693 mpii_event_sas_work, sc, PRI_NONE, IPL_BIO, WQ_MPSAFE) != 0) {
1694 mpii_put_ccb(sc, ccb);
1695 aprint_error_dev(sc->sc_dev,
1696 "can't create %s workqueue\n", wkname);
1697 return 1;
1698 }
1699
1700 SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
1701 mutex_init(&sc->sc_evt_ack_mtx, MUTEX_DEFAULT, IPL_BIO);
1702 snprintf(wkname, sizeof(wkname), "%sevt", DEVNAME(sc));
1703 if (workqueue_create(&sc->sc_evt_ack_wq, wkname,
1704 mpii_eventack, sc, PRI_NONE, IPL_BIO, WQ_MPSAFE) != 0) {
1705 mpii_put_ccb(sc, ccb);
1706 aprint_error_dev(sc->sc_dev,
1707 "can't create %s workqueue\n", wkname);
1708 return 1;
1709 }
1710
1711 ccb->ccb_done = mpii_eventnotify_done;
1712 enq = ccb->ccb_cmd;
1713
1714 enq->function = MPII_FUNCTION_EVENT_NOTIFICATION;
1715
1716 /*
1717 * Enable reporting of the following events:
1718 *
1719 * MPII_EVENT_SAS_DISCOVERY
1720 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST
1721 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE
1722 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
1723 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST
1724 * MPII_EVENT_IR_VOLUME
1725 * MPII_EVENT_IR_PHYSICAL_DISK
1726 * MPII_EVENT_IR_OPERATION_STATUS
1727 */
1728
1729 MPII_EVENT_MASKALL(enq);
1730 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY);
1731 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
1732 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE);
1733 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
1734 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST);
1735 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME);
1736 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK);
1737 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS);
1738
1739 mpii_start(sc, ccb);
1740
1741 return (0);
1742 }
1743
1744 static void
1745 mpii_eventnotify_done(struct mpii_ccb *ccb)
1746 {
1747 struct mpii_softc *sc = ccb->ccb_sc;
1748 struct mpii_rcb *rcb = ccb->ccb_rcb;
1749
1750 DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc));
1751
1752 mpii_put_ccb(sc, ccb);
1753 mpii_event_process(sc, rcb);
1754 }
1755
/*
 * Handle an IR_CONFIGURATION_CHANGE_LIST event: create or remove RAID
 * volume devices and mark underlying physical disks as hidden volume
 * members or hot spares.  Foreign configurations are ignored.
 */
static void
mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_ir_cfg_change_list *ccl;
	struct mpii_evt_ir_cfg_element *ce;
	struct mpii_device *dev;
	u_int16_t type;
	int i;

	/* the change list immediately follows the event reply header */
	ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1);
	if (ccl->num_elements == 0)
		return;

	if (ISSET(le32toh(ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN)) {
		/* bail on foreign configurations */
		return;
	}

	/* elements follow the change list header */
	ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);

	for (i = 0; i < ccl->num_elements; i++, ce++) {
		type = (le16toh(ce->element_flags) &
		    MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK);

		switch (type) {
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME:
			switch (ce->reason_code) {
			case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED:
				/* allocate before taking the mutex */
				dev = malloc(sizeof(*dev), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				mutex_enter(&sc->sc_devs_mtx);
				if (mpii_find_dev(sc,
				    le16toh(ce->vol_dev_handle))) {
					mutex_exit(&sc->sc_devs_mtx);
					free(dev, M_DEVBUF);
					printf("%s: device %#x is already "
					    "configured\n", DEVNAME(sc),
					    le16toh(ce->vol_dev_handle));
					break;
				}
				SET(dev->flags, MPII_DF_VOLUME);
				dev->slot = sc->sc_vd_id_low;
				dev->dev_handle = le16toh(ce->vol_dev_handle);
				if (mpii_insert_dev(sc, dev)) {
					mutex_exit(&sc->sc_devs_mtx);
					free(dev, M_DEVBUF);
					break;
				}
				sc->sc_vd_count++;
				mutex_exit(&sc->sc_devs_mtx);
				break;
			case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED:
				mutex_enter(&sc->sc_devs_mtx);
				if (!(dev = mpii_find_dev(sc,
				    le16toh(ce->vol_dev_handle)))) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}
				mpii_remove_dev(sc, dev);
				sc->sc_vd_count--;
				mutex_exit(&sc->sc_devs_mtx);
				break;
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED ||
			    ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				mutex_enter(&sc->sc_devs_mtx);
				if (!(dev = mpii_find_dev(sc,
				    le16toh(ce->phys_disk_dev_handle)))) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}
				/* promoted from a hot spare? */
				CLR(dev->flags, MPII_DF_HOT_SPARE);
				SET(dev->flags, MPII_DF_VOLUME_DISK |
				    MPII_DF_HIDDEN);
				mutex_exit(&sc->sc_devs_mtx);
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				mutex_enter(&sc->sc_devs_mtx);
				if (!(dev = mpii_find_dev(sc,
				    le16toh(ce->phys_disk_dev_handle)))) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}
				SET(dev->flags, MPII_DF_HOT_SPARE |
				    MPII_DF_HIDDEN);
				mutex_exit(&sc->sc_devs_mtx);
			}
			break;
		}
	}
}
1859
/*
 * Handle a SAS_TOPOLOGY_CHANGE_LIST event.  Device additions are
 * processed immediately; removals are deferred to the sc_evt_sas_wq
 * workqueue (mpii_event_sas_work), which may need to sleep during
 * target detach.  The reply is completed here only if nothing was
 * deferred.
 */
static void
mpii_event_sas(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply *enp;
	struct mpii_evt_sas_tcl *tcl;
	struct mpii_evt_phy_entry *pe;
	struct mpii_device *dev;
	int i;
	u_int16_t handle;
	int need_queue = 0;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
	DNPRINTF(MPII_D_EVT, "%s: mpii_event_sas 0x%x\n",
	    DEVNAME(sc), le16toh(enp->event));
	KASSERT(le16toh(enp->event) == MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);

	/* the change list and its phy entries follow the reply header */
	tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
	pe = (struct mpii_evt_phy_entry *)(tcl + 1);

	for (i = 0; i < tcl->num_entries; i++, pe++) {
		DNPRINTF(MPII_D_EVT, "%s: sas change %d stat %d h %d slot %d phy %d enc %d expand %d\n",
		    DEVNAME(sc), i, pe->phy_status,
		    le16toh(pe->dev_handle),
		    sc->sc_pd_id_start + tcl->start_phy_num + i,
		    tcl->start_phy_num + i, le16toh(tcl->enclosure_handle), le16toh(tcl->expander_handle));

		switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
		case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
			handle = le16toh(pe->dev_handle);
			DNPRINTF(MPII_D_EVT, "%s: sas add handle %d\n",
			    DEVNAME(sc), handle);
			/* allocate before taking the devs mutex */
			dev = malloc(sizeof(*dev), M_DEVBUF, M_WAITOK | M_ZERO);
			mutex_enter(&sc->sc_devs_mtx);
			if (mpii_find_dev(sc, handle)) {
				mutex_exit(&sc->sc_devs_mtx);
				free(dev, M_DEVBUF);
				printf("%s: device %#x is already "
				    "configured\n", DEVNAME(sc), handle);
				break;
			}

			dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i;
			dev->dev_handle = handle;
			dev->phy_num = tcl->start_phy_num + i;
			if (tcl->enclosure_handle)
				dev->physical_port = tcl->physical_port;
			dev->enclosure = le16toh(tcl->enclosure_handle);
			dev->expander = le16toh(tcl->expander_handle);

			if (mpii_insert_dev(sc, dev)) {
				mutex_exit(&sc->sc_devs_mtx);
				free(dev, M_DEVBUF);
				break;
			}
			printf("%s: physical device inserted in slot %d\n",
			    DEVNAME(sc), dev->slot);
			mutex_exit(&sc->sc_devs_mtx);
			break;

		case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
			/* defer to workqueue thread */
			need_queue++;
			break;
		}
	}

	if (need_queue) {
		bool start_wk;
		mutex_enter(&sc->sc_evt_sas_mtx);
		/* only kick the workqueue when the queue was empty */
		start_wk = (SIMPLEQ_FIRST(&sc->sc_evt_sas_queue) == 0);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_sas_queue, rcb, rcb_link);
		if (start_wk) {
			workqueue_enqueue(sc->sc_evt_sas_wq,
			    &sc->sc_evt_sas_work, NULL);
		}
		mutex_exit(&sc->sc_evt_sas_mtx);
	} else
		mpii_event_done(sc, rcb);
}
1939
/*
 * Workqueue handler for deferred SAS topology changes: tears down
 * devices that went missing, detaching their scsipi targets and
 * telling the IOC to forget the device handle.  Runs in thread
 * context so the detach may sleep.
 */
static void
mpii_event_sas_work(struct work *wq, void *xsc)
{
	struct mpii_softc *sc = xsc;
	struct mpii_rcb *rcb, *next;
	struct mpii_msg_event_reply *enp;
	struct mpii_evt_sas_tcl *tcl;
	struct mpii_evt_phy_entry *pe;
	struct mpii_device *dev;
	int i;

	/* detach the whole queue at once to keep the lock hold time short */
	mutex_enter(&sc->sc_evt_sas_mtx);
	next = SIMPLEQ_FIRST(&sc->sc_evt_sas_queue);
	SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
	mutex_exit(&sc->sc_evt_sas_mtx);

	while (next != NULL) {
		rcb = next;
		next = SIMPLEQ_NEXT(rcb, rcb_link);

		enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
		DNPRINTF(MPII_D_EVT, "%s: mpii_event_sas_work 0x%x\n",
		    DEVNAME(sc), le16toh(enp->event));
		KASSERT(le16toh(enp->event) == MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
		tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
		pe = (struct mpii_evt_phy_entry *)(tcl + 1);

		for (i = 0; i < tcl->num_entries; i++, pe++) {
			DNPRINTF(MPII_D_EVT, "%s: sas change %d stat %d h %d slot %d phy %d enc %d expand %d\n",
			    DEVNAME(sc), i, pe->phy_status,
			    le16toh(pe->dev_handle),
			    sc->sc_pd_id_start + tcl->start_phy_num + i,
			    tcl->start_phy_num + i, le16toh(tcl->enclosure_handle), le16toh(tcl->expander_handle));

			switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
			case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
				/* already handled */
				break;

			case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
				mutex_enter(&sc->sc_devs_mtx);
				dev = mpii_find_dev(sc, le16toh(pe->dev_handle));
				if (dev == NULL) {
					mutex_exit(&sc->sc_devs_mtx);
					break;
				}

				printf(
				    "%s: physical device removed from slot %d\n",
				    DEVNAME(sc), dev->slot);
				/* unlink under the lock; dev is freed below */
				mpii_remove_dev(sc, dev);
				mutex_exit(&sc->sc_devs_mtx);
				mpii_sas_remove_device(sc, dev->dev_handle);
				if (!ISSET(dev->flags, MPII_DF_HIDDEN)) {
					scsipi_target_detach(&sc->sc_chan,
					    dev->slot, 0, DETACH_FORCE);
				}

				free(dev, M_DEVBUF);
				break;
			}
		}
		mpii_event_done(sc, rcb);
	}
}
2005
2006 static void
2007 mpii_event_discovery(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
2008 {
2009 struct mpii_evt_sas_discovery *esd =
2010 (struct mpii_evt_sas_discovery *)(enp + 1);
2011
2012 if (esd->reason_code == MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED) {
2013 if (esd->discovery_status != 0) {
2014 printf("%s: sas discovery completed with status %#x\n",
2015 DEVNAME(sc), esd->discovery_status);
2016 }
2017
2018 }
2019 }
2020
/*
 * Dispatch an asynchronous event reply to its handler.  Most events
 * are finished with mpii_event_done() here; SAS topology changes take
 * ownership of the rcb and complete it themselves.
 */
static void
mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply *enp;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc),
	    le16toh(enp->event));

	switch (le16toh(enp->event)) {
	case MPII_EVENT_EVENT_CHANGE:
		/* should be properly ignored */
		break;
	case MPII_EVENT_SAS_DISCOVERY:
		mpii_event_discovery(sc, enp);
		break;
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* mpii_event_sas() owns the rcb from here on */
		mpii_event_sas(sc, rcb);
		return;
	case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_IR_VOLUME: {
		struct mpii_evt_ir_volume *evd =
		    (struct mpii_evt_ir_volume *)(enp + 1);
		struct mpii_device *dev;
#if NBIO > 0
		const char *vol_states[] = {
			BIOC_SVINVALID_S,
			BIOC_SVOFFLINE_S,
			BIOC_SVBUILDING_S,
			BIOC_SVONLINE_S,
			BIOC_SVDEGRADED_S,
			BIOC_SVONLINE_S,
		};
#endif

		/* nothing to report during autoconfiguration */
		if (cold)
			break;
		mutex_enter(&sc->sc_devs_mtx);
		dev = mpii_find_dev(sc, le16toh(evd->vol_dev_handle));
		if (dev == NULL) {
			mutex_exit(&sc->sc_devs_mtx);
			break;
		}
#if NBIO > 0
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED)
			printf("%s: volume %d state changed from %s to %s\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low,
			    vol_states[evd->prev_value],
			    vol_states[evd->new_value]);
#endif
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED &&
		    ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC) &&
		    !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			printf("%s: started resync on a volume %d\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low);
	}
		/* reached only when the device was found above */
		mutex_exit(&sc->sc_devs_mtx);
		break;
	case MPII_EVENT_IR_PHYSICAL_DISK:
		break;
	case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		mpii_event_raid(sc, enp);
		break;
	case MPII_EVENT_IR_OPERATION_STATUS: {
		struct mpii_evt_ir_status *evs =
		    (struct mpii_evt_ir_status *)(enp + 1);
		struct mpii_device *dev;

		mutex_enter(&sc->sc_devs_mtx);
		dev = mpii_find_dev(sc, le16toh(evs->vol_dev_handle));
		if (dev != NULL &&
		    evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC)
			dev->percent = evs->percent;
		mutex_exit(&sc->sc_devs_mtx);
		break;
	}
	default:
		DNPRINTF(MPII_D_EVT, "%s: unhandled event 0x%02x\n",
		    DEVNAME(sc), le16toh(enp->event));
	}

	mpii_event_done(sc, rcb);
}
2108
2109 static void
2110 mpii_event_done(struct mpii_softc *sc, struct mpii_rcb *rcb)
2111 {
2112 struct mpii_msg_event_reply *enp = rcb->rcb_reply;
2113 bool need_start;
2114
2115 if (enp->ack_required) {
2116 mutex_enter(&sc->sc_evt_ack_mtx);
2117 need_start = (SIMPLEQ_FIRST(&sc->sc_evt_ack_queue) == 0);
2118 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
2119 if (need_start)
2120 workqueue_enqueue(sc->sc_evt_ack_wq,
2121 &sc->sc_evt_ack_work, NULL);
2122 mutex_exit(&sc->sc_evt_ack_mtx);
2123 } else
2124 mpii_push_reply(sc, rcb);
2125 }
2126
/*
 * Take a SAS device offline: first issue a target reset so the IOC
 * aborts any commands still in flight for the handle, then tell the
 * IOC to remove the device from its tables.  Both requests are issued
 * synchronously via mpii_wait(); failure to obtain a ccb is silently
 * ignored (best effort).
 */
static void
mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle)
{
	struct mpii_msg_scsi_task_request *stq;
	struct mpii_msg_sas_oper_request *soq;
	struct mpii_ccb *ccb;

	ccb = mpii_get_ccb(sc);
	if (ccb == NULL)
		return;

	/*
	 * Step 1: target reset.  The request frame was zeroed by
	 * mpii_put_ccb() when the ccb was last freed, so only the fields
	 * we care about need to be set.
	 */
	stq = ccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	stq->dev_handle = htole16(handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);

	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	/* reuse a ccb */
	ccb->ccb_state = MPII_CCB_READY;
	ccb->ccb_rcb = NULL;

	/* Step 2: ask the IOC to forget the device handle entirely. */
	soq = ccb->ccb_cmd;
	memset(soq, 0, sizeof(*soq));
	soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL;
	soq->operation = MPII_SAS_OP_REMOVE_DEVICE;
	soq->dev_handle = htole16(handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);
	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	mpii_put_ccb(sc, ccb);
}
2166
/*
 * Fetch IOC facts (firmware/MPI version) via doorbell handshake and
 * manufacturing page 0 (board name), then print a one-line summary.
 *
 * Returns 0 on success.  NOTE(review): the failure returns are
 * inconsistent (1 for handshake errors, EINVAL for the config page),
 * but callers appear to only test for non-zero.
 */
static int
mpii_board_info(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request ifq;
	struct mpii_msg_iocfacts_reply ifp;
	struct mpii_cfg_manufacturing_pg0 mpg;
	struct mpii_cfg_hdr hdr;

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	/* handshake: request and reply are transferred via the doorbell */
	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to request ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to receive ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	/* manufacturing page 0 carries the human-readable board name */
	hdr.page_version = 0;
	hdr.page_length = sizeof(mpg) / 4;	/* page length is in dwords */
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_MANUFACTURING;
	memset(&mpg, 0, sizeof(mpg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &mpg,
	    sizeof(mpg)) != 0) {
		printf("%s: unable to fetch manufacturing page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/*
	 * NOTE(review): board_name is a fixed-size field filled in by the
	 * IOC; presumably the firmware NUL-terminates it — confirm, as %s
	 * would read past the field otherwise.
	 */
	printf("%s: %s, firmware %u.%u.%u.%u%s, MPI %u.%u\n", DEVNAME(sc),
	    mpg.board_name, ifp.fw_version_maj, ifp.fw_version_min,
	    ifp.fw_version_unit, ifp.fw_version_dev,
	    ISSET(sc->sc_flags, MPII_F_RAID) ? " IR" : "",
	    ifp.msg_version_maj, ifp.msg_version_min);

	return (0);
}
2212
2213 static int
2214 mpii_target_map(struct mpii_softc *sc)
2215 {
2216 struct mpii_cfg_hdr hdr;
2217 struct mpii_cfg_ioc_pg8 ipg;
2218 int flags, pad = 0;
2219
2220 hdr.page_version = 0;
2221 hdr.page_length = sizeof(ipg) / 4;
2222 hdr.page_number = 8;
2223 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
2224 memset(&ipg, 0, sizeof(ipg));
2225 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
2226 sizeof(ipg)) != 0) {
2227 printf("%s: unable to fetch ioc page 8\n",
2228 DEVNAME(sc));
2229 return (EINVAL);
2230 }
2231
2232 if (le16toh(ipg.flags) & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0)
2233 pad = 1;
2234
2235 flags = le16toh(ipg.ir_volume_mapping_flags) &
2236 MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK;
2237 if (ISSET(sc->sc_flags, MPII_F_RAID)) {
2238 if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING) {
2239 sc->sc_vd_id_low += pad;
2240 pad = sc->sc_max_volumes; /* for sc_pd_id_start */
2241 } else
2242 sc->sc_vd_id_low = sc->sc_max_devices -
2243 sc->sc_max_volumes;
2244 }
2245
2246 sc->sc_pd_id_start += pad;
2247
2248 return (0);
2249 }
2250
2251 static int
2252 mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number,
2253 u_int32_t address, int flags, void *p)
2254 {
2255 struct mpii_msg_config_request *cq;
2256 struct mpii_msg_config_reply *cp;
2257 struct mpii_ccb *ccb;
2258 struct mpii_cfg_hdr *hdr = p;
2259 struct mpii_ecfg_hdr *ehdr = p;
2260 int etype = 0;
2261 int rv = 0;
2262
2263 DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x "
2264 "address: 0x%08x flags: 0x%x\n", DEVNAME(sc), type, number,
2265 address, flags);
2266
2267 ccb = mpii_get_ccb(sc);
2268 if (ccb == NULL) {
2269 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n",
2270 DEVNAME(sc));
2271 return (1);
2272 }
2273
2274 if (ISSET(flags, MPII_PG_EXTENDED)) {
2275 etype = type;
2276 type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2277 }
2278
2279 cq = ccb->ccb_cmd;
2280
2281 cq->function = MPII_FUNCTION_CONFIG;
2282
2283 cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER;
2284
2285 cq->config_header.page_number = number;
2286 cq->config_header.page_type = type;
2287 cq->ext_page_type = etype;
2288 cq->page_address = htole32(address);
2289 cq->page_buffer.sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
2290 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
2291
2292 ccb->ccb_done = mpii_empty_done;
2293 if (ISSET(flags, MPII_PG_POLL)) {
2294 if (mpii_poll(sc, ccb) != 0) {
2295 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2296 DEVNAME(sc));
2297 return (1);
2298 }
2299 } else
2300 mpii_wait(sc, ccb);
2301
2302 if (ccb->ccb_rcb == NULL) {
2303 mpii_put_ccb(sc, ccb);
2304 return (1);
2305 }
2306 cp = ccb->ccb_rcb->rcb_reply;
2307
2308 DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x sgl_flags: 0x%02x "
2309 "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
2310 cp->sgl_flags, cp->msg_length, cp->function);
2311 DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2312 "msg_flags: 0x%02x\n", DEVNAME(sc),
2313 le16toh(cp->ext_page_length), cp->ext_page_type,
2314 cp->msg_flags);
2315 DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2316 cp->vp_id, cp->vf_id);
2317 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2318 le16toh(cp->ioc_status));
2319 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2320 le32toh(cp->ioc_loginfo));
2321 DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2322 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2323 cp->config_header.page_version,
2324 cp->config_header.page_length,
2325 cp->config_header.page_number,
2326 cp->config_header.page_type);
2327
2328 if (le16toh(cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2329 rv = 1;
2330 else if (ISSET(flags, MPII_PG_EXTENDED)) {
2331 memset(ehdr, 0, sizeof(*ehdr));
2332 ehdr->page_version = cp->config_header.page_version;
2333 ehdr->page_number = cp->config_header.page_number;
2334 ehdr->page_type = cp->config_header.page_type;
2335 ehdr->ext_page_length = cp->ext_page_length;
2336 ehdr->ext_page_type = cp->ext_page_type;
2337 } else
2338 *hdr = cp->config_header;
2339
2340 mpii_push_reply(sc, ccb->ccb_rcb);
2341 mpii_put_ccb(sc, ccb);
2342
2343 return (rv);
2344 }
2345
2346 static int
2347 mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags,
2348 void *p, int read, void *page, size_t len)
2349 {
2350 struct mpii_msg_config_request *cq;
2351 struct mpii_msg_config_reply *cp;
2352 struct mpii_ccb *ccb;
2353 struct mpii_cfg_hdr *hdr = p;
2354 struct mpii_ecfg_hdr *ehdr = p;
2355 uintptr_t kva;
2356 int page_length;
2357 int rv = 0;
2358
2359 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d "
2360 "type: %x\n", DEVNAME(sc), address, read, hdr->page_type);
2361
2362 page_length = ISSET(flags, MPII_PG_EXTENDED) ?
2363 le16toh(ehdr->ext_page_length) : hdr->page_length;
2364
2365 if (len > sc->sc_request_size - sizeof(*cq) || len < page_length * 4)
2366 return (1);
2367
2368 ccb = mpii_get_ccb(sc);
2369 if (ccb == NULL) {
2370 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n",
2371 DEVNAME(sc));
2372 return (1);
2373 }
2374
2375 cq = ccb->ccb_cmd;
2376
2377 cq->function = MPII_FUNCTION_CONFIG;
2378
2379 cq->action = (read ? MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2380 MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2381
2382 if (ISSET(flags, MPII_PG_EXTENDED)) {
2383 cq->config_header.page_version = ehdr->page_version;
2384 cq->config_header.page_number = ehdr->page_number;
2385 cq->config_header.page_type = ehdr->page_type;
2386 cq->ext_page_len = ehdr->ext_page_length;
2387 cq->ext_page_type = ehdr->ext_page_type;
2388 } else
2389 cq->config_header = *hdr;
2390 cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK;
2391 cq->page_address = htole32(address);
2392 cq->page_buffer.sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
2393 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL |
2394 MPII_SGE_FL_SIZE_64 | (page_length * 4) |
2395 (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT));
2396
2397 /* bounce the page via the request space to avoid more bus_dma games */
2398 mpii_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2399 sizeof(struct mpii_msg_config_request));
2400
2401 kva = (uintptr_t)ccb->ccb_cmd;
2402 kva += sizeof(struct mpii_msg_config_request);
2403
2404 if (!read)
2405 memcpy((void *)kva, page, len);
2406
2407 ccb->ccb_done = mpii_empty_done;
2408 if (ISSET(flags, MPII_PG_POLL)) {
2409 if (mpii_poll(sc, ccb) != 0) {
2410 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2411 DEVNAME(sc));
2412 return (1);
2413 }
2414 } else
2415 mpii_wait(sc, ccb);
2416
2417 if (ccb->ccb_rcb == NULL) {
2418 mpii_put_ccb(sc, ccb);
2419 return (1);
2420 }
2421 cp = ccb->ccb_rcb->rcb_reply;
2422
2423 DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x msg_length: %d "
2424 "function: 0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length,
2425 cp->function);
2426 DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2427 "msg_flags: 0x%02x\n", DEVNAME(sc),
2428 le16toh(cp->ext_page_length), cp->ext_page_type,
2429 cp->msg_flags);
2430 DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2431 cp->vp_id, cp->vf_id);
2432 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2433 le16toh(cp->ioc_status));
2434 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2435 le32toh(cp->ioc_loginfo));
2436 DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2437 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2438 cp->config_header.page_version,
2439 cp->config_header.page_length,
2440 cp->config_header.page_number,
2441 cp->config_header.page_type);
2442
2443 if (le16toh(cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2444 rv = 1;
2445 else if (read)
2446 memcpy(page, (void *)kva, len);
2447
2448 mpii_push_reply(sc, ccb->ccb_rcb);
2449 mpii_put_ccb(sc, ccb);
2450
2451 return (rv);
2452 }
2453
/*
 * Translate one reply post queue descriptor into its rcb, if any.
 * Called with sc_rep_mtx held.  Address replies carry the DVA of a
 * reply frame; its index (and thus the rcb) is recovered from the
 * offset into the replies DMA area.  Context replies (e.g. successful
 * SCSI IO) have no frame and return NULL.
 */
static struct mpii_rcb *
mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp)
{
	struct mpii_rcb *rcb = NULL;
	u_int32_t rfid;

	KASSERT(mutex_owned(&sc->sc_rep_mtx));
	DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc));

	if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
	    MPII_REPLY_DESCR_ADDRESS_REPLY) {
		/* frame index = offset from the start of the replies area */
		rfid = (le32toh(rdp->frame_addr) -
		    (u_int32_t)MPII_DMA_DVA(sc->sc_replies)) /
		    sc->sc_reply_size;

		/* make the IOC's writes to this frame visible to the CPU */
		bus_dmamap_sync(sc->sc_dmat,
		    MPII_DMA_MAP(sc->sc_replies), sc->sc_reply_size * rfid,
		    sc->sc_reply_size, BUS_DMASYNC_POSTREAD);

		rcb = &sc->sc_rcbs[rfid];
	}

	/* invalidate the descriptor so it won't be processed again */
	memset(rdp, 0xff, sizeof(*rdp));

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    8 * sc->sc_reply_post_host_index, 8,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return (rcb);
}
2484
/*
 * Allocate a chunk of DMA-safe memory of the given size: a single
 * physically contiguous segment, mapped into kernel VA and loaded into
 * a dmamap.  On any failure the partially acquired resources are
 * unwound via the goto chain and NULL is returned.
 * Free with mpii_dmamem_free().
 */
static struct mpii_dmamem *
mpii_dmamem_alloc(struct mpii_softc *sc, size_t size)
{
	struct mpii_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(mdm->mdm_kva, 0, size);

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF);

	return (NULL);
}
2525
/*
 * Release memory obtained from mpii_dmamem_alloc().  Teardown is the
 * exact reverse of the allocation order: unload, unmap, free segments,
 * destroy the map, then free the descriptor itself.
 */
static void
mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm)
{
	DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %p\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF);
}
2537
2538 static int
2539 mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev)
2540 {
2541 int slot; /* initial hint */
2542
2543 KASSERT(mutex_owned(&sc->sc_devs_mtx));
2544 DNPRINTF(MPII_D_EVT, "%s: mpii_insert_dev wants slot %d\n",
2545 DEVNAME(sc), dev->slot);
2546 if (dev == NULL || dev->slot < 0)
2547 return (1);
2548 slot = dev->slot;
2549
2550 while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL)
2551 slot++;
2552
2553 if (slot >= sc->sc_max_devices)
2554 return (1);
2555
2556 DNPRINTF(MPII_D_EVT, "%s: mpii_insert_dev alloc slot %d\n",
2557 DEVNAME(sc), slot);
2558
2559 dev->slot = slot;
2560 sc->sc_devs[slot] = dev;
2561
2562 return (0);
2563 }
2564
2565 static int
2566 mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev)
2567 {
2568 int i;
2569
2570 KASSERT(mutex_owned(&sc->sc_devs_mtx));
2571 if (dev == NULL)
2572 return (1);
2573
2574 for (i = 0; i < sc->sc_max_devices; i++) {
2575 if (sc->sc_devs[i] == NULL)
2576 continue;
2577
2578 if (sc->sc_devs[i]->dev_handle == dev->dev_handle) {
2579 sc->sc_devs[i] = NULL;
2580 return (0);
2581 }
2582 }
2583
2584 return (1);
2585 }
2586
2587 static struct mpii_device *
2588 mpii_find_dev(struct mpii_softc *sc, u_int16_t handle)
2589 {
2590 int i;
2591 KASSERT(mutex_owned(&sc->sc_devs_mtx));
2592
2593 for (i = 0; i < sc->sc_max_devices; i++) {
2594 if (sc->sc_devs[i] == NULL)
2595 continue;
2596
2597 if (sc->sc_devs[i]->dev_handle == handle)
2598 return (sc->sc_devs[i]);
2599 }
2600
2601 return (NULL);
2602 }
2603
2604 static int
2605 mpii_alloc_ccbs(struct mpii_softc *sc)
2606 {
2607 struct mpii_ccb *ccb;
2608 u_int8_t *cmd;
2609 int i;
2610 char wqname[16];
2611
2612 SIMPLEQ_INIT(&sc->sc_ccb_free);
2613 SIMPLEQ_INIT(&sc->sc_ccb_tmos);
2614 mutex_init(&sc->sc_ccb_free_mtx, MUTEX_DEFAULT, IPL_BIO);
2615 cv_init(&sc->sc_ccb_free_cv, "mpii_ccbs");
2616 mutex_init(&sc->sc_ssb_tmomtx, MUTEX_DEFAULT, IPL_BIO);
2617 snprintf(wqname, sizeof(wqname) - 1, "%sabrt", DEVNAME(sc));
2618 workqueue_create(&sc->sc_ssb_tmowk, wqname, mpii_scsi_cmd_tmo_handler,
2619 sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
2620 if (sc->sc_ssb_tmowk == NULL)
2621 return 1;
2622
2623 sc->sc_ccbs = malloc((sc->sc_max_cmds-1) * sizeof(*ccb),
2624 M_DEVBUF, M_WAITOK | M_ZERO);
2625 sc->sc_requests = mpii_dmamem_alloc(sc,
2626 sc->sc_request_size * sc->sc_max_cmds);
2627 if (sc->sc_requests == NULL) {
2628 printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
2629 goto free_ccbs;
2630 }
2631 cmd = MPII_DMA_KVA(sc->sc_requests);
2632
2633 /*
2634 * we have sc->sc_max_cmds system request message
2635 * frames, but smid zero cannot be used. so we then
2636 * have (sc->sc_max_cmds - 1) number of ccbs
2637 */
2638 for (i = 1; i < sc->sc_max_cmds; i++) {
2639 ccb = &sc->sc_ccbs[i - 1];
2640
2641 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, sc->sc_max_sgl,
2642 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2643 &ccb->ccb_dmamap) != 0) {
2644 printf("%s: unable to create dma map\n", DEVNAME(sc));
2645 goto free_maps;
2646 }
2647
2648 ccb->ccb_sc = sc;
2649 mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2650 cv_init(&ccb->ccb_cv, "mpiiexec");
2651
2652 ccb->ccb_smid = htole16(i);
2653 ccb->ccb_offset = sc->sc_request_size * i;
2654
2655 ccb->ccb_cmd = &cmd[ccb->ccb_offset];
2656 ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests) +
2657 ccb->ccb_offset;
2658
2659 DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %p map: %p "
2660 "sc: %p smid: %#x offs: %#lx cmd: %p dva: %#lx\n",
2661 DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
2662 ccb->ccb_smid, ccb->ccb_offset, ccb->ccb_cmd,
2663 ccb->ccb_cmd_dva);
2664
2665 mpii_put_ccb(sc, ccb);
2666 }
2667
2668 return (0);
2669
2670 free_maps:
2671 while ((ccb = mpii_get_ccb(sc)) != NULL)
2672 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2673
2674 mpii_dmamem_free(sc, sc->sc_requests);
2675 free_ccbs:
2676 free(sc->sc_ccbs, M_DEVBUF);
2677
2678 return (1);
2679 }
2680
/*
 * Return a ccb to the free list.  The ccb is scrubbed (state, cookie,
 * completion callback, reply pointer) and its request frame is zeroed
 * so the next user starts from a clean frame.
 */
static void
mpii_put_ccb(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %p\n", DEVNAME(sc), ccb);

	ccb->ccb_state = MPII_CCB_FREE;
	ccb->ccb_cookie = NULL;
	ccb->ccb_done = NULL;
	ccb->ccb_rcb = NULL;
	memset(ccb->ccb_cmd, 0, sc->sc_request_size);

	mutex_enter(&sc->sc_ccb_free_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
	mutex_exit(&sc->sc_ccb_free_mtx);
}
2696
/*
 * Pop a ccb from the free list and mark it ready for use.
 * Returns NULL when the pool is exhausted; callers must cope.
 */
static struct mpii_ccb *
mpii_get_ccb(struct mpii_softc *sc)
{
	struct mpii_ccb *ccb;

	mutex_enter(&sc->sc_ccb_free_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
		ccb->ccb_state = MPII_CCB_READY;
		KASSERT(ccb->ccb_sc == sc);
	}
	mutex_exit(&sc->sc_ccb_free_mtx);

	DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %p\n", DEVNAME(sc), ccb);

	return (ccb);
}
2715
2716 static int
2717 mpii_alloc_replies(struct mpii_softc *sc)
2718 {
2719 DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc));
2720
2721 sc->sc_rcbs = malloc(sc->sc_num_reply_frames * sizeof(struct mpii_rcb),
2722 M_DEVBUF, M_WAITOK);
2723
2724 sc->sc_replies = mpii_dmamem_alloc(sc, sc->sc_reply_size *
2725 sc->sc_num_reply_frames);
2726 if (sc->sc_replies == NULL) {
2727 free(sc->sc_rcbs, M_DEVBUF);
2728 return (1);
2729 }
2730
2731 return (0);
2732 }
2733
2734 static void
2735 mpii_push_replies(struct mpii_softc *sc)
2736 {
2737 struct mpii_rcb *rcb;
2738 uintptr_t kva = (uintptr_t)MPII_DMA_KVA(sc->sc_replies);
2739 int i;
2740
2741 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
2742 0, sc->sc_reply_size * sc->sc_num_reply_frames,
2743 BUS_DMASYNC_PREREAD);
2744
2745 for (i = 0; i < sc->sc_num_reply_frames; i++) {
2746 rcb = &sc->sc_rcbs[i];
2747
2748 rcb->rcb_reply = (void *)(kva + sc->sc_reply_size * i);
2749 rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
2750 sc->sc_reply_size * i;
2751 mpii_push_reply(sc, rcb);
2752 }
2753 }
2754
/*
 * Post a request to the IOC.  Builds a request descriptor for the
 * ccb's smid and writes it to the request descriptor post registers.
 * The 64-bit descriptor is written as two 32-bit halves under
 * sc_req_mtx so concurrent posts cannot interleave (the single 64-bit
 * write path is compiled out).
 */
static void
mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mpii_request_header *rhp;
	struct mpii_request_descr descr;
#if defined(__LP64__) && 0
	u_long *rdp = (u_long *)&descr;
#else
	u_int32_t *rdp = (u_int32_t *)&descr;
#endif

	DNPRINTF(MPII_D_RW, "%s: mpii_start %#lx\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	/* flush the request frame to memory before the IOC reads it */
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, sc->sc_request_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPII_CCB_QUEUED;

	rhp = ccb->ccb_cmd;

	memset(&descr, 0, sizeof(descr));

	/* descriptor type depends on the function in the request frame */
	switch (rhp->function) {
	case MPII_FUNCTION_SCSI_IO_REQUEST:
		descr.request_flags = MPII_REQ_DESCR_SCSI_IO;
		descr.dev_handle = htole16(ccb->ccb_dev_handle);
		break;
	case MPII_FUNCTION_SCSI_TASK_MGMT:
		descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY;
		break;
	default:
		descr.request_flags = MPII_REQ_DESCR_DEFAULT;
	}

	descr.vf_id = sc->sc_vf_id;
	descr.smid = ccb->ccb_smid;

#if defined(__LP64__) && 0
	DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, *rdp);
#else
	DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%04x\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);

	DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_HIGH (0x%08x) write "
	    "0x%04x\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1));

	/* low half first, then high; barriers keep the order on the bus */
	mutex_enter(&sc->sc_req_mtx);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, rdp[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_HIGH, rdp[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);
	mutex_exit(&sc->sc_req_mtx);
#endif
}
2819
2820 static int
2821 mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb)
2822 {
2823 void (*done)(struct mpii_ccb *);
2824 void *cookie;
2825 int rv = 1;
2826
2827 DNPRINTF(MPII_D_INTR, "%s: mpii_poll\n", DEVNAME(sc));
2828
2829 done = ccb->ccb_done;
2830 cookie = ccb->ccb_cookie;
2831
2832 ccb->ccb_done = mpii_poll_done;
2833 ccb->ccb_cookie = &rv;
2834
2835 mpii_start(sc, ccb);
2836
2837 while (rv == 1) {
2838 /* avoid excessive polling */
2839 if (mpii_reply_waiting(sc))
2840 mpii_intr(sc);
2841 else
2842 delay(10);
2843 }
2844
2845 ccb->ccb_cookie = cookie;
2846 done(ccb);
2847
2848 return (0);
2849 }
2850
2851 static void
2852 mpii_poll_done(struct mpii_ccb *ccb)
2853 {
2854 int *rv = ccb->ccb_cookie;
2855
2856 *rv = 0;
2857 }
2858
2859 static int
2860 mpii_alloc_queues(struct mpii_softc *sc)
2861 {
2862 u_int32_t *rfp;
2863 int i;
2864
2865 DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc));
2866
2867 mutex_init(&sc->sc_reply_free_mtx, MUTEX_DEFAULT, IPL_BIO);
2868 sc->sc_reply_freeq = mpii_dmamem_alloc(sc,
2869 sc->sc_reply_free_qdepth * sizeof(*rfp));
2870 if (sc->sc_reply_freeq == NULL)
2871 return (1);
2872 rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
2873 for (i = 0; i < sc->sc_num_reply_frames; i++) {
2874 rfp[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
2875 sc->sc_reply_size * i;
2876 }
2877
2878 sc->sc_reply_postq = mpii_dmamem_alloc(sc,
2879 sc->sc_reply_post_qdepth * sizeof(struct mpii_reply_descr));
2880 if (sc->sc_reply_postq == NULL)
2881 goto free_reply_freeq;
2882 sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq);
2883 memset(sc->sc_reply_postq_kva, 0xff, sc->sc_reply_post_qdepth *
2884 sizeof(struct mpii_reply_descr));
2885
2886 return (0);
2887
2888 free_reply_freeq:
2889 mpii_dmamem_free(sc, sc->sc_reply_freeq);
2890 return (1);
2891 }
2892
/*
 * Program the initial queue indices into the IOC: the reply free
 * queue starts full (index at qdepth - 1) and the reply post queue
 * starts empty (index 0).
 */
static void
mpii_init_queues(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_init_queues\n", DEVNAME(sc));

	sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1;
	sc->sc_reply_post_host_index = 0;
	mpii_write_reply_free(sc, sc->sc_reply_free_host_index);
	mpii_write_reply_post(sc, sc->sc_reply_post_host_index);
}
2903
2904 static void
2905 mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
2906 {
2907 void (*done)(struct mpii_ccb *);
2908 void *cookie;
2909
2910 done = ccb->ccb_done;
2911 cookie = ccb->ccb_cookie;
2912
2913 ccb->ccb_done = mpii_wait_done;
2914 ccb->ccb_cookie = ccb;
2915
2916 /* XXX this will wait forever for the ccb to complete */
2917
2918 mpii_start(sc, ccb);
2919
2920 mutex_enter(&ccb->ccb_mtx);
2921 while (ccb->ccb_cookie != NULL)
2922 cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
2923 mutex_exit(&ccb->ccb_mtx);
2924
2925 ccb->ccb_cookie = cookie;
2926 done(ccb);
2927 }
2928
/*
 * Completion callback used by mpii_wait(): clear the cookie (the
 * completion flag) and wake the sleeping thread.
 */
static void
mpii_wait_done(struct mpii_ccb *ccb)
{
	mutex_enter(&ccb->ccb_mtx);
	ccb->ccb_cookie = NULL;
	cv_signal(&ccb->ccb_cv);
	mutex_exit(&ccb->ccb_mtx);
}
2937
/*
 * scsipi adapter entry point.  Handles xfer-mode queries inline and
 * translates ADAPTER_REQ_RUN_XFER requests into MPII SCSI IO request
 * frames: validates the CDB length, resolves the target to an IOC
 * device handle, builds the request (direction, CDB, data length,
 * sense buffer at the tail of the request frame), maps the data
 * buffer, and posts it (or polls, for XS_CTL_POLL commands).
 */
static void
mpii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct mpii_softc *sc = device_private(adapt->adapt_dev);
	struct mpii_ccb *ccb;
	struct mpii_msg_scsi_io *io;
	struct mpii_device *dev;
	int target, timeout, ret;
	u_int16_t dev_handle;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsipi_request\n", DEVNAME(sc));

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		/* report tagged queueing; no sync/offset negotiation */
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	/* CDB too large for the request frame: fail with ILLEGAL REQUEST */
	if (xs->cmdlen > MPII_CDB_LEN) {
		DNPRINTF(MPII_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.scsi_sense.response_code =
		    SSD_RCODE_VALID | SSD_RCODE_CURRENT;
		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.scsi_sense.asc = 0x20;
		xs->error = XS_SENSE;
		scsipi_done(xs);
		return;
	}

	/*
	 * NOTE(review): target is used to index sc_devs without a bound
	 * check; presumably the scsipi layer limits it to the channel's
	 * ntargets (== sc_max_devices) — confirm.
	 */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = sc->sc_devs[target]) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		/* device no longer exists */
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		return;
	}
	dev_handle = dev->dev_handle;
	mutex_exit(&sc->sc_devs_mtx);

	ccb = mpii_get_ccb(sc);
	if (ccb == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}
	DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->cmd->opcode: 0x%02x xs->xs_control: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_smid, xs->cmd->opcode, xs->xs_control);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpii_scsi_cmd_done;
	ccb->ccb_dev_handle = dev_handle;

	io = ccb->ccb_cmd;
	memset(io, 0, sizeof(*io));
	io->function = MPII_FUNCTION_SCSI_IO_REQUEST;
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = sizeof(struct mpii_msg_scsi_io) / 4;
	io->io_flags = htole16(xs->cmdlen);
	io->dev_handle = htole16(ccb->ccb_dev_handle);
	io->lun[0] = htobe16(periph->periph_lun);

	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}

	io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q;

	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	io->data_length = htole32(xs->datalen);

	/* sense data is at the end of a request */
	io->sense_buffer_low_address = htole32(ccb->ccb_cmd_dva +
	    sc->sc_request_size - sizeof(struct scsi_sense_data));

	/* build the SGL; SAS3 controllers use a different SGE format */
	if (ISSET(sc->sc_flags, MPII_F_SAS3))
		ret = mpii_load_xs_sas3(ccb);
	else
		ret = mpii_load_xs(ccb);

	if (ret != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		goto done;
	}

	/* polled commands complete via mpii_scsi_cmd_done inside mpii_poll */
	if (xs->xs_control & XS_CTL_POLL) {
		if (mpii_poll(sc, ccb) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}
		return;
	}
	timeout = mstohz(xs->timeout);
	if (timeout == 0)
		timeout = 1;	/* at least one tick */
	callout_reset(&xs->xs_callout, timeout, mpii_scsi_cmd_tmo, ccb);
	mpii_start(sc, ccb);
	return;
done:
	mpii_put_ccb(sc, ccb);
	scsipi_done(xs);
}
3070
3071 static void
3072 mpii_scsi_cmd_tmo(void *xccb)
3073 {
3074 struct mpii_ccb *ccb = xccb;
3075 struct mpii_softc *sc = ccb->ccb_sc;
3076 bool start_work;
3077
3078 printf("%s: mpii_scsi_cmd_tmo\n", DEVNAME(sc));
3079
3080 if (ccb->ccb_state == MPII_CCB_QUEUED) {
3081 mutex_enter(&sc->sc_ssb_tmomtx);
3082 start_work = (SIMPLEQ_FIRST(&sc->sc_ccb_tmos) == 0);
3083 ccb->ccb_state = MPII_CCB_TIMEOUT;
3084 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_tmos, ccb, ccb_link);
3085 if (start_work) {
3086 workqueue_enqueue(sc->sc_ssb_tmowk,
3087 &sc->sc_ssb_tmowork, NULL);
3088 }
3089 mutex_exit(&sc->sc_ssb_tmomtx);
3090 }
3091 }
3092
3093 static void
3094 mpii_scsi_cmd_tmo_handler(struct work *wk, void *cookie)
3095 {
3096 struct mpii_softc *sc = cookie;
3097 struct mpii_ccb *next;
3098 struct mpii_ccb *ccb;
3099 struct mpii_ccb *tccb;
3100 struct mpii_msg_scsi_task_request *stq;
3101
3102 mutex_enter(&sc->sc_ssb_tmomtx);
3103 next = SIMPLEQ_FIRST(&sc->sc_ccb_tmos);
3104 SIMPLEQ_INIT(&sc->sc_ccb_tmos);
3105 mutex_exit(&sc->sc_ssb_tmomtx);
3106
3107 while (next != NULL) {
3108 ccb = next;
3109 next = SIMPLEQ_NEXT(ccb, ccb_link);
3110 if (ccb->ccb_state != MPII_CCB_TIMEOUT)
3111 continue;
3112 tccb = mpii_get_ccb(sc);
3113 stq = tccb->ccb_cmd;
3114 stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
3115 stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
3116 stq->dev_handle = htole16(ccb->ccb_dev_handle);
3117
3118 tccb->ccb_done = mpii_scsi_cmd_tmo_done;
3119 mpii_wait(sc, tccb);
3120 }
3121 }
3122
/*
 * Completion callback for the target-reset ccbs issued by
 * mpii_scsi_cmd_tmo_handler(): just recycle the ccb.
 */
static void
mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb)
{
	mpii_put_ccb(tccb->ccb_sc, tccb);
}
3128
3129 static u_int8_t
3130 map_scsi_status(u_int8_t mpii_scsi_status)
3131 {
3132 u_int8_t scsi_status;
3133
3134 switch (mpii_scsi_status)
3135 {
3136 case MPII_SCSIIO_STATUS_GOOD:
3137 scsi_status = SCSI_OK;
3138 break;
3139
3140 case MPII_SCSIIO_STATUS_CHECK_COND:
3141 scsi_status = SCSI_CHECK;
3142 break;
3143
3144 case MPII_SCSIIO_STATUS_BUSY:
3145 scsi_status = SCSI_BUSY;
3146 break;
3147
3148 case MPII_SCSIIO_STATUS_INTERMEDIATE:
3149 scsi_status = SCSI_INTERM;
3150 break;
3151
3152 case MPII_SCSIIO_STATUS_INTERMEDIATE_CONDMET:
3153 scsi_status = SCSI_INTERM;
3154 break;
3155
3156 case MPII_SCSIIO_STATUS_RESERVATION_CONFLICT:
3157 scsi_status = SCSI_RESV_CONFLICT;
3158 break;
3159
3160 case MPII_SCSIIO_STATUS_CMD_TERM:
3161 case MPII_SCSIIO_STATUS_TASK_ABORTED:
3162 scsi_status = SCSI_TERMINATED;
3163 break;
3164
3165 case MPII_SCSIIO_STATUS_TASK_SET_FULL:
3166 scsi_status = SCSI_QUEUE_FULL;
3167 break;
3168
3169 case MPII_SCSIIO_STATUS_ACA_ACTIVE:
3170 scsi_status = SCSI_ACA_ACTIVE;
3171 break;
3172
3173 default:
3174 /* XXX: for the lack of anything better and other than OK */
3175 scsi_status = 0xFF;
3176 break;
3177 }
3178
3179 return scsi_status;
3180 }
3181
3182 static void
3183 mpii_scsi_cmd_done(struct mpii_ccb *ccb)
3184 {
3185 struct mpii_msg_scsi_io_error *sie;
3186 struct mpii_softc *sc = ccb->ccb_sc;
3187 struct scsipi_xfer *xs = ccb->ccb_cookie;
3188 struct scsi_sense_data *sense;
3189 bus_dmamap_t dmap = ccb->ccb_dmamap;
3190 bool timeout = 1;
3191
3192 callout_stop(&xs->xs_callout);
3193 if (ccb->ccb_state == MPII_CCB_TIMEOUT)
3194 timeout = 1;
3195 ccb->ccb_state = MPII_CCB_READY;
3196
3197 if (xs->datalen != 0) {
3198 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
3199 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
3200 BUS_DMASYNC_POSTWRITE);
3201
3202 bus_dmamap_unload(sc->sc_dmat, dmap);
3203 }
3204
3205 KASSERT(xs->error == XS_NOERROR);
3206 KASSERT(xs->resid == xs->datalen);
3207 KASSERT(xs->status == SCSI_OK);
3208
3209 if (ccb->ccb_rcb == NULL) {
3210 /* no scsi error, we're ok so drop out early */
3211 xs->resid = 0;
3212 goto done;
3213 }
3214
3215 sie = ccb->ccb_rcb->rcb_reply;
3216
3217 DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d "
3218 "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
3219 xs->xs_control);
3220 DNPRINTF(MPII_D_CMD, "%s: dev_handle: %d msg_length: %d "
3221 "function: 0x%02x\n", DEVNAME(sc), le16toh(sie->dev_handle),
3222 sie->msg_length, sie->function);
3223 DNPRINTF(MPII_D_CMD, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
3224 sie->vp_id, sie->vf_id);
3225 DNPRINTF(MPII_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x "
3226 "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
3227 sie->scsi_state, le16toh(sie->ioc_status));
3228 DNPRINTF(MPII_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
3229 le32toh(sie->ioc_loginfo));
3230 DNPRINTF(MPII_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc),
3231 le32toh(sie->transfer_count));
3232 DNPRINTF(MPII_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc),
3233 le32toh(sie->sense_count));
3234 DNPRINTF(MPII_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc),
3235 le32toh(sie->response_info));
3236 DNPRINTF(MPII_D_CMD, "%s: task_tag: 0x%04x\n", DEVNAME(sc),
3237 le16toh(sie->task_tag));
3238 DNPRINTF(MPII_D_CMD, "%s: bidirectional_transfer_count: 0x%08x\n",
3239 DEVNAME(sc), le32toh(sie->bidirectional_transfer_count));
3240
3241 xs->status = map_scsi_status(sie->scsi_status);
3242
3243 switch (le16toh(sie->ioc_status) & MPII_IOCSTATUS_MASK) {
3244 case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN:
3245 switch(sie->scsi_status) {
3246 case MPII_SCSIIO_STATUS_CHECK_COND:
3247 xs->error = XS_SENSE;
3248 /* FALLTHROUGH */
3249 case MPII_SCSIIO_STATUS_GOOD:
3250 xs->resid = xs->datalen - le32toh(sie->transfer_count);
3251 break;
3252 default:
3253 xs->error = XS_DRIVER_STUFFUP;
3254 break;
3255 }
3256 break;
3257
3258 case MPII_IOCSTATUS_SUCCESS:
3259 case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR:
3260 switch (sie->scsi_status) {
3261 case MPII_SCSIIO_STATUS_GOOD:
3262 xs->resid = 0;
3263 break;
3264
3265 case MPII_SCSIIO_STATUS_CHECK_COND:
3266 xs->resid = 0;
3267 xs->error = XS_SENSE;
3268 break;
3269
3270 case MPII_SCSIIO_STATUS_BUSY:
3271 case MPII_SCSIIO_STATUS_TASK_SET_FULL:
3272 xs->error = XS_BUSY;
3273 break;
3274
3275 default:
3276 xs->error = XS_DRIVER_STUFFUP;
3277 }
3278 break;
3279
3280 case MPII_IOCSTATUS_BUSY:
3281 case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES:
3282 xs->error = XS_BUSY;
3283 break;
3284
3285 case MPII_IOCSTATUS_SCSI_IOC_TERMINATED:
3286 case MPII_IOCSTATUS_SCSI_TASK_TERMINATED:
3287 xs->error = timeout ? XS_TIMEOUT : XS_RESET;
3288 break;
3289
3290 case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
3291 case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3292 xs->error = XS_SELTIMEOUT;
3293 break;
3294
3295 default:
3296 xs->error = XS_DRIVER_STUFFUP;
3297 break;
3298 }
3299
3300 sense = (struct scsi_sense_data *)((uintptr_t)ccb->ccb_cmd +
3301 sc->sc_request_size - sizeof(*sense));
3302 if (sie->scsi_state & MPII_SCSIIO_STATE_AUTOSENSE_VALID)
3303 memcpy(&xs->sense, sense, sizeof(xs->sense));
3304
3305 mpii_push_reply(sc, ccb->ccb_rcb);
3306
3307 done:
3308 mpii_put_ccb(sc, ccb);
3309
3310 DNPRINTF(MPII_D_CMD, "%s: xs err: %d status: %#x len: %d resid: %d\n",
3311 DEVNAME(sc), xs->error, xs->status, xs->datalen, xs->resid);
3312
3313 scsipi_done(xs);
3314 }
3315
#if 0
/*
 * mpii_scsi_ioctl: per-target ioctl entry point (OpenBSD scsi_link
 * interface; currently compiled out in the NetBSD port).
 *
 * Cache ioctls are handled locally for RAID volumes; everything else
 * is passed to the chained adapter ioctl handler, if one is set.
 */
int
mpii_scsi_ioctl(struct scsi_link *link, u_long cmd, void *addr, int flag)
{
	struct mpii_softc *sc = (struct mpii_softc *)link->adapter_softc;
	struct mpii_device *dev = sc->sc_devs[link->target];

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_scsi_ioctl\n", DEVNAME(sc));

	switch (cmd) {
	case DIOCGCACHE:
	case DIOCSCACHE:
		/* Cache control only applies to RAID volumes. */
		if (dev != NULL && ISSET(dev->flags, MPII_DF_VOLUME)) {
			return (mpii_ioctl_cache(link, cmd,
			    (struct dk_cache *)addr));
		}
		break;

	default:
		/* Not ours: delegate to the chained handler if present. */
		if (sc->sc_ioctl)
			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));

		break;
	}

	return (ENOTTY);
}
3343
/*
 * mpii_ioctl_cache: get (DIOCGCACHE) or set (DIOCSCACHE) the write-cache
 * state of a RAID volume via a RAID_ACTION request.  Compiled out with
 * the surrounding #if 0 block in the NetBSD port.
 */
int
mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
{
	struct mpii_softc *sc = (struct mpii_softc *)link->adapter_softc;
	struct mpii_device *dev = sc->sc_devs[link->target];
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_msg_raid_action_request *req;
	struct mpii_msg_raid_action_reply *rep;
	struct mpii_cfg_hdr hdr;
	struct mpii_ccb *ccb;
	u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;
	size_t pagelen;
	int rv = 0;
	int enabled;

	/* Fetch RAID volume page 0 to learn the current cache setting. */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    addr, MPII_PG_POLL, &hdr) != 0)
		return (EINVAL);

	/* page_length is in 32-bit dwords. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (vpg == NULL)
		return (ENOMEM);

	if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1,
	    vpg, pagelen) != 0) {
		rv = EINVAL;
		goto done;
	}

	enabled = ((le16toh(vpg->volume_settings) &
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) ==
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0;

	if (cmd == DIOCGCACHE) {
		dc->wrcache = enabled;
		dc->rdcache = 0;
		goto done;
	} /* else DIOCSCACHE */

	/* Read-cache control is not supported by the firmware interface. */
	if (dc->rdcache) {
		rv = EOPNOTSUPP;
		goto done;
	}

	/* Nothing to do if the requested state matches the current one. */
	if (((dc->wrcache) ? 1 : 0) == enabled)
		goto done;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_POLL);
	if (ccb == NULL) {
		rv = ENOMEM;
		goto done;
	}

	ccb->ccb_done = mpii_empty_done;

	req = ccb->ccb_cmd;
	memset(req, 0, sizeof(*req));
	req->function = MPII_FUNCTION_RAID_ACTION;
	req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE;
	req->vol_dev_handle = htole16(dev->dev_handle);
	req->action_data = htole32(dc->wrcache ?
	    MPII_RAID_VOL_WRITE_CACHE_ENABLE :
	    MPII_RAID_VOL_WRITE_CACHE_DISABLE);

	/*
	 * NOTE(review): on mpii_poll() failure the ccb acquired above is
	 * never returned via scsi_io_put() -- apparent resource leak.
	 * Left as-is since this code is compiled out; verify before
	 * re-enabling.
	 */
	if (mpii_poll(sc, ccb) != 0) {
		rv = EIO;
		goto done;
	}

	if (ccb->ccb_rcb != NULL) {
		rep = ccb->ccb_rcb->rcb_reply;
		/* Confirm the firmware actually applied the new setting. */
		if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) ||
		    ((rep->action_data[0] &
		     MPII_RAID_VOL_WRITE_CACHE_MASK) !=
		    (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE :
		     MPII_RAID_VOL_WRITE_CACHE_DISABLE)))
			rv = EINVAL;
		mpii_push_reply(sc, ccb->ccb_rcb);
	}

	scsi_io_put(&sc->sc_iopool, ccb);

done:
	free(vpg, M_TEMP);
	return (rv);
}
#endif /* 0 */
3432
3433 #if NBIO > 0
3434 static int
3435 mpii_ioctl(device_t dev, u_long cmd, void *addr)
3436 {
3437 struct mpii_softc *sc = device_private(dev);
3438 int error = 0;
3439
3440 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc));
3441
3442 switch (cmd) {
3443 case BIOCINQ:
3444 DNPRINTF(MPII_D_IOCTL, "inq\n");
3445 error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr);
3446 break;
3447 case BIOCVOL:
3448 DNPRINTF(MPII_D_IOCTL, "vol\n");
3449 error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr);
3450 break;
3451 case BIOCDISK:
3452 DNPRINTF(MPII_D_IOCTL, "disk\n");
3453 error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr);
3454 break;
3455 default:
3456 DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n");
3457 error = ENOTTY;
3458 }
3459
3460 return (error);
3461 }
3462
3463 static int
3464 mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi)
3465 {
3466 int i;
3467
3468 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc));
3469
3470 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3471 mutex_enter(&sc->sc_devs_mtx);
3472 for (i = 0; i < sc->sc_max_devices; i++)
3473 if (sc->sc_devs[i] &&
3474 ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME))
3475 bi->bi_novol++;
3476 mutex_exit(&sc->sc_devs_mtx);
3477 return (0);
3478 }
3479
/*
 * mpii_ioctl_vol: BIOCVOL handler.  Fills in status, RAID level, disk
 * count and size for one volume by reading RAID volume page 0.
 */
static int
mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_cfg_hdr hdr;
	struct mpii_device *dev;
	size_t pagelen;
	u_int16_t volh;
	int rv, hcnt = 0;
	int percent;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n",
	    DEVNAME(sc), bv->bv_volid);

	/*
	 * Copy out the device handle and resync percentage under the lock;
	 * the dev pointer must not be used after the mutex is dropped.
	 */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		return (ENODEV);
	}
	volh = dev->dev_handle;
	percent = dev->percent;
	mutex_exit(&sc->sc_devs_mtx);

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit dwords. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP);
		return (EINVAL);
	}

	/* Map the firmware volume state to a bio(4) status code. */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		if (ISSET(le32toh(vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = percent;
		} else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	/* Map the firmware volume type to a numeric RAID level. */
	switch (vpg->volume_type) {
	case MPII_CFG_RAID_VOL_0_TYPE_RAID0:
		bv->bv_level = 0;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1:
		bv->bv_level = 1;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1E:
	case MPII_CFG_RAID_VOL_0_TYPE_RAID10:
		bv->bv_level = 10;
		break;
	default:
		bv->bv_level = -1;
	}

	/* Count hot spares in this volume's pool; they add to bv_nodisk. */
	if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) {
		free(vpg, M_TEMP);
		return (rv);
	}

	bv->bv_nodisk = vpg->num_phys_disks + hcnt;

	bv->bv_size = le64toh(vpg->max_lba) * le16toh(vpg->block_size);

	free(vpg, M_TEMP);
	return (0);
}
3578
/*
 * mpii_ioctl_disk: BIOCDISK handler.  Resolves a (volume id, disk index)
 * pair to a physical disk number and delegates to mpii_bio_disk();
 * indices past the volume's member disks are treated as hot spares and
 * handed to mpii_bio_hs().
 */
static int
mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd)
{
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_cfg_raid_vol_pg0_physdisk *pd;
	struct mpii_cfg_hdr hdr;
	struct mpii_device *dev;
	size_t pagelen;
	u_int16_t volh;
	u_int8_t dn;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n",
	    DEVNAME(sc), bd->bd_volid, bd->bd_diskid);

	/* Snapshot the volume handle under the lock. */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		return (ENODEV);
	}
	volh = dev->dev_handle;
	mutex_exit(&sc->sc_devs_mtx);

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit dwords. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP);
		return (EINVAL);
	}

	/* Indices beyond the member disks refer to pool hot spares. */
	if (bd->bd_diskid >= vpg->num_phys_disks) {
		int nvdsk = vpg->num_phys_disks;
		int hsmap = vpg->hot_spare_pool;

		free(vpg, M_TEMP);
		return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL));
	}

	/* Per-disk records follow the fixed part of volume page 0. */
	pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) +
	    bd->bd_diskid;
	dn = pd->phys_disk_num;

	free(vpg, M_TEMP);
	return (mpii_bio_disk(sc, bd, dn));
}
3639
/*
 * mpii_bio_hs: walk the active RAID configuration page looking for hot
 * spares in pool 'hsmap'.  Two modes: with bd == NULL just count the
 * spares into *hscnt; with bd != NULL resolve bd->bd_diskid (offset by
 * nvdsk member disks) to a spare and report it via mpii_bio_disk().
 */
static int
mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk,
     int hsmap, int *hscnt)
{
	struct mpii_cfg_raid_config_pg0 *cpg;
	struct mpii_raid_config_element *el;
	struct mpii_ecfg_hdr ehdr;
	size_t pagelen;
	int i, nhs = 0;

	if (bd) {
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc),
		    bd->bd_diskid - nvdsk);
	} else {
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc));
	}

	/* RAID config page 0 is an extended config page. */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG,
	    0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED,
	    &ehdr) != 0) {
		printf("%s: unable to fetch header for raid config page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* ext_page_length is in 32-bit dwords. */
	pagelen = le16toh(ehdr.ext_page_length) * 4;
	cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (cpg == NULL) {
		printf("%s: unable to allocate space for raid config page 0\n",
		    DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG,
	    MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) {
		printf("%s: unable to fetch raid config page 0\n",
		    DEVNAME(sc));
		free(cpg, M_TEMP);
		return (EINVAL);
	}

	/* Config elements follow the fixed part of the page. */
	el = (struct mpii_raid_config_element *)(cpg + 1);
	for (i = 0; i < cpg->num_elements; i++, el++) {
		if (ISSET(le16toh(el->element_flags),
		    MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) &&
		    el->hot_spare_pool == hsmap) {
			/*
			 * diskid comparison is based on the idea that all
			 * disks are counted by the bio(4) in sequence, thus
			 * subtracting the number of disks in the volume
			 * from the diskid yields us a "relative" hotspare
			 * number, which is good enough for us.
			 */
			if (bd != NULL && bd->bd_diskid == nhs + nvdsk) {
				u_int8_t dn = el->phys_disk_num;

				free(cpg, M_TEMP);
				return (mpii_bio_disk(sc, bd, dn));
			}
			nhs++;
		}
	}

	if (hscnt)
		*hscnt = nhs;

	free(cpg, M_TEMP);
	return (0);
}
3709
3710 static int
3711 mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn)
3712 {
3713 struct mpii_cfg_raid_physdisk_pg0 *ppg;
3714 struct mpii_cfg_hdr hdr;
3715 struct mpii_device *dev;
3716 int len;
3717
3718 DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc),
3719 bd->bd_diskid);
3720
3721 ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_ZERO);
3722 if (ppg == NULL) {
3723 printf("%s: unable to allocate space for raid physical disk "
3724 "page 0\n", DEVNAME(sc));
3725 return (ENOMEM);
3726 }
3727
3728 hdr.page_version = 0;
3729 hdr.page_length = sizeof(*ppg) / 4;
3730 hdr.page_number = 0;
3731 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD;
3732
3733 if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0,
3734 &hdr, 1, ppg, sizeof(*ppg)) != 0) {
3735 printf("%s: unable to fetch raid drive page 0\n",
3736 DEVNAME(sc));
3737 free(ppg, M_TEMP);
3738 return (EINVAL);
3739 }
3740
3741 bd->bd_target = ppg->phys_disk_num;
3742
3743 mutex_enter(&sc->sc_devs_mtx);
3744 if ((dev = mpii_find_dev(sc, le16toh(ppg->dev_handle))) == NULL) {
3745 mutex_exit(&sc->sc_devs_mtx);
3746 bd->bd_status = BIOC_SDINVALID;
3747 free(ppg, M_TEMP);
3748 return (0);
3749 }
3750 mutex_exit(&sc->sc_devs_mtx);
3751
3752 switch (ppg->phys_disk_state) {
3753 case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE:
3754 case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL:
3755 bd->bd_status = BIOC_SDONLINE;
3756 break;
3757 case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
3758 if (ppg->offline_reason ==
3759 MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED ||
3760 ppg->offline_reason ==
3761 MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ)
3762 bd->bd_status = BIOC_SDFAILED;
3763 else
3764 bd->bd_status = BIOC_SDOFFLINE;
3765 break;
3766 case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED:
3767 bd->bd_status = BIOC_SDFAILED;
3768 break;
3769 case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING:
3770 bd->bd_status = BIOC_SDREBUILD;
3771 break;
3772 case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE:
3773 bd->bd_status = BIOC_SDHOTSPARE;
3774 break;
3775 case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED:
3776 bd->bd_status = BIOC_SDUNUSED;
3777 break;
3778 case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE:
3779 default:
3780 bd->bd_status = BIOC_SDINVALID;
3781 break;
3782 }
3783
3784 bd->bd_size = le64toh(ppg->dev_max_lba) * le16toh(ppg->block_size);
3785
3786 strnvisx(bd->bd_vendor, sizeof(bd->bd_vendor),
3787 ppg->vendor_id, sizeof(ppg->vendor_id),
3788 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
3789 len = strlen(bd->bd_vendor);
3790 bd->bd_vendor[len] = ' ';
3791 strnvisx(&bd->bd_vendor[len + 1], sizeof(ppg->vendor_id) - len - 1,
3792 ppg->product_id, sizeof(ppg->product_id),
3793 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
3794 strnvisx(bd->bd_serial, sizeof(bd->bd_serial),
3795 ppg->serial, sizeof(ppg->serial), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
3796
3797 free(ppg, M_TEMP);
3798 return (0);
3799 }
3800
3801 static struct mpii_device *
3802 mpii_find_vol(struct mpii_softc *sc, int volid)
3803 {
3804 struct mpii_device *dev = NULL;
3805
3806 KASSERT(mutex_owned(&sc->sc_devs_mtx));
3807 if (sc->sc_vd_id_low + volid >= sc->sc_max_devices)
3808 return (NULL);
3809 dev = sc->sc_devs[sc->sc_vd_id_low + volid];
3810 if (dev && ISSET(dev->flags, MPII_DF_VOLUME))
3811 return (dev);
3812 return (NULL);
3813 }
3814
3815 /*
3816 * Non-sleeping lightweight version of the mpii_ioctl_vol
3817 */
static int
mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_cfg_hdr hdr;
	struct mpii_device *dev = NULL;
	size_t pagelen;
	u_int16_t volh;

	/* Snapshot the volume handle under the lock. */
	mutex_enter(&sc->sc_devs_mtx);
	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL) {
		mutex_exit(&sc->sc_devs_mtx);
		return (ENODEV);
	}
	volh = dev->dev_handle;
	mutex_exit(&sc->sc_devs_mtx);

	/* MPII_PG_POLL: poll for completion instead of sleeping. */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, MPII_PG_POLL, &hdr) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (EINVAL);
	}

	/*
	 * page_length is in 32-bit dwords.  No NULL check on the malloc:
	 * M_WAITOK allocations do not fail.
	 */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh,
	    MPII_PG_POLL, &hdr, 1, vpg, pagelen) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume "
		    "page 0\n", DEVNAME(sc));
		free(vpg, M_TEMP);
		return (EINVAL);
	}

	/* Map the firmware volume state to a bio(4) status code. */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		if (ISSET(le32toh(vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			bv->bv_status = BIOC_SVREBUILD;
		else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	free(vpg, M_TEMP);
	return (0);
}
3879
3880 static int
3881 mpii_create_sensors(struct mpii_softc *sc)
3882 {
3883 int i, rv;
3884
3885 DNPRINTF(MPII_D_MISC, "%s: mpii_create_sensors(%d)\n",
3886 DEVNAME(sc), sc->sc_max_volumes);
3887 sc->sc_sme = sysmon_envsys_create();
3888 sc->sc_sensors = malloc(sizeof(envsys_data_t) * sc->sc_max_volumes,
3889 M_DEVBUF, M_WAITOK | M_ZERO);
3890
3891 for (i = 0; i < sc->sc_max_volumes; i++) {
3892 sc->sc_sensors[i].units = ENVSYS_DRIVE;
3893 sc->sc_sensors[i].state = ENVSYS_SINVALID;
3894 sc->sc_sensors[i].value_cur = ENVSYS_DRIVE_EMPTY;
3895 sc->sc_sensors[i].flags |= ENVSYS_FMONSTCHANGED;
3896
3897 /* logical drives */
3898 snprintf(sc->sc_sensors[i].desc,
3899 sizeof(sc->sc_sensors[i].desc), "%s:%d",
3900 DEVNAME(sc), i);
3901 if ((rv = sysmon_envsys_sensor_attach(sc->sc_sme,
3902 &sc->sc_sensors[i])) != 0) {
3903 aprint_error_dev(sc->sc_dev,
3904 "unable to attach sensor (rv = %d)\n", rv);
3905 goto out;
3906 }
3907 }
3908 sc->sc_sme->sme_name = DEVNAME(sc);
3909 sc->sc_sme->sme_cookie = sc;
3910 sc->sc_sme->sme_refresh = mpii_refresh_sensors;
3911
3912 rv = sysmon_envsys_register(sc->sc_sme);
3913 if (rv != 0) {
3914 aprint_error_dev(sc->sc_dev,
3915 "unable to register with sysmon (rv = %d)\n", rv);
3916 goto out;
3917 }
3918 return 0;
3919
3920 out:
3921 free(sc->sc_sensors, M_DEVBUF);
3922 sysmon_envsys_destroy(sc->sc_sme);
3923 sc->sc_sme = NULL;
3924 return 1;
3925 }
3926
3927 static int
3928 mpii_destroy_sensors(struct mpii_softc *sc)
3929 {
3930 if (sc->sc_sme == NULL)
3931 return 0;
3932 sysmon_envsys_unregister(sc->sc_sme);
3933 sc->sc_sme = NULL;
3934 free(sc->sc_sensors, M_DEVBUF);
3935 return 0;
3936
3937 }
3938
3939 static void
3940 mpii_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
3941 {
3942 struct mpii_softc *sc = sme->sme_cookie;
3943 struct bioc_vol bv;
3944
3945 memset(&bv, 0, sizeof(bv));
3946 bv.bv_volid = edata->sensor;
3947 if (mpii_bio_volstate(sc, &bv))
3948 bv.bv_status = BIOC_SVINVALID;
3949 bio_vol_to_envsys(edata, &bv);
3950 }
3951 #endif /* NBIO > 0 */
3952