/*	$NetBSD: mpii.c,v 1.13 2018/11/24 18:11:22 bouyer Exp $	*/
2 /* OpenBSD: mpii.c,v 1.51 2012/04/11 13:29:14 naddy Exp */
3 /*
4 * Copyright (c) 2010 Mike Belopuhov <mkb (at) crypt.org.ru>
5 * Copyright (c) 2009 James Giannoules
6 * Copyright (c) 2005 - 2010 David Gwynne <dlg (at) openbsd.org>
7 * Copyright (c) 2005 - 2010 Marco Peereboom <marco (at) openbsd.org>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22 #include <sys/cdefs.h>
23 __KERNEL_RCSID(0, "$NetBSD: mpii.c,v 1.13 2018/11/24 18:11:22 bouyer Exp $");
24
25 #include "bio.h"
26
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/device.h>
31 #include <sys/ioctl.h>
32 #include <sys/malloc.h>
33 #include <sys/kernel.h>
34 #include <sys/mutex.h>
35 #include <sys/condvar.h>
36 #include <sys/dkio.h>
37 #include <sys/tree.h>
38
39 #include <dev/pci/pcireg.h>
40 #include <dev/pci/pcivar.h>
41 #include <dev/pci/pcidevs.h>
42
43 #include <dev/scsipi/scsipi_all.h>
44 #include <dev/scsipi/scsi_all.h>
45 #include <dev/scsipi/scsiconf.h>
46
47 #include <dev/pci/mpiireg.h>
48
49 #if NBIO > 0
50 #include <dev/biovar.h>
51 #include <dev/sysmon/sysmonvar.h>
52 #include <sys/envsys.h>
53 #endif
54
55 /* #define MPII_DEBUG */
56 #ifdef MPII_DEBUG
57 #define DPRINTF(x...) do { if (mpii_debug) printf(x); } while(0)
58 #define DNPRINTF(n,x...) do { if (mpii_debug & (n)) printf(x); } while(0)
59 #define MPII_D_CMD (0x0001)
60 #define MPII_D_INTR (0x0002)
61 #define MPII_D_MISC (0x0004)
62 #define MPII_D_DMA (0x0008)
63 #define MPII_D_IOCTL (0x0010)
64 #define MPII_D_RW (0x0020)
65 #define MPII_D_MEM (0x0040)
66 #define MPII_D_CCB (0x0080)
67 #define MPII_D_PPR (0x0100)
68 #define MPII_D_RAID (0x0200)
69 #define MPII_D_EVT (0x0400)
70 #define MPII_D_CFG (0x0800)
71 #define MPII_D_MAP (0x1000)
72 #if 0
73 u_int32_t mpii_debug = 0
74 | MPII_D_CMD
75 | MPII_D_INTR
76 | MPII_D_MISC
77 | MPII_D_DMA
78 | MPII_D_IOCTL
79 | MPII_D_RW
80 | MPII_D_MEM
81 | MPII_D_CCB
82 | MPII_D_PPR
83 | MPII_D_RAID
84 | MPII_D_EVT
85 | MPII_D_CFG
86 | MPII_D_MAP
87 ;
88 #endif
89 u_int32_t mpii_debug = MPII_D_MISC;
90 #else
91 #define DPRINTF(x...)
92 #define DNPRINTF(n,x...)
93 #endif
94
#define MPII_REQUEST_SIZE	(512)
#define MPII_REPLY_SIZE		(128)
/*
 * Number of reply frames carved out of one page of reply space.
 * Parenthesized so the expansion survives adjacent operators
 * (e.g. "x % MPII_REPLY_COUNT" or "MPII_REPLY_COUNT * n").
 */
#define MPII_REPLY_COUNT	(PAGE_SIZE / MPII_REPLY_SIZE)

/*
 * this is the max number of sge's we can stuff in a request frame:
 * sizeof(scsi_io) + sizeof(sense) + sizeof(sge) * 32 = MPII_REQUEST_SIZE
 */
#define MPII_MAX_SGL		(32)

#define MPII_MAX_REQUEST_CREDIT	(128)

#define MPII_MAXFER		MAXPHYS	/* XXX bogus */
108
/*
 * One bus_dma(9) allocation: the dma map, its single segment, the
 * allocation size and the kernel virtual mapping of the memory.
 */
struct mpii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	void			*mdm_kva;
};
/* accessors: the dma map, the device (bus) address, the kernel address */
#define MPII_DMA_MAP(_mdm)	(_mdm)->mdm_map
#define MPII_DMA_DVA(_mdm)	(_mdm)->mdm_map->dm_segs[0].ds_addr
#define MPII_DMA_KVA(_mdm)	(void *)(_mdm)->mdm_kva
118
/*
 * In-memory layout of one request frame: the SCSI IO message
 * immediately followed by its SGL and a sense buffer.  The IOC
 * consumes this layout directly, hence __packed.
 */
struct mpii_ccb_bundle {
	struct mpii_msg_scsi_io	mcb_io; /* sgl must follow */
	struct mpii_sge		mcb_sgl[MPII_MAX_SGL];
	struct scsi_sense_data	mcb_sense;
} __packed;
124
125 struct mpii_softc;
126
/*
 * Reply control block: tracks one reply frame by its kernel address
 * and device (bus) address.  An rcb is queued either on a workqueue
 * or on a SIMPLEQ, never both at once, hence the union.
 */
struct mpii_rcb {
	union {
		struct work	rcb_wk; /* has to be first in struct */
		SIMPLEQ_ENTRY(mpii_rcb)	rcb_link;
	} u;
	void			*rcb_reply;	/* kva of the reply frame */
	u_int32_t		rcb_reply_dva;	/* bus address of the frame */
};

SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb);
137
/*
 * One target known to the driver (physical disk, RAID volume, volume
 * member or hot spare, per the MPII_DF_* flags below).
 */
struct mpii_device {
	int			flags;
#define MPII_DF_ATTACH		(0x0001)
#define MPII_DF_DETACH		(0x0002)
#define MPII_DF_HIDDEN		(0x0004)
#define MPII_DF_UNUSED		(0x0008)
#define MPII_DF_VOLUME		(0x0010)
#define MPII_DF_VOLUME_DISK	(0x0020)
#define MPII_DF_HOT_SPARE	(0x0040)
	short			slot;
	short			percent;	/* presumably resync progress — confirm in RAID event code */
	u_int16_t		dev_handle;	/* IOC-assigned device handle */
	u_int16_t		enclosure;
	u_int16_t		expander;
	u_int8_t		phy_num;
	u_int8_t		physical_port;
};
155
/*
 * Command control block: per-request state, one per request frame.
 */
struct mpii_ccb {
	union {
		struct work	ccb_wk; /* has to be first in struct */
		SIMPLEQ_ENTRY(mpii_ccb)	ccb_link;
	} u;
	struct mpii_softc	*ccb_sc;
	/* system message id; 1-based (mpii_intr indexes sc_ccbs[smid - 1]) */
	int			ccb_smid;

	void *			ccb_cookie;	/* caller context, e.g. the scsipi_xfer */
	bus_dmamap_t		ccb_dmamap;	/* map for the data transfer */

	bus_addr_t		ccb_offset;	/* frame offset within sc_requests */
	void			*ccb_cmd;	/* kva of the request frame */
	bus_addr_t		ccb_cmd_dva;	/* bus address of the request frame */
	u_int16_t		ccb_dev_handle;

	/* command lifecycle; see sc_ccb_mtx for the locking rule */
	volatile enum {
		MPII_CCB_FREE,
		MPII_CCB_READY,
		MPII_CCB_QUEUED,
		MPII_CCB_TIMEOUT
	} ccb_state;

	void			(*ccb_done)(struct mpii_ccb *);
	struct mpii_rcb		*ccb_rcb;	/* reply for this command, if any */

};
183
/*
 * Mutex/condvar pair for sleeping until a CCB becomes available.
 * NOTE(review): not referenced in this portion of the file —
 * presumably used by mpii_get_ccb(); confirm before removing.
 */
struct mpii_ccb_wait {
	kmutex_t	mpii_ccbw_mtx;
	kcondvar_t	mpii_ccbw_cv;
};

SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb);
190
/*
 * Per-controller state.
 */
struct mpii_softc {
	device_t		sc_dev;

	/* PCI attachment */
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	void			*sc_ih;		/* interrupt handle; NULL until attach succeeds */

	int			sc_flags;
#define MPII_F_RAID		(1<<1)

	/* scsipi glue */
	struct scsipi_adapter	sc_adapt;
	struct scsipi_channel	sc_chan;
	device_t		sc_child; /* our scsibus */

	/* per-target device table; presumably indexed via mpii_find_dev() */
	struct mpii_device	**sc_devs;

	/* register window and dma tag */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;		/* mapped size; 0 when unmapped */
	bus_dma_tag_t		sc_dmat;

	kmutex_t		sc_req_mtx;	/* serializes request submission */
	kmutex_t		sc_rep_mtx;	/* serializes reply-queue processing */

	/* controller capabilities/limits (presumably from IOCFacts — confirm) */
	u_int8_t		sc_porttype;
	int			sc_request_depth;
	int			sc_num_reply_frames;
	int			sc_reply_free_qdepth;
	int			sc_reply_post_qdepth;
	int			sc_maxchdepth;
	int			sc_first_sgl_len;	/* SGEs that fit in the request frame */
	int			sc_chain_len;		/* SGEs per chain segment */
	int			sc_max_sgl_len;

	/* configuration data (presumably from IOC page 8 et al. — confirm) */
	u_int8_t		sc_ioc_event_replay;
	u_int16_t		sc_max_enclosures;
	u_int16_t		sc_max_expanders;
	u_int8_t		sc_max_volumes;
	u_int16_t		sc_max_devices;
	u_int16_t		sc_max_dpm_entries;
	u_int16_t		sc_vd_count;
	u_int16_t		sc_vd_id_low;
	u_int16_t		sc_pd_id_start;
	u_int8_t		sc_num_channels;
	int			sc_ioc_number;
	u_int8_t		sc_vf_id;
	u_int8_t		sc_num_ports;

	/* CCB pool */
	struct mpii_ccb		*sc_ccbs;
	struct mpii_ccb_list	sc_ccb_free;
	kmutex_t		sc_ccb_free_mtx;
	kcondvar_t		sc_ccb_free_cv;

	kmutex_t		sc_ccb_mtx;
	/*
	 * this protects the ccb state and list entry
	 * between mpii_scsi_cmd and scsidone.
	 */

	struct workqueue	*sc_ssb_tmowk;	/* command timeout recovery */

	struct mpii_dmamem	*sc_requests;	/* request frames */

	struct mpii_dmamem	*sc_replies;	/* reply frames */
	struct mpii_rcb		*sc_rcbs;

	/* reply post queue (IOC -> host); 8-byte descriptors */
	struct mpii_dmamem	*sc_reply_postq;
	struct mpii_reply_descr	*sc_reply_postq_kva;
	int			sc_reply_post_host_index;

	/* reply free queue (host -> IOC); 4-byte entries */
	struct mpii_dmamem	*sc_reply_freeq;
	int			sc_reply_free_host_index;

	struct workqueue	*sc_ssb_evt_ackwk;	/* async event acks */

	/* envsys sensors (RAID volume status) */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
};
270
271 static int mpii_match(device_t, cfdata_t, void *);
272 static void mpii_attach(device_t, device_t, void *);
273 static int mpii_detach(device_t, int);
274 static void mpii_childdetached(device_t, device_t);
275 static int mpii_rescan(device_t, const char *, const int *);
276
277 static int mpii_intr(void *);
278
279 CFATTACH_DECL3_NEW(mpii, sizeof(struct mpii_softc),
280 mpii_match, mpii_attach, mpii_detach, NULL, mpii_rescan,
281 mpii_childdetached, DVF_DETACH_SHUTDOWN);
282
283 #define PREAD(s, r) pci_conf_read((s)->sc_pc, (s)->sc_tag, (r))
284 #define PWRITE(s, r, v) pci_conf_write((s)->sc_pc, (s)->sc_tag, (r), (v))
285
286 static void mpii_scsipi_request(struct scsipi_channel *,
287 scsipi_adapter_req_t, void *);
288 static void mpii_scsi_cmd_done(struct mpii_ccb *);
289 static void mpii_minphys(struct buf *bp);
290
291 static struct mpii_dmamem *mpii_dmamem_alloc(struct mpii_softc *, size_t);
292 static void mpii_dmamem_free(struct mpii_softc *, struct mpii_dmamem *);
293 static int mpii_alloc_ccbs(struct mpii_softc *);
294 static struct mpii_ccb *mpii_get_ccb(struct mpii_softc *, int);
295 #define MPII_NOSLEEP 0x0001
296 static void mpii_put_ccb(struct mpii_softc *, struct mpii_ccb *);
297 static int mpii_alloc_replies(struct mpii_softc *);
298 static int mpii_alloc_queues(struct mpii_softc *);
299 static void mpii_push_reply(struct mpii_softc *, struct mpii_rcb *);
300 static void mpii_push_replies(struct mpii_softc *);
301
302 static void mpii_scsi_cmd_tmo(void *);
303 static void mpii_scsi_cmd_tmo_handler(struct work *, void *);
304 static void mpii_scsi_cmd_tmo_done(struct mpii_ccb *);
305
306 static int mpii_alloc_dev(struct mpii_softc *);
307 static int mpii_insert_dev(struct mpii_softc *, struct mpii_device *);
308 static int mpii_remove_dev(struct mpii_softc *, struct mpii_device *);
309 static struct mpii_device *mpii_find_dev(struct mpii_softc *, u_int16_t);
310
311 static void mpii_start(struct mpii_softc *, struct mpii_ccb *);
312 static int mpii_poll(struct mpii_softc *, struct mpii_ccb *);
313 static void mpii_poll_done(struct mpii_ccb *);
314 static struct mpii_rcb *mpii_reply(struct mpii_softc *,
315 struct mpii_reply_descr *);
316
317 static void mpii_wait(struct mpii_softc *, struct mpii_ccb *);
318 static void mpii_wait_done(struct mpii_ccb *);
319
320 static void mpii_init_queues(struct mpii_softc *);
321
322 static int mpii_load_xs(struct mpii_ccb *);
323
324 static u_int32_t mpii_read(struct mpii_softc *, bus_size_t);
325 static void mpii_write(struct mpii_softc *, bus_size_t, u_int32_t);
326 static int mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t,
327 u_int32_t);
328 static int mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t,
329 u_int32_t);
330
331 static int mpii_init(struct mpii_softc *);
332 static int mpii_reset_soft(struct mpii_softc *);
333 static int mpii_reset_hard(struct mpii_softc *);
334
335 static int mpii_handshake_send(struct mpii_softc *, void *, size_t);
336 static int mpii_handshake_recv_dword(struct mpii_softc *,
337 u_int32_t *);
338 static int mpii_handshake_recv(struct mpii_softc *, void *, size_t);
339
340 static void mpii_empty_done(struct mpii_ccb *);
341
342 static int mpii_iocinit(struct mpii_softc *);
343 static int mpii_iocfacts(struct mpii_softc *);
344 static int mpii_portfacts(struct mpii_softc *);
345 static int mpii_portenable(struct mpii_softc *);
346 static int mpii_cfg_coalescing(struct mpii_softc *);
347
348 static int mpii_eventnotify(struct mpii_softc *);
349 static void mpii_eventnotify_done(struct mpii_ccb *);
350 static void mpii_eventack(struct work *, void *);
351 static void mpii_eventack_done(struct mpii_ccb *);
352 static void mpii_event_process(struct mpii_softc *, struct mpii_rcb *);
353 static void mpii_event_sas(struct mpii_softc *,
354 struct mpii_msg_event_reply *);
355 static void mpii_event_raid(struct mpii_softc *,
356 struct mpii_msg_event_reply *);
357 static void mpii_event_defer(void *, void *);
358
359 static void mpii_sas_remove_device(struct mpii_softc *, u_int16_t);
360
361 static int mpii_req_cfg_header(struct mpii_softc *, u_int8_t,
362 u_int8_t, u_int32_t, int, void *);
363 static int mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int,
364 void *, int, void *, size_t);
365
366 static int mpii_get_ioc_pg8(struct mpii_softc *);
367
368 #if 0
369 static int mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
370 #endif
371 static int mpii_cache_enable(struct mpii_softc *, struct mpii_device *);
372
373 #if NBIO > 0
374 static int mpii_ioctl(device_t, u_long, void *);
375 static int mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *);
376 static int mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *);
377 static int mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *);
378 static int mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int,
379 int, int *);
380 static int mpii_bio_disk(struct mpii_softc *, struct bioc_disk *,
381 u_int8_t);
382 static struct mpii_device *mpii_find_vol(struct mpii_softc *, int);
383 static int mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *);
384 static int mpii_create_sensors(struct mpii_softc *);
385 static int mpii_destroy_sensors(struct mpii_softc *);
386 static void mpii_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
387 #endif /* NBIO > 0 */
388
389 #define DEVNAME(_s) (device_xname((_s)->sc_dev))
390
391 #define dwordsof(s) (sizeof(s) / sizeof(u_int32_t))
392 #define dwordn(p, n) (((u_int32_t *)(p))[(n)])
393
394 #define mpii_read_db(s) mpii_read((s), MPII_DOORBELL)
395 #define mpii_write_db(s, v) mpii_write((s), MPII_DOORBELL, (v))
396 #define mpii_read_intr(s) mpii_read((s), MPII_INTR_STATUS)
397 #define mpii_write_intr(s, v) mpii_write((s), MPII_INTR_STATUS, (v))
398 #define mpii_reply_waiting(s) ((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\
399 == MPII_INTR_STATUS_REPLY)
400
401 #define mpii_read_reply_free(s) mpii_read((s), \
402 MPII_REPLY_FREE_HOST_INDEX)
403 #define mpii_write_reply_free(s, v) mpii_write((s), \
404 MPII_REPLY_FREE_HOST_INDEX, (v))
405 #define mpii_read_reply_post(s) mpii_read((s), \
406 MPII_REPLY_POST_HOST_INDEX)
407 #define mpii_write_reply_post(s, v) mpii_write((s), \
408 MPII_REPLY_POST_HOST_INDEX, (v))
409
410 #define mpii_wait_db_int(s) mpii_wait_ne((s), MPII_INTR_STATUS, \
411 MPII_INTR_STATUS_IOC2SYSDB, 0)
412 #define mpii_wait_db_ack(s) mpii_wait_eq((s), MPII_INTR_STATUS, \
413 MPII_INTR_STATUS_SYS2IOCDB, 0)
414
415 #define MPII_PG_EXTENDED (1<<0)
416 #define MPII_PG_POLL (1<<1)
417 #define MPII_PG_FMT "\020" "\002POLL" "\001EXTENDED"
418
419 #define mpii_cfg_header(_s, _t, _n, _a, _h) \
420 mpii_req_cfg_header((_s), (_t), (_n), (_a), \
421 MPII_PG_POLL, (_h))
422 #define mpii_ecfg_header(_s, _t, _n, _a, _h) \
423 mpii_req_cfg_header((_s), (_t), (_n), (_a), \
424 MPII_PG_POLL|MPII_PG_EXTENDED, (_h))
425
426 #define mpii_cfg_page(_s, _a, _h, _r, _p, _l) \
427 mpii_req_cfg_page((_s), (_a), MPII_PG_POLL, \
428 (_h), (_r), (_p), (_l))
429 #define mpii_ecfg_page(_s, _a, _h, _r, _p, _l) \
430 mpii_req_cfg_page((_s), (_a), MPII_PG_POLL|MPII_PG_EXTENDED, \
431 (_h), (_r), (_p), (_l))
432
433
/*
 * PCI vendor/product pairs of the supported LSI SAS2 controllers.
 * The table ends with a zeroed sentinel, which mpii_match() relies on.
 */
static const struct mpii_pci_product {
	pci_vendor_id_t		mpii_vendor;
	pci_product_id_t	mpii_product;
} mpii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_6 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_3 },
	{ 0,			0 }
};
456
457 static int
458 mpii_match(device_t parent, cfdata_t match, void *aux)
459 {
460 struct pci_attach_args *pa = aux;
461 const struct mpii_pci_product *mpii;
462
463 for (mpii = mpii_devices; mpii->mpii_vendor != 0; mpii++) {
464 if (PCI_VENDOR(pa->pa_id) == mpii->mpii_vendor &&
465 PCI_PRODUCT(pa->pa_id) == mpii->mpii_product)
466 return (1);
467 }
468 return (0);
469 }
470
/*
 * Attach: map the controller's system interface registers, bring the
 * IOC to the operational state, allocate request/reply resources and
 * attach a scsibus on top.  Failures unwind through the cascading
 * labels at the bottom, each releasing what was acquired after the
 * previous one.
 */
static void
mpii_attach(device_t parent, device_t self, void *aux)
{
	struct mpii_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pcireg_t memtype;
	int r;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct mpii_ccb *ccb;
	struct scsipi_adapter *adapt = &sc->sc_adapt;
	struct scsipi_channel *chan = &sc->sc_chan;
	char wkname[15];
	char intrbuf[PCI_INTRSTR_LEN];

	pci_aprint_devinfo(pa, NULL);

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_dev = self;

	mutex_init(&sc->sc_req_mtx, MUTEX_DEFAULT, IPL_BIO);
	mutex_init(&sc->sc_rep_mtx, MUTEX_DEFAULT, IPL_BIO);
	mutex_init(&sc->sc_ccb_free_mtx, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_ccb_free_cv, "mpii_ccbs");
	mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);

	/* workqueue for command timeout recovery */
	snprintf(wkname, sizeof(wkname), "%s_tmo", DEVNAME(sc));
	if (workqueue_create(&sc->sc_ssb_tmowk, wkname,
	    mpii_scsi_cmd_tmo_handler, sc, PRI_NONE, IPL_BIO, WQ_MPSAFE) != 0) {
		aprint_error_dev(self, "can't create %s workqueue\n", wkname);
		return;
	}

	/* workqueue for acknowledging asynchronous events */
	snprintf(wkname, sizeof(wkname), "%s_evt", DEVNAME(sc));
	if (workqueue_create(&sc->sc_ssb_evt_ackwk, wkname,
	    mpii_eventack, sc, PRI_NONE, IPL_BIO, WQ_MPSAFE) != 0) {
		aprint_error_dev(self, "can't create %s workqueue\n", wkname);
		return;
	}

	/* find the appropriate memory base */
	for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
		memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
		if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM)
			break;
	}
	if (r >= PCI_MAPREG_END) {
		aprint_error_dev(self,
		    "unable to locate system interface registers\n");
		return;
	}

	if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
	    NULL, &sc->sc_ios) != 0) {
		aprint_error_dev(self,
		    "unable to map system interface registers\n");
		return;
	}

	/* disable the expansion rom */
	PWRITE(sc, PCI_MAPREG_ROM,
	    PREAD(sc, PCI_MAPREG_ROM) & ~PCI_MAPREG_ROM_ENABLE);

	/* disable interrupts until the IOC is fully set up */
	mpii_write(sc, MPII_INTR_MASK,
	    MPII_INTR_MASK_RESET | MPII_INTR_MASK_REPLY |
	    MPII_INTR_MASK_DOORBELL);

	/* hook up the interrupt */
	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "unable to map interrupt\n");
		goto unmap;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf));

	if (mpii_init(sc) != 0) {
		aprint_error_dev(self, "unable to initialize ioc\n");
		goto unmap;
	}

	if (mpii_iocfacts(sc) != 0) {
		aprint_error_dev(self, "unable to get iocfacts\n");
		goto unmap;
	}

	if (mpii_alloc_ccbs(sc) != 0) {
		/* error already printed */
		goto unmap;
	}

	if (mpii_alloc_replies(sc) != 0) {
		aprint_error_dev(self, "unable to allocated reply space\n");
		goto free_ccbs;
	}

	if (mpii_alloc_queues(sc) != 0) {
		aprint_error_dev(self, "unable to allocate reply queues\n");
		goto free_replies;
	}

	if (mpii_iocinit(sc) != 0) {
		aprint_error_dev(self, "unable to send iocinit\n");
		goto free_queues;
	}

	/* after iocinit the IOC should move to the operational state */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_OPER) != 0) {
		aprint_error_dev(self, "state: 0x%08x\n",
		    mpii_read_db(sc) & MPII_DOORBELL_STATE);
		aprint_error_dev(self, "operational state timeout\n");
		goto free_queues;
	}

	/* hand all reply frames to the IOC and prime the queue indexes */
	mpii_push_replies(sc);
	mpii_init_queues(sc);

	if (mpii_portfacts(sc) != 0) {
		aprint_error_dev(self, "unable to get portfacts\n");
		goto free_queues;
	}

	if (mpii_get_ioc_pg8(sc) != 0) {
		aprint_error_dev(self, "unable to get ioc page 8\n");
		goto free_queues;
	}

	if (mpii_cfg_coalescing(sc) != 0) {
		aprint_error_dev(self, "unable to configure coalescing\n");
		goto free_queues;
	}

	/* XXX bail on unsupported porttype? */
	if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) ||
	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL)) {
		if (mpii_eventnotify(sc) != 0) {
			aprint_error_dev(self, "unable to enable events\n");
			goto free_queues;
		}
	}

	if (mpii_alloc_dev(sc) != 0) {
		aprint_error_dev(self,
		    "unable to allocate memory for mpii_device\n");
		goto free_queues;
	}

	if (mpii_portenable(sc) != 0) {
		aprint_error_dev(self, "unable to enable port\n");
		goto free_dev;
	}

	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
	    mpii_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto free_dev;
	}

	/*
	 * scsipi glue: one channel; one opening per request slot, less
	 * one slot reserved for internal (non-I/O) commands.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = sc->sc_request_depth - 1;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = mpii_scsipi_request;
	adapt->adapt_minphys = mpii_minphys;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_sas_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = sc->sc_max_devices;
	chan->chan_id = -1;

	mpii_rescan(self, "scsi", NULL);

	/* enable interrupts (reply interrupt stays unmasked) */
	mpii_write(sc, MPII_INTR_MASK, MPII_INTR_MASK_DOORBELL
	    | MPII_INTR_MASK_RESET);

#if NBIO > 0
	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
		if (bio_register(sc->sc_dev, mpii_ioctl) != 0)
			panic("%s: controller registration failed",
			    DEVNAME(sc));

		if (mpii_create_sensors(sc) != 0)
			aprint_error_dev(self, "unable to create sensors\n");
	}
#endif

	return;

free_dev:
	if (sc->sc_devs)
		free(sc->sc_devs, M_DEVBUF);

free_queues:
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
	    0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
	mpii_dmamem_free(sc, sc->sc_reply_freeq);

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
	mpii_dmamem_free(sc, sc->sc_reply_postq);

free_replies:
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
	    0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
	mpii_dmamem_free(sc, sc->sc_replies);

free_ccbs:
	/* draining the free list destroys each ccb's dmamap exactly once */
	while ((ccb = mpii_get_ccb(sc, MPII_NOSLEEP)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	mpii_dmamem_free(sc, sc->sc_requests);
	free(sc->sc_ccbs, M_DEVBUF);

unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}
698
/*
 * Detach: tear down children first, then release resources in the
 * reverse of attach order.  sc_ih != NULL is used as the marker that
 * attach completed fully, so the bulk of the teardown is guarded by it.
 */
static int
mpii_detach(device_t self, int flags)
{
	struct mpii_softc *sc = device_private(self);
	int error;
	struct mpii_ccb *ccb;

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

#if NBIO > 0
	mpii_destroy_sensors(sc);
	bio_unregister(sc->sc_dev);
#endif /* NBIO > 0 */

	if (sc->sc_ih != NULL) {
		if (sc->sc_devs)
			free(sc->sc_devs, M_DEVBUF);

		/* reply free queue: 4-byte entries */
		bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
		    0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
		mpii_dmamem_free(sc, sc->sc_reply_freeq);

		/* reply post queue: 8-byte descriptors */
		bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
		    0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
		mpii_dmamem_free(sc, sc->sc_reply_postq);

		bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
		    0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
		mpii_dmamem_free(sc, sc->sc_replies);

		/* drain the free list, destroying each ccb's dmamap */
		while ((ccb = mpii_get_ccb(sc, MPII_NOSLEEP)) != NULL)
			bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
		mpii_dmamem_free(sc, sc->sc_requests);
		free(sc->sc_ccbs, M_DEVBUF);

		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_ios != 0) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
		sc->sc_ios = 0;
	}

	return (0);
}
745
746 static int
747 mpii_rescan(device_t self, const char *ifattr, const int *locators)
748 {
749 struct mpii_softc *sc = device_private(self);
750
751 if (sc->sc_child != NULL)
752 return 0;
753
754 sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
755 scsiprint, NULL);
756
757 return 0;
758 }
759
760 static void
761 mpii_childdetached(device_t self, device_t child)
762 {
763 struct mpii_softc *sc = device_private(self);
764
765 KASSERT(self == sc->sc_dev);
766 KASSERT(child == sc->sc_child);
767
768 if (child == sc->sc_child)
769 sc->sc_child = NULL;
770 }
771
/*
 * Interrupt handler: drain the reply post queue under sc_rep_mtx.
 * Descriptors carrying a non-zero smid complete the matching CCB;
 * descriptors without a smid are asynchronous events.  Both are
 * gathered on local lists and handled after the mutex is dropped, so
 * completion callbacks never run with the reply lock held.  Returns 1
 * if any descriptor was consumed, 0 for a spurious/shared interrupt.
 */
static int
mpii_intr(void *arg)
{
	struct mpii_rcb_list evts = SIMPLEQ_HEAD_INITIALIZER(evts);
	struct mpii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_softc *sc = arg;
	struct mpii_reply_descr *postq = sc->sc_reply_postq_kva, *rdp;
	struct mpii_ccb *ccb;
	struct mpii_rcb *rcb;
	int smid;
	int rv = 0;

	mutex_enter(&sc->sc_rep_mtx);
	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, 8 * sc->sc_reply_post_qdepth,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		rdp = &postq[sc->sc_reply_post_host_index];
		/* an unused descriptor type means we have caught up */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		smid = le16toh(rdp->smid);
		rcb = mpii_reply(sc, rdp);

		if (smid) {
			/* smid is 1-based; 0 means no associated request */
			ccb = &sc->sc_ccbs[smid - 1];
			ccb->ccb_state = MPII_CCB_READY;
			ccb->ccb_rcb = rcb;
			SIMPLEQ_INSERT_TAIL(&ccbs, ccb, u.ccb_link);
		} else
			SIMPLEQ_INSERT_TAIL(&evts, rcb, u.rcb_link);

		/* advance the consumer index, wrapping at queue depth */
		sc->sc_reply_post_host_index++;
		sc->sc_reply_post_host_index %= sc->sc_reply_post_qdepth;
		rv = 1;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, 8 * sc->sc_reply_post_qdepth,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* tell the IOC how far we have consumed */
	if (rv)
		mpii_write_reply_post(sc, sc->sc_reply_post_host_index);

	mutex_exit(&sc->sc_rep_mtx);

	if (rv == 0)
		return (0);

	/* run completions and event processing without sc_rep_mtx */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, u.ccb_link);
		ccb->ccb_done(ccb);
	}
	while ((rcb = SIMPLEQ_FIRST(&evts)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&evts, u.rcb_link);
		mpii_event_process(sc, rcb);
	}

	return (1);
}
843
/*
 * Build the scatter/gather list for a SCSI transfer inside the request
 * frame.  The first sc_first_sgl_len SGEs live directly in the frame;
 * if the dma map has more segments, the last in-frame slot becomes a
 * chain element pointing at further SGEs (also inside the frame's
 * mcb_sgl array), repeated in sc_chain_len pieces as needed.
 * Returns 0 on success, 1 if the dma map could not be loaded.
 */
static int
mpii_load_xs(struct mpii_ccb *ccb)
{
	struct mpii_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_ccb_bundle *mcb = ccb->ccb_cmd;
	struct mpii_msg_scsi_io *io = &mcb->mcb_io;
	struct mpii_sge *sge = NULL, *nsge = &mcb->mcb_sgl[0];
	struct mpii_sge *ce = NULL, *nce = NULL;
	u_int64_t ce_dva;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int32_t addr, flags;
	int i, error;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
		    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    xs->data, xs->datalen, NULL, (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		aprint_error_dev(sc->sc_dev, "error %d loading dmamap\n",
		    error);
		return (1);
	}

	/* safe default staring flags */
	flags = MPII_SGE_FL_TYPE_SIMPLE | MPII_SGE_FL_SIZE_64;
	/* if data out */
	if (xs->xs_control & XS_CTL_DATA_OUT)
		flags |= MPII_SGE_FL_DIR_OUT;

	/* we will have to exceed the SGEs we can cram into the request frame */
	if (dmap->dm_nsegs > sc->sc_first_sgl_len) {
		/* reserve the last in-frame slot for the chain element */
		ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1];
		/* chain_offset is in 4-byte units from the start of the io msg */
		io->chain_offset = ((u_int8_t *)ce - (u_int8_t *)io) / 4;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		if (nsge == ce) {
			/*
			 * We have reached the reserved chain slot: skip
			 * past it and fill it in as a chain element.
			 * sge is non-NULL here since ce is never the
			 * very first SGE slot.
			 */
			nsge++;
			sge->sg_hdr |= htole32(MPII_SGE_FL_LAST);

			DNPRINTF(MPII_D_DMA, "%s: - 0x%08x 0x%08x 0x%08x\n",
			    DEVNAME(sc), sge->sg_hdr,
			    sge->sg_hi_addr, sge->sg_lo_addr);

			if ((dmap->dm_nsegs - i) > sc->sc_chain_len) {
				/* another chain will follow this one */
				nce = &nsge[sc->sc_chain_len - 1];
				addr = ((u_int8_t *)nce - (u_int8_t *)nsge) / 4;
				/* high 16 bits: next chain offset; low: length */
				addr = addr << 16 |
				    sizeof(struct mpii_sge) * sc->sc_chain_len;
			} else {
				/* last chain: covers all remaining segments */
				nce = NULL;
				addr = sizeof(struct mpii_sge) *
				    (dmap->dm_nsegs - i);
			}

			ce->sg_hdr = htole32(MPII_SGE_FL_TYPE_CHAIN |
			    MPII_SGE_FL_SIZE_64 | addr);

			/* bus address of the chained SGEs within the frame */
			ce_dva = ccb->ccb_cmd_dva +
			    ((u_int8_t *)nsge - (u_int8_t *)mcb);

			addr = (u_int32_t)(ce_dva >> 32);
			ce->sg_hi_addr = htole32(addr);
			addr = (u_int32_t)ce_dva;
			ce->sg_lo_addr = htole32(addr);

			DNPRINTF(MPII_D_DMA, "%s: ce: 0x%08x 0x%08x 0x%08x\n",
			    DEVNAME(sc), ce->sg_hdr, ce->sg_hi_addr,
			    ce->sg_lo_addr);

			ce = nce;
		}

		DNPRINTF(MPII_D_DMA, "%s: %d: %" PRId64 " 0x%016" PRIx64 "\n",
		    DEVNAME(sc), i, (int64_t)dmap->dm_segs[i].ds_len,
		    (u_int64_t)dmap->dm_segs[i].ds_addr);

		sge = nsge;

		/* simple 64-bit SGE for this dma segment */
		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
		addr = (u_int32_t)((u_int64_t)dmap->dm_segs[i].ds_addr >> 32);
		sge->sg_hi_addr = htole32(addr);
		addr = (u_int32_t)dmap->dm_segs[i].ds_addr;
		sge->sg_lo_addr = htole32(addr);

		DNPRINTF(MPII_D_DMA, "%s: %d: 0x%08x 0x%08x 0x%08x\n",
		    DEVNAME(sc), i, sge->sg_hdr, sge->sg_hi_addr,
		    sge->sg_lo_addr);

		nsge = sge + 1;
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB |
	    MPII_SGE_FL_EOL);

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
953
954 static u_int32_t
955 mpii_read(struct mpii_softc *sc, bus_size_t r)
956 {
957 u_int32_t rv;
958
959 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
960 BUS_SPACE_BARRIER_READ);
961 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
962
963 DNPRINTF(MPII_D_RW, "%s: mpii_read %#" PRIx64 " %#x\n", DEVNAME(sc),
964 (uint64_t)r, rv);
965
966 return (rv);
967 }
968
969 static void
970 mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v)
971 {
972 DNPRINTF(MPII_D_RW, "%s: mpii_write %#" PRIx64 " %#x\n", DEVNAME(sc),
973 (uint64_t)r, v);
974
975 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
976 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
977 BUS_SPACE_BARRIER_WRITE);
978 }
979
980
981 static int
982 mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
983 u_int32_t target)
984 {
985 int i;
986
987 DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#" PRIx64 " %#x %#x\n",
988 DEVNAME(sc), (uint64_t)r, mask, target);
989
990 for (i = 0; i < 15000; i++) {
991 if ((mpii_read(sc, r) & mask) == target)
992 return (0);
993 delay(1000);
994 }
995
996 return (1);
997 }
998
999 static int
1000 mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
1001 u_int32_t target)
1002 {
1003 int i;
1004
1005 DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#" PRIx64 " %#x %#x\n",
1006 DEVNAME(sc), (uint64_t)r, mask, target);
1007
1008 for (i = 0; i < 15000; i++) {
1009 if ((mpii_read(sc, r) & mask) != target)
1010 return (0);
1011 delay(1000);
1012 }
1013
1014 return (1);
1015 }
1016
1017
/*
 * Bring the IOC to a usable state by walking the doorbell state
 * machine: wait out any in-progress reset, then (unless a PCI peer
 * already owns the IOC) drive it to READY, resetting it if it is
 * OPER or FAULT.  Up to 5 attempts.  Returns 0 on success, 1 on
 * timeout/failure.
 */
static int
mpii_init(struct mpii_softc *sc)
{
	u_int32_t db;
	int i;

	/* spin until the ioc leaves the reset state */
	if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpii_read_db(sc);
	if ((db & MPII_DOORBELL_WHOINIT) == MPII_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	for (i = 0; i < 5; i++) {
		switch (db & MPII_DOORBELL_STATE) {
		case MPII_DOORBELL_STATE_READY:
			/* goal state: the IOC is ready for IOCInit */
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPII_DOORBELL_STATE_OPER:
			/*
			 * Already operational (e.g. warm boot): take it
			 * back down.  A soft reset preserves event
			 * replay state when the IOC supports it.
			 */
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n",
			    DEVNAME(sc));
			if (sc->sc_ioc_event_replay)
				mpii_reset_soft(sc);
			else
				mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_FAULT:
			/* faulted IOCs only recover via a hard reset */
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being "
			    "reset hard\n" , DEVNAME(sc));
			mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_RESET:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
			    MPII_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		db = mpii_read_db(sc);
	}

	return (1);
}
1075
/*
 * Soft reset: ask the IOC to reset its message unit via a doorbell
 * function request.  Fails if the doorbell is already in use or if
 * the IOC does not acknowledge and return to READY in time.
 * Returns 0 on success, 1 on failure.
 */
static int
mpii_reset_soft(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc));

	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) {
		return (1);
	}

	mpii_write_db(sc,
	    MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET));

	/* XXX LSI waits 15 sec */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* XXX LSI waits 15 sec */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_READY) != 0)
		return (1);

	/* XXX wait for Sys2IOCDB bit to clear in HIS?? */

	return (0);
}
1101
/*
 * Hard reset: reset the whole adapter through the host diagnostic
 * register.  The WRITESEQ key sequence below unlocks write access to
 * the diagnostic register; writing anything else (the final 0xff)
 * locks it again.  Returns 0 on success, 1 if the diagnostic
 * register could not be unlocked.
 */
static int
mpii_reset_hard(struct mpii_softc *sc)
{
	u_int16_t i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc));

	/* mask interrupts while the adapter resets */
	mpii_write_intr(sc, 0);

	/* enable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);

	delay(100);

	if ((mpii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable "
		    "diagnostic read/write\n", DEVNAME(sc));
		return(1);
	}

	/* reset ioc */
	mpii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);

	/* 240 milliseconds */
	delay(240000);


	/* XXX this whole function should be more robust */

	/* XXX read the host diagnostic reg until reset adapter bit clears ? */
	/* up to 30000 * 10ms = 300s worst case before giving up */
	for (i = 0; i < 30000; i++) {
		if ((mpii_read(sc, MPII_HOSTDIAG) &
		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
			break;
		delay(10000);
	}

	/* disable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, 0xff);

	/* XXX what else? */

	DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc));

	return(0);
}
1154
/*
 * Send a request to the IOC through the doorbell handshake protocol:
 * announce the transfer with a HANDSHAKE doorbell function carrying
 * the dword count, then feed the request one little-endian dword at a
 * time, waiting for the IOC to ack each write.  Returns 0 on success,
 * 1 on any failure (doorbell busy, missing interrupt, or missing ack).
 */
static int
mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords)
{
	u_int32_t *query = buf;
	int i;

	/* make sure the doorbell is not in use. */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpii_read_intr(sc) & MPII_INTR_STATUS_IOC2SYSDB)
		mpii_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) |
	    MPII_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. wait for the interrupt and then ack it.
	 */
	if (mpii_wait_db_int(sc) != 0)
		return (1);
	mpii_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell. */
	for (i = 0; i < dwords; i++) {
		/* each dword goes out little-endian and must be acked */
		mpii_write_db(sc, htole32(query[i]));
		if (mpii_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}
1197
/*
 * Receive one 32-bit dword of a handshake reply.  The IOC presents
 * the data 16 bits at a time in the doorbell data field, each half
 * preceded by a doorbell interrupt that must be acknowledged.
 * Returns 0 on success, 1 if an interrupt never arrives.
 */
static int
mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword)
{
	u_int16_t *words = (u_int16_t *)dword;
	int i;

	for (i = 0; i < 2; i++) {
		if (mpii_wait_db_int(sc) != 0)
			return (1);
		words[i] = le16toh(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK);
		mpii_write_intr(sc, 0);
	}

	return (0);
}
1213
/*
 * Receive a handshake reply from the IOC into buf.  The first dword
 * contains the reply header, whose msg_length field gives the total
 * reply size in dwords; at most `dwords` dwords are stored and any
 * excess is drained and discarded.  Finally waits for the doorbell
 * to go idle and clears the interrupt.  Returns 0 on success, 1 on
 * any timeout.
 */
static int
mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords)
{
	struct mpii_msg_reply *reply = buf;
	u_int32_t *dbuf = buf, dummy;
	int i;

	/* get the first dword so we can read the length out of the header. */
	if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %zd reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpii_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpii_wait_db_int(sc) != 0)
		return (1);

	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_INUSE, 0) != 0)
		return (1);

	mpii_write_intr(sc, 0);

	return (0);
}
1256
/*
 * No-op completion callback for synchronously polled/waited commands
 * whose callers examine ccb_rcb themselves after the wait returns.
 */
static void
mpii_empty_done(struct mpii_ccb *ccb)
{
	/* nothing to do */
}
1262
1263 static int
1264 mpii_iocfacts(struct mpii_softc *sc)
1265 {
1266 struct mpii_msg_iocfacts_request ifq;
1267 struct mpii_msg_iocfacts_reply ifp;
1268
1269 DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc));
1270
1271 bzero(&ifq, sizeof(ifq));
1272 bzero(&ifp, sizeof(ifp));
1273
1274 ifq.function = MPII_FUNCTION_IOC_FACTS;
1275
1276 if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
1277 DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n",
1278 DEVNAME(sc));
1279 return (1);
1280 }
1281
1282 if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
1283 DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n",
1284 DEVNAME(sc));
1285 return (1);
1286 }
1287
1288 DNPRINTF(MPII_D_MISC, "%s: func: 0x%02x length: %d msgver: %d.%d\n",
1289 DEVNAME(sc), ifp.function, ifp.msg_length,
1290 ifp.msg_version_maj, ifp.msg_version_min);
1291 DNPRINTF(MPII_D_MISC, "%s: msgflags: 0x%02x iocnumber: 0x%02x "
1292 "headerver: %d.%d\n", DEVNAME(sc), ifp.msg_flags,
1293 ifp.ioc_number, ifp.header_version_unit,
1294 ifp.header_version_dev);
1295 DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
1296 ifp.vp_id, ifp.vf_id);
1297 DNPRINTF(MPII_D_MISC, "%s: iocstatus: 0x%04x ioexceptions: 0x%04x\n",
1298 DEVNAME(sc), le16toh(ifp.ioc_status),
1299 le16toh(ifp.ioc_exceptions));
1300 DNPRINTF(MPII_D_MISC, "%s: iocloginfo: 0x%08x\n", DEVNAME(sc),
1301 le32toh(ifp.ioc_loginfo));
1302 DNPRINTF(MPII_D_MISC, "%s: numberofports: 0x%02x whoinit: 0x%02x "
1303 "maxchaindepth: %d\n", DEVNAME(sc), ifp.number_of_ports,
1304 ifp.whoinit, ifp.max_chain_depth);
1305 DNPRINTF(MPII_D_MISC, "%s: productid: 0x%04x requestcredit: 0x%04x\n",
1306 DEVNAME(sc), le16toh(ifp.product_id), le16toh(ifp.request_credit));
1307 DNPRINTF(MPII_D_MISC, "%s: ioc_capabilities: 0x%08x\n", DEVNAME(sc),
1308 le32toh(ifp.ioc_capabilities));
1309 DNPRINTF(MPII_D_MISC, "%s: fw_version: %d.%d fw_version_unit: 0x%02x "
1310 "fw_version_dev: 0x%02x\n", DEVNAME(sc),
1311 ifp.fw_version_maj, ifp.fw_version_min,
1312 ifp.fw_version_unit, ifp.fw_version_dev);
1313 DNPRINTF(MPII_D_MISC, "%s: iocrequestframesize: 0x%04x\n",
1314 DEVNAME(sc), le16toh(ifp.ioc_request_frame_size));
1315 DNPRINTF(MPII_D_MISC, "%s: maxtargets: 0x%04x "
1316 "maxinitiators: 0x%04x\n", DEVNAME(sc),
1317 le16toh(ifp.max_targets), le16toh(ifp.max_initiators));
1318 DNPRINTF(MPII_D_MISC, "%s: maxenclosures: 0x%04x "
1319 "maxsasexpanders: 0x%04x\n", DEVNAME(sc),
1320 le16toh(ifp.max_enclosures), le16toh(ifp.max_sas_expanders));
1321 DNPRINTF(MPII_D_MISC, "%s: highprioritycredit: 0x%04x "
1322 "protocolflags: 0x%02x\n", DEVNAME(sc),
1323 le16toh(ifp.high_priority_credit), le16toh(ifp.protocol_flags));
1324 DNPRINTF(MPII_D_MISC, "%s: maxvolumes: 0x%02x replyframesize: 0x%02x "
1325 "mrdpqd: 0x%04x\n", DEVNAME(sc), ifp.max_volumes,
1326 ifp.reply_frame_size,
1327 le16toh(ifp.max_reply_descriptor_post_queue_depth));
1328 DNPRINTF(MPII_D_MISC, "%s: maxpersistententries: 0x%04x "
1329 "maxdevhandle: 0x%02x\n", DEVNAME(sc),
1330 le16toh(ifp.max_persistent_entries), le16toh(ifp.max_dev_handle));
1331
1332 sc->sc_maxchdepth = ifp.max_chain_depth;
1333 sc->sc_ioc_number = ifp.ioc_number;
1334 sc->sc_vf_id = ifp.vf_id;
1335
1336 sc->sc_num_ports = ifp.number_of_ports;
1337 sc->sc_ioc_event_replay = (le32toh(ifp.ioc_capabilities) &
1338 MPII_IOCFACTS_CAPABILITY_EVENT_REPLAY) ? 1 : 0;
1339 sc->sc_max_enclosures = le16toh(ifp.max_enclosures);
1340 sc->sc_max_expanders = le16toh(ifp.max_sas_expanders);
1341 sc->sc_max_volumes = ifp.max_volumes;
1342 sc->sc_max_devices = ifp.max_volumes + le16toh(ifp.max_targets);
1343 sc->sc_num_channels = 1;
1344
1345 if (ISSET(le32toh(ifp.ioc_capabilities),
1346 MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
1347 SET(sc->sc_flags, MPII_F_RAID);
1348
1349 sc->sc_request_depth = MIN(le16toh(ifp.request_credit),
1350 MPII_MAX_REQUEST_CREDIT);
1351
1352 /* should not be multiple of 16 */
1353 sc->sc_num_reply_frames = sc->sc_request_depth + 32;
1354 if (!(sc->sc_num_reply_frames % 16))
1355 sc->sc_num_reply_frames--;
1356
1357 /* must be multiple of 16 */
1358 sc->sc_reply_free_qdepth = sc->sc_num_reply_frames +
1359 (16 - (sc->sc_num_reply_frames % 16));
1360 sc->sc_reply_post_qdepth = ((sc->sc_request_depth +
1361 sc->sc_num_reply_frames + 1 + 15) / 16) * 16;
1362
1363 if (sc->sc_reply_post_qdepth >
1364 ifp.max_reply_descriptor_post_queue_depth)
1365 sc->sc_reply_post_qdepth =
1366 ifp.max_reply_descriptor_post_queue_depth;
1367
1368 DNPRINTF(MPII_D_MISC, "%s: sc_request_depth: %d "
1369 "sc_num_reply_frames: %d sc_reply_free_qdepth: %d "
1370 "sc_reply_post_qdepth: %d\n", DEVNAME(sc), sc->sc_request_depth,
1371 sc->sc_num_reply_frames, sc->sc_reply_free_qdepth,
1372 sc->sc_reply_post_qdepth);
1373
1374 /*
1375 * you can fit sg elements on the end of the io cmd if they fit in the
1376 * request frame size.
1377 */
1378
1379 sc->sc_first_sgl_len = ((le16toh(ifp.ioc_request_frame_size) * 4) -
1380 sizeof(struct mpii_msg_scsi_io)) / sizeof(struct mpii_sge);
1381 DNPRINTF(MPII_D_MISC, "%s: first sgl len: %d\n", DEVNAME(sc),
1382 sc->sc_first_sgl_len);
1383
1384 sc->sc_chain_len = (le16toh(ifp.ioc_request_frame_size) * 4) /
1385 sizeof(struct mpii_sge);
1386 DNPRINTF(MPII_D_MISC, "%s: chain len: %d\n", DEVNAME(sc),
1387 sc->sc_chain_len);
1388
1389 /* the sgl tailing the io cmd loses an entry to the chain element. */
1390 sc->sc_max_sgl_len = MPII_MAX_SGL - 1;
1391 /* the sgl chains lose an entry for each chain element */
1392 sc->sc_max_sgl_len -= (MPII_MAX_SGL - sc->sc_first_sgl_len) /
1393 sc->sc_chain_len;
1394 DNPRINTF(MPII_D_MISC, "%s: max sgl len: %d\n", DEVNAME(sc),
1395 sc->sc_max_sgl_len);
1396
1397 /* XXX we're ignoring the max chain depth */
1398
1399 return(0);
1400
1401 }
1402
/*
 * Issue an IOCInit request over the doorbell handshake, telling the
 * IOC the request frame size, the depths of the reply free/post
 * queues, and the DMA base addresses of the request frames, reply
 * frames and both reply queues.  Returns 0 on success, 1 if the
 * handshake fails or the IOC reports an error status.
 */
static int
mpii_iocinit(struct mpii_softc *sc)
{
	struct mpii_msg_iocinit_request iiq;
	struct mpii_msg_iocinit_reply iip;
	u_int32_t hi_addr;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc));

	bzero(&iiq, sizeof(iiq));
	bzero(&iip, sizeof(iip));

	iiq.function = MPII_FUNCTION_IOC_INIT;
	iiq.whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* XXX JPG do something about vf_id */
	iiq.vf_id = 0;

	iiq.msg_version_maj = 0x02;
	iiq.msg_version_min = 0x00;

	/* XXX JPG ensure compliance with some level and hard-code? */
	iiq.hdr_version_unit = 0x00;
	iiq.hdr_version_dev = 0x00;

	iiq.system_request_frame_size = htole16(MPII_REQUEST_SIZE / 4);

	iiq.reply_descriptor_post_queue_depth =
	    htole16(sc->sc_reply_post_qdepth);

	iiq.reply_free_queue_depth = htole16(sc->sc_reply_free_qdepth);

	/* sense buffers live in the request frame DMA region */
	hi_addr = (u_int32_t)((u_int64_t)MPII_DMA_DVA(sc->sc_requests) >> 32);
	iiq.sense_buffer_address_high = htole32(hi_addr);

	hi_addr = (u_int32_t)
	    ((u_int64_t)MPII_DMA_DVA(sc->sc_replies) >> 32);
	iiq.system_reply_address_high = htole32(hi_addr);

	/*
	 * NOTE(review): the three 64-bit addresses below are stored in
	 * host byte order with no htole64(), unlike the 16/32-bit fields
	 * above -- confirm this is correct on big-endian hosts given
	 * that mpii_handshake_send() swaps each 32-bit dword on the way
	 * out.
	 */
	iiq.system_request_frame_base_address =
	    (u_int64_t)MPII_DMA_DVA(sc->sc_requests);

	iiq.reply_descriptor_post_queue_address =
	    (u_int64_t)MPII_DMA_DVA(sc->sc_reply_postq);

	iiq.reply_free_queue_address =
	    (u_int64_t)MPII_DMA_DVA(sc->sc_reply_freeq);

	if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPII_D_MISC, "%s: function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPII_D_MISC, "%s: msg_flags: 0x%02x\n", DEVNAME(sc),
	    iip.msg_flags);
	DNPRINTF(MPII_D_MISC, "%s: vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc),
	    iip.vf_id, iip.vp_id);
	DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    le16toh(iip.ioc_status));
	DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    le32toh(iip.ioc_loginfo));

	/* MPII_IOCSTATUS_SUCCESS is 0, so no byteswap is needed here */
	if ((iip.ioc_status != MPII_IOCSTATUS_SUCCESS) || (iip.ioc_loginfo))
		return (1);

	return (0);
}
1480
1481 static void
1482 mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb)
1483 {
1484 u_int32_t *rfp;
1485
1486 if (rcb == NULL)
1487 return;
1488
1489 rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
1490 rfp[sc->sc_reply_free_host_index] = rcb->rcb_reply_dva;
1491
1492 sc->sc_reply_free_host_index = (sc->sc_reply_free_host_index + 1) %
1493 sc->sc_reply_free_qdepth;
1494
1495 mpii_write_reply_free(sc, sc->sc_reply_free_host_index);
1496 }
1497
/*
 * Issue a synchronous PortFacts request for port 0 and record the
 * reported port type in sc_porttype.  Returns 0 on success, 1 on
 * failure; the ccb is always released.
 */
static int
mpii_portfacts(struct mpii_softc *sc)
{
	struct mpii_msg_portfacts_request *pfq;
	struct mpii_msg_portfacts_reply *pfp;
	struct mpii_ccb *ccb;
	int rv = 1;

	DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc));

	ccb = mpii_get_ccb(sc, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n",
		    DEVNAME(sc));
		return (rv);
	}

	ccb->ccb_done = mpii_empty_done;
	pfq = ccb->ccb_cmd;

	bzero(pfq, sizeof(*pfq));

	pfq->function = MPII_FUNCTION_PORT_FACTS;
	pfq->chain_offset = 0;
	pfq->msg_flags = 0;
	pfq->port_number = 0;
	pfq->vp_id = 0;
	pfq->vf_id = 0;

	if (mpii_poll(sc, ccb) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n",
		    DEVNAME(sc));
		goto err;
	}

	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n",
		    DEVNAME(sc));
		goto err;
	}

	pfp = ccb->ccb_rcb->rcb_reply;
	DNPRINTF(MPII_D_MISC, "%s pfp: %p\n", DEVNAME(sc), pfp);

	DNPRINTF(MPII_D_MISC, "%s: function: 0x%02x msg_length: %d\n",
	    DEVNAME(sc), pfp->function, pfp->msg_length);
	DNPRINTF(MPII_D_MISC, "%s: msg_flags: 0x%02x port_number: %d\n",
	    DEVNAME(sc), pfp->msg_flags, pfp->port_number);
	DNPRINTF(MPII_D_MISC, "%s: vf_id: 0x%02x vp_id: 0x%02x\n",
	    DEVNAME(sc), pfp->vf_id, pfp->vp_id);
	DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    le16toh(pfp->ioc_status));
	DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    le32toh(pfp->ioc_loginfo));
	DNPRINTF(MPII_D_MISC, "%s: port_type: 0x%02x\n", DEVNAME(sc),
	    pfp->port_type);
	DNPRINTF(MPII_D_MISC, "%s: max_posted_cmd_buffers: %d\n", DEVNAME(sc),
	    le16toh(pfp->max_posted_cmd_buffers));

	sc->sc_porttype = pfp->port_type;

	/* hand the reply frame back to the IOC */
	mpii_push_reply(sc, ccb->ccb_rcb);
	rv = 0;
err:
	mpii_put_ccb(sc, ccb);

	return (rv);
}
1566
1567 static void
1568 mpii_eventack(struct work *wk, void *cookie)
1569 {
1570 struct mpii_softc *sc = cookie;
1571 struct mpii_ccb *ccb;
1572 struct mpii_rcb *rcb = (void *)wk;
1573 struct mpii_msg_event_reply *enp;
1574 struct mpii_msg_eventack_request *eaq;
1575
1576 ccb = mpii_get_ccb(sc, 0);
1577
1578 enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
1579
1580 ccb->ccb_done = mpii_eventack_done;
1581 eaq = ccb->ccb_cmd;
1582
1583 eaq->function = MPII_FUNCTION_EVENT_ACK;
1584
1585 eaq->event = enp->event;
1586 eaq->event_context = enp->event_context;
1587
1588 mpii_push_reply(sc, rcb);
1589
1590 mpii_start(sc, ccb);
1591
1592 }
1593
1594 static void
1595 mpii_eventack_done(struct mpii_ccb *ccb)
1596 {
1597 struct mpii_softc *sc = ccb->ccb_sc;
1598
1599 DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc));
1600
1601 mpii_push_reply(sc, ccb->ccb_rcb);
1602 mpii_put_ccb(sc, ccb);
1603 }
1604
1605 static int
1606 mpii_portenable(struct mpii_softc *sc)
1607 {
1608 struct mpii_msg_portenable_request *peq;
1609 struct mpii_ccb *ccb;
1610
1611 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc));
1612
1613 ccb = mpii_get_ccb(sc, 0);
1614 if (ccb == NULL) {
1615 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n",
1616 DEVNAME(sc));
1617 return (1);
1618 }
1619
1620 ccb->ccb_done = mpii_empty_done;
1621 peq = ccb->ccb_cmd;
1622
1623 peq->function = MPII_FUNCTION_PORT_ENABLE;
1624 peq->vf_id = sc->sc_vf_id;
1625
1626 if (mpii_poll(sc, ccb) != 0) {
1627 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n",
1628 DEVNAME(sc));
1629 return (1);
1630 }
1631
1632 if (ccb->ccb_rcb == NULL) {
1633 DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n",
1634 DEVNAME(sc));
1635 return (1);
1636 }
1637
1638 mpii_push_reply(sc, ccb->ccb_rcb);
1639 mpii_put_ccb(sc, ccb);
1640
1641 return (0);
1642 }
1643
1644 static int
1645 mpii_cfg_coalescing(struct mpii_softc *sc)
1646 {
1647 struct mpii_cfg_hdr hdr;
1648 struct mpii_cfg_ioc_pg1 pg;
1649
1650 if (mpii_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0,
1651 &hdr) != 0) {
1652 DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1 "
1653 "header\n", DEVNAME(sc));
1654 return (1);
1655 }
1656
1657 if (mpii_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) {
1658 DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1\n"
1659 "page 1\n", DEVNAME(sc));
1660 return (1);
1661 }
1662
1663 DNPRINTF(MPII_D_MISC, "%s: IOC page 1\n", DEVNAME(sc));
1664 DNPRINTF(MPII_D_MISC, "%s: flags: 0x08%x\n", DEVNAME(sc),
1665 le32toh(pg.flags));
1666 DNPRINTF(MPII_D_MISC, "%s: coalescing_timeout: %d\n", DEVNAME(sc),
1667 le32toh(pg.coalescing_timeout));
1668 DNPRINTF(MPII_D_MISC, "%s: coalescing_depth: %d pci_slot_num: %d\n",
1669 DEVNAME(sc), pg.coalescing_timeout, pg.pci_slot_num);
1670
1671 if (!ISSET(le32toh(pg.flags), MPII_CFG_IOC_1_REPLY_COALESCING))
1672 return (0);
1673
1674 CLR(pg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING));
1675 if (mpii_cfg_page(sc, 0, &hdr, 0, &pg, sizeof(pg)) != 0) {
1676 DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n",
1677 DEVNAME(sc));
1678 return (1);
1679 }
1680
1681 return (0);
1682 }
1683
/*
 * Event mask helpers for mpii_msg_event_request.  The IOC reports
 * only events whose mask bit is CLEAR, so MASKALL disables all
 * reporting (0xffffffff is byte-order neutral) and UNMASK re-enables
 * a single event in the 4 x 32-bit little-endian mask array.
 */
#define MPII_EVENT_MASKALL(enq) do {			\
		enq->event_masks[0] = 0xffffffff;	\
		enq->event_masks[1] = 0xffffffff;	\
		enq->event_masks[2] = 0xffffffff;	\
		enq->event_masks[3] = 0xffffffff;	\
	} while (0)

#define MPII_EVENT_UNMASK(enq, evt) do {		\
		enq->event_masks[evt / 32] &=		\
		    htole32(~(1 << (evt % 32)));	\
	} while (0)
1695
/*
 * Ask the IOC to start asynchronous event notification for the
 * events the driver cares about.  The request is fired with
 * mpii_start(); the reply arrives later via mpii_eventnotify_done().
 * Returns 0 on success, 1 if no ccb was available.
 */
static int
mpii_eventnotify(struct mpii_softc *sc)
{
	struct mpii_msg_event_request *enq;
	struct mpii_ccb *ccb;

	ccb = mpii_get_ccb(sc, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	ccb->ccb_done = mpii_eventnotify_done;
	enq = ccb->ccb_cmd;

	/*
	 * NOTE(review): enq is not bzero'd here, unlike e.g. the
	 * portfacts request -- presumably mpii_get_ccb() hands out a
	 * zeroed request frame; confirm, otherwise stale fields go to
	 * the IOC.
	 */
	enq->function = MPII_FUNCTION_EVENT_NOTIFICATION;

	/*
	 * Enable reporting of the following events:
	 *
	 * MPII_EVENT_SAS_DISCOVERY
	 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST
	 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST
	 * MPII_EVENT_IR_VOLUME
	 * MPII_EVENT_IR_PHYSICAL_DISK
	 * MPII_EVENT_IR_OPERATION_STATUS
	 */

	MPII_EVENT_MASKALL(enq);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS);

	mpii_start(sc, ccb);

	return (0);
}
1741
1742 static void
1743 mpii_eventnotify_done(struct mpii_ccb *ccb)
1744 {
1745 struct mpii_softc *sc = ccb->ccb_sc;
1746 struct mpii_rcb *rcb = ccb->ccb_rcb;
1747
1748 DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc));
1749
1750 mpii_put_ccb(sc, ccb);
1751 mpii_event_process(sc, rcb);
1752 }
1753
1754 static void
1755 mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
1756 {
1757 struct mpii_evt_ir_cfg_change_list *ccl;
1758 struct mpii_evt_ir_cfg_element *ce;
1759 struct mpii_device *dev;
1760 u_int16_t type;
1761 int i;
1762
1763 ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1);
1764
1765 if (ccl->num_elements == 0)
1766 return;
1767 if (ISSET(le32toh(ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN))
1768 /* bail on foreign configurations */
1769 return;
1770
1771 ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);
1772
1773 for (i = 0; i < ccl->num_elements; i++, ce++) {
1774 type = (le16toh(ce->element_flags) &
1775 MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK);
1776
1777 switch (type) {
1778 case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME:
1779 switch (ce->reason_code) {
1780 case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED:
1781 case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED:
1782 if (mpii_find_dev(sc,
1783 le16toh(ce->vol_dev_handle))) {
1784 aprint_error_dev(sc->sc_dev,
1785 "device %#x is already "
1786 "configured\n",
1787 le16toh(ce->vol_dev_handle));
1788 break;
1789 }
1790 dev = malloc(sizeof(*dev), M_DEVBUF,
1791 M_NOWAIT | M_ZERO);
1792 if (!dev) {
1793 aprint_error_dev(sc->sc_dev,
1794 "can't allocate device structure\n");
1795 break;
1796 }
1797 SET(dev->flags, MPII_DF_VOLUME);
1798 dev->slot = sc->sc_vd_id_low;
1799 dev->dev_handle = le16toh(ce->vol_dev_handle);
1800 if (mpii_insert_dev(sc, dev)) {
1801 aprint_error_dev(sc->sc_dev,
1802 "can't insert device structure\n");
1803 free(dev, M_DEVBUF);
1804 break;
1805 }
1806 if (mpii_cache_enable(sc, dev)) {
1807 aprint_error_dev(sc->sc_dev,
1808 "can't enable device cache\n");
1809 free(dev, M_DEVBUF);
1810 break;
1811 }
1812 sc->sc_vd_count++;
1813 break;
1814 case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED:
1815 case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED:
1816 if (!(dev = mpii_find_dev(sc,
1817 le16toh(ce->vol_dev_handle))))
1818 break;
1819 mpii_remove_dev(sc, dev);
1820 sc->sc_vd_count--;
1821 break;
1822 }
1823 break;
1824 case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK:
1825 if (ce->reason_code ==
1826 MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED ||
1827 ce->reason_code ==
1828 MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
1829 /* there should be an underlying sas drive */
1830 if (!(dev = mpii_find_dev(sc,
1831 le16toh(ce->phys_disk_dev_handle))))
1832 break;
1833 /* promoted from a hot spare? */
1834 CLR(dev->flags, MPII_DF_HOT_SPARE);
1835 SET(dev->flags, MPII_DF_VOLUME_DISK |
1836 MPII_DF_HIDDEN);
1837 }
1838 break;
1839 case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE:
1840 if (ce->reason_code ==
1841 MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
1842 /* there should be an underlying sas drive */
1843 if (!(dev = mpii_find_dev(sc,
1844 le16toh(ce->phys_disk_dev_handle))))
1845 break;
1846 SET(dev->flags, MPII_DF_HOT_SPARE |
1847 MPII_DF_HIDDEN);
1848 }
1849 break;
1850 }
1851 }
1852 }
1853
/*
 * Handle a SAS_TOPOLOGY_CHANGE_LIST event: walk the phy entry list
 * that follows the reply and create a device entry for each newly
 * added device, or tear down the entry for each missing one.
 */
static void
mpii_event_sas(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_sas_tcl *tcl;
	struct mpii_evt_phy_entry *pe;
	struct mpii_device *dev;
	int i;

	/* the topology change list immediately follows the reply header */
	tcl = (struct mpii_evt_sas_tcl *)(enp + 1);

	if (tcl->num_entries == 0)
		return;

	pe = (struct mpii_evt_phy_entry *)(tcl + 1);

	for (i = 0; i < tcl->num_entries; i++, pe++) {
		switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
		case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
			if (mpii_find_dev(sc, le16toh(pe->dev_handle))) {
				aprint_error_dev(sc->sc_dev,
				    "device %#x is already configured\n",
				    le16toh(pe->dev_handle));
				break;
			}
			dev = malloc(sizeof(*dev), M_DEVBUF, M_NOWAIT | M_ZERO);
			if (!dev) {
				aprint_error_dev(sc->sc_dev, "can't allocate "
				    "device structure\n");
				break;
			}
			/* slot is derived from the phy position in the list */
			dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i;
			dev->dev_handle = le16toh(pe->dev_handle);
			dev->phy_num = tcl->start_phy_num + i;
			if (tcl->enclosure_handle)
				dev->physical_port = tcl->physical_port;
			dev->enclosure = le16toh(tcl->enclosure_handle);
			dev->expander = le16toh(tcl->expander_handle);
			if (mpii_insert_dev(sc, dev)) {
				aprint_error_dev(sc->sc_dev, "can't insert "
				    "device structure\n");
				free(dev, M_DEVBUF);
				break;
			}
			break;
		case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
			if (!(dev = mpii_find_dev(sc,
			    le16toh(pe->dev_handle))))
				break;
			mpii_remove_dev(sc, dev);
#if 0
			if (sc->sc_scsibus) {
				SET(dev->flags, MPII_DF_DETACH);
				scsi_activate(sc->sc_scsibus, dev->slot, -1,
				    DVACT_DEACTIVATE);
				if (scsi_task(mpii_event_defer, sc,
				    dev, 0) != 0)
					aprint_error_dev(sc->sc_dev,
					    "unable to run device "
					    "detachment routine\n");
			}
#else
			/* deferred detach machinery not ported; call direct */
			mpii_event_defer(sc, dev);
#endif /* XXX */
			break;
		}
	}
}
1921
/*
 * Dispatch an asynchronous event reply from the IOC to the
 * appropriate handler, then either queue an EVENT_ACK (if the IOC
 * requires one) or return the reply frame immediately.
 */
static void
mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply *enp;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc),
	    le32toh(enp->event));

	switch (le32toh(enp->event)) {
	case MPII_EVENT_EVENT_CHANGE:
		/* should be properly ignored */
		break;
	case MPII_EVENT_SAS_DISCOVERY: {
		struct mpii_evt_sas_discovery *esd =
		    (struct mpii_evt_sas_discovery *)(enp + 1);

		/* only complain when discovery finished with an error */
		if (esd->reason_code ==
		    MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED &&
		    esd->discovery_status != 0)
			printf("%s: sas discovery completed with status %#x\n",
			    DEVNAME(sc), esd->discovery_status);
	}
		break;
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		mpii_event_sas(sc, enp);
		break;
	case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_IR_VOLUME: {
		struct mpii_evt_ir_volume *evd =
		    (struct mpii_evt_ir_volume *)(enp + 1);
		struct mpii_device *dev;
#if NBIO > 0
		const char *vol_states[] = {
			BIOC_SVINVALID_S,
			BIOC_SVOFFLINE_S,
			BIOC_SVBUILDING_S,
			BIOC_SVONLINE_S,
			BIOC_SVDEGRADED_S,
			BIOC_SVONLINE_S,
		};
#endif

		/* ignore volume state changes during autoconfiguration */
		if (cold)
			break;
		if (!(dev = mpii_find_dev(sc, le16toh(evd->vol_dev_handle))))
			break;
#if NBIO > 0
		/*
		 * NOTE(review): prev_value/new_value index vol_states
		 * without a range check -- an out-of-range value from the
		 * firmware would read past the table; confirm the firmware
		 * guarantees 0..5 here.
		 */
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED)
			printf("%s: volume %d state changed from %s to %s\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low,
			    vol_states[evd->prev_value],
			    vol_states[evd->new_value]);
#endif
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED &&
		    ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC) &&
		    !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			printf("%s: started resync on a volume %d\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low);
	}
		break;
	case MPII_EVENT_IR_PHYSICAL_DISK:
		break;
	case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		mpii_event_raid(sc, enp);
		break;
	case MPII_EVENT_IR_OPERATION_STATUS: {
		struct mpii_evt_ir_status *evs =
		    (struct mpii_evt_ir_status *)(enp + 1);
		struct mpii_device *dev;

		if (!(dev = mpii_find_dev(sc, le16toh(evs->vol_dev_handle))))
			break;
		/* track resync progress for bio(4) status reporting */
		if (evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC)
			dev->percent = evs->percent;
		break;
	}
	default:
		DNPRINTF(MPII_D_EVT, "%s:  unhandled event 0x%02x\n",
		    DEVNAME(sc), le32toh(enp->event));
	}

	/* the reply frame goes back once the ack (if any) has been queued */
	if (enp->ack_required)
		workqueue_enqueue(sc->sc_ssb_evt_ackwk, &rcb->u.rcb_wk, NULL);
	else
		mpii_push_reply(sc, rcb);
}
2013
/*
 * Deferred device attach/detach work.  For a device flagged for
 * detach the IOC-side target is torn down and the device structure
 * freed (the caller must already have removed it from the device
 * list); for a pending attach only the flag is cleared -- the
 * scsibus probe/detach calls are not ported (see #if 0 blocks).
 */
static void
mpii_event_defer(void *xsc, void *arg)
{
	struct mpii_softc *sc = xsc;
	struct mpii_device *dev = arg;

	if (ISSET(dev->flags, MPII_DF_DETACH)) {
		mpii_sas_remove_device(sc, dev->dev_handle);
#if 0
		if (!ISSET(dev->flags, MPII_DF_HIDDEN)) {
			scsi_detach_target(sc->sc_scsibus, dev->slot,
			    DETACH_FORCE);
		}
#endif /* XXX */
		free(dev, M_DEVBUF);

	} else if (ISSET(dev->flags, MPII_DF_ATTACH)) {
		CLR(dev->flags, MPII_DF_ATTACH);
#if 0
		if (!ISSET(dev->flags, MPII_DF_HIDDEN))
			scsi_probe_target(sc->sc_scsibus, dev->slot);
#endif /* XXX */
	}
}
2038
/*
 * Tear down a SAS device on the IOC: first issue a target reset to
 * abort any outstanding I/O for the handle, then a SAS IO unit
 * control REMOVE_DEVICE operation.  Both commands are run
 * synchronously on a single reused ccb; failures are silently
 * ignored (best effort).
 */
static void
mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle)
{
	struct mpii_msg_scsi_task_request *stq;
	struct mpii_msg_sas_oper_request *soq;
	struct mpii_ccb *ccb;

	ccb = mpii_get_ccb(sc, 0);
	if (ccb == NULL)
		return;

	/*
	 * NOTE(review): stq is not bzero'd, while soq below is --
	 * presumably mpii_get_ccb() hands out a zeroed request frame;
	 * confirm, otherwise stale fields go out with the task request.
	 */
	stq = ccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	stq->dev_handle = htole16(handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);

	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	/* reuse a ccb */
	ccb->ccb_state = MPII_CCB_READY;
	ccb->ccb_rcb = NULL;

	soq = ccb->ccb_cmd;
	bzero(soq, sizeof(*soq));
	soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL;
	soq->operation = MPII_SAS_OP_REMOVE_DEVICE;
	soq->dev_handle = htole16(handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);
	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);
}
2076
2077 static int
2078 mpii_get_ioc_pg8(struct mpii_softc *sc)
2079 {
2080 struct mpii_cfg_hdr hdr;
2081 struct mpii_cfg_ioc_pg8 *page;
2082 size_t pagelen;
2083 u_int16_t flags;
2084 int pad = 0, rv = 0;
2085
2086 DNPRINTF(MPII_D_RAID, "%s: mpii_get_ioc_pg8\n", DEVNAME(sc));
2087
2088 if (mpii_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_IOC, 8, 0,
2089 &hdr) != 0) {
2090 DNPRINTF(MPII_D_CFG, "%s: mpii_get_ioc_pg8 unable to fetch "
2091 "header for IOC page 8\n", DEVNAME(sc));
2092 return (1);
2093 }
2094
2095 pagelen = hdr.page_length * 4; /* dwords to bytes */
2096
2097 page = malloc(pagelen, M_TEMP, M_NOWAIT);
2098 if (page == NULL) {
2099 DNPRINTF(MPII_D_CFG, "%s: mpii_get_ioc_pg8 unable to allocate "
2100 "space for ioc config page 8\n", DEVNAME(sc));
2101 return (1);
2102 }
2103
2104 if (mpii_cfg_page(sc, 0, &hdr, 1, page, pagelen) != 0) {
2105 DNPRINTF(MPII_D_CFG, "%s: mpii_get_raid unable to fetch IOC "
2106 "page 8\n", DEVNAME(sc));
2107 rv = 1;
2108 goto out;
2109 }
2110
2111 DNPRINTF(MPII_D_CFG, "%s: numdevsperenclosure: 0x%02x\n", DEVNAME(sc),
2112 page->num_devs_per_enclosure);
2113 DNPRINTF(MPII_D_CFG, "%s: maxpersistententries: 0x%04x "
2114 "maxnumphysicalmappedids: 0x%04x\n", DEVNAME(sc),
2115 le16toh(page->max_persistent_entries),
2116 le16toh(page->max_num_physical_mapped_ids));
2117 DNPRINTF(MPII_D_CFG, "%s: flags: 0x%04x\n", DEVNAME(sc),
2118 le16toh(page->flags));
2119 DNPRINTF(MPII_D_CFG, "%s: irvolumemappingflags: 0x%04x\n",
2120 DEVNAME(sc), le16toh(page->ir_volume_mapping_flags));
2121
2122 if (page->flags & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0)
2123 pad = 1;
2124
2125 flags = page->ir_volume_mapping_flags &
2126 MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK;
2127 if (ISSET(sc->sc_flags, MPII_F_RAID)) {
2128 if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING) {
2129 sc->sc_vd_id_low += pad;
2130 pad = sc->sc_max_volumes; /* for sc_pd_id_start */
2131 } else
2132 sc->sc_vd_id_low = sc->sc_max_devices -
2133 sc->sc_max_volumes;
2134 }
2135
2136 sc->sc_pd_id_start += pad;
2137
2138 DNPRINTF(MPII_D_MAP, "%s: mpii_get_ioc_pg8 mapping: sc_pd_id_start: %d "
2139 "sc_vd_id_low: %d sc_max_volumes: %d\n", DEVNAME(sc),
2140 sc->sc_pd_id_start, sc->sc_vd_id_low, sc->sc_max_volumes);
2141
2142 out:
2143 free(page, M_TEMP);
2144
2145 return(rv);
2146 }
2147
2148 static int
2149 mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number,
2150 u_int32_t address, int flags, void *p)
2151 {
2152 struct mpii_msg_config_request *cq;
2153 struct mpii_msg_config_reply *cp;
2154 struct mpii_cfg_hdr *hdr = p;
2155 struct mpii_ccb *ccb;
2156 struct mpii_ecfg_hdr *ehdr = p;
2157 int etype = 0;
2158 int rv = 0;
2159
2160 DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x "
2161 "address: 0x%08x flags: 0x%x\n", DEVNAME(sc), type, number,
2162 address, flags);
2163
2164 ccb = mpii_get_ccb(sc, ISSET(flags, MPII_PG_POLL) ? MPII_NOSLEEP : 0);
2165 if (ccb == NULL) {
2166 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n",
2167 DEVNAME(sc));
2168 return (1);
2169 }
2170
2171 if (ISSET(flags, MPII_PG_EXTENDED)) {
2172 etype = type;
2173 type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2174 }
2175
2176 cq = ccb->ccb_cmd;
2177
2178 cq->function = MPII_FUNCTION_CONFIG;
2179
2180 cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER;
2181
2182 cq->config_header.page_number = number;
2183 cq->config_header.page_type = type;
2184 cq->ext_page_type = etype;
2185 cq->page_address = htole32(address);
2186 cq->page_buffer.sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
2187 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
2188
2189 ccb->ccb_done = mpii_empty_done;
2190 if (ISSET(flags, MPII_PG_POLL)) {
2191 if (mpii_poll(sc, ccb) != 0) {
2192 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2193 DEVNAME(sc));
2194 return (1);
2195 }
2196 } else
2197 mpii_wait(sc, ccb);
2198
2199 if (ccb->ccb_rcb == NULL) {
2200 mpii_put_ccb(sc, ccb);
2201 return (1);
2202 }
2203 cp = ccb->ccb_rcb->rcb_reply;
2204
2205 DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x sgl_flags: 0x%02x "
2206 "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
2207 cp->sgl_flags, cp->msg_length, cp->function);
2208 DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2209 "msg_flags: 0x%02x\n", DEVNAME(sc),
2210 le16toh(cp->ext_page_length), cp->ext_page_type,
2211 cp->msg_flags);
2212 DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2213 cp->vp_id, cp->vf_id);
2214 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2215 le16toh(cp->ioc_status));
2216 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2217 le32toh(cp->ioc_loginfo));
2218 DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2219 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2220 cp->config_header.page_version,
2221 cp->config_header.page_length,
2222 cp->config_header.page_number,
2223 cp->config_header.page_type);
2224
2225 if (le16toh(cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2226 rv = 1;
2227 else if (ISSET(flags, MPII_PG_EXTENDED)) {
2228 bzero(ehdr, sizeof(*ehdr));
2229 ehdr->page_version = cp->config_header.page_version;
2230 ehdr->page_number = cp->config_header.page_number;
2231 ehdr->page_type = cp->config_header.page_type;
2232 ehdr->ext_page_length = cp->ext_page_length;
2233 ehdr->ext_page_type = cp->ext_page_type;
2234 } else
2235 *hdr = cp->config_header;
2236
2237 mpii_push_reply(sc, ccb->ccb_rcb);
2238 mpii_put_ccb(sc, ccb);
2239
2240 return (rv);
2241 }
2242
2243 static int
2244 mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags,
2245 void *p, int read, void *page, size_t len)
2246 {
2247 struct mpii_msg_config_request *cq;
2248 struct mpii_msg_config_reply *cp;
2249 struct mpii_cfg_hdr *hdr = p;
2250 struct mpii_ccb *ccb;
2251 struct mpii_ecfg_hdr *ehdr = p;
2252 u_int64_t dva;
2253 char *kva;
2254 int page_length;
2255 int rv = 0;
2256
2257 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d "
2258 "type: %x\n", DEVNAME(sc), address, read, hdr->page_type);
2259
2260 page_length = ISSET(flags, MPII_PG_EXTENDED) ?
2261 le16toh(ehdr->ext_page_length) : hdr->page_length;
2262
2263 if (len > MPII_REQUEST_SIZE - sizeof(struct mpii_msg_config_request) ||
2264 len < page_length * 4)
2265 return (1);
2266
2267 ccb = mpii_get_ccb(sc,
2268 ISSET(flags, MPII_PG_POLL) ? MPII_NOSLEEP : 0);
2269 if (ccb == NULL) {
2270 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n",
2271 DEVNAME(sc));
2272 return (1);
2273 }
2274
2275 cq = ccb->ccb_cmd;
2276
2277 cq->function = MPII_FUNCTION_CONFIG;
2278
2279 cq->action = (read ? MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2280 MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2281
2282 if (ISSET(flags, MPII_PG_EXTENDED)) {
2283 cq->config_header.page_version = ehdr->page_version;
2284 cq->config_header.page_number = ehdr->page_number;
2285 cq->config_header.page_type = ehdr->page_type;
2286 cq->ext_page_len = ehdr->ext_page_length;
2287 cq->ext_page_type = ehdr->ext_page_type;
2288 } else
2289 cq->config_header = *hdr;
2290 cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK;
2291 cq->page_address = htole32(address);
2292 cq->page_buffer.sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
2293 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL |
2294 MPII_SGE_FL_SIZE_64 | (page_length * 4) |
2295 (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT));
2296
2297 /* bounce the page via the request space to avoid more bus_dma games */
2298 dva = ccb->ccb_cmd_dva + sizeof(struct mpii_msg_config_request);
2299
2300 cq->page_buffer.sg_hi_addr = htole32((u_int32_t)(dva >> 32));
2301 cq->page_buffer.sg_lo_addr = htole32((u_int32_t)dva);
2302
2303 kva = ccb->ccb_cmd;
2304 kva += sizeof(struct mpii_msg_config_request);
2305
2306 if (!read)
2307 bcopy(page, kva, len);
2308
2309 ccb->ccb_done = mpii_empty_done;
2310 if (ISSET(flags, MPII_PG_POLL)) {
2311 if (mpii_poll(sc, ccb) != 0) {
2312 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2313 DEVNAME(sc));
2314 return (1);
2315 }
2316 } else
2317 mpii_wait(sc, ccb);
2318
2319 if (ccb->ccb_rcb == NULL) {
2320 mpii_put_ccb(sc, ccb);
2321 return (1);
2322 }
2323 cp = ccb->ccb_rcb->rcb_reply;
2324
2325 DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x "
2326 "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
2327 cp->msg_length, cp->function);
2328 DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2329 "msg_flags: 0x%02x\n", DEVNAME(sc),
2330 le16toh(cp->ext_page_length), cp->ext_page_type,
2331 cp->msg_flags);
2332 DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2333 cp->vp_id, cp->vf_id);
2334 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2335 le16toh(cp->ioc_status));
2336 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2337 le32toh(cp->ioc_loginfo));
2338 DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2339 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2340 cp->config_header.page_version,
2341 cp->config_header.page_length,
2342 cp->config_header.page_number,
2343 cp->config_header.page_type);
2344
2345 if (le16toh(cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2346 rv = 1;
2347 else if (read)
2348 bcopy(kva, page, len);
2349
2350 mpii_push_reply(sc, ccb->ccb_rcb);
2351 mpii_put_ccb(sc, ccb);
2352
2353 return (rv);
2354 }
2355
/*
 * Translate a reply post descriptor into the rcb that tracks the
 * corresponding reply frame, or NULL if the descriptor carries no
 * frame address.  The consumed descriptor slot is re-armed by writing
 * all-ones back into it.
 */
static struct mpii_rcb *
mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp)
{
	struct mpii_rcb *rcb = NULL;
	u_int32_t rfid;

	DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc));

	if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
	    MPII_REPLY_DESCR_ADDRESS_REPLY) {
		/* frame index = (bus address - base of reply area) / size */
		rfid = (le32toh(rdp->frame_addr) -
		    (u_int32_t)MPII_DMA_DVA(sc->sc_replies)) / MPII_REPLY_SIZE;

		/* make the device-written reply frame visible to the CPU */
		bus_dmamap_sync(sc->sc_dmat,
		    MPII_DMA_MAP(sc->sc_replies), MPII_REPLY_SIZE * rfid,
		    MPII_REPLY_SIZE, BUS_DMASYNC_POSTREAD);

		rcb = &sc->sc_rcbs[rfid];
	}

	/* mark this post queue slot unused again (all-ones pattern) */
	memset(rdp, 0xff, sizeof(*rdp));

	/* sync the 8-byte descriptor slot we just consumed and rewrote */
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    8 * sc->sc_reply_post_host_index, 8,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return (rcb);
}
2384
2385 static struct mpii_dmamem *
2386 mpii_dmamem_alloc(struct mpii_softc *sc, size_t size)
2387 {
2388 struct mpii_dmamem *mdm;
2389 int nsegs;
2390
2391 mdm = malloc(sizeof(*mdm), M_DEVBUF, M_NOWAIT | M_ZERO);
2392 if (mdm == NULL)
2393 return (NULL);
2394
2395 mdm->mdm_size = size;
2396
2397 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2398 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
2399 goto mdmfree;
2400
2401 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
2402 1, &nsegs, BUS_DMA_NOWAIT) != 0) goto destroy;
2403
2404 if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
2405 &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
2406 goto free;
2407
2408 if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
2409 NULL, BUS_DMA_NOWAIT) != 0)
2410 goto unmap;
2411
2412 DNPRINTF(MPII_D_MEM,
2413 " kva: %p dva: 0x%" PRIx64 " map: %p size: %" PRId64 "\n",
2414 mdm->mdm_kva, (uint64_t)mdm->mdm_map->dm_segs[0].ds_addr,
2415 mdm->mdm_map, (uint64_t)size);
2416
2417 bzero(mdm->mdm_kva, size);
2418
2419 return (mdm);
2420
2421 unmap:
2422 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
2423 free:
2424 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
2425 destroy:
2426 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
2427 mdmfree:
2428 free(mdm, M_DEVBUF);
2429
2430 return (NULL);
2431 }
2432
/*
 * Release a buffer obtained from mpii_dmamem_alloc().  Teardown is the
 * exact reverse of the construction order: unload, unmap, free the
 * segment, destroy the map, then free the bookkeeping structure.
 */
static void
mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm)
{
	DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %p\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF);
}
2444
2445 static int
2446 mpii_alloc_dev(struct mpii_softc *sc)
2447 {
2448 sc->sc_devs = malloc(sc->sc_max_devices *
2449 sizeof(struct mpii_device *), M_DEVBUF, M_NOWAIT | M_ZERO);
2450 if (sc->sc_devs == NULL)
2451 return (1);
2452 return (0);
2453 }
2454
2455 static int
2456 mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev)
2457 {
2458
2459 if (!dev || dev->slot < 0)
2460 return (1);
2461
2462 int slot = dev->slot; /* initial hint */
2463
2464 while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL)
2465 slot++;
2466 if (slot >= sc->sc_max_devices)
2467 return (1);
2468 dev->slot = slot;
2469 sc->sc_devs[slot] = dev;
2470 return (0);
2471 }
2472
2473 static int
2474 mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev)
2475 {
2476 int i;
2477
2478 if (!dev)
2479 return (1);
2480 for (i = 0; i < sc->sc_max_devices; i++)
2481 if (sc->sc_devs[i] &&
2482 sc->sc_devs[i]->dev_handle == dev->dev_handle) {
2483 sc->sc_devs[i] = NULL;
2484 return (0);
2485 }
2486 return (1);
2487 }
2488
2489 static struct mpii_device *
2490 mpii_find_dev(struct mpii_softc *sc, u_int16_t handle)
2491 {
2492 int i;
2493
2494 for (i = 0; i < sc->sc_max_devices; i++)
2495 if (sc->sc_devs[i] && sc->sc_devs[i]->dev_handle == handle)
2496 return (sc->sc_devs[i]);
2497 return (NULL);
2498 }
2499
/*
 * Allocate the ccb array and the DMA area holding the system request
 * message frames, wire each ccb to its frame (kva, bus address, smid)
 * and place all ccbs on the free list.  Returns 0 on success, 1 on
 * failure with everything released.
 */
static int
mpii_alloc_ccbs(struct mpii_softc *sc)
{
	struct mpii_ccb		*ccb;
	u_int8_t		*cmd;
	int			i;

	SIMPLEQ_INIT(&sc->sc_ccb_free);

	/* smid 0 is reserved, hence one fewer ccb than request frames */
	sc->sc_ccbs = malloc(sizeof(*ccb) * (sc->sc_request_depth-1),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_requests = mpii_dmamem_alloc(sc,
	    MPII_REQUEST_SIZE * sc->sc_request_depth);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	cmd = MPII_DMA_KVA(sc->sc_requests);
	bzero(cmd, MPII_REQUEST_SIZE * sc->sc_request_depth);

	/*
	 * we have sc->sc_request_depth system request message
	 * frames, but smid zero cannot be used. so we then
	 * have (sc->sc_request_depth - 1) number of ccbs
	 */
	for (i = 1; i < sc->sc_request_depth; i++) {
		ccb = &sc->sc_ccbs[i - 1];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
		    sc->sc_max_sgl_len, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		ccb->ccb_smid = i;
		ccb->ccb_offset = MPII_REQUEST_SIZE * i;

		/* kva and bus address of this ccb's request frame */
		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
		ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset;

		DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %p map: %p "
		    "sc: %p smid: %#x offs: %#" PRIx64 " cmd: %#" PRIx64 " dva: %#" PRIx64 "\n",
		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
		    ccb->ccb_smid, (uint64_t)ccb->ccb_offset,
		    (uint64_t)ccb->ccb_cmd, (uint64_t)ccb->ccb_cmd_dva);

		mpii_put_ccb(sc, ccb);
	}

	return (0);

free_maps:
	/* all successfully initialized ccbs are on the free list; drain
	 * it to destroy exactly the dma maps that were created */
	while ((ccb = mpii_get_ccb(sc, MPII_NOSLEEP)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	mpii_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF);

	return (1);
}
2570
2571 static void
2572 mpii_put_ccb(struct mpii_softc *sc, struct mpii_ccb *ccb)
2573 {
2574 KASSERT(ccb->ccb_sc == sc);
2575 DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %p\n", DEVNAME(sc), ccb);
2576
2577 ccb->ccb_state = MPII_CCB_FREE;
2578 ccb->ccb_cookie = NULL;
2579 ccb->ccb_done = NULL;
2580 ccb->ccb_rcb = NULL;
2581 bzero(ccb->ccb_cmd, MPII_REQUEST_SIZE);
2582
2583 mutex_enter(&sc->sc_ccb_free_mtx);
2584 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, u.ccb_link);
2585 cv_signal(&sc->sc_ccb_free_cv);
2586 mutex_exit(&sc->sc_ccb_free_mtx);
2587 }
2588
2589 static struct mpii_ccb *
2590 mpii_get_ccb(struct mpii_softc *sc, int flags)
2591 {
2592 struct mpii_ccb *ccb;
2593
2594 mutex_enter(&sc->sc_ccb_free_mtx);
2595 while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free)) == NULL) {
2596 if (flags & MPII_NOSLEEP)
2597 break;
2598 cv_wait(&sc->sc_ccb_free_cv, &sc->sc_ccb_free_mtx);
2599 }
2600
2601 if (ccb != NULL) {
2602 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, u.ccb_link);
2603 ccb->ccb_state = MPII_CCB_READY;
2604 KASSERT(ccb->ccb_sc == sc);
2605 }
2606 mutex_exit(&sc->sc_ccb_free_mtx);
2607
2608 DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %p\n", DEVNAME(sc), ccb);
2609
2610 return (ccb);
2611 }
2612
2613 static int
2614 mpii_alloc_replies(struct mpii_softc *sc)
2615 {
2616 DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc));
2617
2618 sc->sc_rcbs = malloc(sc->sc_num_reply_frames * sizeof(struct mpii_rcb),
2619 M_DEVBUF, M_NOWAIT);
2620 if (sc->sc_rcbs == NULL)
2621 return (1);
2622
2623 sc->sc_replies = mpii_dmamem_alloc(sc, MPII_REPLY_SIZE *
2624 sc->sc_num_reply_frames);
2625 if (sc->sc_replies == NULL) {
2626 free(sc->sc_rcbs, M_DEVBUF);
2627 return (1);
2628 }
2629
2630 return (0);
2631 }
2632
2633 static void
2634 mpii_push_replies(struct mpii_softc *sc)
2635 {
2636 struct mpii_rcb *rcb;
2637 char *kva = MPII_DMA_KVA(sc->sc_replies);
2638 int i;
2639
2640 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
2641 0, MPII_REPLY_SIZE * sc->sc_num_reply_frames, BUS_DMASYNC_PREREAD);
2642
2643 for (i = 0; i < sc->sc_num_reply_frames; i++) {
2644 rcb = &sc->sc_rcbs[i];
2645
2646 rcb->rcb_reply = kva + MPII_REPLY_SIZE * i;
2647 rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
2648 MPII_REPLY_SIZE * i;
2649 mpii_push_reply(sc, rcb);
2650 }
2651 }
2652
/*
 * Post a prepared ccb to the IOC.  A request descriptor is built from
 * the request's function code and written to the two 32-bit post
 * registers; sc_req_mtx keeps the low/high register pair from being
 * interleaved with another poster.
 */
static void
mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mpii_request_header *rhp;
	struct mpii_request_descr descr;
	/* raw 32-bit view of the descriptor for the register writes */
	u_int32_t *rdp = (u_int32_t *)&descr;

	DNPRINTF(MPII_D_RW, "%s: mpii_start %#" PRIx64 "\n", DEVNAME(sc),
	    (uint64_t)ccb->ccb_cmd_dva);

	rhp = ccb->ccb_cmd;

	bzero(&descr, sizeof(descr));

	/* descriptor type depends on the request's function code */
	switch (rhp->function) {
	case MPII_FUNCTION_SCSI_IO_REQUEST:
		descr.request_flags = MPII_REQ_DESCR_SCSI_IO;
		descr.dev_handle = htole16(ccb->ccb_dev_handle);
		break;
	case MPII_FUNCTION_SCSI_TASK_MGMT:
		descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY;
		break;
	default:
		descr.request_flags = MPII_REQ_DESCR_DEFAULT;
	}

	descr.vf_id = sc->sc_vf_id;
	descr.smid = htole16(ccb->ccb_smid);

	/* flush the request frame before the IOC may read it */
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, MPII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPII_CCB_QUEUED;

	DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%08x\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);

	DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_HIGH (0x%08x) write "
	    "0x%08x\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1));

	/* the low/high pair must be written atomically w.r.t. other posts */
	mutex_enter(&sc->sc_req_mtx);
	mpii_write(sc, MPII_REQ_DESCR_POST_LOW, htole32(*rdp));
	mpii_write(sc, MPII_REQ_DESCR_POST_HIGH, htole32(*(rdp+1)));
	mutex_exit(&sc->sc_req_mtx);
}
2699
2700 static int
2701 mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb)
2702 {
2703 void (*done)(struct mpii_ccb *);
2704 void *cookie;
2705 int rv = 1;
2706
2707 DNPRINTF(MPII_D_INTR, "%s: mpii_complete\n", DEVNAME(sc));
2708
2709 done = ccb->ccb_done;
2710 cookie = ccb->ccb_cookie;
2711
2712 ccb->ccb_done = mpii_poll_done;
2713 ccb->ccb_cookie = &rv;
2714
2715 mpii_start(sc, ccb);
2716
2717 while (rv == 1) {
2718 /* avoid excessive polling */
2719 if (mpii_reply_waiting(sc))
2720 mpii_intr(sc);
2721 else
2722 delay(10);
2723 }
2724
2725 ccb->ccb_cookie = cookie;
2726 done(ccb);
2727
2728 return (0);
2729 }
2730
2731 static void
2732 mpii_poll_done(struct mpii_ccb *ccb)
2733 {
2734 int *rv = ccb->ccb_cookie;
2735
2736 *rv = 0;
2737 }
2738
2739 static int
2740 mpii_alloc_queues(struct mpii_softc *sc)
2741 {
2742 u_int32_t *kva;
2743 u_int64_t *kva64;
2744 int i;
2745
2746 DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc));
2747
2748 sc->sc_reply_freeq = mpii_dmamem_alloc(sc,
2749 sc->sc_reply_free_qdepth * 4);
2750 if (sc->sc_reply_freeq == NULL)
2751 return (1);
2752
2753 kva = MPII_DMA_KVA(sc->sc_reply_freeq);
2754 for (i = 0; i < sc->sc_num_reply_frames; i++) {
2755 kva[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
2756 MPII_REPLY_SIZE * i;
2757
2758 DNPRINTF(MPII_D_MISC, "%s: %d: %p = 0x%08x\n",
2759 DEVNAME(sc), i,
2760 &kva[i], (u_int)MPII_DMA_DVA(sc->sc_replies) +
2761 MPII_REPLY_SIZE * i);
2762 }
2763
2764 sc->sc_reply_postq =
2765 mpii_dmamem_alloc(sc, sc->sc_reply_post_qdepth * 8);
2766 if (sc->sc_reply_postq == NULL)
2767 goto free_reply_freeq;
2768 sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq);
2769
2770 DNPRINTF(MPII_D_MISC, "%s: populating reply post descriptor queue\n",
2771 DEVNAME(sc));
2772 kva64 = (u_int64_t *)MPII_DMA_KVA(sc->sc_reply_postq);
2773 for (i = 0; i < sc->sc_reply_post_qdepth; i++) {
2774 kva64[i] = 0xffffffffffffffffllu;
2775 DNPRINTF(MPII_D_MISC, "%s: %d: %p = 0x%" PRIx64 "\n",
2776 DEVNAME(sc), i, &kva64[i], kva64[i]);
2777 }
2778
2779 return (0);
2780
2781 free_reply_freeq:
2782
2783 mpii_dmamem_free(sc, sc->sc_reply_freeq);
2784 return (1);
2785 }
2786
/*
 * Publish the initial queue indices to the IOC: the free queue starts
 * fully populated (index at the last slot) and the post queue starts
 * empty (index zero).
 */
static void
mpii_init_queues(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_init_queues\n", DEVNAME(sc));

	sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1;
	sc->sc_reply_post_host_index = 0;
	mpii_write_reply_free(sc, sc->sc_reply_free_host_index);
	mpii_write_reply_post(sc, sc->sc_reply_post_host_index);
}
2797
/*
 * Run a ccb to completion, sleeping until done.  A stack-local
 * mutex/condvar pair is handed to mpii_wait_done() via ccb_cookie;
 * the handshake is that mpii_wait_done() sets ccb_cookie to NULL
 * under the mutex and signals, which is what the loop below waits on.
 */
static void
mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mpii_ccb_wait mpii_ccb_wait;
	void (*done)(struct mpii_ccb *);
	void *cookie;

	/* save the caller's completion state; restored before done() */
	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_wait_done;
	ccb->ccb_cookie = &mpii_ccb_wait;

	mutex_init(&mpii_ccb_wait.mpii_ccbw_mtx, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&mpii_ccb_wait.mpii_ccbw_cv, "mpii_wait");

	/* XXX this will wait forever for the ccb to complete */

	mpii_start(sc, ccb);

	mutex_enter(&mpii_ccb_wait.mpii_ccbw_mtx);
	while (ccb->ccb_cookie != NULL) {
		cv_wait(&mpii_ccb_wait.mpii_ccbw_cv,
		    &mpii_ccb_wait.mpii_ccbw_mtx);
	}
	mutex_exit(&mpii_ccb_wait.mpii_ccbw_mtx);
	mutex_destroy(&mpii_ccb_wait.mpii_ccbw_mtx);
	cv_destroy(&mpii_ccb_wait.mpii_ccbw_cv);

	ccb->ccb_cookie = cookie;
	done(ccb);
}
2830
2831 static void
2832 mpii_wait_done(struct mpii_ccb *ccb)
2833 {
2834 struct mpii_ccb_wait *mpii_ccb_waitp = ccb->ccb_cookie;
2835
2836 mutex_enter(&mpii_ccb_waitp->mpii_ccbw_mtx);
2837 ccb->ccb_cookie = NULL;
2838 cv_signal(&mpii_ccb_waitp->mpii_ccbw_cv);
2839 mutex_exit(&mpii_ccb_waitp->mpii_ccbw_mtx);
2840 }
2841
2842 static void
2843 mpii_minphys(struct buf *bp)
2844 {
2845 DNPRINTF(MPII_D_MISC, "mpii_minphys: %d\n", bp->b_bcount);
2846
2847 /* XXX currently using MPII_MAXFER = MAXPHYS */
2848 if (bp->b_bcount > MPII_MAXFER) {
2849 bp->b_bcount = MPII_MAXFER;
2850 minphys(bp);
2851 }
2852 }
2853
/*
 * scsipi adapter entry point.  Handles transfer-mode negotiation and
 * dispatches ADAPTER_REQ_RUN_XFER by building an MPI2 SCSI IO request
 * in a ccb's request frame and posting it to the IOC.  Polled requests
 * (XS_CTL_POLL) complete before returning; others complete through
 * mpii_scsi_cmd_done() with a timeout callout armed.
 */
static void
mpii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct mpii_softc *sc = device_private(adapt->adapt_dev);
	struct mpii_ccb *ccb;
	struct mpii_ccb_bundle *mcb;
	struct mpii_msg_scsi_io *io;
	struct mpii_device *dev;
	int target;
	int timeout;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsipi_request\n", DEVNAME(sc));
	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		/* only tagged queueing is offered; no sync parameters */
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	/* CDBs longer than the request frame's cdb area can't be sent;
	 * fail with ILLEGAL REQUEST / invalid command opcode sense */
	if (xs->cmdlen > MPII_CDB_LEN) {
		DNPRINTF(MPII_D_CMD, "%s: CBD too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		bzero(&xs->sense, sizeof(xs->sense));
		xs->sense.scsi_sense.response_code =
		    SSD_RCODE_VALID | SSD_RCODE_CURRENT;
		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.scsi_sense.asc = 0x20;
		xs->error = XS_SENSE;
		scsipi_done(xs);
		return;
	}

	if ((dev = sc->sc_devs[target]) == NULL) {
		/* device no longer exists */
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		return;
	}

	ccb = mpii_get_ccb(sc, MPII_NOSLEEP);
	if (ccb == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->xs_control: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_smid, xs->xs_control);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpii_scsi_cmd_done;
	ccb->ccb_dev_handle = dev->dev_handle;

	/* the request frame holds the io message, sense space and sgl */
	mcb = ccb->ccb_cmd;
	io = &mcb->mcb_io;

	io->function = MPII_FUNCTION_SCSI_IO_REQUEST;
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = 24; /* XXX fix this */
	io->io_flags = htole16(xs->cmdlen);
	io->dev_handle = htole16(ccb->ccb_dev_handle);
	io->lun[0] = htobe16(periph->periph_lun);

	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPII_SCSIIO_DIR_NONE;
	}

	io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q;

	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	io->data_length = htole32(xs->datalen);

	/* sense data lands in the bundle, addressed relative to the frame */
	io->sense_buffer_low_address = htole32(ccb->ccb_cmd_dva +
	    ((u_int8_t *)&mcb->mcb_sense - (u_int8_t *)mcb));

	/* map the data buffer and build the sgl */
	if (mpii_load_xs(ccb) != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		mpii_put_ccb(sc, ccb);
		scsipi_done(xs);
		return;
	}

	DNPRINTF(MPII_D_CMD, "%s: sizeof(mpii_msg_scsi_io): %ld "
	    "sizeof(mpii_ccb_bundle): %ld sge offset: 0x%02lx\n",
	    DEVNAME(sc), sizeof(struct mpii_msg_scsi_io),
	    sizeof(struct mpii_ccb_bundle),
	    (u_int8_t *)&mcb->mcb_sgl[0] - (u_int8_t *)mcb);

	DNPRINTF(MPII_D_CMD, "%s sgl[0]: 0x%04x 0%04x 0x%04x\n",
	    DEVNAME(sc), mcb->mcb_sgl[0].sg_hdr, mcb->mcb_sgl[0].sg_lo_addr,
	    mcb->mcb_sgl[0].sg_hi_addr);

	DNPRINTF(MPII_D_CMD, "%s: Offset0: 0x%02x\n", DEVNAME(sc),
	    io->sgl_offset0);

	if (xs->xs_control & XS_CTL_POLL) {
		if (mpii_poll(sc, ccb) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			mpii_put_ccb(sc, ccb);
			scsipi_done(xs);
		}
		return;
	}
	/* arm the per-command timeout; at least one tick */
	timeout = mstohz(xs->timeout);
	if (timeout == 0)
		timeout = 1;
	callout_reset(&xs->xs_callout, timeout, mpii_scsi_cmd_tmo, ccb);

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsipi_request(): opcode: %02x "
	    "datalen: %d\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen);

	mpii_start(sc, ccb);
}
2993
/*
 * Per-command timeout callout.  If the ccb is still queued, mark it
 * timed out and hand it to the timeout workqueue for recovery; a ccb
 * that already completed (state changed under sc_ccb_mtx) is left
 * alone, so this safely races with normal completion.
 */
static void
mpii_scsi_cmd_tmo(void *xccb)
{
	struct mpii_ccb *ccb = xccb;
	struct mpii_softc *sc = ccb->ccb_sc;

	printf("%s: mpii_scsi_cmd_tmo\n", DEVNAME(sc));

	mutex_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_QUEUED) {
		ccb->ccb_state = MPII_CCB_TIMEOUT;
		workqueue_enqueue(sc->sc_ssb_tmowk, &ccb->u.ccb_wk, NULL);
	}
	mutex_exit(&sc->sc_ccb_mtx);
}
3009
3010 static void
3011 mpii_scsi_cmd_tmo_handler(struct work *wk, void *cookie)
3012 {
3013 struct mpii_softc *sc = cookie;
3014 struct mpii_ccb *tccb;
3015 struct mpii_ccb *ccb;
3016 struct mpii_msg_scsi_task_request *stq;
3017
3018 ccb = (void *)wk;
3019 tccb = mpii_get_ccb(sc, 0);
3020
3021 mutex_enter(&sc->sc_ccb_mtx);
3022 if (ccb->ccb_state != MPII_CCB_TIMEOUT) {
3023 mpii_put_ccb(sc, tccb);
3024 }
3025 /* should remove any other ccbs for the same dev handle */
3026 mutex_exit(&sc->sc_ccb_mtx);
3027
3028 stq = tccb->ccb_cmd;
3029 stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
3030 stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
3031 stq->dev_handle = htole16(ccb->ccb_dev_handle);
3032
3033 tccb->ccb_done = mpii_scsi_cmd_tmo_done;
3034 mpii_start(sc, tccb);
3035 }
3036
3037 static void
3038 mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb)
3039 {
3040 mpii_put_ccb(tccb->ccb_sc, tccb);
3041 }
3042
3043 static u_int8_t
3044 map_scsi_status(u_int8_t mpii_scsi_status)
3045 {
3046 u_int8_t scsi_status;
3047
3048 switch (mpii_scsi_status)
3049 {
3050 case MPII_SCSIIO_ERR_STATUS_SUCCESS:
3051 scsi_status = SCSI_OK;
3052 break;
3053
3054 case MPII_SCSIIO_ERR_STATUS_CHECK_COND:
3055 scsi_status = SCSI_CHECK;
3056 break;
3057
3058 case MPII_SCSIIO_ERR_STATUS_BUSY:
3059 scsi_status = SCSI_BUSY;
3060 break;
3061
3062 case MPII_SCSIIO_ERR_STATUS_INTERMEDIATE:
3063 scsi_status = SCSI_INTERM;
3064 break;
3065
3066 case MPII_SCSIIO_ERR_STATUS_INTERMEDIATE_CONDMET:
3067 scsi_status = SCSI_INTERM;
3068 break;
3069
3070 case MPII_SCSIIO_ERR_STATUS_RESERVATION_CONFLICT:
3071 scsi_status = SCSI_RESV_CONFLICT;
3072 break;
3073
3074 case MPII_SCSIIO_ERR_STATUS_CMD_TERM:
3075 case MPII_SCSIIO_ERR_STATUS_TASK_ABORTED:
3076 scsi_status = SCSI_TERMINATED;
3077 break;
3078
3079 case MPII_SCSIIO_ERR_STATUS_TASK_SET_FULL:
3080 scsi_status = SCSI_QUEUE_FULL;
3081 break;
3082
3083 case MPII_SCSIIO_ERR_STATUS_ACA_ACTIVE:
3084 scsi_status = SCSI_ACA_ACTIVE;
3085 break;
3086
3087 default:
3088 /* XXX: for the lack of anything better and other than OK */
3089 scsi_status = 0xFF;
3090 break;
3091 }
3092
3093 return scsi_status;
3094 }
3095
/*
 * Completion path for SCSI IO requests: stop the timeout callout,
 * unload the data map, decode the IOC reply (if any) into scsipi
 * error/status/resid/sense, then recycle the ccb and finish the xfer.
 * A missing reply frame (ccb_rcb == NULL) means success with no
 * status to report.
 */
static void
mpii_scsi_cmd_done(struct mpii_ccb *ccb)
{
	struct mpii_msg_scsi_io_error *sie;
	struct mpii_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_ccb_bundle *mcb = ccb->ccb_cmd;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	bool timeout = 0;

	callout_stop(&xs->xs_callout);
	/* note whether the timeout fired while we were completing */
	mutex_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_TIMEOUT)
		timeout = 1;
	ccb->ccb_state = MPII_CCB_READY;
	mutex_exit(&sc->sc_ccb_mtx);

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ccb->ccb_rcb == NULL) {
		/* no scsi error, we're ok so drop out early */
		xs->status = SCSI_OK;
		mpii_put_ccb(sc, ccb);
		scsipi_done(xs);
		return;
	}

	sie = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d "
	    "xs_control 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
	    xs->xs_control);
	DNPRINTF(MPII_D_CMD, "%s: dev_handle: %d msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), le16toh(sie->dev_handle),
	    sie->msg_length, sie->function);
	DNPRINTF(MPII_D_CMD, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    sie->vp_id, sie->vf_id);
	DNPRINTF(MPII_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x "
	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
	    sie->scsi_state, le16toh(sie->ioc_status));
	DNPRINTF(MPII_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    le32toh(sie->ioc_loginfo));
	DNPRINTF(MPII_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc),
	    le32toh(sie->transfer_count));
	DNPRINTF(MPII_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc),
	    le32toh(sie->sense_count));
	DNPRINTF(MPII_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc),
	    le32toh(sie->response_info));
	DNPRINTF(MPII_D_CMD, "%s: task_tag: 0x%04x\n", DEVNAME(sc),
	    le16toh(sie->task_tag));
	DNPRINTF(MPII_D_CMD, "%s: bidirectional_transfer_count: 0x%08x\n",
	    DEVNAME(sc), le32toh(sie->bidirectional_transfer_count));

	xs->status = map_scsi_status(sie->scsi_status);

	/* fold the IOC status and SCSI status into a scsipi error code */
	switch (le16toh(sie->ioc_status) & MPII_IOCSTATUS_MASK) {
	case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN:
		switch (sie->scsi_status) {
		case MPII_SCSIIO_ERR_STATUS_CHECK_COND:
			xs->error = XS_SENSE;
			/*FALLTHROUGH*/
		case MPII_SCSIIO_ERR_STATUS_SUCCESS:
			xs->resid = xs->datalen - le32toh(sie->transfer_count);
			break;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPII_IOCSTATUS_SUCCESS:
	case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (sie->scsi_status) {
		case MPII_SCSIIO_ERR_STATUS_SUCCESS:
			/*
			 * xs->resid = 0; - already set above
			 *
			 * XXX: check whether UNDERUN strategy
			 * would be appropriate here too.
			 * that would allow joining these cases.
			 */
			break;

		case MPII_SCSIIO_ERR_STATUS_CHECK_COND:
			xs->error = XS_SENSE;
			break;

		case MPII_SCSIIO_ERR_STATUS_BUSY:
		case MPII_SCSIIO_ERR_STATUS_TASK_SET_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			xs->error = XS_DRIVER_STUFFUP;
		}
		break;

	case MPII_IOCSTATUS_BUSY:
	case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_BUSY;
		break;

	case MPII_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPII_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* terminated by our own target reset -> timeout */
		xs->error = timeout ? XS_TIMEOUT : XS_RESET;
		break;

	case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* copy autosense data out of the request frame bundle */
	if (sie->scsi_state & MPII_SCSIIO_ERR_STATE_AUTOSENSE_VALID)
		memcpy(&xs->sense, &mcb->mcb_sense, sizeof(xs->sense));

	DNPRINTF(MPII_D_CMD, "%s: xs err: %d status: %#x\n", DEVNAME(sc),
	    xs->error, xs->status);

	mpii_push_reply(sc, ccb->ccb_rcb);
	mpii_put_ccb(sc, ccb);
	scsipi_done(xs);
}
3233
#if 0
/*
 * mpii_ioctl_cache: get (DIOCGCACHE) or set (DIOCSCACHE) the write-cache
 * state of a RAID volume, using a RAID_ACTION firmware request for the
 * set case.
 *
 * Disabled: this still takes an OpenBSD-style scsi_link and has not been
 * converted to NetBSD's scsipi layer.
 *
 * NOTE(review): if this is ever re-enabled, two issues should be fixed
 * first: the ccb obtained from mpii_get_ccb() is leaked when mpii_poll()
 * fails (the goto skips mpii_put_ccb()), and rep->ioc_status appears to
 * be compared without le16toh() — confirm against the reply layout.
 */
static int
mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
{
	struct mpii_softc *sc = (struct mpii_softc *)link->adapter_softc;
	struct mpii_device *dev = sc->sc_devs[link->target];
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_msg_raid_action_request *req;
	struct mpii_msg_raid_action_reply *rep;
	struct mpii_cfg_hdr hdr;
	struct mpii_ccb	*ccb;
	u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;
	size_t pagelen;
	int rv = 0;
	int enabled;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    addr, MPII_PG_POLL, &hdr) != 0)
		return (EINVAL);

	/* page_length is in units of 32-bit words */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (vpg == NULL)
		return (ENOMEM);

	if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1,
	    vpg, pagelen) != 0) {
		rv = EINVAL;
		goto done;
	}

	enabled = ((le16toh(vpg->volume_settings) &
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) ==
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0;

	if (cmd == DIOCGCACHE) {
		/* read-only query: report current state and bail out */
		dc->wrcache = enabled;
		dc->rdcache = 0;
		goto done;
	} /* else DIOCSCACHE */

	if (dc->rdcache) {
		/* the firmware only controls the write cache */
		rv = EOPNOTSUPP;
		goto done;
	}

	/* requested state already in effect: nothing to do */
	if (((dc->wrcache) ? 1 : 0) == enabled)
		goto done;

	ccb = mpii_get_ccb(sc, MPII_NOSLEEP);
	if (ccb == NULL) {
		rv = ENOMEM;
		goto done;
	}

	ccb->ccb_done = mpii_empty_done;

	req = ccb->ccb_cmd;
	bzero(req, sizeof(*req));
	req->function = MPII_FUNCTION_RAID_ACTION;
	req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE;
	req->vol_dev_handle = htole16(dev->dev_handle);
	req->action_data = htole32(dc->wrcache ?
	    MPII_RAID_VOL_WRITE_CACHE_ENABLE :
	    MPII_RAID_VOL_WRITE_CACHE_DISABLE);

	if (mpii_poll(sc, ccb) != 0) {
		rv = EIO;
		goto done;
	}

	if (ccb->ccb_rcb != NULL) {
		/* verify the firmware actually applied the new setting */
		rep = ccb->ccb_rcb->rcb_reply;
		if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) ||
		    ((rep->action_data[0] &
		     MPII_RAID_VOL_WRITE_CACHE_MASK) !=
		    (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE :
		     MPII_RAID_VOL_WRITE_CACHE_DISABLE)))
			rv = EINVAL;
		mpii_push_reply(sc, ccb->ccb_rcb);
	}

	mpii_put_ccb(sc, ccb);

done:
	free(vpg, M_TEMP);
	return (rv);
}
#endif
3323 static int
3324 mpii_cache_enable(struct mpii_softc *sc, struct mpii_device *dev)
3325 {
3326 struct mpii_cfg_raid_vol_pg0 *vpg;
3327 struct mpii_msg_raid_action_request *req;
3328 struct mpii_msg_raid_action_reply *rep;
3329 struct mpii_cfg_hdr hdr;
3330 struct mpii_ccb *ccb;
3331 u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;
3332 size_t pagelen;
3333 int rv = 0;
3334 int enabled;
3335
3336 if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3337 addr, MPII_PG_POLL, &hdr) != 0)
3338 return (EINVAL);
3339
3340 pagelen = hdr.page_length * 4;
3341 vpg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
3342 if (vpg == NULL)
3343 return (ENOMEM);
3344
3345 if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1,
3346 vpg, pagelen) != 0) {
3347 rv = EINVAL;
3348 goto done;
3349 free(vpg, M_TEMP);
3350 return (EINVAL);
3351 }
3352
3353 enabled = ((le16toh(vpg->volume_settings) &
3354 MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) ==
3355 MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0;
3356 aprint_normal_dev(sc->sc_dev, "target %d cache %s", dev->slot,
3357 enabled ? "enabled" : "disabled, enabling");
3358 aprint_normal("\n");
3359
3360 if (enabled == 0)
3361 goto done;
3362
3363 ccb = mpii_get_ccb(sc, MPII_NOSLEEP);
3364 if (ccb == NULL) {
3365 rv = ENOMEM;
3366 goto done;
3367 }
3368
3369 ccb->ccb_done = mpii_empty_done;
3370
3371 req = ccb->ccb_cmd;
3372 bzero(req, sizeof(*req));
3373 req->function = MPII_FUNCTION_RAID_ACTION;
3374 req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE;
3375 req->vol_dev_handle = htole16(dev->dev_handle);
3376 req->action_data = htole32(
3377 MPII_RAID_VOL_WRITE_CACHE_ENABLE);
3378
3379 if (mpii_poll(sc, ccb) != 0) {
3380 rv = EIO;
3381 goto done;
3382 }
3383
3384 if (ccb->ccb_rcb != NULL) {
3385 rep = ccb->ccb_rcb->rcb_reply;
3386 if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) ||
3387 ((rep->action_data[0] &
3388 MPII_RAID_VOL_WRITE_CACHE_MASK) !=
3389 MPII_RAID_VOL_WRITE_CACHE_ENABLE))
3390 rv = EINVAL;
3391 mpii_push_reply(sc, ccb->ccb_rcb);
3392 }
3393
3394 mpii_put_ccb(sc, ccb);
3395
3396 done:
3397 free(vpg, M_TEMP);
3398 if (rv) {
3399 aprint_error_dev(sc->sc_dev,
3400 "enabling cache on target %d failed (%d)\n",
3401 dev->slot, rv);
3402 }
3403 return (rv);
3404 }
3405
3406 #if NBIO > 0
3407 static int
3408 mpii_ioctl(device_t dev, u_long cmd, void *addr)
3409 {
3410 struct mpii_softc *sc = device_private(dev);
3411 int s, error = 0;
3412
3413 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc));
3414 KERNEL_LOCK(1, curlwp);
3415 s = splbio();
3416
3417 switch (cmd) {
3418 case BIOCINQ:
3419 DNPRINTF(MPII_D_IOCTL, "inq\n");
3420 error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr);
3421 break;
3422 case BIOCVOL:
3423 DNPRINTF(MPII_D_IOCTL, "vol\n");
3424 error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr);
3425 break;
3426 case BIOCDISK:
3427 DNPRINTF(MPII_D_IOCTL, "disk\n");
3428 error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr);
3429 break;
3430 default:
3431 DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n");
3432 error = EINVAL;
3433 }
3434
3435 splx(s);
3436 KERNEL_UNLOCK_ONE(curlwp);
3437 return (error);
3438 }
3439
3440 static int
3441 mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi)
3442 {
3443 int i;
3444
3445 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc));
3446
3447 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3448 for (i = 0; i < sc->sc_max_devices; i++)
3449 if (sc->sc_devs[i] &&
3450 ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME))
3451 bi->bi_novol++;
3452 return (0);
3453 }
3454
/*
 * BIOCVOL handler: fill in a bioc_vol with the state, RAID level, disk
 * count, size and device name of the volume selected by bv->bv_volid.
 * The volume's RAID volume page 0 is fetched from the controller.
 */
static int
mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_cfg_hdr hdr;
	struct mpii_device *dev;
	struct scsipi_periph *periph;
	size_t pagelen;
	u_int16_t volh;
	int rv, hcnt = 0;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n",
	    DEVNAME(sc), bv->bv_volid);

	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in units of 32-bit words */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP);
		return (EINVAL);
	}

	/* Map the firmware volume state onto bio(4) status values. */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		/* degraded + resync in progress is reported as rebuilding */
		if (ISSET(le32toh(vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = dev->percent;
		} else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	/* Map the firmware volume type onto a numeric RAID level. */
	switch (vpg->volume_type) {
	case MPII_CFG_RAID_VOL_0_TYPE_RAID0:
		bv->bv_level = 0;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1:
		bv->bv_level = 1;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1E:
	case MPII_CFG_RAID_VOL_0_TYPE_RAID10:
		bv->bv_level = 10;
		break;
	default:
		bv->bv_level = -1;
	}

	/* Hot spares assigned to this volume count towards bv_nodisk. */
	if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) {
		free(vpg, M_TEMP);
		return (rv);
	}

	bv->bv_nodisk = vpg->num_phys_disks + hcnt;

	bv->bv_size = le64toh(vpg->max_lba) * le16toh(vpg->block_size);

	/* Report the scsipi device name if the volume has attached. */
	periph = scsipi_lookup_periph(&sc->sc_chan, dev->slot, 0);
	if (periph != NULL) {
		if (periph->periph_dev == NULL) {
			snprintf(bv->bv_dev, sizeof(bv->bv_dev), "%s:%d",
			    DEVNAME(sc), dev->slot);
		} else {
			strlcpy(bv->bv_dev, device_xname(periph->periph_dev),
			    sizeof(bv->bv_dev));
		}
	}

	free(vpg, M_TEMP);
	return (0);
}
3559
/*
 * BIOCDISK handler: fill in a bioc_disk for member bd->bd_diskid of the
 * volume selected by bd->bd_volid.  Members beyond the volume's physical
 * disk count are treated as hot spares and resolved via mpii_bio_hs().
 */
static int
mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd)
{
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_cfg_raid_vol_pg0_physdisk *pd;
	struct mpii_cfg_hdr hdr;
	struct mpii_device *dev;
	size_t pagelen;
	u_int16_t volh;
	u_int8_t dn;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n",
	    DEVNAME(sc), bd->bd_volid, bd->bd_diskid);

	if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in units of 32-bit words */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP);
		return (EINVAL);
	}

	/* diskids past the member disks refer to this volume's hot spares */
	if (bd->bd_diskid >= vpg->num_phys_disks) {
		int nvdsk = vpg->num_phys_disks;
		int hsmap = vpg->hot_spare_pool;

		free(vpg, M_TEMP);
		return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL));
	}

	/* per-physdisk records immediately follow the fixed page header */
	pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) +
	    bd->bd_diskid;
	dn = pd->phys_disk_num;

	free(vpg, M_TEMP);
	return (mpii_bio_disk(sc, bd, dn));
}
3616
/*
 * Walk the active RAID configuration looking for hot-spare physical
 * disks in hot-spare pool "hsmap".  Dual purpose:
 *  - bd == NULL: just count the hot spares, returned via *hscnt
 *    (used by BIOCINQ/BIOCVOL to compute disk totals);
 *  - bd != NULL: resolve bd->bd_diskid (offset by nvdsk member disks)
 *    to the matching hot spare and fill bd in via mpii_bio_disk().
 */
static int
mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk,
     int hsmap, int *hscnt)
{
	struct mpii_cfg_raid_config_pg0 *cpg;
	struct mpii_raid_config_element *el;
	struct mpii_ecfg_hdr ehdr;
	size_t pagelen;
	int i, nhs = 0;

	if (bd) {
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc),
		    bd->bd_diskid - nvdsk);
	} else {
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc));
	}

	/* RAID config page 0 is an extended config page */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG,
	    0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED,
	    &ehdr) != 0) {
		printf("%s: unable to fetch header for raid config page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* ext_page_length is in units of 32-bit words */
	pagelen = le16toh(ehdr.ext_page_length) * 4;
	cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_ZERO);
	if (cpg == NULL) {
		printf("%s: unable to allocate space for raid config page 0\n",
		    DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG,
	    MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) {
		printf("%s: unable to fetch raid config page 0\n",
		    DEVNAME(sc));
		free(cpg, M_TEMP);
		return (EINVAL);
	}

	/* config elements immediately follow the fixed page header */
	el = (struct mpii_raid_config_element *)(cpg + 1);
	for (i = 0; i < cpg->num_elements; i++, el++) {
		if (ISSET(le16toh(el->element_flags),
		    MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) &&
		    el->hot_spare_pool == hsmap) {
			/*
			 * diskid comparison is based on the idea that all
			 * disks are counted by the bio(4) in sequence, thus
			 * substracting the number of disks in the volume
			 * from the diskid yields us a "relative" hotspare
			 * number, which is good enough for us.
			 */
			if (bd != NULL && bd->bd_diskid == nhs + nvdsk) {
				u_int8_t dn = el->phys_disk_num;

				free(cpg, M_TEMP);
				return (mpii_bio_disk(sc, bd, dn));
			}
			nhs++;
		}
	}

	if (hscnt)
		*hscnt = nhs;

	free(cpg, M_TEMP);
	return (0);
}
3686
3687 static int
3688 mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn)
3689 {
3690 struct mpii_cfg_raid_physdisk_pg0 *ppg;
3691 struct mpii_cfg_hdr hdr;
3692 struct mpii_device *dev;
3693 int len;
3694
3695 DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc),
3696 bd->bd_diskid);
3697
3698 ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_ZERO);
3699 if (ppg == NULL) {
3700 printf("%s: unable to allocate space for raid physical disk "
3701 "page 0\n", DEVNAME(sc));
3702 return (ENOMEM);
3703 }
3704
3705 hdr.page_version = 0;
3706 hdr.page_length = sizeof(*ppg) / 4;
3707 hdr.page_number = 0;
3708 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD;
3709
3710 if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0,
3711 &hdr, 1, ppg, sizeof(*ppg)) != 0) {
3712 printf("%s: unable to fetch raid drive page 0\n",
3713 DEVNAME(sc));
3714 free(ppg, M_TEMP);
3715 return (EINVAL);
3716 }
3717
3718 bd->bd_target = ppg->phys_disk_num;
3719
3720 if ((dev = mpii_find_dev(sc, le16toh(ppg->dev_handle))) == NULL) {
3721 bd->bd_status = BIOC_SDINVALID;
3722 free(ppg, M_TEMP);
3723 return (0);
3724 }
3725
3726 switch (ppg->phys_disk_state) {
3727 case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE:
3728 case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL:
3729 bd->bd_status = BIOC_SDONLINE;
3730 break;
3731 case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
3732 if (ppg->offline_reason ==
3733 MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED ||
3734 ppg->offline_reason ==
3735 MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ)
3736 bd->bd_status = BIOC_SDFAILED;
3737 else
3738 bd->bd_status = BIOC_SDOFFLINE;
3739 break;
3740 case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED:
3741 bd->bd_status = BIOC_SDFAILED;
3742 break;
3743 case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING:
3744 bd->bd_status = BIOC_SDREBUILD;
3745 break;
3746 case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE:
3747 bd->bd_status = BIOC_SDHOTSPARE;
3748 break;
3749 case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED:
3750 bd->bd_status = BIOC_SDUNUSED;
3751 break;
3752 case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE:
3753 default:
3754 bd->bd_status = BIOC_SDINVALID;
3755 break;
3756 }
3757
3758 bd->bd_size = le64toh(ppg->dev_max_lba) * le16toh(ppg->block_size);
3759
3760 strnvisx(bd->bd_vendor, sizeof(bd->bd_vendor),
3761 ppg->vendor_id, sizeof(ppg->vendor_id),
3762 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
3763 len = strlen(bd->bd_vendor);
3764 bd->bd_vendor[len] = ' ';
3765 strnvisx(&bd->bd_vendor[len + 1], sizeof(ppg->vendor_id) - len - 1,
3766 ppg->product_id, sizeof(ppg->product_id),
3767 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
3768 strnvisx(bd->bd_serial, sizeof(bd->bd_serial),
3769 ppg->serial, sizeof(ppg->serial), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
3770
3771 free(ppg, M_TEMP);
3772 return (0);
3773 }
3774
3775 static struct mpii_device *
3776 mpii_find_vol(struct mpii_softc *sc, int volid)
3777 {
3778 struct mpii_device *dev = NULL;
3779
3780 if (sc->sc_vd_id_low + volid >= sc->sc_max_devices)
3781 return (NULL);
3782 dev = sc->sc_devs[sc->sc_vd_id_low + volid];
3783 if (dev && ISSET(dev->flags, MPII_DF_VOLUME))
3784 return (dev);
3785 return (NULL);
3786 }
3787
3788 /*
3789 * Non-sleeping lightweight version of the mpii_ioctl_vol
3790 */
3791 static int
3792 mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv)
3793 {
3794 struct mpii_cfg_raid_vol_pg0 *vpg;
3795 struct mpii_cfg_hdr hdr;
3796 struct mpii_device *dev = NULL;
3797 size_t pagelen;
3798 u_int16_t volh;
3799
3800 if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
3801 return (ENODEV);
3802 volh = dev->dev_handle;
3803
3804 if (mpii_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3805 MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, &hdr) != 0) {
3806 DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid "
3807 "volume page 0\n", DEVNAME(sc));
3808 return (EINVAL);
3809 }
3810
3811 pagelen = hdr.page_length * 4;
3812 vpg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
3813 if (vpg == NULL) {
3814 DNPRINTF(MPII_D_MISC, "%s: unable to allocate space for raid "
3815 "volume page 0\n", DEVNAME(sc));
3816 return (ENOMEM);
3817 }
3818
3819 if (mpii_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh,
3820 &hdr, 1, vpg, pagelen) != 0) {
3821 DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume "
3822 "page 0\n", DEVNAME(sc));
3823 free(vpg, M_TEMP);
3824 return (EINVAL);
3825 }
3826
3827 switch (vpg->volume_state) {
3828 case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
3829 case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
3830 bv->bv_status = BIOC_SVONLINE;
3831 break;
3832 case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
3833 if (ISSET(le32toh(vpg->volume_status),
3834 MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
3835 bv->bv_status = BIOC_SVREBUILD;
3836 else
3837 bv->bv_status = BIOC_SVDEGRADED;
3838 break;
3839 case MPII_CFG_RAID_VOL_0_STATE_FAILED:
3840 bv->bv_status = BIOC_SVOFFLINE;
3841 break;
3842 case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
3843 bv->bv_status = BIOC_SVBUILDING;
3844 break;
3845 case MPII_CFG_RAID_VOL_0_STATE_MISSING:
3846 default:
3847 bv->bv_status = BIOC_SVINVALID;
3848 break;
3849 }
3850
3851 free(vpg, M_TEMP);
3852 return (0);
3853 }
3854
3855 static int
3856 mpii_create_sensors(struct mpii_softc *sc)
3857 {
3858 int i, rv;
3859
3860 sc->sc_sme = sysmon_envsys_create();
3861 sc->sc_sensors = malloc(sizeof(envsys_data_t) * sc->sc_vd_count,
3862 M_DEVBUF, M_NOWAIT | M_ZERO);
3863 if (sc->sc_sensors == NULL) {
3864 aprint_error_dev(sc->sc_dev,
3865 "can't allocate envsys_data_t\n");
3866 return (1);
3867 }
3868
3869 for (i = 0; i < sc->sc_vd_count; i++) {
3870 sc->sc_sensors[i].units = ENVSYS_DRIVE;
3871 sc->sc_sensors[i].state = ENVSYS_SINVALID;
3872 sc->sc_sensors[i].value_cur = ENVSYS_DRIVE_EMPTY;
3873 /* Enable monitoring for drive state changes */
3874 sc->sc_sensors[i].flags |= ENVSYS_FMONSTCHANGED;
3875
3876 /* logical drives */
3877 snprintf(sc->sc_sensors[i].desc,
3878 sizeof(sc->sc_sensors[i].desc), "%s:%d",
3879 DEVNAME(sc), i);
3880 if ((rv = sysmon_envsys_sensor_attach(sc->sc_sme,
3881 &sc->sc_sensors[i])) != 0) {
3882 aprint_error_dev(sc->sc_dev,
3883 "unable to attach sensor (rv = %d)\n", rv);
3884 goto out;
3885 }
3886 }
3887 sc->sc_sme->sme_name = DEVNAME(sc);
3888 sc->sc_sme->sme_cookie = sc;
3889 sc->sc_sme->sme_refresh = mpii_refresh_sensors;
3890
3891 rv = sysmon_envsys_register(sc->sc_sme);
3892
3893 if (rv != 0) {
3894 aprint_error_dev(sc->sc_dev,
3895 "unable to register with sysmon (rv = %d)\n", rv);
3896 goto out;
3897 }
3898 return 0;
3899
3900 out:
3901 free(sc->sc_sensors, M_DEVBUF);
3902 sysmon_envsys_destroy(sc->sc_sme);
3903 sc->sc_sme = NULL;
3904 return EINVAL;
3905 }
3906
3907 static int
3908 mpii_destroy_sensors(struct mpii_softc *sc)
3909 {
3910 if (sc->sc_sme == NULL)
3911 return 0;
3912 sysmon_envsys_unregister(sc->sc_sme);
3913 sc->sc_sme = NULL;
3914 free(sc->sc_sensors, M_DEVBUF);
3915 return 0;
3916 }
3917
3918 static void
3919 mpii_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
3920 {
3921 struct mpii_softc *sc = sc = sme->sme_cookie;
3922 struct bioc_vol bv;
3923 int s, error;
3924
3925 bzero(&bv, sizeof(bv));
3926 bv.bv_volid = edata->sensor;
3927 KERNEL_LOCK(1, curlwp);
3928 s = splbio();
3929 error = mpii_bio_volstate(sc, &bv);
3930 splx(s);
3931 KERNEL_UNLOCK_ONE(curlwp);
3932 if (error)
3933 bv.bv_status = BIOC_SVINVALID;
3934 bio_vol_to_envsys(edata, &bv);
3935 }
3936 #endif /* NBIO > 0 */
3937