/*	$NetBSD: arcmsr.c,v 1.41 2021/04/24 23:36:57 thorpej Exp $ */
2 /* $OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */
3
4 /*
5 * Copyright (c) 2007, 2008 Juan Romero Pardines <xtraeme (at) netbsd.org>
6 * Copyright (c) 2006 David Gwynne <dlg (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include "bio.h"
22
23 #include <sys/cdefs.h>
24 __KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.41 2021/04/24 23:36:57 thorpej Exp $");
25
26 #include <sys/param.h>
27 #include <sys/buf.h>
28 #include <sys/kernel.h>
29 #include <sys/malloc.h>
30 #include <sys/device.h>
31 #include <sys/kmem.h>
32 #include <sys/kthread.h>
33 #include <sys/mutex.h>
34 #include <sys/condvar.h>
35 #include <sys/rwlock.h>
36
37 #if NBIO > 0
38 #include <sys/ioctl.h>
39 #include <dev/biovar.h>
40 #endif
41
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 #include <dev/pci/pcidevs.h>
45
46 #include <dev/scsipi/scsipi_all.h>
47 #include <dev/scsipi/scsi_all.h>
48 #include <dev/scsipi/scsiconf.h>
49
50 #include <dev/sysmon/sysmonvar.h>
51
52 #include <sys/bus.h>
53
54 #include <dev/pci/arcmsrvar.h>
55
56 /* #define ARC_DEBUG */
57 #ifdef ARC_DEBUG
58 #define ARC_D_INIT (1<<0)
59 #define ARC_D_RW (1<<1)
60 #define ARC_D_DB (1<<2)
61
62 int arcdebug = 0;
63
64 #define DPRINTF(p...) do { if (arcdebug) printf(p); } while (0)
65 #define DNPRINTF(n, p...) do { if ((n) & arcdebug) printf(p); } while (0)
66
67 #else
68 #define DPRINTF(p, ...) /* p */
69 #define DNPRINTF(n, p, ...) /* n, p */
70 #endif
71
72 /*
73 * the fw header must always equal this.
74 */
75 #if NBIO > 0
76 static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };
77 #endif
78
79 /*
80 * autoconf(9) glue.
81 */
82 static int arc_match(device_t, cfdata_t, void *);
83 static void arc_attach(device_t, device_t, void *);
84 static int arc_detach(device_t, int);
85 static bool arc_shutdown(device_t, int);
86 static int arc_intr(void *);
87 static void arc_minphys(struct buf *);
88
89 CFATTACH_DECL_NEW(arcmsr, sizeof(struct arc_softc),
90 arc_match, arc_attach, arc_detach, NULL);
91
92 /*
93 * bio(4) and sysmon_envsys(9) glue.
94 */
95 #if NBIO > 0
96 static int arc_bioctl(device_t, u_long, void *);
97 static int arc_bio_inq(struct arc_softc *, struct bioc_inq *);
98 static int arc_bio_vol(struct arc_softc *, struct bioc_vol *);
99 static int arc_bio_disk_volume(struct arc_softc *, struct bioc_disk *);
100 static int arc_bio_disk_novol(struct arc_softc *, struct bioc_disk *);
101 static void arc_bio_disk_filldata(struct arc_softc *, struct bioc_disk *,
102 struct arc_fw_diskinfo *, int);
103 static int arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
104 static int arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
105 static int arc_bio_getvol(struct arc_softc *, int,
106 struct arc_fw_volinfo *);
107 static int arc_bio_setstate(struct arc_softc *, struct bioc_setstate *);
108 static int arc_bio_volops(struct arc_softc *, struct bioc_volops *);
109 static void arc_create_sensors(void *);
110 static void arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
111 static int arc_fw_parse_status_code(struct arc_softc *, uint8_t *);
112 #endif
113
114 /*
115 * interface for scsi midlayer to talk to.
116 */
117 static void arc_scsi_cmd(struct scsipi_channel *, scsipi_adapter_req_t,
118 void *);
119
120 /*
121 * code to deal with getting bits in and out of the bus space.
122 */
123 static uint32_t arc_read(struct arc_softc *, bus_size_t);
124 static void arc_read_region(struct arc_softc *, bus_size_t, void *,
125 size_t);
126 static void arc_write(struct arc_softc *, bus_size_t, uint32_t);
127 #if NBIO > 0
128 static void arc_write_region(struct arc_softc *, bus_size_t, void *,
129 size_t);
130 #endif
131 static int arc_wait_eq(struct arc_softc *, bus_size_t, uint32_t,
132 uint32_t);
133 #ifdef unused
134 static int arc_wait_ne(struct arc_softc *, bus_size_t, uint32_t,
135 uint32_t);
136 #endif
137 static int arc_msg0(struct arc_softc *, uint32_t);
138 static struct arc_dmamem *arc_dmamem_alloc(struct arc_softc *, size_t);
139 static void arc_dmamem_free(struct arc_softc *,
140 struct arc_dmamem *);
141
142 static int arc_alloc_ccbs(device_t);
143 static struct arc_ccb *arc_get_ccb(struct arc_softc *);
144 static void arc_put_ccb(struct arc_softc *, struct arc_ccb *);
145 static int arc_load_xs(struct arc_ccb *);
146 static int arc_complete(struct arc_softc *, struct arc_ccb *, int);
147 static void arc_scsi_cmd_done(struct arc_softc *, struct arc_ccb *,
148 uint32_t);
149
150 /*
151 * real stuff for dealing with the hardware.
152 */
153 static int arc_map_pci_resources(device_t, struct pci_attach_args *);
154 static void arc_unmap_pci_resources(struct arc_softc *);
155 static int arc_query_firmware(device_t);
156
157 /*
158 * stuff to do messaging via the doorbells.
159 */
160 #if NBIO > 0
161 static void arc_lock(struct arc_softc *);
162 static void arc_unlock(struct arc_softc *);
163 static void arc_wait(struct arc_softc *);
164 static uint8_t arc_msg_cksum(void *, uint16_t);
165 static int arc_msgbuf(struct arc_softc *, void *, size_t, void *, size_t);
166 #endif
167
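/*
 * the post queue takes the bus address of a command (pre-shifted into
 * ccb_cmd_post); the reply queue hands the same address back once the
 * firmware has finished with it.
 */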
168 #define arc_push(_s, _r) arc_write((_s), ARC_REG_POST_QUEUE, (_r))
169 #define arc_pop(_s) arc_read((_s), ARC_REG_REPLY_QUEUE)
170
171 static int
172 arc_match(device_t parent, cfdata_t match, void *aux)
173 {
174 struct pci_attach_args *pa = aux;
175
176 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
177 switch (PCI_PRODUCT(pa->pa_id)) {
178 case PCI_PRODUCT_ARECA_ARC1110:
179 case PCI_PRODUCT_ARECA_ARC1120:
180 case PCI_PRODUCT_ARECA_ARC1130:
181 case PCI_PRODUCT_ARECA_ARC1160:
182 case PCI_PRODUCT_ARECA_ARC1170:
183 case PCI_PRODUCT_ARECA_ARC1200:
184 case PCI_PRODUCT_ARECA_ARC1202:
185 case PCI_PRODUCT_ARECA_ARC1210:
186 case PCI_PRODUCT_ARECA_ARC1220:
187 case PCI_PRODUCT_ARECA_ARC1230:
188 case PCI_PRODUCT_ARECA_ARC1260:
189 case PCI_PRODUCT_ARECA_ARC1270:
190 case PCI_PRODUCT_ARECA_ARC1280:
191 case PCI_PRODUCT_ARECA_ARC1380:
192 case PCI_PRODUCT_ARECA_ARC1381:
193 case PCI_PRODUCT_ARECA_ARC1680:
194 case PCI_PRODUCT_ARECA_ARC1681:
195 return 1;
196 default:
197 break;
198 }
199 }
200
201 return 0;
202 }
203
204 static void
205 arc_attach(device_t parent, device_t self, void *aux)
206 {
207 struct arc_softc *sc = device_private(self);
208 struct pci_attach_args *pa = aux;
209 struct scsipi_adapter *adapt = &sc->sc_adapter;
210 struct scsipi_channel *chan = &sc->sc_chan;
211
212 sc->sc_dev = self;
213 sc->sc_talking = 0;
214 rw_init(&sc->sc_rwlock);
215 mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
216 cv_init(&sc->sc_condvar, "arcdb");
217
218 if (arc_map_pci_resources(self, pa) != 0) {
219 /* error message printed by arc_map_pci_resources */
220 return;
221 }
222
223 if (arc_query_firmware(self) != 0) {
224 /* error message printed by arc_query_firmware */
225 goto unmap_pci;
226 }
227
228 if (arc_alloc_ccbs(self) != 0) {
229 /* error message printed by arc_alloc_ccbs */
230 goto unmap_pci;
231 }
232
233 if (!pmf_device_register1(self, NULL, NULL, arc_shutdown))
234 panic("%s: couldn't establish shutdown handler\n",
235 device_xname(self));
236
237 memset(adapt, 0, sizeof(*adapt));
238 adapt->adapt_dev = self;
239 adapt->adapt_nchannels = 1;
240 adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
241 adapt->adapt_max_periph = adapt->adapt_openings;
242 adapt->adapt_minphys = arc_minphys;
243 adapt->adapt_request = arc_scsi_cmd;
244 adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
245
246 memset(chan, 0, sizeof(*chan));
247 chan->chan_adapter = adapt;
248 chan->chan_bustype = &scsi_bustype;
249 chan->chan_nluns = ARC_MAX_LUN;
250 chan->chan_ntargets = ARC_MAX_TARGET;
251 chan->chan_id = ARC_MAX_TARGET;
252 chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
253
254 /*
	 * Save the device_t returned, because we may need to attach
	 * devices via the management interface.
257 */
258 sc->sc_scsibus_dv = config_found(self, &sc->sc_chan, scsiprint,
259 CFARG_EOL);
260
261 /* enable interrupts */
262 arc_write(sc, ARC_REG_INTRMASK,
263 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));
264
265 #if NBIO > 0
266 /*
267 * Register the driver to bio(4) and setup the sensors.
268 */
269 if (bio_register(self, arc_bioctl) != 0)
270 panic("%s: bioctl registration failed\n", device_xname(self));
271
272 /*
273 * you need to talk to the firmware to get volume info. our firmware
274 * interface relies on being able to sleep, so we need to use a thread
275 * to do the work.
276 */
277 if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
278 arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
279 panic("%s: unable to create a kernel thread for sensors\n",
280 device_xname(self));
281 #endif
282
283 return;
284
285 unmap_pci:
286 arc_unmap_pci_resources(sc);
287 }
288
289 static int
290 arc_detach(device_t self, int flags)
291 {
292 struct arc_softc *sc = device_private(self);
293
294 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
295 aprint_error_dev(self, "timeout waiting to stop bg rebuild\n");
296
297 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
298 aprint_error_dev(self, "timeout waiting to flush cache\n");
299
300 if (sc->sc_sme != NULL)
301 sysmon_envsys_unregister(sc->sc_sme);
302
303 return 0;
304 }
305
306 static bool
307 arc_shutdown(device_t self, int how)
308 {
309 struct arc_softc *sc = device_private(self);
310
311 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
312 aprint_error_dev(self, "timeout waiting to stop bg rebuild\n");
313
314 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
315 aprint_error_dev(self, "timeout waiting to flush cache\n");
316
317 return true;
318 }
319
320 static void
321 arc_minphys(struct buf *bp)
322 {
323 if (bp->b_bcount > MAXPHYS)
324 bp->b_bcount = MAXPHYS;
325 minphys(bp);
326 }
327
328 static int
329 arc_intr(void *arg)
330 {
331 struct arc_softc *sc = arg;
332 struct arc_ccb *ccb = NULL;
333 char *kva = ARC_DMA_KVA(sc->sc_requests);
334 struct arc_io_cmd *cmd;
335 uint32_t reg, intrstat;
336
337 mutex_spin_enter(&sc->sc_mutex);
338 intrstat = arc_read(sc, ARC_REG_INTRSTAT);
339 if (intrstat == 0x0) {
340 mutex_spin_exit(&sc->sc_mutex);
341 return 0;
342 }
343
344 intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
345 arc_write(sc, ARC_REG_INTRSTAT, intrstat);
346
347 if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
348 if (sc->sc_talking) {
349 arc_write(sc, ARC_REG_INTRMASK,
350 ~ARC_REG_INTRMASK_POSTQUEUE);
351 cv_broadcast(&sc->sc_condvar);
352 } else {
353 /* otherwise drop it */
354 reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
355 arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
356 if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
357 arc_write(sc, ARC_REG_INB_DOORBELL,
358 ARC_REG_INB_DOORBELL_READ_OK);
359 }
360 }
361 mutex_spin_exit(&sc->sc_mutex);
362
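	/*
	 * drain the reply queue: each entry is the shifted bus address of
	 * a completed command, so shift it back and subtract the dma base
	 * to find the ccb in our request memory.
	 */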
363 while ((reg = arc_pop(sc)) != 0xffffffff) {
364 cmd = (struct arc_io_cmd *)(kva +
365 ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
366 (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
367 ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
368
369 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
370 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
371 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
372
373 arc_scsi_cmd_done(sc, ccb, reg);
374 }
375
376
377 return 1;
378 }
379
380 void
381 arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
382 {
383 struct scsipi_periph *periph;
384 struct scsipi_xfer *xs;
385 struct scsipi_adapter *adapt = chan->chan_adapter;
386 struct arc_softc *sc = device_private(adapt->adapt_dev);
387 struct arc_ccb *ccb;
388 struct arc_msg_scsicmd *cmd;
389 uint32_t reg;
390 uint8_t target;
391
392 switch (req) {
393 case ADAPTER_REQ_GROW_RESOURCES:
394 /* Not supported. */
395 return;
396 case ADAPTER_REQ_SET_XFER_MODE:
397 /* Not supported. */
398 return;
399 case ADAPTER_REQ_RUN_XFER:
400 break;
401 }
402
403 mutex_spin_enter(&sc->sc_mutex);
404
405 xs = arg;
406 periph = xs->xs_periph;
407 target = periph->periph_target;
408
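	/*
	 * the firmware only takes CDBs up to ARC_MSG_CDBLEN bytes; reject
	 * anything bigger with ILLEGAL REQUEST sense (asc 0x20, invalid
	 * command operation code).
	 */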
409 if (xs->cmdlen > ARC_MSG_CDBLEN) {
410 memset(&xs->sense, 0, sizeof(xs->sense));
411 xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
412 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
413 xs->sense.scsi_sense.asc = 0x20;
414 xs->error = XS_SENSE;
415 xs->status = SCSI_CHECK;
416 mutex_spin_exit(&sc->sc_mutex);
417 scsipi_done(xs);
418 return;
419 }
420
421 ccb = arc_get_ccb(sc);
422 if (ccb == NULL) {
423 xs->error = XS_RESOURCE_SHORTAGE;
424 mutex_spin_exit(&sc->sc_mutex);
425 scsipi_done(xs);
426 return;
427 }
428
429 ccb->ccb_xs = xs;
430
431 if (arc_load_xs(ccb) != 0) {
432 xs->error = XS_DRIVER_STUFFUP;
433 arc_put_ccb(sc, ccb);
434 mutex_spin_exit(&sc->sc_mutex);
435 scsipi_done(xs);
436 return;
437 }
438
439 cmd = &ccb->ccb_cmd->cmd;
440 reg = ccb->ccb_cmd_post;
441
442 /* bus is always 0 */
443 cmd->target = target;
444 cmd->lun = periph->periph_lun;
445 cmd->function = 1; /* XXX magic number */
446
447 cmd->cdb_len = xs->cmdlen;
448 cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
449 if (xs->xs_control & XS_CTL_DATA_OUT)
450 cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
451 if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
452 cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
453 reg |= ARC_REG_POST_QUEUE_BIGFRAME;
454 }
455
456 cmd->context = htole32(ccb->ccb_id);
457 cmd->data_len = htole32(xs->datalen);
458
459 memcpy(cmd->cdb, xs->cmd, xs->cmdlen);
460
461 /* we've built the command, let's put it on the hw */
462 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
463 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
464 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
465
466 arc_push(sc, reg);
467 if (xs->xs_control & XS_CTL_POLL) {
468 if (arc_complete(sc, ccb, xs->timeout) != 0) {
469 xs->error = XS_DRIVER_STUFFUP;
470 mutex_spin_exit(&sc->sc_mutex);
471 scsipi_done(xs);
472 return;
473 }
474 }
475
476 mutex_spin_exit(&sc->sc_mutex);
477 }
478
479 int
480 arc_load_xs(struct arc_ccb *ccb)
481 {
482 struct arc_softc *sc = ccb->ccb_sc;
483 struct scsipi_xfer *xs = ccb->ccb_xs;
484 bus_dmamap_t dmap = ccb->ccb_dmamap;
485 struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
486 uint64_t addr;
487 int i, error;
488
489 if (xs->datalen == 0)
490 return 0;
491
492 error = bus_dmamap_load(sc->sc_dmat, dmap,
493 xs->data, xs->datalen, NULL,
494 (xs->xs_control & XS_CTL_NOSLEEP) ?
495 BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
496 if (error != 0) {
497 aprint_error("%s: error %d loading dmamap\n",
498 device_xname(sc->sc_dev), error);
499 return 1;
500 }
501
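	/* build a 64-bit scatter/gather entry for each dma segment */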
502 for (i = 0; i < dmap->dm_nsegs; i++) {
503 sge = &sgl[i];
504
505 sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
506 addr = dmap->dm_segs[i].ds_addr;
507 sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
508 sge->sg_lo_addr = htole32((uint32_t)addr);
509 }
510
511 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
512 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
513 BUS_DMASYNC_PREWRITE);
514
515 return 0;
516 }
517
518 void
519 arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
520 {
521 struct scsipi_xfer *xs = ccb->ccb_xs;
522 struct arc_msg_scsicmd *cmd;
523
524 if (xs->datalen != 0) {
525 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
526 ccb->ccb_dmamap->dm_mapsize,
527 (xs->xs_control & XS_CTL_DATA_IN) ?
528 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
529 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
530 }
531
532 /* timeout_del */
533 xs->status |= XS_STS_DONE;
534
535 if (reg & ARC_REG_REPLY_QUEUE_ERR) {
536 cmd = &ccb->ccb_cmd->cmd;
537
538 switch (cmd->status) {
539 case ARC_MSG_STATUS_SELTIMEOUT:
540 case ARC_MSG_STATUS_ABORTED:
541 case ARC_MSG_STATUS_INIT_FAIL:
542 xs->status = SCSI_OK;
543 xs->error = XS_SELTIMEOUT;
544 break;
545
546 case SCSI_CHECK:
547 memset(&xs->sense, 0, sizeof(xs->sense));
548 memcpy(&xs->sense, cmd->sense_data,
549 uimin(ARC_MSG_SENSELEN, sizeof(xs->sense)));
550 xs->sense.scsi_sense.response_code =
551 SSD_RCODE_VALID | 0x70;
552 xs->status = SCSI_CHECK;
553 xs->error = XS_SENSE;
554 xs->resid = 0;
555 break;
556
557 default:
558 /* unknown device status */
559 xs->error = XS_BUSY; /* try again later? */
560 xs->status = SCSI_BUSY;
561 break;
562 }
563 } else {
564 xs->status = SCSI_OK;
565 xs->error = XS_NOERROR;
566 xs->resid = 0;
567 }
568
569 arc_put_ccb(sc, ccb);
570 scsipi_done(xs);
571 }
572
573 int
574 arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
575 {
576 struct arc_ccb *ccb = NULL;
577 char *kva = ARC_DMA_KVA(sc->sc_requests);
578 struct arc_io_cmd *cmd;
579 uint32_t reg;
580
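	/*
	 * poll the reply queue, completing every command we find, until
	 * the ccb we were asked to wait for comes back; timeout is
	 * roughly in milliseconds.
	 */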
581 do {
582 reg = arc_pop(sc);
583 if (reg == 0xffffffff) {
584 if (timeout-- == 0)
585 return 1;
586
587 delay(1000);
588 continue;
589 }
590
591 cmd = (struct arc_io_cmd *)(kva +
592 ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
593 ARC_DMA_DVA(sc->sc_requests)));
594 ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
595
596 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
597 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
598 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
599
600 arc_scsi_cmd_done(sc, ccb, reg);
601 } while (nccb != ccb);
602
603 return 0;
604 }
605
606 int
607 arc_map_pci_resources(device_t self, struct pci_attach_args *pa)
608 {
609 struct arc_softc *sc = device_private(self);
610 pcireg_t memtype;
611 pci_intr_handle_t ih;
612 char intrbuf[PCI_INTRSTR_LEN];
613
614 sc->sc_pc = pa->pa_pc;
615 sc->sc_tag = pa->pa_tag;
616 sc->sc_dmat = pa->pa_dmat;
617
618 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
619 if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
620 &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
621 aprint_error(": unable to map system interface register\n");
622 return 1;
623 }
624
625 if (pci_intr_map(pa, &ih) != 0) {
626 aprint_error(": unable to map interrupt\n");
627 goto unmap;
628 }
629
630 pci_intr_setattr(pa->pa_pc, &ih, PCI_INTR_MPSAFE, true);
631
632 sc->sc_ih = pci_intr_establish_xname(pa->pa_pc, ih, IPL_BIO,
633 arc_intr, sc, device_xname(self));
634 if (sc->sc_ih == NULL) {
635 aprint_error(": unable to map interrupt [2]\n");
636 goto unmap;
637 }
638
639 aprint_normal("\n");
640 aprint_normal_dev(self, "interrupting at %s\n",
641 pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf)));
642
643 return 0;
644
645 unmap:
646 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
647 sc->sc_ios = 0;
648 return 1;
649 }
650
651 void
652 arc_unmap_pci_resources(struct arc_softc *sc)
653 {
654 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
655 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
656 sc->sc_ios = 0;
657 }
658
659 int
660 arc_query_firmware(device_t self)
661 {
662 struct arc_softc *sc = device_private(self);
663 struct arc_msg_firmware_info fwinfo;
664 char string[81]; /* sizeof(vendor)*2+1 */
665
666 if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
667 ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
668 aprint_debug_dev(self, "timeout waiting for firmware ok\n");
669 return 1;
670 }
671
672 if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
673 aprint_debug_dev(self, "timeout waiting for get config\n");
674 return 1;
675 }
676
677 if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
678 aprint_debug_dev(self, "timeout waiting to start bg rebuild\n");
679 return 1;
680 }
681
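	/*
	 * the GET_CONFIG reply is left in the message buffer window; read
	 * it out and sanity check the signature before trusting it.
	 */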
682 arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));
683
684 DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
685 device_xname(self), htole32(fwinfo.signature));
686
687 if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
688 aprint_error_dev(self, "invalid firmware info from iop\n");
689 return 1;
690 }
691
692 DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
693 device_xname(self), htole32(fwinfo.request_len));
694 DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
695 device_xname(self), htole32(fwinfo.queue_len));
696 DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
697 device_xname(self), htole32(fwinfo.sdram_size));
698 DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
699 device_xname(self), htole32(fwinfo.sata_ports));
700
701 strnvisx(string, sizeof(string), fwinfo.vendor, sizeof(fwinfo.vendor),
702 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
703 DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
704 device_xname(self), string);
705
706 strnvisx(string, sizeof(string), fwinfo.model, sizeof(fwinfo.model),
707 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
708 aprint_normal_dev(self, "Areca %s Host Adapter RAID controller\n",
709 string);
710
711 strnvisx(string, sizeof(string), fwinfo.fw_version,
712 sizeof(fwinfo.fw_version), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
713 DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
714 device_xname(self), string);
715
716 aprint_normal_dev(self, "%d ports, %dMB SDRAM, firmware <%s>\n",
717 htole32(fwinfo.sata_ports), htole32(fwinfo.sdram_size), string);
718
719 if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
720 aprint_error_dev(self,
721 "unexpected request frame size (%d != %d)\n",
722 htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
723 return 1;
724 }
725
726 sc->sc_req_count = htole32(fwinfo.queue_len);
727
728 return 0;
729 }
730
731 #if NBIO > 0
732 static int
733 arc_bioctl(device_t self, u_long cmd, void *addr)
734 {
735 struct arc_softc *sc = device_private(self);
736 int error = 0;
737
738 switch (cmd) {
739 case BIOCINQ:
740 error = arc_bio_inq(sc, (struct bioc_inq *)addr);
741 break;
742
743 case BIOCVOL:
744 error = arc_bio_vol(sc, (struct bioc_vol *)addr);
745 break;
746
747 case BIOCDISK:
748 error = arc_bio_disk_volume(sc, (struct bioc_disk *)addr);
749 break;
750
751 case BIOCDISK_NOVOL:
752 error = arc_bio_disk_novol(sc, (struct bioc_disk *)addr);
753 break;
754
755 case BIOCALARM:
756 error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
757 break;
758
759 case BIOCSETSTATE:
760 error = arc_bio_setstate(sc, (struct bioc_setstate *)addr);
761 break;
762
763 case BIOCVOLOPS:
764 error = arc_bio_volops(sc, (struct bioc_volops *)addr);
765 break;
766
767 default:
768 error = ENOTTY;
769 break;
770 }
771
772 return error;
773 }
774
775 static int
776 arc_fw_parse_status_code(struct arc_softc *sc, uint8_t *reply)
777 {
778 switch (*reply) {
779 case ARC_FW_CMD_RAIDINVAL:
780 printf("%s: firmware error (invalid raid set)\n",
781 device_xname(sc->sc_dev));
782 return EINVAL;
783 case ARC_FW_CMD_VOLINVAL:
784 printf("%s: firmware error (invalid volume set)\n",
785 device_xname(sc->sc_dev));
786 return EINVAL;
787 case ARC_FW_CMD_NORAID:
		printf("%s: firmware error (nonexistent raid set)\n",
789 device_xname(sc->sc_dev));
790 return ENODEV;
791 case ARC_FW_CMD_NOVOLUME:
		printf("%s: firmware error (nonexistent volume set)\n",
793 device_xname(sc->sc_dev));
794 return ENODEV;
795 case ARC_FW_CMD_NOPHYSDRV:
		printf("%s: firmware error (nonexistent physical drive)\n",
797 device_xname(sc->sc_dev));
798 return ENODEV;
799 case ARC_FW_CMD_PARAM_ERR:
800 printf("%s: firmware error (parameter error)\n",
801 device_xname(sc->sc_dev));
802 return EINVAL;
803 case ARC_FW_CMD_UNSUPPORTED:
804 printf("%s: firmware error (unsupported command)\n",
805 device_xname(sc->sc_dev));
806 return EOPNOTSUPP;
807 case ARC_FW_CMD_DISKCFG_CHGD:
808 printf("%s: firmware error (disk configuration changed)\n",
809 device_xname(sc->sc_dev));
810 return EINVAL;
811 case ARC_FW_CMD_PASS_INVAL:
812 printf("%s: firmware error (invalid password)\n",
813 device_xname(sc->sc_dev));
814 return EINVAL;
815 case ARC_FW_CMD_NODISKSPACE:
816 printf("%s: firmware error (no disk space available)\n",
817 device_xname(sc->sc_dev));
818 return EOPNOTSUPP;
819 case ARC_FW_CMD_CHECKSUM_ERR:
820 printf("%s: firmware error (checksum error)\n",
821 device_xname(sc->sc_dev));
822 return EINVAL;
823 case ARC_FW_CMD_PASS_REQD:
824 printf("%s: firmware error (password required)\n",
825 device_xname(sc->sc_dev));
826 return EPERM;
827 case ARC_FW_CMD_OK:
828 default:
829 return 0;
830 }
831 }
832
833 static int
834 arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
835 {
836 uint8_t request[2], reply[1];
837 size_t len;
838 int error = 0;
839
840 switch (ba->ba_opcode) {
841 case BIOC_SAENABLE:
842 case BIOC_SADISABLE:
843 request[0] = ARC_FW_SET_ALARM;
844 request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
845 ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
846 len = sizeof(request);
847
848 break;
849
850 case BIOC_SASILENCE:
851 request[0] = ARC_FW_MUTE_ALARM;
852 len = 1;
853
854 break;
855
856 case BIOC_GASTATUS:
857 /* system info is too big/ugly to deal with here */
858 return arc_bio_alarm_state(sc, ba);
859
860 default:
861 return EOPNOTSUPP;
862 }
863
864 error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
865 if (error != 0)
866 return error;
867
868 return arc_fw_parse_status_code(sc, &reply[0]);
869 }
870
871 static int
872 arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
873 {
874 struct arc_fw_sysinfo *sysinfo;
875 uint8_t request;
876 int error = 0;
877
878 sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
879
880 request = ARC_FW_SYSINFO;
881 error = arc_msgbuf(sc, &request, sizeof(request),
882 sysinfo, sizeof(struct arc_fw_sysinfo));
883
884 if (error != 0)
885 goto out;
886
887 ba->ba_status = sysinfo->alarm;
888
889 out:
890 kmem_free(sysinfo, sizeof(*sysinfo));
891 return error;
892 }
893
894 static int
895 arc_bio_volops(struct arc_softc *sc, struct bioc_volops *bc)
896 {
897 /* to create a raid set */
898 struct req_craidset {
899 uint8_t cmdcode;
900 uint32_t devmask;
901 uint8_t raidset_name[16];
902 } __packed;
903
904 /* to create a volume set */
905 struct req_cvolset {
906 uint8_t cmdcode;
907 uint8_t raidset;
908 uint8_t volset_name[16];
909 uint64_t capacity;
910 uint8_t raidlevel;
911 uint8_t stripe;
912 uint8_t scsi_chan;
913 uint8_t scsi_target;
914 uint8_t scsi_lun;
915 uint8_t tagqueue;
916 uint8_t cache;
917 uint8_t speed;
918 uint8_t quick_init;
919 } __packed;
920
921 struct scsibus_softc *scsibus_sc = NULL;
922 struct req_craidset req_craidset;
923 struct req_cvolset req_cvolset;
924 uint8_t request[2];
925 uint8_t reply[1];
926 int error = 0;
927
928 switch (bc->bc_opcode) {
929 case BIOC_VCREATE_VOLUME:
930 {
931 /*
932 * Zero out the structs so that we use some defaults
933 * in raid and volume sets.
934 */
935 memset(&req_craidset, 0, sizeof(req_craidset));
936 memset(&req_cvolset, 0, sizeof(req_cvolset));
937
938 /*
		 * First we have to create the raid set and
		 * use the default name for all of them.
941 */
942 req_craidset.cmdcode = ARC_FW_CREATE_RAIDSET;
943 req_craidset.devmask = bc->bc_devmask;
944 error = arc_msgbuf(sc, &req_craidset, sizeof(req_craidset),
945 reply, sizeof(reply));
946 if (error != 0)
947 return error;
948
949 error = arc_fw_parse_status_code(sc, &reply[0]);
950 if (error) {
951 printf("%s: create raidset%d failed\n",
952 device_xname(sc->sc_dev), bc->bc_volid);
953 return error;
954 }
955
956 /*
957 * At this point the raid set was created, so it's
958 * time to create the volume set.
959 */
960 req_cvolset.cmdcode = ARC_FW_CREATE_VOLUME;
961 req_cvolset.raidset = bc->bc_volid;
962 req_cvolset.capacity = bc->bc_size * ARC_BLOCKSIZE;
963
964 /*
965 * Set the RAID level.
966 */
967 switch (bc->bc_level) {
968 case 0:
969 case 1:
970 req_cvolset.raidlevel = bc->bc_level;
971 break;
972 case BIOC_SVOL_RAID10:
973 req_cvolset.raidlevel = 1;
974 break;
975 case 3:
976 req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_3;
977 break;
978 case 5:
979 req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_5;
980 break;
981 case 6:
982 req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_6;
983 break;
984 default:
985 return EOPNOTSUPP;
986 }
987
988 /*
989 * Set the stripe size.
990 */
991 switch (bc->bc_stripe) {
992 case 4:
993 req_cvolset.stripe = 0;
994 break;
995 case 8:
996 req_cvolset.stripe = 1;
997 break;
998 case 16:
999 req_cvolset.stripe = 2;
1000 break;
1001 case 32:
1002 req_cvolset.stripe = 3;
1003 break;
1004 case 64:
1005 req_cvolset.stripe = 4;
1006 break;
1007 case 128:
1008 req_cvolset.stripe = 5;
1009 break;
1010 default:
1011 req_cvolset.stripe = 4; /* by default 64K */
1012 break;
1013 }
1014
1015 req_cvolset.scsi_chan = bc->bc_channel;
1016 req_cvolset.scsi_target = bc->bc_target;
1017 req_cvolset.scsi_lun = bc->bc_lun;
1018 req_cvolset.tagqueue = 1; /* always enabled */
1019 req_cvolset.cache = 1; /* always enabled */
1020 req_cvolset.speed = 4; /* always max speed */
1021
1022 /* RAID 1 and 1+0 levels need foreground initialization */
1023 if (bc->bc_level == 1 || bc->bc_level == BIOC_SVOL_RAID10)
1024 req_cvolset.quick_init = 1; /* foreground init */
1025
1026 error = arc_msgbuf(sc, &req_cvolset, sizeof(req_cvolset),
1027 reply, sizeof(reply));
1028 if (error != 0)
1029 return error;
1030
1031 error = arc_fw_parse_status_code(sc, &reply[0]);
1032 if (error) {
1033 printf("%s: create volumeset%d failed\n",
1034 device_xname(sc->sc_dev), bc->bc_volid);
1035 return error;
1036 }
1037
1038 /*
1039 * If we are creating a RAID 1 or RAID 1+0 volume,
1040 * the volume will be created immediately but it won't
1041 * be available until the initialization is done... so
1042 * don't bother attaching the sd(4) device.
1043 */
1044 if (bc->bc_level == 1 || bc->bc_level == BIOC_SVOL_RAID10)
1045 break;
1046
1047 /*
1048 * Do a rescan on the bus to attach the device associated
1049 * with the new volume.
1050 */
1051 scsibus_sc = device_private(sc->sc_scsibus_dv);
1052 (void)scsi_probe_bus(scsibus_sc, bc->bc_target, bc->bc_lun);
1053
1054 break;
1055 }
1056 case BIOC_VREMOVE_VOLUME:
1057 {
1058 /*
1059 * Remove the volume set specified in bc_volid.
1060 */
1061 request[0] = ARC_FW_DELETE_VOLUME;
1062 request[1] = bc->bc_volid;
1063 error = arc_msgbuf(sc, request, sizeof(request),
1064 reply, sizeof(reply));
1065 if (error != 0)
1066 return error;
1067
1068 error = arc_fw_parse_status_code(sc, &reply[0]);
1069 if (error) {
1070 printf("%s: delete volumeset%d failed\n",
1071 device_xname(sc->sc_dev), bc->bc_volid);
1072 return error;
1073 }
1074
1075 /*
1076 * Detach the sd(4) device associated with the volume,
1077 * but if there's an error don't make it a priority.
1078 */
1079 error = scsipi_target_detach(&sc->sc_chan, bc->bc_target,
1080 bc->bc_lun, 0);
1081 if (error)
1082 printf("%s: couldn't detach sd device for volume %d "
1083 "at %u:%u.%u (error=%d)\n",
1084 device_xname(sc->sc_dev), bc->bc_volid,
1085 bc->bc_channel, bc->bc_target, bc->bc_lun, error);
1086
1087 /*
		 * Now remove the raid set specified in bc_volid;
		 * we only care about volumes.
1090 */
1091 request[0] = ARC_FW_DELETE_RAIDSET;
1092 request[1] = bc->bc_volid;
1093 error = arc_msgbuf(sc, request, sizeof(request),
1094 reply, sizeof(reply));
1095 if (error != 0)
1096 return error;
1097
1098 error = arc_fw_parse_status_code(sc, &reply[0]);
1099 if (error) {
1100 printf("%s: delete raidset%d failed\n",
1101 device_xname(sc->sc_dev), bc->bc_volid);
1102 return error;
1103 }
1104
1105 break;
1106 }
1107 default:
1108 return EOPNOTSUPP;
1109 }
1110
1111 return error;
1112 }
1113
1114 static int
1115 arc_bio_setstate(struct arc_softc *sc, struct bioc_setstate *bs)
1116 {
1117 /* for a hotspare disk */
1118 struct request_hs {
1119 uint8_t cmdcode;
1120 uint32_t devmask;
1121 } __packed;
1122
1123 /* for a pass-through disk */
1124 struct request_pt {
1125 uint8_t cmdcode;
1126 uint8_t devid;
1127 uint8_t scsi_chan;
1128 uint8_t scsi_id;
1129 uint8_t scsi_lun;
1130 uint8_t tagged_queue;
1131 uint8_t cache_mode;
1132 uint8_t max_speed;
1133 } __packed;
1134
1135 struct scsibus_softc *scsibus_sc = NULL;
1136 struct request_hs req_hs; /* to add/remove hotspare */
1137 struct request_pt req_pt; /* to add a pass-through */
1138 uint8_t req_gen[2];
1139 uint8_t reply[1];
1140 int error = 0;
1141
1142 switch (bs->bs_status) {
1143 case BIOC_SSHOTSPARE:
1144 {
1145 req_hs.cmdcode = ARC_FW_CREATE_HOTSPARE;
1146 req_hs.devmask = (1 << bs->bs_target);
1147 goto hotspare;
1148 }
1149 case BIOC_SSDELHOTSPARE:
1150 {
1151 req_hs.cmdcode = ARC_FW_DELETE_HOTSPARE;
1152 req_hs.devmask = (1 << bs->bs_target);
1153 goto hotspare;
1154 }
1155 case BIOC_SSPASSTHRU:
1156 {
1157 req_pt.cmdcode = ARC_FW_CREATE_PASSTHRU;
1158 req_pt.devid = bs->bs_other_id; /* this wants device# */
1159 req_pt.scsi_chan = bs->bs_channel;
1160 req_pt.scsi_id = bs->bs_target;
1161 req_pt.scsi_lun = bs->bs_lun;
1162 req_pt.tagged_queue = 1; /* always enabled */
1163 req_pt.cache_mode = 1; /* always enabled */
1164 req_pt.max_speed = 4; /* always max speed */
1165
1166 error = arc_msgbuf(sc, &req_pt, sizeof(req_pt),
1167 reply, sizeof(reply));
1168 if (error != 0)
1169 return error;
1170
1171 /*
1172 * Do a rescan on the bus to attach the new device
1173 * associated with the pass-through disk.
1174 */
1175 scsibus_sc = device_private(sc->sc_scsibus_dv);
1176 (void)scsi_probe_bus(scsibus_sc, bs->bs_target, bs->bs_lun);
1177
1178 goto out;
1179 }
1180 case BIOC_SSDELPASSTHRU:
1181 {
1182 req_gen[0] = ARC_FW_DELETE_PASSTHRU;
1183 req_gen[1] = bs->bs_target;
1184 error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
1185 reply, sizeof(reply));
1186 if (error != 0)
1187 return error;
1188
1189 /*
1190 * Detach the sd device associated with this pass-through disk.
1191 */
1192 error = scsipi_target_detach(&sc->sc_chan, bs->bs_target,
1193 bs->bs_lun, 0);
1194 if (error)
1195 printf("%s: couldn't detach sd device for the "
1196 "pass-through disk at %u:%u.%u (error=%d)\n",
1197 device_xname(sc->sc_dev),
1198 bs->bs_channel, bs->bs_target, bs->bs_lun, error);
1199
1200 goto out;
1201 }
1202 case BIOC_SSCHECKSTART_VOL:
1203 {
1204 req_gen[0] = ARC_FW_START_CHECKVOL;
1205 req_gen[1] = bs->bs_volid;
1206 error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
1207 reply, sizeof(reply));
1208 if (error != 0)
1209 return error;
1210
1211 goto out;
1212 }
1213 case BIOC_SSCHECKSTOP_VOL:
1214 {
1215 uint8_t req = ARC_FW_STOP_CHECKVOL;
1216 error = arc_msgbuf(sc, &req, 1, reply, sizeof(reply));
1217 if (error != 0)
1218 return error;
1219
1220 goto out;
1221 }
1222 default:
1223 return EOPNOTSUPP;
1224 }
1225
1226 hotspare:
1227 error = arc_msgbuf(sc, &req_hs, sizeof(req_hs),
1228 reply, sizeof(reply));
1229 if (error != 0)
1230 return error;
1231
1232 out:
1233 return arc_fw_parse_status_code(sc, &reply[0]);
1234 }
1235
1236 static int
1237 arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
1238 {
1239 uint8_t request[2];
1240 struct arc_fw_sysinfo *sysinfo = NULL;
1241 struct arc_fw_raidinfo *raidinfo;
1242 int nvols = 0, i;
1243 int error = 0;
1244
1245 raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1246
1247 if (!sc->sc_maxraidset || !sc->sc_maxvolset || !sc->sc_cchans) {
1248 sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
1249
1250 request[0] = ARC_FW_SYSINFO;
1251 error = arc_msgbuf(sc, request, 1, sysinfo,
1252 sizeof(struct arc_fw_sysinfo));
1253 if (error != 0)
1254 goto out;
1255
1256 sc->sc_maxraidset = sysinfo->max_raid_set;
1257 sc->sc_maxvolset = sysinfo->max_volume_set;
1258 sc->sc_cchans = sysinfo->ide_channels;
1259 }
1260
1261 request[0] = ARC_FW_RAIDINFO;
1262 for (i = 0; i < sc->sc_maxraidset; i++) {
1263 request[1] = i;
1264 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1265 sizeof(struct arc_fw_raidinfo));
1266 if (error != 0)
1267 goto out;
1268
1269 nvols += raidinfo->volumes;
1270 }
1271
1272 strlcpy(bi->bi_dev, device_xname(sc->sc_dev), sizeof(bi->bi_dev));
1273 bi->bi_novol = nvols;
1274 bi->bi_nodisk = sc->sc_cchans;
1275
1276 out:
1277 if (sysinfo)
1278 kmem_free(sysinfo, sizeof(*sysinfo));
1279 kmem_free(raidinfo, sizeof(*raidinfo));
1280 return error;
1281 }
1282
1283 static int
1284 arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
1285 {
1286 uint8_t request[2];
1287 int error = 0;
1288 int nvols = 0, i;
1289
1290 request[0] = ARC_FW_VOLINFO;
1291 for (i = 0; i < sc->sc_maxvolset; i++) {
1292 request[1] = i;
1293 error = arc_msgbuf(sc, request, sizeof(request), volinfo,
1294 sizeof(struct arc_fw_volinfo));
1295 if (error != 0)
1296 goto out;
1297
1298 if (volinfo->capacity == 0 && volinfo->capacity2 == 0)
1299 continue;
1300
1301 if (nvols == vol)
1302 break;
1303
1304 nvols++;
1305 }
1306
1307 if (nvols != vol ||
1308 (volinfo->capacity == 0 && volinfo->capacity2 == 0)) {
1309 error = ENODEV;
1310 goto out;
1311 }
1312
1313 out:
1314 return error;
1315 }
1316
1317 static int
1318 arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
1319 {
1320 struct arc_fw_volinfo *volinfo;
1321 uint64_t blocks;
1322 uint32_t status;
1323 int error = 0;
1324
1325 volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1326
1327 error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
1328 if (error != 0)
1329 goto out;
1330
1331 bv->bv_percent = -1;
1332 bv->bv_seconds = 0;
1333
1334 status = htole32(volinfo->volume_status);
1335 if (status == 0x0) {
1336 if (htole32(volinfo->fail_mask) == 0x0)
1337 bv->bv_status = BIOC_SVONLINE;
1338 else
1339 bv->bv_status = BIOC_SVDEGRADED;
1340 } else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
1341 bv->bv_status = BIOC_SVDEGRADED;
1342 } else if (status & ARC_FW_VOL_STATUS_FAILED) {
1343 bv->bv_status = BIOC_SVOFFLINE;
1344 } else if (status & ARC_FW_VOL_STATUS_INITTING) {
1345 bv->bv_status = BIOC_SVBUILDING;
1346 bv->bv_percent = htole32(volinfo->progress);
1347 } else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
1348 bv->bv_status = BIOC_SVREBUILD;
1349 bv->bv_percent = htole32(volinfo->progress);
1350 } else if (status & ARC_FW_VOL_STATUS_MIGRATING) {
1351 bv->bv_status = BIOC_SVMIGRATING;
1352 bv->bv_percent = htole32(volinfo->progress);
1353 } else if (status & ARC_FW_VOL_STATUS_CHECKING) {
1354 bv->bv_status = BIOC_SVCHECKING;
1355 bv->bv_percent = htole32(volinfo->progress);
1356 } else if (status & ARC_FW_VOL_STATUS_NEED_INIT) {
1357 bv->bv_status = BIOC_SVOFFLINE;
1358 } else {
1359 printf("%s: volume %d status 0x%x\n",
1360 device_xname(sc->sc_dev), bv->bv_volid, status);
1361 }
1362
1363 blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
1364 blocks += (uint64_t)htole32(volinfo->capacity);
1365 bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */
1366
1367 switch (volinfo->raid_level) {
1368 case ARC_FW_VOL_RAIDLEVEL_0:
1369 bv->bv_level = 0;
1370 break;
1371 case ARC_FW_VOL_RAIDLEVEL_1:
1372 if (volinfo->member_disks > 2)
1373 bv->bv_level = BIOC_SVOL_RAID10;
1374 else
1375 bv->bv_level = 1;
1376 break;
1377 case ARC_FW_VOL_RAIDLEVEL_3:
1378 bv->bv_level = 3;
1379 break;
1380 case ARC_FW_VOL_RAIDLEVEL_5:
1381 bv->bv_level = 5;
1382 break;
1383 case ARC_FW_VOL_RAIDLEVEL_6:
1384 bv->bv_level = 6;
1385 break;
1386 case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
1387 bv->bv_level = BIOC_SVOL_PASSTHRU;
1388 break;
1389 default:
1390 bv->bv_level = -1;
1391 break;
1392 }
1393
1394 bv->bv_nodisk = volinfo->member_disks;
1395 bv->bv_stripe_size = volinfo->stripe_size / 2;
1396 snprintf(bv->bv_dev, sizeof(bv->bv_dev), "sd%d", bv->bv_volid);
1397 strnvisx(bv->bv_vendor, sizeof(bv->bv_vendor), volinfo->set_name,
1398 sizeof(volinfo->set_name), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1399
1400 out:
1401 kmem_free(volinfo, sizeof(*volinfo));
1402 return error;
1403 }
1404
1405 static int
1406 arc_bio_disk_novol(struct arc_softc *sc, struct bioc_disk *bd)
1407 {
1408 struct arc_fw_diskinfo *diskinfo;
1409 uint8_t request[2];
1410 int error = 0;
1411
1412 diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1413
1414 if (bd->bd_diskid >= sc->sc_cchans) {
1415 error = ENODEV;
1416 goto out;
1417 }
1418
1419 request[0] = ARC_FW_DISKINFO;
1420 request[1] = bd->bd_diskid;
1421 error = arc_msgbuf(sc, request, sizeof(request),
1422 diskinfo, sizeof(struct arc_fw_diskinfo));
1423 if (error != 0)
1424 goto out;
1425
1426 /* skip disks with no capacity */
1427 if (htole32(diskinfo->capacity) == 0 &&
1428 htole32(diskinfo->capacity2) == 0)
1429 goto out;
1430
1431 bd->bd_disknovol = true;
1432 arc_bio_disk_filldata(sc, bd, diskinfo, bd->bd_diskid);
1433
1434 out:
1435 kmem_free(diskinfo, sizeof(*diskinfo));
1436 return error;
1437 }
1438
1439 static void
1440 arc_bio_disk_filldata(struct arc_softc *sc, struct bioc_disk *bd,
1441 struct arc_fw_diskinfo *diskinfo, int diskid)
1442 {
1443 uint64_t blocks;
1444 char model[81];
1445 char serial[41];
1446 char rev[17];
1447
1448 /* Ignore bit zero for now, we don't know what it means */
1449 diskinfo->device_state &= ~0x1;
1450
1451 switch (diskinfo->device_state) {
1452 case ARC_FW_DISK_FAILED:
1453 bd->bd_status = BIOC_SDFAILED;
1454 break;
1455 case ARC_FW_DISK_PASSTHRU:
1456 bd->bd_status = BIOC_SDPASSTHRU;
1457 break;
1458 case ARC_FW_DISK_NORMAL:
1459 bd->bd_status = BIOC_SDONLINE;
1460 break;
1461 case ARC_FW_DISK_HOTSPARE:
1462 bd->bd_status = BIOC_SDHOTSPARE;
1463 break;
1464 case ARC_FW_DISK_UNUSED:
1465 bd->bd_status = BIOC_SDUNUSED;
1466 break;
1467 case 0:
1468 /* disk has been disconnected */
1469 bd->bd_status = BIOC_SDOFFLINE;
1470 bd->bd_channel = 1;
1471 bd->bd_target = 0;
1472 bd->bd_lun = 0;
1473 strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
1474 break;
1475 default:
1476 printf("%s: unknown disk device_state: 0x%x\n", __func__,
1477 diskinfo->device_state);
1478 bd->bd_status = BIOC_SDINVALID;
1479 return;
1480 }
1481
1482 blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
1483 blocks += (uint64_t)htole32(diskinfo->capacity);
1484 bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */
1485
1486 strnvisx(model, sizeof(model), diskinfo->model,
1487 sizeof(diskinfo->model), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1488 strnvisx(serial, sizeof(serial), diskinfo->serial,
1489 sizeof(diskinfo->serial), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1490 strnvisx(rev, sizeof(rev), diskinfo->firmware_rev,
1491 sizeof(diskinfo->firmware_rev), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1492
1493 snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
1494 strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));
1495
1496 #if 0
1497 bd->bd_channel = diskinfo->scsi_attr.channel;
1498 bd->bd_target = diskinfo->scsi_attr.target;
1499 bd->bd_lun = diskinfo->scsi_attr.lun;
1500 #endif
1501
1502 /*
	 * the firmware doesn't seem to fill scsi_attr in, so fake it with
	 * the diskid.
1505 */
1506 bd->bd_channel = 0;
1507 bd->bd_target = diskid;
1508 bd->bd_lun = 0;
1509 }
1510
1511 static int
1512 arc_bio_disk_volume(struct arc_softc *sc, struct bioc_disk *bd)
1513 {
1514 struct arc_fw_raidinfo *raidinfo;
1515 struct arc_fw_volinfo *volinfo;
1516 struct arc_fw_diskinfo *diskinfo;
1517 uint8_t request[2];
1518 int error = 0;
1519
1520 volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1521 raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1522 diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1523
1524 error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
1525 if (error != 0)
1526 goto out;
1527
1528 request[0] = ARC_FW_RAIDINFO;
1529 request[1] = volinfo->raid_set_number;
1530
1531 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1532 sizeof(struct arc_fw_raidinfo));
1533 if (error != 0)
1534 goto out;
1535
1536 if (bd->bd_diskid >= sc->sc_cchans ||
1537 bd->bd_diskid >= raidinfo->member_devices) {
1538 error = ENODEV;
1539 goto out;
1540 }
1541
1542 if (raidinfo->device_array[bd->bd_diskid] == 0xff) {
1543 /*
1544 * The disk has been disconnected, mark it offline
1545 * and put it on another bus.
1546 */
1547 bd->bd_channel = 1;
1548 bd->bd_target = 0;
1549 bd->bd_lun = 0;
1550 bd->bd_status = BIOC_SDOFFLINE;
1551 strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
1552 goto out;
1553 }
1554
1555 request[0] = ARC_FW_DISKINFO;
1556 request[1] = raidinfo->device_array[bd->bd_diskid];
1557 error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
1558 sizeof(struct arc_fw_diskinfo));
1559 if (error != 0)
1560 goto out;
1561
1562 /* now fill our bio disk with data from the firmware */
1563 arc_bio_disk_filldata(sc, bd, diskinfo,
1564 raidinfo->device_array[bd->bd_diskid]);
1565
1566 out:
1567 kmem_free(raidinfo, sizeof(*raidinfo));
1568 kmem_free(volinfo, sizeof(*volinfo));
1569 kmem_free(diskinfo, sizeof(*diskinfo));
1570 return error;
1571 }
1572
1573 static uint8_t
1574 arc_msg_cksum(void *cmd, uint16_t len)
1575 {
1576 uint8_t *buf = cmd;
1577 uint8_t cksum;
1578 int i;
1579
1580 cksum = (uint8_t)(len >> 8) + (uint8_t)len;
1581 for (i = 0; i < len; i++)
1582 cksum += buf[i];
1583
1584 return cksum;
1585 }
1586
1587
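/*
 * exchange a message with the firmware through the doorbell interface:
 * the request is framed with an arc_fw_bufhdr and a trailing checksum,
 * written out in ARC_REG_IOC_WBUF sized chunks, and the reply is read
 * back the same way from ARC_REG_IOC_RBUF and then verified.
 */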
1588 static int
1589 arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
1590 size_t rbuflen)
1591 {
1592 uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
1593 uint8_t *wbuf, *rbuf;
1594 int wlen, wdone = 0, rlen, rdone = 0;
1595 struct arc_fw_bufhdr *bufhdr;
1596 uint32_t reg, rwlen;
1597 int error = 0;
1598 #ifdef ARC_DEBUG
1599 int i;
1600 #endif
1601
1602 wbuf = rbuf = NULL;
1603
1604 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
1605 device_xname(sc->sc_dev), wbuflen, rbuflen);
1606
1607 wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
1608 wbuf = kmem_alloc(wlen, KM_SLEEP);
1609
1610 rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
1611 rbuf = kmem_alloc(rlen, KM_SLEEP);
1612
1613 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
1614 device_xname(sc->sc_dev), wlen, rlen);
1615
1616 bufhdr = (struct arc_fw_bufhdr *)wbuf;
1617 bufhdr->hdr = arc_fw_hdr;
1618 bufhdr->len = htole16(wbuflen);
1619 memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
1620 wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);
1621
1622 arc_lock(sc);
1623 if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0) {
1624 error = EBUSY;
1625 goto out;
1626 }
1627
1628 reg = ARC_REG_OUTB_DOORBELL_READ_OK;
1629
1630 do {
1631 if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
1632 memset(rwbuf, 0, sizeof(rwbuf));
1633 rwlen = (wlen - wdone) % sizeof(rwbuf);
1634 memcpy(rwbuf, &wbuf[wdone], rwlen);
1635
1636 #ifdef ARC_DEBUG
1637 if (arcdebug & ARC_D_DB) {
1638 printf("%s: write %d:",
1639 device_xname(sc->sc_dev), rwlen);
1640 for (i = 0; i < rwlen; i++)
1641 printf(" 0x%02x", rwbuf[i]);
1642 printf("\n");
1643 }
1644 #endif
1645
1646 /* copy the chunk to the hw */
1647 arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
1648 arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
1649 sizeof(rwbuf));
1650
1651 /* say we have a buffer for the hw */
1652 arc_write(sc, ARC_REG_INB_DOORBELL,
1653 ARC_REG_INB_DOORBELL_WRITE_OK);
1654
1655 wdone += rwlen;
1656 }
1657
1658 while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
1659 arc_wait(sc);
1660
1661 arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
1662
1663 DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
1664 device_xname(sc->sc_dev), reg);
1665
1666 if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
1667 rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
1668 if (rwlen > sizeof(rwbuf)) {
1669 DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
1670 device_xname(sc->sc_dev));
1671 error = EIO;
1672 goto out;
1673 }
1674
1675 arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
1676 sizeof(rwbuf));
1677
1678 arc_write(sc, ARC_REG_INB_DOORBELL,
1679 ARC_REG_INB_DOORBELL_READ_OK);
1680
1681 #ifdef ARC_DEBUG
1682 printf("%s: len: %d+%d=%d/%d\n",
1683 device_xname(sc->sc_dev),
1684 rwlen, rdone, rwlen + rdone, rlen);
1685 if (arcdebug & ARC_D_DB) {
1686 printf("%s: read:",
1687 device_xname(sc->sc_dev));
1688 for (i = 0; i < rwlen; i++)
1689 printf(" 0x%02x", rwbuf[i]);
1690 printf("\n");
1691 }
1692 #endif
1693
1694 if ((rdone + rwlen) > rlen) {
1695 DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
1696 device_xname(sc->sc_dev));
1697 error = EIO;
1698 goto out;
1699 }
1700
1701 memcpy(&rbuf[rdone], rwbuf, rwlen);
1702 rdone += rwlen;
1703 }
1704 } while (rdone != rlen);
1705
1706 bufhdr = (struct arc_fw_bufhdr *)rbuf;
1707 if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
1708 bufhdr->len != htole16(rbuflen)) {
1709 DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
1710 device_xname(sc->sc_dev));
1711 error = EIO;
1712 goto out;
1713 }
1714
1715 memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);
1716
1717 if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
1718 DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
1719 device_xname(sc->sc_dev));
1720 error = EIO;
1721 goto out;
1722 }
1723
1724 out:
1725 arc_unlock(sc);
1726 kmem_free(wbuf, wlen);
1727 kmem_free(rbuf, rlen);
1728
1729 return error;
1730 }
1731
1732 static void
1733 arc_lock(struct arc_softc *sc)
1734 {
1735 rw_enter(&sc->sc_rwlock, RW_WRITER);
1736 mutex_spin_enter(&sc->sc_mutex);
1737 arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1738 sc->sc_talking = 1;
1739 }
1740
1741 static void
1742 arc_unlock(struct arc_softc *sc)
1743 {
1744 KASSERT(mutex_owned(&sc->sc_mutex));
1745
1746 arc_write(sc, ARC_REG_INTRMASK,
1747 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1748 sc->sc_talking = 0;
1749 mutex_spin_exit(&sc->sc_mutex);
1750 rw_exit(&sc->sc_rwlock);
1751 }
1752
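/*
 * unmask the doorbell interrupt and sleep until the interrupt handler
 * signals it or a second passes; on timeout the doorbell is masked again.
 */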
1753 static void
1754 arc_wait(struct arc_softc *sc)
1755 {
1756 KASSERT(mutex_owned(&sc->sc_mutex));
1757
1758 arc_write(sc, ARC_REG_INTRMASK,
1759 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1760 if (cv_timedwait(&sc->sc_condvar, &sc->sc_mutex, hz) == EWOULDBLOCK)
1761 arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1762 }
1763
1764
1765 static void
1766 arc_create_sensors(void *arg)
1767 {
1768 struct arc_softc *sc = arg;
1769 struct bioc_inq bi;
1770 struct bioc_vol bv;
1771 int i, j;
1772 size_t slen, count = 0;
1773
1774 memset(&bi, 0, sizeof(bi));
1775 if (arc_bio_inq(sc, &bi) != 0) {
1776 aprint_error("%s: unable to query firmware for sensor info\n",
1777 device_xname(sc->sc_dev));
1778 kthread_exit(0);
1779 }
1780
1781 /* There's no point to continue if there are no volumes */
1782 if (!bi.bi_novol)
1783 kthread_exit(0);
1784
1785 for (i = 0; i < bi.bi_novol; i++) {
1786 memset(&bv, 0, sizeof(bv));
1787 bv.bv_volid = i;
1788 if (arc_bio_vol(sc, &bv) != 0)
1789 kthread_exit(0);
1790
1791 /* Skip passthrough volumes */
1792 if (bv.bv_level == BIOC_SVOL_PASSTHRU)
1793 continue;
1794
1795 /* new volume found */
1796 sc->sc_nsensors++;
1797 /* new disk in a volume found */
1798 sc->sc_nsensors+= bv.bv_nodisk;
1799 }
1800
1801 /* No valid volumes */
1802 if (!sc->sc_nsensors)
1803 kthread_exit(0);
1804
1805 sc->sc_sme = sysmon_envsys_create();
1806 slen = sizeof(arc_edata_t) * sc->sc_nsensors;
1807 sc->sc_arc_sensors = kmem_zalloc(slen, KM_SLEEP);
1808
1809 /* Attach sensors for volumes and disks */
1810 for (i = 0; i < bi.bi_novol; i++) {
1811 memset(&bv, 0, sizeof(bv));
1812 bv.bv_volid = i;
1813 if (arc_bio_vol(sc, &bv) != 0)
1814 goto bad;
1815
1816 sc->sc_arc_sensors[count].arc_sensor.units = ENVSYS_DRIVE;
1817 sc->sc_arc_sensors[count].arc_sensor.state = ENVSYS_SINVALID;
1818 sc->sc_arc_sensors[count].arc_sensor.value_cur =
1819 ENVSYS_DRIVE_EMPTY;
1820 sc->sc_arc_sensors[count].arc_sensor.flags =
1821 ENVSYS_FMONSTCHANGED;
1822
1823 /* Skip passthrough volumes */
1824 if (bv.bv_level == BIOC_SVOL_PASSTHRU)
1825 continue;
1826
1827 if (bv.bv_level == BIOC_SVOL_RAID10)
1828 snprintf(sc->sc_arc_sensors[count].arc_sensor.desc,
1829 sizeof(sc->sc_arc_sensors[count].arc_sensor.desc),
1830 "RAID 1+0 volume%d (%s)", i, bv.bv_dev);
1831 else
1832 snprintf(sc->sc_arc_sensors[count].arc_sensor.desc,
1833 sizeof(sc->sc_arc_sensors[count].arc_sensor.desc),
1834 "RAID %d volume%d (%s)", bv.bv_level, i,
1835 bv.bv_dev);
1836
1837 sc->sc_arc_sensors[count].arc_volid = i;
1838
1839 if (sysmon_envsys_sensor_attach(sc->sc_sme,
1840 &sc->sc_arc_sensors[count].arc_sensor))
1841 goto bad;
1842
1843 count++;
1844
1845 /* Attach disk sensors for this volume */
1846 for (j = 0; j < bv.bv_nodisk; j++) {
1847 sc->sc_arc_sensors[count].arc_sensor.state =
1848 ENVSYS_SINVALID;
1849 sc->sc_arc_sensors[count].arc_sensor.units =
1850 ENVSYS_DRIVE;
1851 sc->sc_arc_sensors[count].arc_sensor.value_cur =
1852 ENVSYS_DRIVE_EMPTY;
1853 sc->sc_arc_sensors[count].arc_sensor.flags =
1854 ENVSYS_FMONSTCHANGED;
1855
1856 snprintf(sc->sc_arc_sensors[count].arc_sensor.desc,
1857 sizeof(sc->sc_arc_sensors[count].arc_sensor.desc),
1858 "disk%d volume%d (%s)", j, i, bv.bv_dev);
1859 sc->sc_arc_sensors[count].arc_volid = i;
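			/*
			 * bias the disk index by 10 so that a zero
			 * arc_diskid always means a volume sensor;
			 * arc_refresh_sensors subtracts it again.
			 */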
1860 sc->sc_arc_sensors[count].arc_diskid = j + 10;
1861
1862 if (sysmon_envsys_sensor_attach(sc->sc_sme,
1863 &sc->sc_arc_sensors[count].arc_sensor))
1864 goto bad;
1865
1866 count++;
1867 }
1868 }
1869
1870 /*
1871 * Register our envsys driver with the framework now that the
1872 * sensors were all attached.
1873 */
1874 sc->sc_sme->sme_name = device_xname(sc->sc_dev);
1875 sc->sc_sme->sme_cookie = sc;
1876 sc->sc_sme->sme_refresh = arc_refresh_sensors;
1877
1878 if (sysmon_envsys_register(sc->sc_sme)) {
1879 aprint_debug("%s: unable to register with sysmon\n",
1880 device_xname(sc->sc_dev));
1881 goto bad;
1882 }
1883 kthread_exit(0);
1884
1885 bad:
1886 sysmon_envsys_destroy(sc->sc_sme);
1887 kmem_free(sc->sc_arc_sensors, slen);
1888
1889 sc->sc_sme = NULL;
1890 sc->sc_arc_sensors = NULL;
1891
1892 kthread_exit(0);
1893 }
1894
1895 static void
1896 arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
1897 {
1898 struct arc_softc *sc = sme->sme_cookie;
1899 struct bioc_vol bv;
1900 struct bioc_disk bd;
1901 arc_edata_t *arcdata = (arc_edata_t *)edata;
1902
1903 /* sanity check */
1904 if (edata->units != ENVSYS_DRIVE)
1905 return;
1906
1907 memset(&bv, 0, sizeof(bv));
1908 bv.bv_volid = arcdata->arc_volid;
1909
1910 if (arc_bio_vol(sc, &bv)) {
1911 bv.bv_status = BIOC_SVINVALID;
1912 bio_vol_to_envsys(edata, &bv);
1913 return;
1914 }
1915
1916 if (arcdata->arc_diskid) {
1917 /* Current sensor is handling a disk volume member */
1918 memset(&bd, 0, sizeof(bd));
1919 bd.bd_volid = arcdata->arc_volid;
1920 bd.bd_diskid = arcdata->arc_diskid - 10;
1921
1922 if (arc_bio_disk_volume(sc, &bd))
1923 bd.bd_status = BIOC_SDOFFLINE;
1924 bio_disk_to_envsys(edata, &bd);
1925 } else {
1926 /* Current sensor is handling a volume */
1927 bio_vol_to_envsys(edata, &bv);
1928 }
1929 }
1930 #endif /* NBIO > 0 */
1931
1932 static uint32_t
1933 arc_read(struct arc_softc *sc, bus_size_t r)
1934 {
1935 uint32_t v;
1936
1937 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1938 BUS_SPACE_BARRIER_READ);
1939 v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1940
1941 DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
1942 device_xname(sc->sc_dev), r, v);
1943
1944 return v;
1945 }
1946
1947 static void
1948 arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1949 {
1950 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1951 BUS_SPACE_BARRIER_READ);
1952 bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
1953 (uint32_t *)buf, len >> 2);
1954 }
1955
1956 static void
1957 arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
1958 {
1959 DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
1960 device_xname(sc->sc_dev), r, v);
1961
1962 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1963 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1964 BUS_SPACE_BARRIER_WRITE);
1965 }
1966
1967 #if NBIO > 0
1968 static void
1969 arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1970 {
1971 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
1972 (const uint32_t *)buf, len >> 2);
1973 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1974 BUS_SPACE_BARRIER_WRITE);
1975 }
1976 #endif /* NBIO > 0 */
1977
1978 static int
1979 arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1980 uint32_t target)
1981 {
1982 int i;
1983
1984 DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
1985 device_xname(sc->sc_dev), r, mask, target);
1986
1987 for (i = 0; i < 10000; i++) {
1988 if ((arc_read(sc, r) & mask) == target)
1989 return 0;
1990 delay(1000);
1991 }
1992
1993 return 1;
1994 }
1995
#ifdef unused
1997 static int
1998 arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1999 uint32_t target)
2000 {
2001 int i;
2002
2003 DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
2004 device_xname(sc->sc_dev), r, mask, target);
2005
2006 for (i = 0; i < 10000; i++) {
2007 if ((arc_read(sc, r) & mask) != target)
2008 return 0;
2009 delay(1000);
2010 }
2011
2012 return 1;
2013 }
2014 #endif
2015
2016 static int
2017 arc_msg0(struct arc_softc *sc, uint32_t m)
2018 {
2019 /* post message */
2020 arc_write(sc, ARC_REG_INB_MSG0, m);
2021 /* wait for the fw to do it */
2022 if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
2023 ARC_REG_INTRSTAT_MSG0) != 0)
2024 return 1;
2025
2026 /* ack it */
2027 arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);
2028
2029 return 0;
2030 }
2031
2032 static struct arc_dmamem *
2033 arc_dmamem_alloc(struct arc_softc *sc, size_t size)
2034 {
2035 struct arc_dmamem *adm;
2036 int nsegs;
2037
2038 adm = kmem_zalloc(sizeof(*adm), KM_SLEEP);
2039 adm->adm_size = size;
2040
2041 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2042 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
2043 goto admfree;
2044
2045 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
2046 1, &nsegs, BUS_DMA_NOWAIT) != 0)
2047 goto destroy;
2048
2049 if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
2050 &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
2051 goto free;
2052
2053 if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
2054 NULL, BUS_DMA_NOWAIT) != 0)
2055 goto unmap;
2056
2057 memset(adm->adm_kva, 0, size);
2058
2059 return adm;
2060
2061 unmap:
2062 bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
2063 free:
2064 bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
2065 destroy:
2066 bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
2067 admfree:
2068 kmem_free(adm, sizeof(*adm));
2069
2070 return NULL;
2071 }
2072
2073 static void
2074 arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
2075 {
2076 bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
2077 bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
2078 bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
2079 bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
2080 kmem_free(adm, sizeof(*adm));
2081 }
2082
2083 static int
2084 arc_alloc_ccbs(device_t self)
2085 {
2086 struct arc_softc *sc = device_private(self);
2087 struct arc_ccb *ccb;
2088 uint8_t *cmd;
2089 int i;
2090 size_t ccbslen;
2091
2092 TAILQ_INIT(&sc->sc_ccb_free);
2093
2094 ccbslen = sizeof(struct arc_ccb) * sc->sc_req_count;
2095 sc->sc_ccbs = kmem_zalloc(ccbslen, KM_SLEEP);
2096
2097 sc->sc_requests = arc_dmamem_alloc(sc,
2098 ARC_MAX_IOCMDLEN * sc->sc_req_count);
2099 if (sc->sc_requests == NULL) {
2100 aprint_error_dev(self, "unable to allocate ccb dmamem\n");
2101 goto free_ccbs;
2102 }
2103 cmd = ARC_DMA_KVA(sc->sc_requests);
2104
2105 for (i = 0; i < sc->sc_req_count; i++) {
2106 ccb = &sc->sc_ccbs[i];
2107
2108 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
2109 MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
2110 aprint_error_dev(self,
2111 "unable to create dmamap for ccb %d\n", i);
2112 goto free_maps;
2113 }
2114
2115 ccb->ccb_sc = sc;
2116 ccb->ccb_id = i;
2117 ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;
2118
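		/*
		 * point the ccb at its slot in the request dma memory and
		 * precompute the shifted bus address we post to the hw.
		 */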
2119 ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
2120 ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
2121 ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;
2122
2123 arc_put_ccb(sc, ccb);
2124 }
2125
2126 return 0;
2127
2128 free_maps:
2129 while ((ccb = arc_get_ccb(sc)) != NULL)
2130 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2131 arc_dmamem_free(sc, sc->sc_requests);
2132
2133 free_ccbs:
2134 kmem_free(sc->sc_ccbs, ccbslen);
2135
2136 return 1;
2137 }
2138
2139 static struct arc_ccb *
2140 arc_get_ccb(struct arc_softc *sc)
2141 {
2142 struct arc_ccb *ccb;
2143
2144 ccb = TAILQ_FIRST(&sc->sc_ccb_free);
2145 if (ccb != NULL)
2146 TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
2147
2148 return ccb;
2149 }
2150
2151 static void
2152 arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
2153 {
2154 ccb->ccb_xs = NULL;
2155 memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
2156 TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
2157 }
2158