arcmsr.c revision 1.1 1 /* $NetBSD: arcmsr.c,v 1.1 2007/12/05 00:18:07 xtraeme Exp $ */
2 /* $OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */
3
4 /*
5 * Copyright (c) 2006 David Gwynne <dlg (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include "bio.h"
21
22 #include <sys/cdefs.h>
23 __KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.1 2007/12/05 00:18:07 xtraeme Exp $");
24
25 #include <sys/param.h>
26 #include <sys/buf.h>
27 #include <sys/kernel.h>
28 #include <sys/malloc.h>
29 #include <sys/device.h>
30 #include <sys/callout.h>
31 #include <sys/kthread.h>
32 #include <sys/rwlock.h>
33
34 #if NBIO > 0
35 #include <sys/ioctl.h>
36 #include <dev/biovar.h>
37 #endif
38
39 #include <dev/pci/pcireg.h>
40 #include <dev/pci/pcivar.h>
41 #include <dev/pci/pcidevs.h>
42
43 #include <dev/scsipi/scsipi_all.h>
44 #include <dev/scsipi/scsi_all.h>
45 #include <dev/scsipi/scsiconf.h>
46
47 #include <dev/sysmon/sysmonvar.h>
48
49 #include <sys/bus.h>
50
51 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
52
53 #include <dev/pci/arcmsrvar.h>
54
55 #define ARC_REFRESH_TIMO (60 * hz)
56
57 /* #define ARC_DEBUG */
58 #ifdef ARC_DEBUG
59 #define ARC_D_INIT (1<<0)
60 #define ARC_D_RW (1<<1)
61 #define ARC_D_DB (1<<2)
62
63 int arcdebug = 0;
64
65 #define DPRINTF(p...) do { if (arcdebug) printf(p); } while (0)
66 #define DNPRINTF(n, p...) do { if ((n) & arcdebug) printf(p); } while (0)
67
68 #else
69 #define DPRINTF(p...) /* p */
70 #define DNPRINTF(n, p...) /* n, p */
71 #endif
72
73 /*
74 * the fw header must always equal this.
75 */
76 static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };
77
78 /*
79 * autoconf(9) glue.
80 */
81 static int arc_match(device_t, struct cfdata *, void *);
82 static void arc_attach(device_t, device_t, void *);
83 static int arc_detach(device_t, int);
84 static void arc_shutdown(void *);
85 static int arc_intr(void *);
86 static void arc_minphys(struct buf *);
87
88 CFATTACH_DECL(arcmsr, sizeof(struct arc_softc),
89 arc_match, arc_attach, arc_detach, NULL);
90
91 /*
92 * bio(4) and sysmon_envsys(9) glue.
93 */
94 #if NBIO > 0
95 static int arc_bioctl(struct device *, u_long, void *);
96 static int arc_bio_inq(struct arc_softc *, struct bioc_inq *);
97 static int arc_bio_vol(struct arc_softc *, struct bioc_vol *);
98 static int arc_bio_disk(struct arc_softc *, struct bioc_disk *);
99 static int arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
100 static int arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
101 static int arc_bio_getvol(struct arc_softc *, int,
102 struct arc_fw_volinfo *);
103 static void arc_create_sensors(void *);
104 static void arc_refresh_sensors(void *);
105 #endif
106
107 static int
108 arc_match(device_t parent, struct cfdata *match, void *aux)
109 {
110 struct pci_attach_args *pa = aux;
111
112 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
113 switch (PCI_PRODUCT(pa->pa_id)) {
114 case PCI_PRODUCT_ARECA_ARC1110:
115 case PCI_PRODUCT_ARECA_ARC1120:
116 case PCI_PRODUCT_ARECA_ARC1130:
117 case PCI_PRODUCT_ARECA_ARC1160:
118 case PCI_PRODUCT_ARECA_ARC1170:
119 case PCI_PRODUCT_ARECA_ARC1200:
120 case PCI_PRODUCT_ARECA_ARC1202:
121 case PCI_PRODUCT_ARECA_ARC1210:
122 case PCI_PRODUCT_ARECA_ARC1220:
123 case PCI_PRODUCT_ARECA_ARC1230:
124 case PCI_PRODUCT_ARECA_ARC1260:
125 case PCI_PRODUCT_ARECA_ARC1270:
126 case PCI_PRODUCT_ARECA_ARC1280:
127 case PCI_PRODUCT_ARECA_ARC1380:
128 case PCI_PRODUCT_ARECA_ARC1381:
129 case PCI_PRODUCT_ARECA_ARC1680:
130 case PCI_PRODUCT_ARECA_ARC1681:
131 return 1;
132 default:
133 break;
134 }
135 }
136
137 return 0;
138 }
139
/*
 * autoconf(9) attach: map the PCI resources, query the controller
 * firmware for its configuration, allocate the command ccbs, then
 * attach a scsipi bus on top of the adapter.  bio(4) registration and
 * the sensor kthread come last because they talk to the firmware
 * through the (sleepable) message buffer interface.  On failure after
 * the BAR is mapped, the registers are unmapped again.
 */
static void
arc_attach(device_t parent, device_t self, void *aux)
{
	struct arc_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_chan;

	sc->sc_talking = 0;
	rw_init(&sc->sc_rwlock);

	if (arc_map_pci_resources(sc, pa) != 0) {
		/* error message printed by arc_map_pci_resources */
		return;
	}

	if (arc_query_firmware(sc) != 0) {
		/* error message printed by arc_query_firmware */
		goto unmap_pci;
	}

	if (arc_alloc_ccbs(sc) != 0) {
		/* error message printed by arc_alloc_ccbs */
		goto unmap_pci;
	}

	sc->sc_shutdownhook = shutdownhook_establish(arc_shutdown, sc);
	if (sc->sc_shutdownhook == NULL)
		panic("unable to establish arc powerhook");

	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = self;
	adapt->adapt_nchannels = 1;
	/* spread the firmware's request slots evenly over the targets */
	adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_minphys = arc_minphys;
	adapt->adapt_request = arc_scsi_cmd;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_nluns = ARC_MAX_LUN;
	chan->chan_ntargets = ARC_MAX_TARGET;
	/* the adapter itself sits at the id one past the last target */
	chan->chan_id = ARC_MAX_TARGET;
	chan->chan_channel = 0;
	chan->chan_flags = 0;

	(void)config_found(self, &sc->sc_chan, scsiprint);

	/*
	 * enable interrupts.
	 * NOTE(review): this mixes ARC_REG_INTRSTAT_DOORBELL into an
	 * INTRMASK write while arc_unlock() uses ARC_REG_INTRMASK_DOORBELL
	 * for the same purpose -- confirm both constants name the same bit.
	 */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));

#if NBIO > 0
	if (bio_register(self, arc_bioctl) != 0)
		panic("%s: bioctl registration failed\n", device_xname(self));
	/*
	 * you need to talk to the firmware to get volume info. our firmware
	 * interface relies on being able to sleep, so we need to use a thread
	 * to do the work.
	 */
	if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
		panic("%s: unable to create a kernel thread for sensors\n",
		    device_xname(self));
#endif

	return;

unmap_pci:
	arc_unmap_pci_resources(sc);
}
212
213 static int
214 arc_detach(device_t self, int flags)
215 {
216 struct arc_softc *sc = device_private(self);
217
218 shutdownhook_disestablish(sc->sc_shutdownhook);
219
220 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
221 aprint_error("%s: timeout waiting to stop bg rebuild\n",
222 device_xname(&sc->sc_dev));
223
224 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
225 aprint_error("%s: timeout waiting to flush cache\n",
226 device_xname(&sc->sc_dev));
227
228 return 0;
229 }
230
/*
 * Shutdown hook: quiesce the controller before the machine goes down.
 * Stops any background rebuild and asks the firmware to flush its
 * write cache; a timeout on either message is reported but otherwise
 * ignored since we are going down anyway.
 */
static void
arc_shutdown(void *xsc)
{
	struct arc_softc *sc = xsc;

	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
		aprint_error("%s: timeout waiting to stop bg rebuild\n",
		    device_xname(&sc->sc_dev));

	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
		aprint_error("%s: timeout waiting to flush cache\n",
		    device_xname(&sc->sc_dev));
}
244
245 static void
246 arc_minphys(struct buf *bp)
247 {
248 if (bp->b_bcount > MAXPHYS)
249 bp->b_bcount = MAXPHYS;
250 minphys(bp);
251 }
252
/*
 * Interrupt handler.  Two sources are serviced: the doorbell, used by
 * the firmware message-buffer interface, and the post queue, which
 * carries completed scsi commands.  Returns 0 if the interrupt was not
 * for us, 1 otherwise.
 */
static int
arc_intr(void *arg)
{
	struct arc_softc *sc = arg;
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg, intrstat;

	intrstat = arc_read(sc, ARC_REG_INTRSTAT);
	if (intrstat == 0x0)
		return 0;

	/* ack only the sources we actually service */
	intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
	arc_write(sc, ARC_REG_INTRSTAT, intrstat);

	if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
		if (sc->sc_talking) {
			/* if an ioctl is talking, wake it up */
			arc_write(sc, ARC_REG_INTRMASK,
			    ~ARC_REG_INTRMASK_POSTQUEUE);
			wakeup(sc);
		} else {
			/* otherwise drop it */
			reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
			arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
			if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
				arc_write(sc, ARC_REG_INB_DOORBELL,
				    ARC_REG_INB_DOORBELL_READ_OK);
		}
	}

	/* drain the reply queue of completed commands */
	while ((reg = arc_pop(sc)) != 0xffffffff) {
		/*
		 * the reply carries the bus address of the command frame;
		 * translate it back into a kva pointer via the dma area.
		 * NOTE(review): htole32() on a value read from the hw
		 * looks like it was meant to be le32toh(); both perform
		 * the same byte swap, so behavior is unaffected.
		 */
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	}

	return 1;
}
300
/*
 * scsipi adapter request entry point.  Only ADAPTER_REQ_RUN_XFER is
 * supported; resource growth and transfer mode changes are silently
 * ignored.  Builds an arc_msg_scsicmd in the preallocated dma request
 * area and posts it to the controller, polling for completion when
 * the xfer demands it.
 */
void
arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct arc_softc *sc = device_private(adapt->adapt_dev);
	struct arc_ccb *ccb;
	struct arc_msg_scsicmd *cmd;
	uint32_t reg;
	uint8_t target;
	int s;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* Not supported. */
		return;
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	/*
	 * cdbs larger than the firmware's command frame can carry are
	 * failed with a synthesized ILLEGAL REQUEST check condition.
	 */
	if (xs->cmdlen > ARC_MSG_CDBLEN) {
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.scsi_sense.asc = 0x20;	/* invalid command op code */
		xs->error = XS_SENSE;
		xs->status = SCSI_CHECK;
		s = splbio();
		scsipi_done(xs);
		goto out;
	}

	s = splbio();
	ccb = arc_get_ccb(sc);
	if (ccb == NULL) {
		/* out of ccbs; ask scsipi to retry later */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		goto out;
	}
	splx(s);

	ccb->ccb_xs = xs;

	if (arc_load_xs(ccb) != 0) {
		/* dma map load failed: return the ccb and fail the xfer */
		xs->error = XS_DRIVER_STUFFUP;
		s = splbio();
		arc_put_ccb(sc, ccb);
		scsipi_done(xs);
		goto out;
	}

	cmd = &ccb->ccb_cmd->cmd;
	reg = ccb->ccb_cmd_post;

	/* bus is always 0 */
	cmd->target = target;
	cmd->lun = periph->periph_lun;
	cmd->function = 1; /* XXX magic number */

	cmd->cdb_len = xs->cmdlen;
	cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
	if (xs->xs_control & XS_CTL_DATA_OUT)
		cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
	/* transfers with many segments must use the big frame format */
	if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
		cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
		reg |= ARC_REG_POST_QUEUE_BIGFRAME;
	}

	/* the ccb id comes back in the reply so we can find the ccb again */
	cmd->context = htole32(ccb->ccb_id);
	cmd->data_len = htole32(xs->datalen);

	memcpy(cmd->cdb, xs->cmd, xs->cmdlen);

	/* we've built the command, let's put it on the hw */
	bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	s = splbio();
	arc_push(sc, reg);
	if (xs->xs_control & XS_CTL_POLL) {
		/* caller may not sleep: spin for the completion */
		if (arc_complete(sc, ccb, xs->timeout) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
		}
	}
out:
	splx(s);
}
398
399 int
400 arc_load_xs(struct arc_ccb *ccb)
401 {
402 struct arc_softc *sc = ccb->ccb_sc;
403 struct scsipi_xfer *xs = ccb->ccb_xs;
404 bus_dmamap_t dmap = ccb->ccb_dmamap;
405 struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
406 uint64_t addr;
407 int i, error;
408
409 if (xs->datalen == 0)
410 return 0;
411
412 error = bus_dmamap_load(sc->sc_dmat, dmap,
413 xs->data, xs->datalen, NULL,
414 (xs->xs_control & XS_CTL_NOSLEEP) ?
415 BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
416 if (error != 0) {
417 aprint_error("%s: error %d loading dmamap\n",
418 device_xname(&sc->sc_dev), error);
419 return 1;
420 }
421
422 for (i = 0; i < dmap->dm_nsegs; i++) {
423 sge = &sgl[i];
424
425 sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
426 addr = dmap->dm_segs[i].ds_addr;
427 sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
428 sge->sg_lo_addr = htole32((uint32_t)addr);
429 }
430
431 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
432 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
433 BUS_DMASYNC_PREWRITE);
434
435 return 0;
436 }
437
/*
 * Complete a scsi command: sync and unload its data dma map,
 * translate the firmware's completion status into scsipi terms, then
 * return the ccb to the free list and call scsipi_done().
 */
void
arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct arc_msg_scsicmd *cmd;

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	/* timeout_del */
	xs->status |= XS_STS_DONE;

	if (reg & ARC_REG_REPLY_QUEUE_ERR) {
		cmd = &ccb->ccb_cmd->cmd;

		/*
		 * the status field holds firmware status codes, except a
		 * device check condition is passed through verbatim --
		 * hence the SCSI_CHECK case in the same switch.
		 */
		switch (cmd->status) {
		case ARC_MSG_STATUS_SELTIMEOUT:
		case ARC_MSG_STATUS_ABORTED:
		case ARC_MSG_STATUS_INIT_FAIL:
			/* device is gone or never answered */
			xs->status = SCSI_OK;
			xs->error = XS_SELTIMEOUT;
			break;

		case SCSI_CHECK:
			/* copy out as much sense data as will fit */
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, cmd->sense_data,
			    min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | 0x70;
			xs->status = SCSI_CHECK;
			xs->error = XS_SENSE;
			xs->resid = 0;
			break;

		default:
			/* unknown device status */
			xs->error = XS_BUSY; /* try again later? */
			xs->status = SCSI_BUSY;
			break;
		}
	} else {
		xs->status = SCSI_OK;
		xs->error = XS_NOERROR;
		xs->resid = 0;
	}

	arc_put_ccb(sc, ccb);
	scsipi_done(xs);
}
492
/*
 * Polled completion for XS_CTL_POLL commands: spin popping the reply
 * queue until the nominated ccb completes or the timeout (in
 * milliseconds, via delay(1000) per empty poll) expires.  Any other
 * commands that complete while spinning are finished off as well.
 * Returns 0 on success, 1 on timeout.
 */
int
arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
{
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg;

	do {
		reg = arc_pop(sc);
		if (reg == 0xffffffff) {
			/* reply queue empty: burn a millisecond and retry */
			if (timeout-- == 0)
				return 1;

			delay(1000);
			continue;
		}

		/* translate the posted bus address back to our kva */
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	} while (nccb != ccb);

	return 0;
}
525
/*
 * Map the controller's register BAR and establish the interrupt
 * handler.  Returns 0 on success; on failure anything mapped so far
 * is released and 1 is returned.  Error messages continue the
 * autoconf attach banner, hence the leading ": ".
 */
int
arc_map_pci_resources(struct arc_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t memtype;
	pci_intr_handle_t ih;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
	if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
		aprint_error(": unable to map system interface register\n");
		return 1;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error(": unable to map interrupt\n");
		goto unmap;
	}

	/* the handler runs at IPL_BIO alongside the scsipi layer */
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    arc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error(": unable to map interrupt [2]\n");
		goto unmap;
	}
	aprint_normal(": interrupting at %s\n",
	    pci_intr_string(pa->pa_pc, ih));

	return 0;

unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
	return 1;
}
564
/*
 * Undo arc_map_pci_resources(): tear down the interrupt handler, then
 * unmap the register space and clear the size so a later unmap is not
 * attempted again.
 */
void
arc_unmap_pci_resources(struct arc_softc *sc)
{
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}
572
/*
 * Wait for the controller firmware to become ready, then fetch its
 * configuration with the GET_CONFIG message.  Validates the reply
 * signature and request frame size, records the firmware's command
 * queue depth in sc_req_count, and starts background rebuilds.
 * Returns 0 on success, 1 on any timeout or validation failure.
 *
 * NOTE(review): htole32() is applied throughout to values read from
 * the controller where le32toh() reads more naturally; both perform
 * the identical byte swap, so behavior is unaffected.
 */
int
arc_query_firmware(struct arc_softc *sc)
{
	struct arc_msg_firmware_info fwinfo;
	char string[81]; /* sizeof(vendor)*2+1 */

	if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
	    ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
		aprint_debug("%s: timeout waiting for firmware ok\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
		aprint_debug("%s: timeout waiting for get config\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	/* the reply is deposited in the message buffer region */
	arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));

	DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
	    device_xname(&sc->sc_dev), htole32(fwinfo.signature));

	if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
		aprint_error("%s: invalid firmware info from iop\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.request_len));
	DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.queue_len));
	DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.sdram_size));
	DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.sata_ports));

	scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
	DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
	    device_xname(&sc->sc_dev), string);

	scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));

	aprint_normal("%s: Areca %s Host Adapter RAID controller\n",
	    device_xname(&sc->sc_dev), string);

	scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
	DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
	    device_xname(&sc->sc_dev), string);

	/* we only know how to drive the standard request frame size */
	if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
		aprint_error("%s: unexpected request frame size (%d != %d)\n",
		    device_xname(&sc->sc_dev),
		    htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
		return 1;
	}

	sc->sc_req_count = htole32(fwinfo.queue_len);

	if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
		aprint_debug("%s: timeout waiting to start bg rebuild\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	aprint_normal("%s: %d ports, %dMB SDRAM, firmware <%s>\n",
	    device_xname(&sc->sc_dev), htole32(fwinfo.sata_ports),
	    htole32(fwinfo.sdram_size), string);

	return 0;
}
650
651 #if NBIO > 0
652 static int
653 arc_bioctl(struct device *self, u_long cmd, void *addr)
654 {
655 struct arc_softc *sc = device_private(self);
656 int error = 0;
657
658 switch (cmd) {
659 case BIOCINQ:
660 error = arc_bio_inq(sc, (struct bioc_inq *)addr);
661 break;
662
663 case BIOCVOL:
664 error = arc_bio_vol(sc, (struct bioc_vol *)addr);
665 break;
666
667 case BIOCDISK:
668 error = arc_bio_disk(sc, (struct bioc_disk *)addr);
669 break;
670
671 case BIOCALARM:
672 error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
673 break;
674
675 default:
676 error = ENOTTY;
677 break;
678 }
679
680 return error;
681 }
682
683 static int
684 arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
685 {
686 uint8_t request[2], reply[1];
687 size_t len;
688 int error = 0;
689
690 switch (ba->ba_opcode) {
691 case BIOC_SAENABLE:
692 case BIOC_SADISABLE:
693 request[0] = ARC_FW_SET_ALARM;
694 request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
695 ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
696 len = sizeof(request);
697
698 break;
699
700 case BIOC_SASILENCE:
701 request[0] = ARC_FW_MUTE_ALARM;
702 len = 1;
703
704 break;
705
706 case BIOC_GASTATUS:
707 /* system info is too big/ugly to deal with here */
708 return arc_bio_alarm_state(sc, ba);
709
710 default:
711 return EOPNOTSUPP;
712 }
713
714 arc_lock(sc);
715 error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
716 arc_unlock(sc);
717
718 if (error != 0)
719 return error;
720
721 switch (reply[0]) {
722 case ARC_FW_CMD_OK:
723 return 0;
724 case ARC_FW_CMD_PASS_REQD:
725 return EPERM;
726 default:
727 return EIO;
728 }
729 }
730
731 static int
732 arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
733 {
734 uint8_t request = ARC_FW_SYSINFO;
735 struct arc_fw_sysinfo *sysinfo;
736 int error = 0;
737
738 sysinfo = malloc(sizeof(struct arc_fw_sysinfo), M_DEVBUF,
739 M_WAITOK|M_ZERO);
740
741 request = ARC_FW_SYSINFO;
742
743 arc_lock(sc);
744 error = arc_msgbuf(sc, &request, sizeof(request),
745 sysinfo, sizeof(struct arc_fw_sysinfo));
746 arc_unlock(sc);
747
748 if (error != 0)
749 goto out;
750
751 ba->ba_status = sysinfo->alarm;
752
753 out:
754 free(sysinfo, M_DEVBUF);
755 return error;
756 }
757
758
759 static int
760 arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
761 {
762 uint8_t request[2];
763 struct arc_fw_sysinfo *sysinfo;
764 struct arc_fw_volinfo *volinfo;
765 int maxvols, nvols = 0, i;
766 int error = 0;
767
768 sysinfo = malloc(sizeof(struct arc_fw_sysinfo), M_DEVBUF,
769 M_WAITOK|M_ZERO);
770 volinfo = malloc(sizeof(struct arc_fw_volinfo), M_DEVBUF,
771 M_WAITOK|M_ZERO);
772
773 arc_lock(sc);
774
775 request[0] = ARC_FW_SYSINFO;
776 error = arc_msgbuf(sc, request, 1, sysinfo,
777 sizeof(struct arc_fw_sysinfo));
778 if (error != 0)
779 goto out;
780
781 maxvols = sysinfo->max_volume_set;
782
783 request[0] = ARC_FW_VOLINFO;
784 for (i = 0; i < maxvols; i++) {
785 request[1] = i;
786 error = arc_msgbuf(sc, request, sizeof(request), volinfo,
787 sizeof(struct arc_fw_volinfo));
788 if (error != 0)
789 goto out;
790
791 /*
792 * I can't find an easy way to see if the volume exists or not
793 * except to say that if it has no capacity then it isn't there.
794 * Ignore passthru volumes, bioc_vol doesn't understand them.
795 */
796 if ((volinfo->capacity != 0 || volinfo->capacity2 != 0) &&
797 volinfo->raid_level != ARC_FW_VOL_RAIDLEVEL_PASSTHRU)
798 nvols++;
799 }
800
801 strlcpy(bi->bi_dev, device_xname(&sc->sc_dev), sizeof(bi->bi_dev));
802 bi->bi_novol = nvols;
803 out:
804 arc_unlock(sc);
805 free(volinfo, M_DEVBUF);
806 free(sysinfo, M_DEVBUF);
807 return error;
808 }
809
810 static int
811 arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
812 {
813 uint8_t request[2];
814 struct arc_fw_sysinfo *sysinfo;
815 int error = 0;
816 int maxvols, nvols = 0, i;
817
818 sysinfo = malloc(sizeof(struct arc_fw_sysinfo), M_DEVBUF,
819 M_WAITOK|M_ZERO);
820
821 request[0] = ARC_FW_SYSINFO;
822 error = arc_msgbuf(sc, request, 1, sysinfo,
823 sizeof(struct arc_fw_sysinfo));
824 if (error != 0)
825 goto out;
826
827 maxvols = sysinfo->max_volume_set;
828
829 request[0] = ARC_FW_VOLINFO;
830 for (i = 0; i < maxvols; i++) {
831 request[1] = i;
832 error = arc_msgbuf(sc, request, sizeof(request), volinfo,
833 sizeof(struct arc_fw_volinfo));
834 if (error != 0)
835 goto out;
836
837 if ((volinfo->capacity == 0 && volinfo->capacity2 == 0) ||
838 volinfo->raid_level == ARC_FW_VOL_RAIDLEVEL_PASSTHRU)
839 continue;
840
841 if (nvols == vol)
842 break;
843
844 nvols++;
845 }
846
847 if (nvols != vol ||
848 (volinfo->capacity == 0 && volinfo->capacity2 == 0) ||
849 volinfo->raid_level == ARC_FW_VOL_RAIDLEVEL_PASSTHRU) {
850 error = ENODEV;
851 goto out;
852 }
853
854 out:
855 free(sysinfo, M_DEVBUF);
856 return error;
857 }
858
859 static int
860 arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
861 {
862 struct arc_fw_volinfo *volinfo;
863 uint64_t blocks;
864 uint32_t status;
865 int error = 0;
866
867 volinfo = malloc(sizeof(struct arc_fw_volinfo), M_DEVBUF,
868 M_WAITOK|M_ZERO);
869
870 arc_lock(sc);
871 error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
872 arc_unlock(sc);
873
874 if (error != 0)
875 goto out;
876
877 bv->bv_percent = -1;
878 bv->bv_seconds = 0;
879
880 status = htole32(volinfo->volume_status);
881 if (status == 0x0) {
882 if (htole32(volinfo->fail_mask) == 0x0)
883 bv->bv_status = BIOC_SVONLINE;
884 else
885 bv->bv_status = BIOC_SVDEGRADED;
886 } else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
887 bv->bv_status = BIOC_SVDEGRADED;
888 } else if (status & ARC_FW_VOL_STATUS_FAILED) {
889 bv->bv_status = BIOC_SVOFFLINE;
890 } else if (status & ARC_FW_VOL_STATUS_INITTING) {
891 bv->bv_status = BIOC_SVBUILDING;
892 bv->bv_percent = htole32(volinfo->progress) / 10;
893 } else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
894 bv->bv_status = BIOC_SVREBUILD;
895 bv->bv_percent = htole32(volinfo->progress) / 10;
896 }
897
898 blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
899 blocks += (uint64_t)htole32(volinfo->capacity);
900 bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */
901
902 switch (volinfo->raid_level) {
903 case ARC_FW_VOL_RAIDLEVEL_0:
904 bv->bv_level = 0;
905 break;
906 case ARC_FW_VOL_RAIDLEVEL_1:
907 bv->bv_level = 1;
908 break;
909 case ARC_FW_VOL_RAIDLEVEL_3:
910 bv->bv_level = 3;
911 break;
912 case ARC_FW_VOL_RAIDLEVEL_5:
913 bv->bv_level = 5;
914 break;
915 case ARC_FW_VOL_RAIDLEVEL_6:
916 bv->bv_level = 6;
917 break;
918 case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
919 default:
920 bv->bv_level = -1;
921 break;
922 }
923
924 bv->bv_nodisk = volinfo->member_disks;
925 strlcpy(bv->bv_dev, volinfo->set_name, sizeof(bv->bv_dev));
926
927 out:
928 free(volinfo, M_DEVBUF);
929 return error;
930 }
931
/*
 * BIOCDISK: report status and identity of one member disk of a
 * volume.  The volume is located via arc_bio_getvol(), the raid set
 * it belongs to is fetched with RAIDINFO, and the member disk is
 * looked up in the raid set's device array before its DISKINFO is
 * queried.
 */
static int
arc_bio_disk(struct arc_softc *sc, struct bioc_disk *bd)
{
	uint8_t request[2];
	struct arc_fw_volinfo *volinfo;
	struct arc_fw_raidinfo *raidinfo;
	struct arc_fw_diskinfo *diskinfo;
	int error = 0;
	uint64_t blocks;
	char model[81];
	char serial[41];
	char rev[17];

	volinfo = malloc(sizeof(struct arc_fw_volinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);
	raidinfo = malloc(sizeof(struct arc_fw_raidinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);
	diskinfo = malloc(sizeof(struct arc_fw_diskinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	arc_lock(sc);

	error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
	if (error != 0)
		goto out;

	request[0] = ARC_FW_RAIDINFO;
	request[1] = volinfo->raid_set_number;
	error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
	    sizeof(struct arc_fw_raidinfo));
	if (error != 0)
		goto out;

	/*
	 * NOTE(review): '>' permits bd_diskid == member_devices; if
	 * member_devices is a count rather than a maximum index this
	 * is an off-by-one -- confirm against the firmware interface.
	 */
	if (bd->bd_diskid > raidinfo->member_devices) {
		error = ENODEV;
		goto out;
	}

	if (raidinfo->device_array[bd->bd_diskid] == 0xff) {
		/*
		 * the disk doesn't exist anymore. bio is too dumb to be
		 * able to display that, so put it on another bus
		 */
		bd->bd_channel = 1;
		bd->bd_target = 0;
		bd->bd_lun = 0;
		bd->bd_status = BIOC_SDOFFLINE;
		strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
		goto out;
	}

	request[0] = ARC_FW_DISKINFO;
	request[1] = raidinfo->device_array[bd->bd_diskid];
	error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
	    sizeof(struct arc_fw_diskinfo));
	if (error != 0)
		goto out;

#if 0
	bd->bd_channel = diskinfo->scsi_attr.channel;
	bd->bd_target = diskinfo->scsi_attr.target;
	bd->bd_lun = diskinfo->scsi_attr.lun;
#endif
	/*
	 * the firmware doesn't seem to fill scsi_attr in, so fake it with
	 * the diskid.
	 */
	bd->bd_channel = 0;
	bd->bd_target = raidinfo->device_array[bd->bd_diskid];
	bd->bd_lun = 0;

	bd->bd_status = BIOC_SDONLINE;
	/* capacity arrives as two 32 bit halves of a block count */
	blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
	blocks += (uint64_t)htole32(diskinfo->capacity);
	bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */

	scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
	scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
	scsipi_strvis(rev, 17, diskinfo->firmware_rev,
	    sizeof(diskinfo->firmware_rev));

	snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
	strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));

out:
	arc_unlock(sc);
	free(diskinfo, M_DEVBUF);
	free(raidinfo, M_DEVBUF);
	free(volinfo, M_DEVBUF);
	return error;
}
1023 #endif /* NBIO > 0 */
1024
/*
 * Compute the firmware message checksum: the low byte of the sum of
 * the two length bytes and every payload byte.
 */
uint8_t
arc_msg_cksum(void *cmd, uint16_t len)
{
	const uint8_t *p = cmd;
	uint8_t sum;

	/* seed with both bytes of the length itself */
	sum = (uint8_t)(len >> 8);
	sum += (uint8_t)len;

	while (len-- > 0)
		sum += *p++;

	return sum;
}
1038
1039
1040 int
1041 arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
1042 size_t rbuflen)
1043 {
1044 uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
1045 uint8_t *wbuf, *rbuf;
1046 int wlen, wdone = 0, rlen, rdone = 0;
1047 struct arc_fw_bufhdr *bufhdr;
1048 uint32_t reg, rwlen;
1049 int error = 0;
1050 #ifdef ARC_DEBUG
1051 int i;
1052 #endif
1053
1054 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
1055 device_xname(&sc->sc_dev), wbuflen, rbuflen);
1056
1057 if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0)
1058 return EBUSY;
1059
1060 wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
1061 wbuf = malloc(wlen, M_TEMP, M_WAITOK);
1062
1063 rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
1064 rbuf = malloc(rlen, M_TEMP, M_WAITOK);
1065
1066 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
1067 device_xname(&sc->sc_dev), wlen, rlen);
1068
1069 bufhdr = (struct arc_fw_bufhdr *)wbuf;
1070 bufhdr->hdr = arc_fw_hdr;
1071 bufhdr->len = htole16(wbuflen);
1072 memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
1073 wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);
1074
1075 reg = ARC_REG_OUTB_DOORBELL_READ_OK;
1076
1077 do {
1078 if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
1079 memset(rwbuf, 0, sizeof(rwbuf));
1080 rwlen = (wlen - wdone) % sizeof(rwbuf);
1081 memcpy(rwbuf, &wbuf[wdone], rwlen);
1082
1083 #ifdef ARC_DEBUG
1084 if (arcdebug & ARC_D_DB) {
1085 printf("%s: write %d:",
1086 device_xname(&sc->sc_dev), rwlen);
1087 for (i = 0; i < rwlen; i++)
1088 printf(" 0x%02x", rwbuf[i]);
1089 printf("\n");
1090 }
1091 #endif
1092
1093 /* copy the chunk to the hw */
1094 arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
1095 arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
1096 sizeof(rwbuf));
1097
1098 /* say we have a buffer for the hw */
1099 arc_write(sc, ARC_REG_INB_DOORBELL,
1100 ARC_REG_INB_DOORBELL_WRITE_OK);
1101
1102 wdone += rwlen;
1103 }
1104
1105 while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
1106 arc_wait(sc);
1107 arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
1108
1109 DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
1110 device_xname(&sc->sc_dev), reg);
1111
1112 if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
1113 rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
1114 if (rwlen > sizeof(rwbuf)) {
1115 DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
1116 device_xname(&sc->sc_dev));
1117 error = EIO;
1118 goto out;
1119 }
1120
1121 arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
1122 sizeof(rwbuf));
1123
1124 arc_write(sc, ARC_REG_INB_DOORBELL,
1125 ARC_REG_INB_DOORBELL_READ_OK);
1126
1127 #ifdef ARC_DEBUG
1128 printf("%s: len: %d+%d=%d/%d\n",
1129 device_xname(&sc->sc_dev),
1130 rwlen, rdone, rwlen + rdone, rlen);
1131 if (arcdebug & ARC_D_DB) {
1132 printf("%s: read:",
1133 device_xname(&sc->sc_dev));
1134 for (i = 0; i < rwlen; i++)
1135 printf(" 0x%02x", rwbuf[i]);
1136 printf("\n");
1137 }
1138 #endif
1139
1140 if ((rdone + rwlen) > rlen) {
1141 DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
1142 device_xname(&sc->sc_dev));
1143 error = EIO;
1144 goto out;
1145 }
1146
1147 memcpy(&rbuf[rdone], rwbuf, rwlen);
1148 rdone += rwlen;
1149 }
1150 } while (rdone != rlen);
1151
1152 bufhdr = (struct arc_fw_bufhdr *)rbuf;
1153 if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
1154 bufhdr->len != htole16(rbuflen)) {
1155 DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
1156 device_xname(&sc->sc_dev));
1157 error = EIO;
1158 goto out;
1159 }
1160
1161 memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);
1162
1163 if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
1164 DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
1165 device_xname(&sc->sc_dev));
1166 error = EIO;
1167 goto out;
1168 }
1169
1170 out:
1171 free(wbuf, M_DEVBUF);
1172 free(rbuf, M_DEVBUF);
1173
1174 return error;
1175 }
1176
/*
 * arc_lock: take exclusive ownership of the firmware doorbell interface.
 *
 * Acquires sc_rwlock as writer, then at splbio rewrites INTRMASK so that
 * only the post queue source remains listed, and sets sc_talking.  While
 * sc_talking is set, doorbell traffic is handled synchronously via
 * arc_wait() instead of the interrupt path.  Paired with arc_unlock().
 *
 * NOTE(review): the INTRMASK writes here and in arc_unlock() suggest the
 * register is a mask of *disabled* sources (~x enables only x) — confirm
 * against the ARC_REG_INTRMASK bit definitions.
 */
void
arc_lock(struct arc_softc *sc)
{
	int s;

	rw_enter(&sc->sc_rwlock, RW_WRITER);
	s = splbio();
	/* leave only the post queue interrupt active; doorbell is polled */
	arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
	sc->sc_talking = 1;
	splx(s);
}
1188
/*
 * arc_unlock: release the firmware doorbell interface taken by arc_lock().
 *
 * Clears sc_talking and restores INTRMASK to cover both the post queue
 * and doorbell sources before dropping sc_rwlock.
 */
void
arc_unlock(struct arc_softc *sc)
{
	int s;

	s = splbio();
	sc->sc_talking = 0;
	/* hand doorbell servicing back to the interrupt handler */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	splx(s);
	rw_exit(&sc->sc_rwlock);
}
1201
/*
 * arc_wait: block (up to one second) for firmware doorbell activity.
 *
 * Temporarily adds the doorbell source to INTRMASK so the interrupt
 * handler can observe it (and presumably wakeup(sc) — the wakeup side
 * is outside this file, confirm), then tsleeps on sc.  Only on timeout
 * (EWOULDBLOCK) is the doorbell source masked off again; after a real
 * wakeup the caller is expected to continue the arc_lock()ed exchange.
 * PCATCH means a signal can also end the sleep early.
 */
void
arc_wait(struct arc_softc *sc)
{
	int s;

	s = splbio();
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	if (tsleep(sc, PWAIT|PCATCH, "arcdb", hz) == EWOULDBLOCK)
		arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
	splx(s);
}
1214
1215 #if NBIO > 0
1216 static void
1217 arc_create_sensors(void *arg)
1218 {
1219 struct arc_softc *sc = arg;
1220 struct bioc_inq bi;
1221 struct bioc_vol bv;
1222 int i;
1223
1224 memset(&bi, 0, sizeof(bi));
1225 if (arc_bio_inq(sc, &bi) != 0) {
1226 aprint_error("%s: unable to query firmware for sensor info\n",
1227 device_xname(&sc->sc_dev));
1228 kthread_exit(0);
1229 }
1230
1231 sc->sc_nsensors = bi.bi_novol;
1232 /*
1233 * There's no point to continue if there are no drives connected...
1234 */
1235 if (!sc->sc_nsensors)
1236 kthread_exit(0);
1237
1238 sc->sc_sme = sysmon_envsys_create();
1239 sc->sc_sensors = malloc(sizeof(envsys_data_t) * sc->sc_nsensors,
1240 M_DEVBUF, M_WAITOK | M_ZERO);
1241
1242 for (i = 0; i < sc->sc_nsensors; i++) {
1243 memset(&bv, 0, sizeof(bv));
1244 bv.bv_volid = i;
1245 if (arc_bio_vol(sc, &bv) != 0)
1246 goto bad;
1247
1248 sc->sc_sensors[i].units = ENVSYS_DRIVE;
1249 sc->sc_sensors[i].monitor = true;
1250 strlcpy(sc->sc_sensors[i].desc, bv.bv_dev,
1251 sizeof(sc->sc_sensors[i].desc));
1252 if (sysmon_envsys_sensor_attach(sc->sc_sme, &sc->sc_sensors[i]))
1253 goto bad;
1254 }
1255
1256 sc->sc_sme->sme_name = device_xname(&sc->sc_dev);
1257 sc->sc_sme->sme_flags = SME_DISABLE_REFRESH;
1258 if (sysmon_envsys_register(sc->sc_sme)) {
1259 aprint_debug("%s: unable to register with sysmon\n",
1260 device_xname(&sc->sc_dev));
1261 goto bad;
1262 }
1263 callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
1264 callout_setfunc(&sc->sc_callout, arc_refresh_sensors, sc);
1265 callout_schedule(&sc->sc_callout, ARC_REFRESH_TIMO);
1266 kthread_exit(0);
1267
1268 bad:
1269 free(sc->sc_sensors, M_DEVBUF);
1270 sysmon_envsys_destroy(sc->sc_sme);
1271 kthread_exit(0);
1272 }
1273
1274 static void
1275 arc_refresh_sensors(void *arg)
1276 {
1277 struct arc_softc *sc = arg;
1278 struct bioc_vol bv;
1279 int i;
1280
1281 memset(&bv, 0, sizeof(bv));
1282 for (i = 0; i < sc->sc_nsensors; i++) {
1283 bv.bv_volid = i;
1284 if (arc_bio_vol(sc, &bv)) {
1285 sc->sc_sensors[i].value_cur = ENVSYS_DRIVE_EMPTY;
1286 sc->sc_sensors[i].state = ENVSYS_SINVALID;
1287 goto out;
1288 }
1289
1290 switch (bv.bv_status) {
1291 case BIOC_SVOFFLINE:
1292 sc->sc_sensors[i].value_cur = ENVSYS_DRIVE_FAIL;
1293 sc->sc_sensors[i].state = ENVSYS_SCRITICAL;
1294 break;
1295 case BIOC_SVDEGRADED:
1296 sc->sc_sensors[i].value_cur = ENVSYS_DRIVE_PFAIL;
1297 sc->sc_sensors[i].state = ENVSYS_SCRITICAL;
1298 break;
1299 case BIOC_SVBUILDING:
1300 sc->sc_sensors[i].value_cur = ENVSYS_DRIVE_REBUILD;
1301 sc->sc_sensors[i].state = ENVSYS_SVALID;
1302 break;
1303 case BIOC_SVSCRUB:
1304 case BIOC_SVONLINE:
1305 sc->sc_sensors[i].value_cur = ENVSYS_DRIVE_ONLINE;
1306 sc->sc_sensors[i].state = ENVSYS_SVALID;
1307 break;
1308 case BIOC_SVINVALID:
1309 /* FALLTRHOUGH */
1310 default:
1311 sc->sc_sensors[i].value_cur = ENVSYS_DRIVE_EMPTY;
1312 sc->sc_sensors[i].state = ENVSYS_SINVALID;
1313 }
1314 }
1315 out:
1316 callout_schedule(&sc->sc_callout, ARC_REFRESH_TIMO);
1317 }
1318 #endif /* NBIO > 0 */
1319
/*
 * arc_read: read a 32-bit controller register at offset r, with a read
 * barrier issued first so the access is not reordered against earlier
 * register traffic.
 *
 * NOTE(review): the DNPRINTF prints bus_size_t with %lx — fine where
 * bus_size_t is unsigned long, but worth confirming on all ports (it is
 * debug-only code).
 */
uint32_t
arc_read(struct arc_softc *sc, bus_size_t r)
{
	uint32_t v;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
	    device_xname(&sc->sc_dev), r, v);

	return v;
}
1334
/*
 * arc_read_region: copy len bytes from the controller register window
 * at offset r into buf, as 32-bit reads behind a read barrier.
 * len is assumed to be a multiple of 4 (len >> 2 words are transferred;
 * a trailing partial word would be silently dropped).
 */
void
arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (uint32_t *)buf, len >> 2);
}
1343
/*
 * arc_write: write a 32-bit value to the controller register at offset
 * r, followed by a write barrier so the store is flushed before any
 * subsequent register access.
 */
void
arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
	    device_xname(&sc->sc_dev), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1354
/*
 * arc_write_region: copy len bytes from buf into the controller
 * register window at offset r, as 32-bit writes followed by a write
 * barrier.  As with arc_read_region(), len is assumed to be a multiple
 * of 4.
 */
void
arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (const uint32_t *)buf, len >> 2);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_WRITE);
}
1363
1364 int
1365 arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1366 uint32_t target)
1367 {
1368 int i;
1369
1370 DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
1371 device_xname(&sc->sc_dev), r, mask, target);
1372
1373 for (i = 0; i < 10000; i++) {
1374 if ((arc_read(sc, r) & mask) == target)
1375 return 0;
1376 delay(1000);
1377 }
1378
1379 return 1;
1380 }
1381
1382 int
1383 arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1384 uint32_t target)
1385 {
1386 int i;
1387
1388 DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
1389 device_xname(&sc->sc_dev), r, mask, target);
1390
1391 for (i = 0; i < 10000; i++) {
1392 if ((arc_read(sc, r) & mask) != target)
1393 return 0;
1394 delay(1000);
1395 }
1396
1397 return 1;
1398 }
1399
1400 int
1401 arc_msg0(struct arc_softc *sc, uint32_t m)
1402 {
1403 /* post message */
1404 arc_write(sc, ARC_REG_INB_MSG0, m);
1405 /* wait for the fw to do it */
1406 if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
1407 ARC_REG_INTRSTAT_MSG0) != 0)
1408 return 1;
1409
1410 /* ack it */
1411 arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);
1412
1413 return 0;
1414 }
1415
/*
 * arc_dmamem_alloc: allocate size bytes of zeroed, DMA-safe memory in a
 * single contiguous segment, map it into kernel virtual space and load
 * it into a DMA map, so both the kva (adm_kva) and the device address
 * (via the map) are usable.  All steps use BUS_DMA_NOWAIT.
 *
 * Returns the descriptor, or NULL on failure; partially acquired
 * resources are released by the unwinding goto chain in reverse order
 * of acquisition.
 */
struct arc_dmamem *
arc_dmamem_alloc(struct arc_softc *sc, size_t size)
{
	struct arc_dmamem *adm;
	int nsegs;

	adm = malloc(sizeof(*adm), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (adm == NULL)
		return NULL;

	adm->adm_size = size;

	/* one segment only, so adm_seg fully describes the memory */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
		goto admfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
	    &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(adm->adm_kva, 0, size);

	return adm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
admfree:
	free(adm, M_DEVBUF);

	return NULL;
}
1459
/*
 * arc_dmamem_free: release everything acquired by arc_dmamem_alloc(),
 * in the reverse order it was set up: unload the DMA map, unmap the
 * kva, free the segment, destroy the map, then free the descriptor.
 */
void
arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
{
	bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
	free(adm, M_DEVBUF);
}
1469
/*
 * arc_alloc_ccbs: allocate the sc_req_count command control blocks and
 * the single DMA-safe slab that backs their firmware command frames
 * (ARC_MAX_IOCMDLEN bytes per ccb).  Each ccb gets its own data dmamap,
 * a pointer into the slab (ccb_cmd) and the shifted device address of
 * its frame (ccb_cmd_post), and is placed on the free list.
 *
 * Returns 0 on success, 1 on failure.  On the free_maps path only the
 * ccbs that made it onto the free list have dmamaps to destroy, which
 * is exactly what draining the list with arc_get_ccb() walks over.
 */
int
arc_alloc_ccbs(struct arc_softc *sc)
{
	struct arc_ccb *ccb;
	uint8_t *cmd;
	int i;

	TAILQ_INIT(&sc->sc_ccb_free);

	/* M_WAITOK: cannot fail */
	sc->sc_ccbs = malloc(sizeof(struct arc_ccb) * sc->sc_req_count,
	    M_DEVBUF, M_WAITOK|M_ZERO);

	sc->sc_requests = arc_dmamem_alloc(sc,
	    ARC_MAX_IOCMDLEN * sc->sc_req_count);
	if (sc->sc_requests == NULL) {
		aprint_error("%s: unable to allocate ccb dmamem\n",
		    device_xname(&sc->sc_dev));
		goto free_ccbs;
	}
	cmd = ARC_DMA_KVA(sc->sc_requests);

	for (i = 0; i < sc->sc_req_count; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
		    MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
			aprint_error("%s: unable to create dmamap for ccb %d\n",
			    device_xname(&sc->sc_dev), i);
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		ccb->ccb_id = i;
		ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;

		/* kva and (shifted) device address of this ccb's frame */
		ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
		ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;

		arc_put_ccb(sc, ccb);
	}

	return 0;

free_maps:
	while ((ccb = arc_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	arc_dmamem_free(sc, sc->sc_requests);

free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF);

	return 1;
}
1524
1525 struct arc_ccb *
1526 arc_get_ccb(struct arc_softc *sc)
1527 {
1528 struct arc_ccb *ccb;
1529
1530 ccb = TAILQ_FIRST(&sc->sc_ccb_free);
1531 if (ccb != NULL)
1532 TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
1533
1534 return ccb;
1535 }
1536
1537 void
1538 arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
1539 {
1540 ccb->ccb_xs = NULL;
1541 memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
1542 TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
1543 }
1544