arcmsr.c revision 1.4 1 /* $NetBSD: arcmsr.c,v 1.4 2007/12/05 18:25:53 xtraeme Exp $ */
2 /* $OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */
3
4 /*
5 * Copyright (c) 2006 David Gwynne <dlg (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include "bio.h"
21
22 #include <sys/cdefs.h>
23 __KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.4 2007/12/05 18:25:53 xtraeme Exp $");
24
25 #include <sys/param.h>
26 #include <sys/buf.h>
27 #include <sys/kernel.h>
28 #include <sys/malloc.h>
29 #include <sys/device.h>
30 #include <sys/kthread.h>
31 #include <sys/mutex.h>
32 #include <sys/condvar.h>
33
34 #if NBIO > 0
35 #include <sys/ioctl.h>
36 #include <dev/biovar.h>
37 #endif
38
39 #include <dev/pci/pcireg.h>
40 #include <dev/pci/pcivar.h>
41 #include <dev/pci/pcidevs.h>
42
43 #include <dev/scsipi/scsipi_all.h>
44 #include <dev/scsipi/scsi_all.h>
45 #include <dev/scsipi/scsiconf.h>
46
47 #include <dev/sysmon/sysmonvar.h>
48
49 #include <sys/bus.h>
50
51 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
52
53 #include <dev/pci/arcmsrvar.h>
54
55 /* #define ARC_DEBUG */
56 #ifdef ARC_DEBUG
57 #define ARC_D_INIT (1<<0)
58 #define ARC_D_RW (1<<1)
59 #define ARC_D_DB (1<<2)
60
61 int arcdebug = 0;
62
63 #define DPRINTF(p...) do { if (arcdebug) printf(p); } while (0)
64 #define DNPRINTF(n, p...) do { if ((n) & arcdebug) printf(p); } while (0)
65
66 #else
67 #define DPRINTF(p...) /* p */
68 #define DNPRINTF(n, p...) /* n, p */
69 #endif
70
71 /*
72 * the fw header must always equal this.
73 */
74 static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };
75
76 /*
77 * autoconf(9) glue.
78 */
79 static int arc_match(device_t, struct cfdata *, void *);
80 static void arc_attach(device_t, device_t, void *);
81 static int arc_detach(device_t, int);
82 static void arc_shutdown(void *);
83 static int arc_intr(void *);
84 static void arc_minphys(struct buf *);
85
86 CFATTACH_DECL(arcmsr, sizeof(struct arc_softc),
87 arc_match, arc_attach, arc_detach, NULL);
88
89 /*
90 * bio(4) and sysmon_envsys(9) glue.
91 */
92 #if NBIO > 0
93 static int arc_bioctl(struct device *, u_long, void *);
94 static int arc_bio_inq(struct arc_softc *, struct bioc_inq *);
95 static int arc_bio_vol(struct arc_softc *, struct bioc_vol *);
96 static int arc_bio_disk(struct arc_softc *, struct bioc_disk *);
97 static int arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
98 static int arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
99 static int arc_bio_getvol(struct arc_softc *, int,
100 struct arc_fw_volinfo *);
101 static void arc_create_sensors(void *);
102 static void arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
103 #endif
104
105 static int
106 arc_match(device_t parent, struct cfdata *match, void *aux)
107 {
108 struct pci_attach_args *pa = aux;
109
110 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
111 switch (PCI_PRODUCT(pa->pa_id)) {
112 case PCI_PRODUCT_ARECA_ARC1110:
113 case PCI_PRODUCT_ARECA_ARC1120:
114 case PCI_PRODUCT_ARECA_ARC1130:
115 case PCI_PRODUCT_ARECA_ARC1160:
116 case PCI_PRODUCT_ARECA_ARC1170:
117 case PCI_PRODUCT_ARECA_ARC1200:
118 case PCI_PRODUCT_ARECA_ARC1202:
119 case PCI_PRODUCT_ARECA_ARC1210:
120 case PCI_PRODUCT_ARECA_ARC1220:
121 case PCI_PRODUCT_ARECA_ARC1230:
122 case PCI_PRODUCT_ARECA_ARC1260:
123 case PCI_PRODUCT_ARECA_ARC1270:
124 case PCI_PRODUCT_ARECA_ARC1280:
125 case PCI_PRODUCT_ARECA_ARC1380:
126 case PCI_PRODUCT_ARECA_ARC1381:
127 case PCI_PRODUCT_ARECA_ARC1680:
128 case PCI_PRODUCT_ARECA_ARC1681:
129 return 1;
130 default:
131 break;
132 }
133 }
134
135 return 0;
136 }
137
/*
 * autoconf(9) attach: map PCI resources, handshake with the IOP
 * firmware, allocate command ccbs and attach a scsipi channel on top.
 * On partial failure the PCI mapping is torn down again; the failing
 * helper prints the error message.
 */
static void
arc_attach(device_t parent, device_t self, void *aux)
{
	struct arc_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_chan;

	sc->sc_talking = 0;
	/* shared by the interrupt handler and the firmware ioctl path */
	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_condvar, "arcdb");

	if (arc_map_pci_resources(sc, pa) != 0) {
		/* error message printed by arc_map_pci_resources */
		return;
	}

	if (arc_query_firmware(sc) != 0) {
		/* error message printed by arc_query_firmware */
		goto unmap_pci;
	}

	if (arc_alloc_ccbs(sc) != 0) {
		/* error message printed by arc_alloc_ccbs */
		goto unmap_pci;
	}

	/* flush the cache and stop rebuilds on shutdown */
	sc->sc_shutdownhook = shutdownhook_establish(arc_shutdown, sc);
	if (sc->sc_shutdownhook == NULL)
		panic("unable to establish arc powerhook");

	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = self;
	adapt->adapt_nchannels = 1;
	/* spread the firmware's request slots evenly over the targets */
	adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_minphys = arc_minphys;
	adapt->adapt_request = arc_scsi_cmd;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_nluns = ARC_MAX_LUN;
	chan->chan_ntargets = ARC_MAX_TARGET;
	chan->chan_id = ARC_MAX_TARGET;
	chan->chan_channel = 0;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	(void)config_found(self, &sc->sc_chan, scsiprint);

	/* enable interrupts */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));

#if NBIO > 0
	if (bio_register(self, arc_bioctl) != 0)
		panic("%s: bioctl registration failed\n", device_xname(self));
	/*
	 * you need to talk to the firmware to get volume info. our firmware
	 * interface relies on being able to sleep, so we need to use a thread
	 * to do the work.
	 */
	if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
		panic("%s: unable to create a kernel thread for sensors\n",
		    device_xname(self));
#endif

	return;

unmap_pci:
	arc_unmap_pci_resources(sc);
}
211
/*
 * autoconf(9) detach: remove the shutdown hook, then ask the firmware
 * to stop background rebuilds and flush its cache so the array is left
 * consistent.  Timeouts are reported but not treated as fatal.
 */
static int
arc_detach(device_t self, int flags)
{
	struct arc_softc *sc = device_private(self);

	shutdownhook_disestablish(sc->sc_shutdownhook);

	mutex_enter(&sc->sc_mutex);
	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
		aprint_error("%s: timeout waiting to stop bg rebuild\n",
		    device_xname(&sc->sc_dev));

	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
		aprint_error("%s: timeout waiting to flush cache\n",
		    device_xname(&sc->sc_dev));
	mutex_exit(&sc->sc_mutex);

	return 0;
}
231
/*
 * Shutdown hook: same quiesce sequence as detach — stop background
 * rebuilds and flush the controller cache before the machine goes down.
 */
static void
arc_shutdown(void *xsc)
{
	struct arc_softc *sc = xsc;

	mutex_enter(&sc->sc_mutex);
	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
		aprint_error("%s: timeout waiting to stop bg rebuild\n",
		    device_xname(&sc->sc_dev));

	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
		aprint_error("%s: timeout waiting to flush cache\n",
		    device_xname(&sc->sc_dev));
	mutex_exit(&sc->sc_mutex);
}
247
248 static void
249 arc_minphys(struct buf *bp)
250 {
251 if (bp->b_bcount > MAXPHYS)
252 bp->b_bcount = MAXPHYS;
253 minphys(bp);
254 }
255
/*
 * Interrupt handler.  Handles two sources: doorbell interrupts, which
 * either wake a sleeping arc_msgbuf() transaction (sc_talking) or are
 * acknowledged and dropped; and post-queue interrupts, for which every
 * completed command is popped off the reply queue and finished via
 * arc_scsi_cmd_done().  Returns 0 if the interrupt was not ours.
 */
static int
arc_intr(void *arg)
{
	struct arc_softc *sc = arg;
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg, intrstat;

	mutex_enter(&sc->sc_mutex);
	intrstat = arc_read(sc, ARC_REG_INTRSTAT);
	if (intrstat == 0x0) {
		/* not our interrupt */
		mutex_exit(&sc->sc_mutex);
		return 0;
	}

	/* ack the sources we handle */
	intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
	arc_write(sc, ARC_REG_INTRSTAT, intrstat);

	if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
		if (sc->sc_talking) {
			/* if an ioctl is talking, wake it up */
			arc_write(sc, ARC_REG_INTRMASK,
			    ~ARC_REG_INTRMASK_POSTQUEUE);
			cv_broadcast(&sc->sc_condvar);
		} else {
			/* otherwise drop it */
			reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
			arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
			if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
				arc_write(sc, ARC_REG_INB_DOORBELL,
				    ARC_REG_INB_DOORBELL_READ_OK);
		}
	}
	mutex_exit(&sc->sc_mutex);

	/* reap completed commands; 0xffffffff means the queue is empty */
	while ((reg = arc_pop(sc)) != 0xffffffff) {
		/* the reply entry encodes the bus address of the frame */
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	}

	return 1;
}
307
/*
 * scsipi request entry point.  For ADAPTER_REQ_RUN_XFER, builds an
 * arc_msg_scsicmd in the ccb's DMA-able command frame and posts it to
 * the controller; polled commands are spun on via arc_complete().
 * Resource-grow and xfer-mode requests are not supported.
 */
void
arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct arc_softc *sc = device_private(adapt->adapt_dev);
	struct arc_ccb *ccb;
	struct arc_msg_scsicmd *cmd;
	uint32_t reg;
	uint8_t target;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* Not supported. */
		return;
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	if (xs->cmdlen > ARC_MSG_CDBLEN) {
		/* CDB won't fit in the message frame: ILLEGAL REQUEST */
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.scsi_sense.asc = 0x20;
		xs->error = XS_SENSE;
		xs->status = SCSI_CHECK;
		scsipi_done(xs);
		return;
	}

	ccb = arc_get_ccb(sc);
	if (ccb == NULL) {
		/* all command slots busy; scsipi will retry */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	if (arc_load_xs(ccb) != 0) {
		/* failed to DMA-map the data buffer */
		xs->error = XS_DRIVER_STUFFUP;
		arc_put_ccb(sc, ccb);
		scsipi_done(xs);
		return;
	}

	cmd = &ccb->ccb_cmd->cmd;
	reg = ccb->ccb_cmd_post;

	/* bus is always 0 */
	cmd->target = target;
	cmd->lun = periph->periph_lun;
	cmd->function = 1; /* XXX magic number */

	cmd->cdb_len = xs->cmdlen;
	cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
	if (xs->xs_control & XS_CTL_DATA_OUT)
		cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
	if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
		/* long SG lists need the big frame format */
		cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
		reg |= ARC_REG_POST_QUEUE_BIGFRAME;
	}

	/* context comes back in the reply so we can find the ccb */
	cmd->context = htole32(ccb->ccb_id);
	cmd->data_len = htole32(xs->datalen);

	memcpy(cmd->cdb, xs->cmd, xs->cmdlen);

	/* we've built the command, let's put it on the hw */
	bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	arc_push(sc, reg);
	if (xs->xs_control & XS_CTL_POLL) {
		/* polled command: busy-wait for completion */
		if (arc_complete(sc, ccb, xs->timeout) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
		}
	}
}
397
/*
 * DMA-map an xfer's data buffer and build the ccb's scatter/gather
 * list (64-bit entries, little-endian for the firmware).  Returns 0 on
 * success, 1 if the dmamap load failed.  A zero-length transfer needs
 * no mapping and succeeds trivially.
 */
int
arc_load_xs(struct arc_ccb *ccb)
{
	struct arc_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
	uint64_t addr;
	int i, error;

	if (xs->datalen == 0)
		return 0;

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error != 0) {
		aprint_error("%s: error %d loading dmamap\n",
		    device_xname(&sc->sc_dev), error);
		return 1;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		sge = &sgl[i];

		sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
		addr = dmap->dm_segs[i].ds_addr;
		sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
		sge->sg_lo_addr = htole32((uint32_t)addr);
	}

	/* flush the buffer for the direction the device will use */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
436
/*
 * Finish a command: sync and unload its data DMA map, translate the
 * firmware completion status (reply queue error bit plus the status
 * byte in the command frame) into scsipi status/error, release the ccb
 * and hand the xfer back to scsipi.
 */
void
arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct arc_msg_scsicmd *cmd;

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	/* timeout_del */
	xs->status |= XS_STS_DONE;

	if (reg & ARC_REG_REPLY_QUEUE_ERR) {
		cmd = &ccb->ccb_cmd->cmd;

		switch (cmd->status) {
		case ARC_MSG_STATUS_SELTIMEOUT:
		case ARC_MSG_STATUS_ABORTED:
		case ARC_MSG_STATUS_INIT_FAIL:
			/* report all of these as selection timeouts */
			xs->status = SCSI_OK;
			xs->error = XS_SELTIMEOUT;
			break;

		case SCSI_CHECK:
			/* pass the firmware's sense data back up */
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, cmd->sense_data,
			    min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | 0x70;
			xs->status = SCSI_CHECK;
			xs->error = XS_SENSE;
			xs->resid = 0;
			break;

		default:
			/* unknown device status */
			xs->error = XS_BUSY; /* try again later? */
			xs->status = SCSI_BUSY;
			break;
		}
	} else {
		xs->status = SCSI_OK;
		xs->error = XS_NOERROR;
		xs->resid = 0;
	}

	arc_put_ccb(sc, ccb);
	scsipi_done(xs);
}
491
/*
 * Busy-wait for a specific ccb to complete, finishing any other
 * commands that come off the reply queue in the meantime.  Used for
 * XS_CTL_POLL commands where interrupts cannot be relied upon.
 * timeout is decremented once per millisecond of idle polling; returns
 * 1 on timeout, 0 once nccb has completed.
 */
int
arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
{
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg;

	do {
		reg = arc_pop(sc);
		if (reg == 0xffffffff) {
			/* queue empty: burn 1ms of the timeout budget */
			if (timeout-- == 0)
				return 1;

			delay(1000);
			continue;
		}

		/* reply entry encodes the bus address of the frame */
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	} while (nccb != ccb);

	return 0;
}
524
/*
 * Map the controller's register BAR and hook up the interrupt handler
 * at IPL_BIO.  Returns 0 on success, 1 on failure (with the register
 * window unmapped again).
 */
int
arc_map_pci_resources(struct arc_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t memtype;
	pci_intr_handle_t ih;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
	if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
		aprint_error(": unable to map system interface register\n");
		return 1;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error(": unable to map interrupt\n");
		goto unmap;
	}

	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    arc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error(": unable to map interrupt [2]\n");
		goto unmap;
	}
	aprint_normal(": interrupting at %s\n",
	    pci_intr_string(pa->pa_pc, ih));

	return 0;

unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
	return 1;
}
563
/*
 * Undo arc_map_pci_resources(): remove the interrupt handler and
 * unmap the register window.
 */
void
arc_unmap_pci_resources(struct arc_softc *sc)
{
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}
571
572 int
573 arc_query_firmware(struct arc_softc *sc)
574 {
575 struct arc_msg_firmware_info fwinfo;
576 char string[81]; /* sizeof(vendor)*2+1 */
577
578 mutex_enter(&sc->sc_mutex);
579 if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
580 ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
581 aprint_debug("%s: timeout waiting for firmware ok\n",
582 device_xname(&sc->sc_dev));
583 mutex_enter(&sc->sc_mutex);
584 return 1;
585 }
586
587 if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
588 aprint_debug("%s: timeout waiting for get config\n",
589 device_xname(&sc->sc_dev));
590 mutex_exit(&sc->sc_mutex);
591 return 1;
592 }
593
594 if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
595 aprint_debug("%s: timeout waiting to start bg rebuild\n",
596 device_xname(&sc->sc_dev));
597 mutex_exit(&sc->sc_mutex);
598 return 1;
599 }
600 mutex_exit(&sc->sc_mutex);
601
602 arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));
603
604 DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
605 device_xname(&sc->sc_dev), htole32(fwinfo.signature));
606
607 if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
608 aprint_error("%s: invalid firmware info from iop\n",
609 device_xname(&sc->sc_dev));
610 return 1;
611 }
612
613 DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
614 device_xname(&sc->sc_dev),
615 htole32(fwinfo.request_len));
616 DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
617 device_xname(&sc->sc_dev),
618 htole32(fwinfo.queue_len));
619 DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
620 device_xname(&sc->sc_dev),
621 htole32(fwinfo.sdram_size));
622 DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
623 device_xname(&sc->sc_dev),
624 htole32(fwinfo.sata_ports));
625
626 scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
627 DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
628 device_xname(&sc->sc_dev), string);
629
630 scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));
631
632 aprint_normal("%s: Areca %s Host Adapter RAID controller\n",
633 device_xname(&sc->sc_dev), string);
634
635 scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
636 DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
637 device_xname(&sc->sc_dev), string);
638
639 if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
640 aprint_error("%s: unexpected request frame size (%d != %d)\n",
641 device_xname(&sc->sc_dev),
642 htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
643 return 1;
644 }
645
646 sc->sc_req_count = htole32(fwinfo.queue_len);
647
648 aprint_normal("%s: %d ports, %dMB SDRAM, firmware <%s>\n",
649 device_xname(&sc->sc_dev), htole32(fwinfo.sata_ports),
650 htole32(fwinfo.sdram_size), string);
651
652 return 0;
653 }
654
655 #if NBIO > 0
656 static int
657 arc_bioctl(struct device *self, u_long cmd, void *addr)
658 {
659 struct arc_softc *sc = device_private(self);
660 int error = 0;
661
662 switch (cmd) {
663 case BIOCINQ:
664 error = arc_bio_inq(sc, (struct bioc_inq *)addr);
665 break;
666
667 case BIOCVOL:
668 error = arc_bio_vol(sc, (struct bioc_vol *)addr);
669 break;
670
671 case BIOCDISK:
672 error = arc_bio_disk(sc, (struct bioc_disk *)addr);
673 break;
674
675 case BIOCALARM:
676 error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
677 break;
678
679 default:
680 error = ENOTTY;
681 break;
682 }
683
684 return error;
685 }
686
/*
 * BIOCALARM handler: enable, disable or silence the controller alarm
 * via a firmware message, or fetch its current state through
 * arc_bio_alarm_state().  The single reply byte is the firmware
 * status code.
 */
static int
arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
{
	uint8_t request[2], reply[1];
	size_t len;
	int error = 0;

	switch (ba->ba_opcode) {
	case BIOC_SAENABLE:
	case BIOC_SADISABLE:
		request[0] = ARC_FW_SET_ALARM;
		request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
		    ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
		len = sizeof(request);

		break;

	case BIOC_SASILENCE:
		/* one-byte command, no argument */
		request[0] = ARC_FW_MUTE_ALARM;
		len = 1;

		break;

	case BIOC_GASTATUS:
		/* system info is too big/ugly to deal with here */
		return arc_bio_alarm_state(sc, ba);

	default:
		return EOPNOTSUPP;
	}

	arc_lock(sc);
	error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
	arc_unlock(sc);

	if (error != 0)
		return error;

	switch (reply[0]) {
	case ARC_FW_CMD_OK:
		return 0;
	case ARC_FW_CMD_PASS_REQD:
		return EPERM;
	default:
		return EIO;
	}
}
734
735 static int
736 arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
737 {
738 uint8_t request = ARC_FW_SYSINFO;
739 struct arc_fw_sysinfo *sysinfo;
740 int error = 0;
741
742 sysinfo = malloc(sizeof(struct arc_fw_sysinfo), M_DEVBUF,
743 M_WAITOK|M_ZERO);
744
745 request = ARC_FW_SYSINFO;
746
747 arc_lock(sc);
748 error = arc_msgbuf(sc, &request, sizeof(request),
749 sysinfo, sizeof(struct arc_fw_sysinfo));
750 arc_unlock(sc);
751
752 if (error != 0)
753 goto out;
754
755 ba->ba_status = sysinfo->alarm;
756
757 out:
758 free(sysinfo, M_DEVBUF);
759 return error;
760 }
761
762
/*
 * BIOCINQ handler: walk every volume slot the firmware reports and
 * count the configured, non-passthru volumes.  A volume with zero
 * capacity is treated as absent (there is no explicit "exists" flag).
 */
static int
arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
{
	uint8_t request[2];
	struct arc_fw_sysinfo *sysinfo;
	struct arc_fw_volinfo *volinfo;
	int maxvols, nvols = 0, i;
	int error = 0;

	sysinfo = malloc(sizeof(struct arc_fw_sysinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);
	volinfo = malloc(sizeof(struct arc_fw_volinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	arc_lock(sc);

	request[0] = ARC_FW_SYSINFO;
	error = arc_msgbuf(sc, request, 1, sysinfo,
	    sizeof(struct arc_fw_sysinfo));
	if (error != 0)
		goto out;

	maxvols = sysinfo->max_volume_set;

	request[0] = ARC_FW_VOLINFO;
	for (i = 0; i < maxvols; i++) {
		request[1] = i;
		error = arc_msgbuf(sc, request, sizeof(request), volinfo,
		    sizeof(struct arc_fw_volinfo));
		if (error != 0)
			goto out;

		/*
		 * I can't find an easy way to see if the volume exists or not
		 * except to say that if it has no capacity then it isn't there.
		 * Ignore passthru volumes, bioc_vol doesn't understand them.
		 */
		if ((volinfo->capacity != 0 || volinfo->capacity2 != 0) &&
		    volinfo->raid_level != ARC_FW_VOL_RAIDLEVEL_PASSTHRU)
			nvols++;
	}

	strlcpy(bi->bi_dev, device_xname(&sc->sc_dev), sizeof(bi->bi_dev));
	bi->bi_novol = nvols;
out:
	arc_unlock(sc);
	free(volinfo, M_DEVBUF);
	free(sysinfo, M_DEVBUF);
	return error;
}
813
/*
 * Translate a bio(4) volume index into the matching firmware volume
 * info, skipping empty and passthru slots (matching the counting done
 * by arc_bio_inq).  Returns ENODEV if the index has no such volume.
 * Callers hold the arc lock around this (it issues arc_msgbuf calls).
 */
static int
arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
{
	uint8_t request[2];
	struct arc_fw_sysinfo *sysinfo;
	int error = 0;
	int maxvols, nvols = 0, i;

	sysinfo = malloc(sizeof(struct arc_fw_sysinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	request[0] = ARC_FW_SYSINFO;
	error = arc_msgbuf(sc, request, 1, sysinfo,
	    sizeof(struct arc_fw_sysinfo));
	if (error != 0)
		goto out;

	maxvols = sysinfo->max_volume_set;

	request[0] = ARC_FW_VOLINFO;
	for (i = 0; i < maxvols; i++) {
		request[1] = i;
		error = arc_msgbuf(sc, request, sizeof(request), volinfo,
		    sizeof(struct arc_fw_volinfo));
		if (error != 0)
			goto out;

		/* skip absent (zero capacity) and passthru slots */
		if ((volinfo->capacity == 0 && volinfo->capacity2 == 0) ||
		    volinfo->raid_level == ARC_FW_VOL_RAIDLEVEL_PASSTHRU)
			continue;

		if (nvols == vol)
			break;

		nvols++;
	}

	/* ran off the end, or the slot we stopped on isn't usable */
	if (nvols != vol ||
	    (volinfo->capacity == 0 && volinfo->capacity2 == 0) ||
	    volinfo->raid_level == ARC_FW_VOL_RAIDLEVEL_PASSTHRU) {
		error = ENODEV;
		goto out;
	}

out:
	free(sysinfo, M_DEVBUF);
	return error;
}
862
/*
 * BIOCVOL handler: fill in a bioc_vol from the firmware volume info —
 * status (online/degraded/offline/building/rebuilding plus progress),
 * size, RAID level, member disk count and set name.
 */
static int
arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
{
	struct arc_fw_volinfo *volinfo;
	uint64_t blocks;
	uint32_t status;
	int error = 0;

	volinfo = malloc(sizeof(struct arc_fw_volinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	arc_lock(sc);
	error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
	arc_unlock(sc);

	if (error != 0)
		goto out;

	bv->bv_percent = -1;
	bv->bv_seconds = 0;

	status = htole32(volinfo->volume_status);
	if (status == 0x0) {
		/* no transition in progress: online unless members failed */
		if (htole32(volinfo->fail_mask) == 0x0)
			bv->bv_status = BIOC_SVONLINE;
		else
			bv->bv_status = BIOC_SVDEGRADED;
	} else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
		bv->bv_status = BIOC_SVDEGRADED;
	} else if (status & ARC_FW_VOL_STATUS_FAILED) {
		bv->bv_status = BIOC_SVOFFLINE;
	} else if (status & ARC_FW_VOL_STATUS_INITTING) {
		bv->bv_status = BIOC_SVBUILDING;
		/* firmware progress is in tenths of a percent */
		bv->bv_percent = htole32(volinfo->progress) / 10;
	} else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
		bv->bv_status = BIOC_SVREBUILD;
		bv->bv_percent = htole32(volinfo->progress) / 10;
	}

	/* capacity is a 64-bit block count split over two 32-bit fields */
	blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
	blocks += (uint64_t)htole32(volinfo->capacity);
	bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */

	switch (volinfo->raid_level) {
	case ARC_FW_VOL_RAIDLEVEL_0:
		bv->bv_level = 0;
		break;
	case ARC_FW_VOL_RAIDLEVEL_1:
		bv->bv_level = 1;
		break;
	case ARC_FW_VOL_RAIDLEVEL_3:
		bv->bv_level = 3;
		break;
	case ARC_FW_VOL_RAIDLEVEL_5:
		bv->bv_level = 5;
		break;
	case ARC_FW_VOL_RAIDLEVEL_6:
		bv->bv_level = 6;
		break;
	case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
	default:
		bv->bv_level = -1;
		break;
	}

	bv->bv_nodisk = volinfo->member_disks;
	strlcpy(bv->bv_dev, volinfo->set_name, sizeof(bv->bv_dev));

out:
	free(volinfo, M_DEVBUF);
	return error;
}
935
/*
 * BIOCDISK handler: resolve the volume, look up its raid set, then
 * fetch the per-disk info for the requested member and fill in the
 * bioc_disk (position, status, size, vendor/model/serial strings).
 * A member slot of 0xff means the disk is gone; it is reported as an
 * offline "disk missing" entry on a fake second channel.
 */
static int
arc_bio_disk(struct arc_softc *sc, struct bioc_disk *bd)
{
	uint8_t request[2];
	struct arc_fw_volinfo *volinfo;
	struct arc_fw_raidinfo *raidinfo;
	struct arc_fw_diskinfo *diskinfo;
	int error = 0;
	uint64_t blocks;
	char model[81];
	char serial[41];
	char rev[17];

	volinfo = malloc(sizeof(struct arc_fw_volinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);
	raidinfo = malloc(sizeof(struct arc_fw_raidinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);
	diskinfo = malloc(sizeof(struct arc_fw_diskinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	arc_lock(sc);

	error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
	if (error != 0)
		goto out;

	request[0] = ARC_FW_RAIDINFO;
	request[1] = volinfo->raid_set_number;
	error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
	    sizeof(struct arc_fw_raidinfo));
	if (error != 0)
		goto out;

	if (bd->bd_diskid > raidinfo->member_devices) {
		error = ENODEV;
		goto out;
	}

	if (raidinfo->device_array[bd->bd_diskid] == 0xff) {
		/*
		 * the disk doesn't exist anymore. bio is too dumb to be
		 * able to display that, so put it on another bus
		 */
		bd->bd_channel = 1;
		bd->bd_target = 0;
		bd->bd_lun = 0;
		bd->bd_status = BIOC_SDOFFLINE;
		strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
		goto out;
	}

	request[0] = ARC_FW_DISKINFO;
	request[1] = raidinfo->device_array[bd->bd_diskid];
	error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
	    sizeof(struct arc_fw_diskinfo));
	if (error != 0)
		goto out;

#if 0
	bd->bd_channel = diskinfo->scsi_attr.channel;
	bd->bd_target = diskinfo->scsi_attr.target;
	bd->bd_lun = diskinfo->scsi_attr.lun;
#endif
	/*
	 * the firwmare doesnt seem to fill scsi_attr in, so fake it with
	 * the diskid.
	 */
	bd->bd_channel = 0;
	bd->bd_target = raidinfo->device_array[bd->bd_diskid];
	bd->bd_lun = 0;

	bd->bd_status = BIOC_SDONLINE;
	/* capacity is a 64-bit block count split over two 32-bit fields */
	blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
	blocks += (uint64_t)htole32(diskinfo->capacity);
	bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */

	scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
	scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
	scsipi_strvis(rev, 17, diskinfo->firmware_rev,
	    sizeof(diskinfo->firmware_rev));

	snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
	strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));

out:
	arc_unlock(sc);
	free(diskinfo, M_DEVBUF);
	free(raidinfo, M_DEVBUF);
	free(volinfo, M_DEVBUF);
	return error;
}
1027 #endif /* NBIO > 0 */
1028
/*
 * Compute the firmware message checksum: the truncated sum of both
 * length bytes and every payload byte, modulo 256.
 */
uint8_t
arc_msg_cksum(void *cmd, uint16_t len)
{
	const uint8_t *p = cmd;
	uint8_t sum;
	uint16_t off;

	sum = (uint8_t)(len >> 8) + (uint8_t)len;
	for (off = 0; off < len; off++)
		sum += p[off];

	return sum;
}
1042
1043
1044 int
1045 arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
1046 size_t rbuflen)
1047 {
1048 uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
1049 uint8_t *wbuf, *rbuf;
1050 int wlen, wdone = 0, rlen, rdone = 0;
1051 struct arc_fw_bufhdr *bufhdr;
1052 uint32_t reg, rwlen;
1053 int error = 0;
1054 #ifdef ARC_DEBUG
1055 int i;
1056 #endif
1057
1058 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
1059 device_xname(&sc->sc_dev), wbuflen, rbuflen);
1060
1061 if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0)
1062 return EBUSY;
1063
1064 wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
1065 wbuf = malloc(wlen, M_TEMP, M_WAITOK);
1066
1067 rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
1068 rbuf = malloc(rlen, M_TEMP, M_WAITOK);
1069
1070 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
1071 device_xname(&sc->sc_dev), wlen, rlen);
1072
1073 bufhdr = (struct arc_fw_bufhdr *)wbuf;
1074 bufhdr->hdr = arc_fw_hdr;
1075 bufhdr->len = htole16(wbuflen);
1076 memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
1077 wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);
1078
1079 reg = ARC_REG_OUTB_DOORBELL_READ_OK;
1080
1081 do {
1082 if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
1083 memset(rwbuf, 0, sizeof(rwbuf));
1084 rwlen = (wlen - wdone) % sizeof(rwbuf);
1085 memcpy(rwbuf, &wbuf[wdone], rwlen);
1086
1087 #ifdef ARC_DEBUG
1088 if (arcdebug & ARC_D_DB) {
1089 printf("%s: write %d:",
1090 device_xname(&sc->sc_dev), rwlen);
1091 for (i = 0; i < rwlen; i++)
1092 printf(" 0x%02x", rwbuf[i]);
1093 printf("\n");
1094 }
1095 #endif
1096
1097 /* copy the chunk to the hw */
1098 arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
1099 arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
1100 sizeof(rwbuf));
1101
1102 /* say we have a buffer for the hw */
1103 arc_write(sc, ARC_REG_INB_DOORBELL,
1104 ARC_REG_INB_DOORBELL_WRITE_OK);
1105
1106 wdone += rwlen;
1107 }
1108
1109 while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
1110 arc_wait(sc);
1111 arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
1112
1113 DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
1114 device_xname(&sc->sc_dev), reg);
1115
1116 if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
1117 rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
1118 if (rwlen > sizeof(rwbuf)) {
1119 DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
1120 device_xname(&sc->sc_dev));
1121 error = EIO;
1122 goto out;
1123 }
1124
1125 arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
1126 sizeof(rwbuf));
1127
1128 arc_write(sc, ARC_REG_INB_DOORBELL,
1129 ARC_REG_INB_DOORBELL_READ_OK);
1130
1131 #ifdef ARC_DEBUG
1132 printf("%s: len: %d+%d=%d/%d\n",
1133 device_xname(&sc->sc_dev),
1134 rwlen, rdone, rwlen + rdone, rlen);
1135 if (arcdebug & ARC_D_DB) {
1136 printf("%s: read:",
1137 device_xname(&sc->sc_dev));
1138 for (i = 0; i < rwlen; i++)
1139 printf(" 0x%02x", rwbuf[i]);
1140 printf("\n");
1141 }
1142 #endif
1143
1144 if ((rdone + rwlen) > rlen) {
1145 DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
1146 device_xname(&sc->sc_dev));
1147 error = EIO;
1148 goto out;
1149 }
1150
1151 memcpy(&rbuf[rdone], rwbuf, rwlen);
1152 rdone += rwlen;
1153 }
1154 } while (rdone != rlen);
1155
1156 bufhdr = (struct arc_fw_bufhdr *)rbuf;
1157 if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
1158 bufhdr->len != htole16(rbuflen)) {
1159 DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
1160 device_xname(&sc->sc_dev));
1161 error = EIO;
1162 goto out;
1163 }
1164
1165 memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);
1166
1167 if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
1168 DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
1169 device_xname(&sc->sc_dev));
1170 error = EIO;
1171 goto out;
1172 }
1173
1174 out:
1175 free(wbuf, M_TEMP);
1176 free(rbuf, M_TEMP);
1177
1178 return error;
1179 }
1180
/*
 * Begin a register-level conversation with the firmware.  Takes the
 * adapter mutex and masks everything except the post queue interrupt,
 * so doorbell traffic is polled for via arc_wait() while sc_talking is
 * set instead of being serviced by the interrupt handler.
 * Released by arc_unlock().
 */
void
arc_lock(struct arc_softc *sc)
{
	mutex_enter(&sc->sc_mutex);
	/* leave only the post queue interrupt enabled */
	arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
	sc->sc_talking = 1;
}
1188
/*
 * End a firmware conversation started by arc_lock(): clear sc_talking,
 * re-enable the doorbell interrupt alongside the post queue interrupt,
 * and drop the adapter mutex.
 */
void
arc_unlock(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	sc->sc_talking = 0;
	/* unmask both the post queue and doorbell interrupts again */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	mutex_exit(&sc->sc_mutex);
}
1199
/*
 * Sleep for up to one second waiting for the firmware to ring the
 * doorbell.  The doorbell interrupt is unmasked for the duration of
 * the wait (the interrupt handler is expected to signal sc_condvar);
 * on timeout it is masked again so the caller falls back to polling.
 * Must be called with the adapter mutex held (see arc_lock()).
 */
void
arc_wait(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	/* let the doorbell interrupt wake us up */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	if (cv_timedwait_sig(&sc->sc_condvar, &sc->sc_mutex, hz) ==
	    EWOULDBLOCK)
		/* timed out: mask the doorbell again, resume polling */
		arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
}
1211
1212 #if NBIO > 0
1213 static void
1214 arc_create_sensors(void *arg)
1215 {
1216 struct arc_softc *sc = arg;
1217 struct bioc_inq bi;
1218 struct bioc_vol bv;
1219 int i;
1220
1221 memset(&bi, 0, sizeof(bi));
1222 if (arc_bio_inq(sc, &bi) != 0) {
1223 aprint_error("%s: unable to query firmware for sensor info\n",
1224 device_xname(&sc->sc_dev));
1225 kthread_exit(0);
1226 }
1227
1228 sc->sc_nsensors = bi.bi_novol;
1229 /*
1230 * There's no point to continue if there are no drives connected...
1231 */
1232 if (!sc->sc_nsensors)
1233 kthread_exit(0);
1234
1235 sc->sc_sme = sysmon_envsys_create();
1236 sc->sc_sensors = malloc(sizeof(envsys_data_t) * sc->sc_nsensors,
1237 M_DEVBUF, M_WAITOK | M_ZERO);
1238
1239 for (i = 0; i < sc->sc_nsensors; i++) {
1240 memset(&bv, 0, sizeof(bv));
1241 bv.bv_volid = i;
1242 if (arc_bio_vol(sc, &bv) != 0)
1243 goto bad;
1244
1245 sc->sc_sensors[i].units = ENVSYS_DRIVE;
1246 sc->sc_sensors[i].monitor = true;
1247 sc->sc_sensors[i].flags = ENVSYS_FMONSTCHANGED;
1248 strlcpy(sc->sc_sensors[i].desc, bv.bv_dev,
1249 sizeof(sc->sc_sensors[i].desc));
1250 if (sysmon_envsys_sensor_attach(sc->sc_sme, &sc->sc_sensors[i]))
1251 goto bad;
1252 }
1253
1254 sc->sc_sme->sme_name = device_xname(&sc->sc_dev);
1255 sc->sc_sme->sme_cookie = sc;
1256 sc->sc_sme->sme_refresh = arc_refresh_sensors;
1257 if (sysmon_envsys_register(sc->sc_sme)) {
1258 aprint_debug("%s: unable to register with sysmon\n",
1259 device_xname(&sc->sc_dev));
1260 goto bad;
1261 }
1262 kthread_exit(0);
1263
1264 bad:
1265 free(sc->sc_sensors, M_DEVBUF);
1266 sysmon_envsys_destroy(sc->sc_sme);
1267 kthread_exit(0);
1268 }
1269
1270 static void
1271 arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
1272 {
1273 struct arc_softc *sc = sme->sme_cookie;
1274 struct bioc_vol bv;
1275
1276 memset(&bv, 0, sizeof(bv));
1277 bv.bv_volid = edata->sensor;
1278
1279 if (arc_bio_vol(sc, &bv)) {
1280 edata->value_cur = ENVSYS_DRIVE_EMPTY;
1281 edata->state = ENVSYS_SINVALID;
1282 return;
1283 }
1284
1285 switch (bv.bv_status) {
1286 case BIOC_SVOFFLINE:
1287 edata->value_cur = ENVSYS_DRIVE_FAIL;
1288 edata->state = ENVSYS_SCRITICAL;
1289 break;
1290 case BIOC_SVDEGRADED:
1291 edata->value_cur = ENVSYS_DRIVE_PFAIL;
1292 edata->state = ENVSYS_SCRITICAL;
1293 break;
1294 case BIOC_SVBUILDING:
1295 edata->value_cur = ENVSYS_DRIVE_REBUILD;
1296 edata->state = ENVSYS_SVALID;
1297 break;
1298 case BIOC_SVSCRUB:
1299 case BIOC_SVONLINE:
1300 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1301 edata->state = ENVSYS_SVALID;
1302 break;
1303 case BIOC_SVINVALID:
1304 /* FALLTRHOUGH */
1305 default:
1306 edata->value_cur = ENVSYS_DRIVE_EMPTY; /* unknown state */
1307 edata->state = ENVSYS_SINVALID;
1308 break;
1309 }
1310 }
1311 #endif /* NBIO > 0 */
1312
/*
 * Read the 32-bit register at offset r.  A read barrier is issued
 * first so the access observes the device's current state rather than
 * a prefetched/reordered value.  Must be called with the adapter
 * mutex held.
 */
uint32_t
arc_read(struct arc_softc *sc, bus_size_t r)
{
	uint32_t v;

	KASSERT(mutex_owned(&sc->sc_mutex));

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
	    device_xname(&sc->sc_dev), r, v);

	return v;
}
1329
/*
 * Read len bytes (len >> 2 32-bit words; len is assumed to be a
 * multiple of 4) from the register window starting at r into buf,
 * preceded by a read barrier over the whole range.
 * NOTE(review): unlike arc_read(), no mutex-held assertion is made
 * here; callers appear to invoke this while talking to the firmware
 * under arc_lock() — confirm before relying on it elsewhere.
 */
void
arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (uint32_t *)buf, len >> 2);
}
1338
/*
 * Write v to the 32-bit register at offset r, followed by a write
 * barrier so the store is pushed out to the device before any
 * subsequent access.  Must be called with the adapter mutex held.
 */
void
arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
	    device_xname(&sc->sc_dev), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1351
/*
 * Write len bytes (len >> 2 32-bit words; len is assumed to be a
 * multiple of 4) from buf into the register window starting at r,
 * followed by a write barrier over the whole range.
 * NOTE(review): as with arc_read_region(), no mutex-held assertion is
 * made here — callers appear to hold the adapter lock; confirm before
 * relying on it elsewhere.
 */
void
arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (const uint32_t *)buf, len >> 2);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_WRITE);
}
1360
1361 int
1362 arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1363 uint32_t target)
1364 {
1365 int i;
1366
1367 KASSERT(mutex_owned(&sc->sc_mutex));
1368
1369 DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
1370 device_xname(&sc->sc_dev), r, mask, target);
1371
1372 for (i = 0; i < 10000; i++) {
1373 if ((arc_read(sc, r) & mask) == target)
1374 return 0;
1375 delay(1000);
1376 }
1377
1378 return 1;
1379 }
1380
1381 int
1382 arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1383 uint32_t target)
1384 {
1385 int i;
1386
1387 DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
1388 device_xname(&sc->sc_dev), r, mask, target);
1389
1390 for (i = 0; i < 10000; i++) {
1391 if ((arc_read(sc, r) & mask) != target)
1392 return 0;
1393 delay(1000);
1394 }
1395
1396 return 1;
1397 }
1398
/*
 * Post message m to the firmware via inbound message register 0, wait
 * (via arc_wait_eq(), up to ~10s) for the firmware to raise the MSG0
 * bit in the interrupt status register, then acknowledge that bit.
 * Returns 0 on success, 1 if the firmware did not respond in time.
 * Must be called with the adapter mutex held.
 */
int
arc_msg0(struct arc_softc *sc, uint32_t m)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	/* post message */
	arc_write(sc, ARC_REG_INB_MSG0, m);
	/* wait for the fw to do it */
	if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
	    ARC_REG_INTRSTAT_MSG0) != 0)
		return 1;

	/* ack it */
	arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);

	return 0;
}
1416
/*
 * Allocate, map, and load a single-segment, page-aligned, zeroed DMA
 * buffer of the given size, wrapped in an arc_dmamem descriptor.
 * Returns the descriptor, or NULL on any failure.  Uses the classic
 * goto-unwind pattern: each failure label releases exactly the
 * resources acquired before it, in reverse order.
 * Freed with arc_dmamem_free().
 */
struct arc_dmamem *
arc_dmamem_alloc(struct arc_softc *sc, size_t size)
{
	struct arc_dmamem *adm;
	int nsegs;

	adm = malloc(sizeof(*adm), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (adm == NULL)
		return NULL;

	adm->adm_size = size;

	/* one segment covering the whole buffer */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
		goto admfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
	    &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(adm->adm_kva, 0, size);

	return adm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
admfree:
	free(adm, M_DEVBUF);

	return NULL;
}
1460
/*
 * Tear down a DMA buffer created by arc_dmamem_alloc(): unload and
 * unmap it, release the backing memory and the map, then free the
 * descriptor itself — the exact reverse of the allocation order.
 */
void
arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
{
	bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
	free(adm, M_DEVBUF);
}
1470
1471 int
1472 arc_alloc_ccbs(struct arc_softc *sc)
1473 {
1474 struct arc_ccb *ccb;
1475 uint8_t *cmd;
1476 int i;
1477
1478 TAILQ_INIT(&sc->sc_ccb_free);
1479
1480 sc->sc_ccbs = malloc(sizeof(struct arc_ccb) * sc->sc_req_count,
1481 M_DEVBUF, M_WAITOK|M_ZERO);
1482
1483 sc->sc_requests = arc_dmamem_alloc(sc,
1484 ARC_MAX_IOCMDLEN * sc->sc_req_count);
1485 if (sc->sc_requests == NULL) {
1486 aprint_error("%s: unable to allocate ccb dmamem\n",
1487 device_xname(&sc->sc_dev));
1488 goto free_ccbs;
1489 }
1490 cmd = ARC_DMA_KVA(sc->sc_requests);
1491
1492 for (i = 0; i < sc->sc_req_count; i++) {
1493 ccb = &sc->sc_ccbs[i];
1494
1495 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
1496 MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
1497 aprint_error("%s: unable to create dmamap for ccb %d\n",
1498 device_xname(&sc->sc_dev), i);
1499 goto free_maps;
1500 }
1501
1502 ccb->ccb_sc = sc;
1503 ccb->ccb_id = i;
1504 ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;
1505
1506 ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
1507 ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
1508 ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;
1509
1510 arc_put_ccb(sc, ccb);
1511 }
1512
1513 return 0;
1514
1515 free_maps:
1516 while ((ccb = arc_get_ccb(sc)) != NULL)
1517 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1518 arc_dmamem_free(sc, sc->sc_requests);
1519
1520 free_ccbs:
1521 free(sc->sc_ccbs, M_DEVBUF);
1522
1523 return 1;
1524 }
1525
1526 struct arc_ccb *
1527 arc_get_ccb(struct arc_softc *sc)
1528 {
1529 struct arc_ccb *ccb;
1530
1531 mutex_enter(&sc->sc_mutex);
1532 ccb = TAILQ_FIRST(&sc->sc_ccb_free);
1533 if (ccb != NULL)
1534 TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
1535 mutex_exit(&sc->sc_mutex);
1536
1537 return ccb;
1538 }
1539
1540 void
1541 arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
1542 {
1543 mutex_enter(&sc->sc_mutex);
1544 ccb->ccb_xs = NULL;
1545 memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
1546 TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
1547 mutex_exit(&sc->sc_mutex);
1548 }
1549