arcmsr.c revision 1.7 1 /* $NetBSD: arcmsr.c,v 1.7 2007/12/07 08:50:36 xtraeme Exp $ */
2 /* $OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */
3
4 /*
5 * Copyright (c) 2006 David Gwynne <dlg (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include "bio.h"
21
22 #include <sys/cdefs.h>
23 __KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.7 2007/12/07 08:50:36 xtraeme Exp $");
24
25 #include <sys/param.h>
26 #include <sys/buf.h>
27 #include <sys/kernel.h>
28 #include <sys/malloc.h>
29 #include <sys/device.h>
30 #include <sys/kmem.h>
31 #include <sys/kthread.h>
32 #include <sys/mutex.h>
33 #include <sys/condvar.h>
34 #include <sys/rwlock.h>
35
36 #if NBIO > 0
37 #include <sys/ioctl.h>
38 #include <dev/biovar.h>
39 #endif
40
41 #include <dev/pci/pcireg.h>
42 #include <dev/pci/pcivar.h>
43 #include <dev/pci/pcidevs.h>
44
45 #include <dev/scsipi/scsipi_all.h>
46 #include <dev/scsipi/scsi_all.h>
47 #include <dev/scsipi/scsiconf.h>
48
49 #include <dev/sysmon/sysmonvar.h>
50
51 #include <sys/bus.h>
52
53 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
54
55 #include <dev/pci/arcmsrvar.h>
56
57 /* #define ARC_DEBUG */
58 #ifdef ARC_DEBUG
59 #define ARC_D_INIT (1<<0)
60 #define ARC_D_RW (1<<1)
61 #define ARC_D_DB (1<<2)
62
63 int arcdebug = 0;
64
65 #define DPRINTF(p...) do { if (arcdebug) printf(p); } while (0)
66 #define DNPRINTF(n, p...) do { if ((n) & arcdebug) printf(p); } while (0)
67
68 #else
69 #define DPRINTF(p...) /* p */
70 #define DNPRINTF(n, p...) /* n, p */
71 #endif
72
73 /*
74 * the fw header must always equal this.
75 */
76 static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };
77
78 /*
79 * autoconf(9) glue.
80 */
81 static int arc_match(device_t, struct cfdata *, void *);
82 static void arc_attach(device_t, device_t, void *);
83 static int arc_detach(device_t, int);
84 static void arc_shutdown(void *);
85 static int arc_intr(void *);
86 static void arc_minphys(struct buf *);
87
88 CFATTACH_DECL(arcmsr, sizeof(struct arc_softc),
89 arc_match, arc_attach, arc_detach, NULL);
90
91 /*
92 * bio(4) and sysmon_envsys(9) glue.
93 */
94 #if NBIO > 0
95 static int arc_bioctl(struct device *, u_long, void *);
96 static int arc_bio_inq(struct arc_softc *, struct bioc_inq *);
97 static int arc_bio_vol(struct arc_softc *, struct bioc_vol *);
98 static int arc_bio_disk(struct arc_softc *, struct bioc_disk *);
99 static int arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
100 static int arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
101 static int arc_bio_getvol(struct arc_softc *, int,
102 struct arc_fw_volinfo *);
103 static void arc_create_sensors(void *);
104 static void arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
105 #endif
106
107 static int
108 arc_match(device_t parent, struct cfdata *match, void *aux)
109 {
110 struct pci_attach_args *pa = aux;
111
112 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
113 switch (PCI_PRODUCT(pa->pa_id)) {
114 case PCI_PRODUCT_ARECA_ARC1110:
115 case PCI_PRODUCT_ARECA_ARC1120:
116 case PCI_PRODUCT_ARECA_ARC1130:
117 case PCI_PRODUCT_ARECA_ARC1160:
118 case PCI_PRODUCT_ARECA_ARC1170:
119 case PCI_PRODUCT_ARECA_ARC1200:
120 case PCI_PRODUCT_ARECA_ARC1202:
121 case PCI_PRODUCT_ARECA_ARC1210:
122 case PCI_PRODUCT_ARECA_ARC1220:
123 case PCI_PRODUCT_ARECA_ARC1230:
124 case PCI_PRODUCT_ARECA_ARC1260:
125 case PCI_PRODUCT_ARECA_ARC1270:
126 case PCI_PRODUCT_ARECA_ARC1280:
127 case PCI_PRODUCT_ARECA_ARC1380:
128 case PCI_PRODUCT_ARECA_ARC1381:
129 case PCI_PRODUCT_ARECA_ARC1680:
130 case PCI_PRODUCT_ARECA_ARC1681:
131 return 1;
132 default:
133 break;
134 }
135 }
136
137 return 0;
138 }
139
static void
arc_attach(device_t parent, device_t self, void *aux)
{
	struct arc_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_chan;

	/* synchronization state for the firmware message interface */
	sc->sc_talking = 0;
	rw_init(&sc->sc_rwlock);
	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_condvar, "arcdb");

	if (arc_map_pci_resources(sc, pa) != 0) {
		/* error message printed by arc_map_pci_resources */
		return;
	}

	/* also starts background rebuild and reads sc_req_count */
	if (arc_query_firmware(sc) != 0) {
		/* error message printed by arc_query_firmware */
		goto unmap_pci;
	}

	if (arc_alloc_ccbs(sc) != 0) {
		/* error message printed by arc_alloc_ccbs */
		goto unmap_pci;
	}

	sc->sc_shutdownhook = shutdownhook_establish(arc_shutdown, sc);
	if (sc->sc_shutdownhook == NULL)
		panic("unable to establish arc powerhook");

	/* set up the scsipi(9) adapter: one channel, split openings per target */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = self;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_minphys = arc_minphys;
	adapt->adapt_request = arc_scsi_cmd;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_nluns = ARC_MAX_LUN;
	chan->chan_ntargets = ARC_MAX_TARGET;
	chan->chan_id = ARC_MAX_TARGET;
	chan->chan_channel = 0;
	/* virtual bus, no need to wait for devices to settle */
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	(void)config_found(self, &sc->sc_chan, scsiprint);

	/* enable interrupts */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));

#if NBIO > 0
	/*
	 * Register the driver to bio(4) and setup the sensors.
	 */
	if (bio_register(self, arc_bioctl) != 0)
		panic("%s: bioctl registration failed\n", device_xname(self));

	/*
	 * you need to talk to the firmware to get volume info. our firmware
	 * interface relies on being able to sleep, so we need to use a thread
	 * to do the work.
	 */
	if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
		panic("%s: unable to create a kernel thread for sensors\n",
		    device_xname(self));
#endif

	return;

unmap_pci:
	arc_unmap_pci_resources(sc);
}
218
219 static int
220 arc_detach(device_t self, int flags)
221 {
222 struct arc_softc *sc = device_private(self);
223
224 shutdownhook_disestablish(sc->sc_shutdownhook);
225
226 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
227 aprint_error("%s: timeout waiting to stop bg rebuild\n",
228 device_xname(&sc->sc_dev));
229
230 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
231 aprint_error("%s: timeout waiting to flush cache\n",
232 device_xname(&sc->sc_dev));
233
234 return 0;
235 }
236
237 static void
238 arc_shutdown(void *xsc)
239 {
240 struct arc_softc *sc = xsc;
241
242 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
243 aprint_error("%s: timeout waiting to stop bg rebuild\n",
244 device_xname(&sc->sc_dev));
245
246 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
247 aprint_error("%s: timeout waiting to flush cache\n",
248 device_xname(&sc->sc_dev));
249 }
250
251 static void
252 arc_minphys(struct buf *bp)
253 {
254 if (bp->b_bcount > MAXPHYS)
255 bp->b_bcount = MAXPHYS;
256 minphys(bp);
257 }
258
static int
arc_intr(void *arg)
{
	struct arc_softc *sc = arg;
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg, intrstat;

	mutex_spin_enter(&sc->sc_mutex);
	intrstat = arc_read(sc, ARC_REG_INTRSTAT);
	if (intrstat == 0x0) {
		/* not our interrupt */
		mutex_spin_exit(&sc->sc_mutex);
		return 0;
	}

	/* ack only the sources we handle */
	intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
	arc_write(sc, ARC_REG_INTRSTAT, intrstat);

	if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
		if (sc->sc_talking) {
			/* if an ioctl is talking, wake it up */
			arc_write(sc, ARC_REG_INTRMASK,
			    ~ARC_REG_INTRMASK_POSTQUEUE);
			cv_broadcast(&sc->sc_condvar);
		} else {
			/* otherwise drop it: ack and hand the buffer back */
			reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
			arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
			if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
				arc_write(sc, ARC_REG_INB_DOORBELL,
				    ARC_REG_INB_DOORBELL_READ_OK);
		}
	}
	mutex_spin_exit(&sc->sc_mutex);

	/* drain the reply queue; 0xffffffff means empty */
	while ((reg = arc_pop(sc)) != 0xffffffff) {
		/* map the posted bus address back to the command frame */
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		/*
		 * NOTE(review): PREREAD|PREWRITE here mirrors the original
		 * OpenBSD code; POSTREAD|POSTWRITE would be the conventional
		 * flags when reading a completed frame back — confirm.
		 */
		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	}

	return 1;
}
310
/*
 * scsipi(9) request entry point: build an Areca command frame for the
 * transfer and post it to the controller.
 */
void
arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct arc_softc *sc = device_private(adapt->adapt_dev);
	struct arc_ccb *ccb;
	struct arc_msg_scsicmd *cmd;
	uint32_t reg;
	uint8_t target;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* Not supported. */
		return;
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	mutex_spin_enter(&sc->sc_mutex);

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	if (xs->cmdlen > ARC_MSG_CDBLEN) {
		/* CDB too long for the firmware frame: fake ILLEGAL REQUEST */
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.scsi_sense.asc = 0x20;
		xs->error = XS_SENSE;
		xs->status = SCSI_CHECK;
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	ccb = arc_get_ccb(sc);
	if (ccb == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	/* map the data buffer and fill the SGL */
	if (arc_load_xs(ccb) != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		arc_put_ccb(sc, ccb);
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	cmd = &ccb->ccb_cmd->cmd;
	reg = ccb->ccb_cmd_post;

	/* bus is always 0 */
	cmd->target = target;
	cmd->lun = periph->periph_lun;
	cmd->function = 1; /* XXX magic number */

	cmd->cdb_len = xs->cmdlen;
	cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
	/*
	 * NOTE(review): cmd->flags is only assigned on DATA_OUT; a read
	 * following a write in the same (reused) frame relies on the frame
	 * being cleared elsewhere — confirm.
	 */
	if (xs->xs_control & XS_CTL_DATA_OUT)
		cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
	if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
		/* large SGL needs the big frame format */
		cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
		reg |= ARC_REG_POST_QUEUE_BIGFRAME;
	}

	/* context comes back in the reply so we can find the ccb again */
	cmd->context = htole32(ccb->ccb_id);
	cmd->data_len = htole32(xs->datalen);

	memcpy(cmd->cdb, xs->cmd, xs->cmdlen);

	/* we've built the command, let's put it on the hw */
	bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	arc_push(sc, reg);
	if (xs->xs_control & XS_CTL_POLL) {
		/* polled commands spin for completion in arc_complete */
		if (arc_complete(sc, ccb, xs->timeout) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			mutex_spin_exit(&sc->sc_mutex);
			scsipi_done(xs);
			return;
		}
	}

	mutex_spin_exit(&sc->sc_mutex);
}
409
/*
 * Load the transfer's data buffer into the ccb's dmamap and translate the
 * segments into the firmware's 64-bit SGL format.  Returns 0 on success,
 * 1 if the dmamap load failed.
 */
int
arc_load_xs(struct arc_ccb *ccb)
{
	struct arc_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
	uint64_t addr;
	int i, error;

	/* nothing to map for dataless commands */
	if (xs->datalen == 0)
		return 0;

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error != 0) {
		aprint_error("%s: error %d loading dmamap\n",
		    device_xname(&sc->sc_dev), error);
		return 1;
	}

	/* one 64-bit SGL entry per DMA segment */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sge = &sgl[i];

		sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
		addr = dmap->dm_segs[i].ds_addr;
		sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
		sge->sg_lo_addr = htole32((uint32_t)addr);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
448
/*
 * Completion path for a posted command: unload the data map, translate the
 * firmware status into scsipi status/error, release the ccb and finish
 * the transfer.  `reg' is the raw reply-queue word for this command.
 */
void
arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct arc_msg_scsicmd *cmd;

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	/* timeout_del */
	xs->status |= XS_STS_DONE;

	if (reg & ARC_REG_REPLY_QUEUE_ERR) {
		/* firmware reported an error; inspect the frame's status */
		cmd = &ccb->ccb_cmd->cmd;

		switch (cmd->status) {
		case ARC_MSG_STATUS_SELTIMEOUT:
		case ARC_MSG_STATUS_ABORTED:
		case ARC_MSG_STATUS_INIT_FAIL:
			/* treat all three as "device not there" */
			xs->status = SCSI_OK;
			xs->error = XS_SELTIMEOUT;
			break;

		case SCSI_CHECK:
			/* copy firmware-supplied sense data back to scsipi */
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, cmd->sense_data,
			    min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | 0x70;
			xs->status = SCSI_CHECK;
			xs->error = XS_SENSE;
			xs->resid = 0;
			break;

		default:
			/* unknown device status */
			xs->error = XS_BUSY; /* try again later? */
			xs->status = SCSI_BUSY;
			break;
		}
	} else {
		xs->status = SCSI_OK;
		xs->error = XS_NOERROR;
		xs->resid = 0;
	}

	arc_put_ccb(sc, ccb);
	scsipi_done(xs);
}
503
/*
 * Poll the reply queue until `nccb' completes or `timeout' (in ms)
 * expires.  Completes any other commands popped along the way.
 * Returns 0 on completion, 1 on timeout.
 * NOTE(review): appears to run with sc_mutex held by the caller
 * (arc_scsi_cmd polled path) and busy-waits via delay() — confirm.
 */
int
arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
{
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg;

	do {
		reg = arc_pop(sc);
		if (reg == 0xffffffff) {
			/* queue empty: burn 1ms and retry until timeout */
			if (timeout-- == 0)
				return 1;

			delay(1000);
			continue;
		}

		/* translate the posted bus address back to the frame */
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	} while (nccb != ccb);

	return 0;
}
536
/*
 * Map the controller's register BAR and hook up the interrupt.
 * Returns 0 on success, 1 on failure (with the BAR unmapped again).
 */
int
arc_map_pci_resources(struct arc_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t memtype;
	pci_intr_handle_t ih;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
	if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
		aprint_error(": unable to map system interface register\n");
		return 1;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error(": unable to map interrupt\n");
		goto unmap;
	}

	/* interrupt handler runs at IPL_BIO */
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    arc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error(": unable to map interrupt [2]\n");
		goto unmap;
	}
	aprint_normal(": interrupting at %s\n",
	    pci_intr_string(pa->pa_pc, ih));

	return 0;

unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
	return 1;
}
575
/*
 * Undo arc_map_pci_resources: interrupt first, then the register BAR.
 */
void
arc_unmap_pci_resources(struct arc_softc *sc)
{
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;	/* mark as unmapped */
}
583
/*
 * Wait for the firmware to come up, fetch its configuration block,
 * start background rebuild, and sanity-check/report what we found.
 * Sets sc_req_count.  Returns 0 on success, 1 on any failure.
 */
int
arc_query_firmware(struct arc_softc *sc)
{
	struct arc_msg_firmware_info fwinfo;
	char string[81]; /* sizeof(vendor)*2+1 */

	if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
	    ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
		aprint_debug("%s: timeout waiting for firmware ok\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
		aprint_debug("%s: timeout waiting for get config\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
		aprint_debug("%s: timeout waiting to start bg rebuild\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	/* config block left in the message buffer by GET_CONFIG */
	arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));

	DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
	    device_xname(&sc->sc_dev), htole32(fwinfo.signature));

	/*
	 * fwinfo fields are little-endian; htole32 is its own inverse so
	 * it doubles as le32toh here.
	 */
	if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
		aprint_error("%s: invalid firmware info from iop\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.request_len));
	DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.queue_len));
	DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.sdram_size));
	DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.sata_ports));

	/* `string' is reused for vendor, model and fw_version in turn */
	scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
	DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
	    device_xname(&sc->sc_dev), string);

	scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));

	aprint_normal("%s: Areca %s Host Adapter RAID controller\n",
	    device_xname(&sc->sc_dev), string);

	scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
	DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
	    device_xname(&sc->sc_dev), string);

	/* our frame layout must match what the firmware expects */
	if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
		aprint_error("%s: unexpected request frame size (%d != %d)\n",
		    device_xname(&sc->sc_dev),
		    htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
		return 1;
	}

	sc->sc_req_count = htole32(fwinfo.queue_len);

	aprint_normal("%s: %d ports, %dMB SDRAM, firmware <%s>\n",
	    device_xname(&sc->sc_dev), htole32(fwinfo.sata_ports),
	    htole32(fwinfo.sdram_size), string);

	return 0;
}
661
662 #if NBIO > 0
663 static int
664 arc_bioctl(struct device *self, u_long cmd, void *addr)
665 {
666 struct arc_softc *sc = device_private(self);
667 int error = 0;
668
669 switch (cmd) {
670 case BIOCINQ:
671 error = arc_bio_inq(sc, (struct bioc_inq *)addr);
672 break;
673
674 case BIOCVOL:
675 error = arc_bio_vol(sc, (struct bioc_vol *)addr);
676 break;
677
678 case BIOCDISK:
679 error = arc_bio_disk(sc, (struct bioc_disk *)addr);
680 break;
681
682 case BIOCALARM:
683 error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
684 break;
685
686 default:
687 error = ENOTTY;
688 break;
689 }
690
691 return error;
692 }
693
694 static int
695 arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
696 {
697 uint8_t request[2], reply[1];
698 size_t len;
699 int error = 0;
700
701 switch (ba->ba_opcode) {
702 case BIOC_SAENABLE:
703 case BIOC_SADISABLE:
704 request[0] = ARC_FW_SET_ALARM;
705 request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
706 ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
707 len = sizeof(request);
708
709 break;
710
711 case BIOC_SASILENCE:
712 request[0] = ARC_FW_MUTE_ALARM;
713 len = 1;
714
715 break;
716
717 case BIOC_GASTATUS:
718 /* system info is too big/ugly to deal with here */
719 return arc_bio_alarm_state(sc, ba);
720
721 default:
722 return EOPNOTSUPP;
723 }
724
725 error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
726 if (error != 0)
727 return error;
728
729 switch (reply[0]) {
730 case ARC_FW_CMD_OK:
731 return 0;
732 case ARC_FW_CMD_PASS_REQD:
733 return EPERM;
734 default:
735 return EIO;
736 }
737 }
738
739 static int
740 arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
741 {
742 uint8_t request = ARC_FW_SYSINFO;
743 struct arc_fw_sysinfo *sysinfo;
744 int error = 0;
745
746 sysinfo = kmem_zalloc(sizeof(struct arc_fw_sysinfo), KM_SLEEP);
747
748 request = ARC_FW_SYSINFO;
749 error = arc_msgbuf(sc, &request, sizeof(request),
750 sysinfo, sizeof(struct arc_fw_sysinfo));
751
752 if (error != 0)
753 goto out;
754
755 ba->ba_status = sysinfo->alarm;
756
757 out:
758 kmem_free(sysinfo, sizeof(*sysinfo));
759 return error;
760 }
761
762
/*
 * Handle BIOCINQ: count the usable (non-passthru, non-empty) volumes and
 * report them together with the device name.
 */
static int
arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
{
	uint8_t request[2];
	struct arc_fw_sysinfo *sysinfo;
	struct arc_fw_volinfo *volinfo;
	int maxvols, nvols = 0, i;
	int error = 0;

	sysinfo = kmem_zalloc(sizeof(struct arc_fw_sysinfo), KM_SLEEP);
	volinfo = kmem_zalloc(sizeof(struct arc_fw_volinfo), KM_SLEEP);

	request[0] = ARC_FW_SYSINFO;
	error = arc_msgbuf(sc, request, 1, sysinfo,
	    sizeof(struct arc_fw_sysinfo));
	if (error != 0)
		goto out;

	maxvols = sysinfo->max_volume_set;

	/* probe every possible volume slot and count the real ones */
	request[0] = ARC_FW_VOLINFO;
	for (i = 0; i < maxvols; i++) {
		request[1] = i;
		error = arc_msgbuf(sc, request, sizeof(request), volinfo,
		    sizeof(struct arc_fw_volinfo));
		if (error != 0)
			goto out;

		/*
		 * I can't find an easy way to see if the volume exists or not
		 * except to say that if it has no capacity then it isn't there.
		 * Ignore passthru volumes, bioc_vol doesn't understand them.
		 */
		if ((volinfo->capacity != 0 || volinfo->capacity2 != 0) &&
		    volinfo->raid_level != ARC_FW_VOL_RAIDLEVEL_PASSTHRU)
			nvols++;
	}

	strlcpy(bi->bi_dev, device_xname(&sc->sc_dev), sizeof(bi->bi_dev));
	bi->bi_novol = nvols;
out:
	kmem_free(volinfo, sizeof(*volinfo));
	kmem_free(sysinfo, sizeof(*sysinfo));
	return error;
}
808
/*
 * Find the `vol'th usable volume (same filtering as arc_bio_inq) and
 * leave its firmware volinfo in caller-supplied `volinfo'.
 * Returns ENODEV if no such volume exists.
 */
static int
arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
{
	uint8_t request[2];
	struct arc_fw_sysinfo *sysinfo;
	int error = 0;
	int maxvols, nvols = 0, i;

	sysinfo = kmem_zalloc(sizeof(struct arc_fw_sysinfo), KM_SLEEP);

	request[0] = ARC_FW_SYSINFO;

	error = arc_msgbuf(sc, request, 1, sysinfo,
	    sizeof(struct arc_fw_sysinfo));
	if (error != 0)
		goto out;

	maxvols = sysinfo->max_volume_set;

	/* walk the slots, counting only real, non-passthru volumes */
	request[0] = ARC_FW_VOLINFO;
	for (i = 0; i < maxvols; i++) {
		request[1] = i;
		error = arc_msgbuf(sc, request, sizeof(request), volinfo,
		    sizeof(struct arc_fw_volinfo));
		if (error != 0)
			goto out;

		if ((volinfo->capacity == 0 && volinfo->capacity2 == 0) ||
		    volinfo->raid_level == ARC_FW_VOL_RAIDLEVEL_PASSTHRU)
			continue;

		/* volinfo now holds the match; stop before incrementing */
		if (nvols == vol)
			break;

		nvols++;
	}

	/* re-check: loop may have ended without finding slot `vol' */
	if (nvols != vol ||
	    (volinfo->capacity == 0 && volinfo->capacity2 == 0) ||
	    volinfo->raid_level == ARC_FW_VOL_RAIDLEVEL_PASSTHRU) {
		error = ENODEV;
		goto out;
	}

out:
	kmem_free(sysinfo, sizeof(*sysinfo));
	return error;
}
857
/*
 * Handle BIOCVOL: translate a firmware volinfo into bioc_vol fields
 * (status, progress, size, RAID level, member count, name).
 */
static int
arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
{
	struct arc_fw_volinfo *volinfo;
	uint64_t blocks;
	uint32_t status;
	int error = 0;

	volinfo = kmem_zalloc(sizeof(struct arc_fw_volinfo), KM_SLEEP);

	error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
	if (error != 0)
		goto out;

	bv->bv_percent = -1;
	bv->bv_seconds = 0;

	/* htole32 doubles as le32toh: the field is little-endian */
	status = htole32(volinfo->volume_status);
	if (status == 0x0) {
		/* no activity: online unless some member has failed */
		if (htole32(volinfo->fail_mask) == 0x0)
			bv->bv_status = BIOC_SVONLINE;
		else
			bv->bv_status = BIOC_SVDEGRADED;
	} else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
		bv->bv_status = BIOC_SVDEGRADED;
	} else if (status & ARC_FW_VOL_STATUS_FAILED) {
		bv->bv_status = BIOC_SVOFFLINE;
	} else if (status & ARC_FW_VOL_STATUS_INITTING) {
		bv->bv_status = BIOC_SVBUILDING;
		/* firmware progress is in tenths of a percent */
		bv->bv_percent = htole32(volinfo->progress) / 10;
	} else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
		bv->bv_status = BIOC_SVREBUILD;
		bv->bv_percent = htole32(volinfo->progress) / 10;
	}

	/* capacity is a 64-bit block count split across two 32-bit fields */
	blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
	blocks += (uint64_t)htole32(volinfo->capacity);
	bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */

	switch (volinfo->raid_level) {
	case ARC_FW_VOL_RAIDLEVEL_0:
		bv->bv_level = 0;
		break;
	case ARC_FW_VOL_RAIDLEVEL_1:
		bv->bv_level = 1;
		break;
	case ARC_FW_VOL_RAIDLEVEL_3:
		bv->bv_level = 3;
		break;
	case ARC_FW_VOL_RAIDLEVEL_5:
		bv->bv_level = 5;
		break;
	case ARC_FW_VOL_RAIDLEVEL_6:
		bv->bv_level = 6;
		break;
	case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
	default:
		bv->bv_level = -1;
		break;
	}

	bv->bv_nodisk = volinfo->member_disks;
	strlcpy(bv->bv_dev, volinfo->set_name, sizeof(bv->bv_dev));

out:
	kmem_free(volinfo, sizeof(*volinfo));
	return error;
}
926
/*
 * Handle BIOCDISK: look up the raid set for the requested volume, then
 * the member disk, and fill in bioc_disk (position, status, size,
 * vendor/serial strings).
 */
static int
arc_bio_disk(struct arc_softc *sc, struct bioc_disk *bd)
{
	uint8_t request[2];
	struct arc_fw_volinfo *volinfo;
	struct arc_fw_raidinfo *raidinfo;
	struct arc_fw_diskinfo *diskinfo;
	int error = 0;
	uint64_t blocks;
	char model[81];
	char serial[41];
	char rev[17];

	volinfo = kmem_zalloc(sizeof(struct arc_fw_volinfo), KM_SLEEP);
	raidinfo = kmem_zalloc(sizeof(struct arc_fw_raidinfo), KM_SLEEP);
	diskinfo = kmem_zalloc(sizeof(struct arc_fw_diskinfo), KM_SLEEP);

	error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
	if (error != 0)
		goto out;

	request[0] = ARC_FW_RAIDINFO;
	request[1] = volinfo->raid_set_number;

	error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
	    sizeof(struct arc_fw_raidinfo));
	if (error != 0)
		goto out;

	/*
	 * NOTE(review): `>` allows diskid == member_devices through to the
	 * device_array index below; `>=` looks intended — confirm against
	 * the firmware's device_array sizing.
	 */
	if (bd->bd_diskid > raidinfo->member_devices) {
		error = ENODEV;
		goto out;
	}

	if (raidinfo->device_array[bd->bd_diskid] == 0xff) {
		/*
		 * the disk doesn't exist anymore. bio is too dumb to be
		 * able to display that, so put it on another bus
		 */
		bd->bd_channel = 1;
		bd->bd_target = 0;
		bd->bd_lun = 0;
		bd->bd_status = BIOC_SDOFFLINE;
		strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
		goto out;
	}

	request[0] = ARC_FW_DISKINFO;
	request[1] = raidinfo->device_array[bd->bd_diskid];
	error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
	    sizeof(struct arc_fw_diskinfo));
	if (error != 0)
		goto out;

#if 0
	bd->bd_channel = diskinfo->scsi_attr.channel;
	bd->bd_target = diskinfo->scsi_attr.target;
	bd->bd_lun = diskinfo->scsi_attr.lun;
#endif
	/*
	 * the firwmare doesnt seem to fill scsi_attr in, so fake it with
	 * the diskid.
	 */
	bd->bd_channel = 0;
	bd->bd_target = raidinfo->device_array[bd->bd_diskid];
	bd->bd_lun = 0;

	bd->bd_status = BIOC_SDONLINE;
	/* 64-bit block count split across two little-endian 32-bit fields */
	blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
	blocks += (uint64_t)htole32(diskinfo->capacity);
	bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */

	scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
	scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
	scsipi_strvis(rev, 17, diskinfo->firmware_rev,
	    sizeof(diskinfo->firmware_rev));

	snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
	strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));

out:
	kmem_free(diskinfo, sizeof(*diskinfo));
	kmem_free(raidinfo, sizeof(*raidinfo));
	kmem_free(volinfo, sizeof(*volinfo));
	return error;
}
1013 #endif /* NBIO > 0 */
1014
/*
 * Compute the firmware message checksum: the two length bytes plus every
 * payload byte, summed modulo 256.
 */
uint8_t
arc_msg_cksum(void *cmd, uint16_t len)
{
	const uint8_t *p = cmd;
	uint8_t sum;
	uint16_t i;

	sum = (uint8_t)(len >> 8) + (uint8_t)len;
	for (i = 0; i < len; i++)
		sum += p[i];

	return sum;
}
1028
1029
1030 int
1031 arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
1032 size_t rbuflen)
1033 {
1034 uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
1035 uint8_t *wbuf, *rbuf;
1036 int wlen, wdone = 0, rlen, rdone = 0;
1037 struct arc_fw_bufhdr *bufhdr;
1038 uint32_t reg, rwlen;
1039 int error = 0;
1040 #ifdef ARC_DEBUG
1041 int i;
1042 #endif
1043
1044 wbuf = rbuf = NULL;
1045
1046 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
1047 device_xname(&sc->sc_dev), wbuflen, rbuflen);
1048
1049 wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
1050 wbuf = kmem_alloc(wlen, KM_SLEEP);
1051
1052 rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
1053 rbuf = kmem_alloc(rlen, KM_SLEEP);
1054
1055 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
1056 device_xname(&sc->sc_dev), wlen, rlen);
1057
1058 bufhdr = (struct arc_fw_bufhdr *)wbuf;
1059 bufhdr->hdr = arc_fw_hdr;
1060 bufhdr->len = htole16(wbuflen);
1061 memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
1062 wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);
1063
1064 arc_lock(sc);
1065 if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0) {
1066 error = EBUSY;
1067 goto out;
1068 }
1069
1070 reg = ARC_REG_OUTB_DOORBELL_READ_OK;
1071
1072 do {
1073 if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
1074 memset(rwbuf, 0, sizeof(rwbuf));
1075 rwlen = (wlen - wdone) % sizeof(rwbuf);
1076 memcpy(rwbuf, &wbuf[wdone], rwlen);
1077
1078 #ifdef ARC_DEBUG
1079 if (arcdebug & ARC_D_DB) {
1080 printf("%s: write %d:",
1081 device_xname(&sc->sc_dev), rwlen);
1082 for (i = 0; i < rwlen; i++)
1083 printf(" 0x%02x", rwbuf[i]);
1084 printf("\n");
1085 }
1086 #endif
1087
1088 /* copy the chunk to the hw */
1089 arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
1090 arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
1091 sizeof(rwbuf));
1092
1093 /* say we have a buffer for the hw */
1094 arc_write(sc, ARC_REG_INB_DOORBELL,
1095 ARC_REG_INB_DOORBELL_WRITE_OK);
1096
1097 wdone += rwlen;
1098 }
1099
1100 while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
1101 arc_wait(sc);
1102 arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
1103
1104 DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
1105 device_xname(&sc->sc_dev), reg);
1106
1107 if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
1108 rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
1109 if (rwlen > sizeof(rwbuf)) {
1110 DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
1111 device_xname(&sc->sc_dev));
1112 error = EIO;
1113 goto out;
1114 }
1115
1116 arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
1117 sizeof(rwbuf));
1118
1119 arc_write(sc, ARC_REG_INB_DOORBELL,
1120 ARC_REG_INB_DOORBELL_READ_OK);
1121
1122 #ifdef ARC_DEBUG
1123 printf("%s: len: %d+%d=%d/%d\n",
1124 device_xname(&sc->sc_dev),
1125 rwlen, rdone, rwlen + rdone, rlen);
1126 if (arcdebug & ARC_D_DB) {
1127 printf("%s: read:",
1128 device_xname(&sc->sc_dev));
1129 for (i = 0; i < rwlen; i++)
1130 printf(" 0x%02x", rwbuf[i]);
1131 printf("\n");
1132 }
1133 #endif
1134
1135 if ((rdone + rwlen) > rlen) {
1136 DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
1137 device_xname(&sc->sc_dev));
1138 error = EIO;
1139 goto out;
1140 }
1141
1142 memcpy(&rbuf[rdone], rwbuf, rwlen);
1143 rdone += rwlen;
1144 }
1145 } while (rdone != rlen);
1146
1147 bufhdr = (struct arc_fw_bufhdr *)rbuf;
1148 if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
1149 bufhdr->len != htole16(rbuflen)) {
1150 DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
1151 device_xname(&sc->sc_dev));
1152 error = EIO;
1153 goto out;
1154 }
1155
1156 memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);
1157
1158 if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
1159 DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
1160 device_xname(&sc->sc_dev));
1161 error = EIO;
1162 goto out;
1163 }
1164
1165 out:
1166 arc_unlock(sc);
1167 kmem_free(wbuf, wlen);
1168 kmem_free(rbuf, rlen);
1169
1170 return error;
1171 }
1172
/*
 * Take exclusive ownership of the firmware communication channel.
 *
 * Acquires the softc rwlock as writer, then the spin mutex, and then
 * reprograms ARC_REG_INTRMASK so that only the post queue interrupt
 * remains enabled while we talk to the firmware (the doorbell is
 * instead polled via arc_wait()).  sc_talking tells the interrupt
 * handler that doorbell traffic belongs to this conversation.
 * Released by arc_unlock(); lock order here must mirror the reverse
 * order in arc_unlock().
 */
void
arc_lock(struct arc_softc *sc)
{
	rw_enter(&sc->sc_rwlock, RW_WRITER);
	mutex_spin_enter(&sc->sc_mutex);
	arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
	sc->sc_talking = 1;
}
1181
/*
 * Release the firmware communication channel taken by arc_lock().
 *
 * Clears sc_talking, re-enables both the post queue and doorbell
 * interrupts, and drops the locks in the reverse order they were
 * acquired.  Must be called with the spin mutex held (asserted).
 */
void
arc_unlock(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	sc->sc_talking = 0;
	/* unmask both postqueue and doorbell interrupts again */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	mutex_spin_exit(&sc->sc_mutex);
	rw_exit(&sc->sc_rwlock);
}
1193
/*
 * Sleep until the firmware rings the doorbell (or up to one second).
 *
 * Temporarily unmasks the doorbell interrupt so the interrupt handler
 * can cv_signal() us, then waits on the condvar with a 1*hz timeout.
 * On timeout (EWOULDBLOCK) the doorbell interrupt is masked again,
 * restoring the arc_lock() state.  Called with the spin mutex held
 * (asserted); cv_timedwait_sig() drops and retakes it internally.
 *
 * NOTE(review): if the wait is interrupted by a signal (EINTR/ERESTART)
 * the doorbell stays unmasked; arc_unlock() rewrites the mask anyway,
 * so this looks harmless — confirm against the interrupt handler.
 */
void
arc_wait(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	if (cv_timedwait_sig(&sc->sc_condvar, &sc->sc_mutex, hz) ==
	    EWOULDBLOCK)
		arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
}
1205
1206 #if NBIO > 0
/*
 * Kernel thread body: enumerate the controller's volumes and attach
 * one ENVSYS_DRIVE sensor per volume to sysmon envsys.
 *
 * Runs once at attach time (in thread context, because arc_bio_inq()
 * and arc_bio_vol() sleep while talking to the firmware) and then
 * exits via kthread_exit().  On any failure after allocation, the
 * sensor array and the sysmon state are torn down again.
 *
 * NOTE(review): the bad path frees sc_sensors and destroys sc_sme but
 * leaves both pointers dangling (and sc_nsensors non-zero) — harmless
 * only if detach never references them; confirm against the detach code.
 */
static void
arc_create_sensors(void *arg)
{
	struct arc_softc	*sc = arg;
	struct bioc_inq		bi;
	struct bioc_vol		bv;
	int			i;
	size_t			slen;

	memset(&bi, 0, sizeof(bi));
	if (arc_bio_inq(sc, &bi) != 0) {
		aprint_error("%s: unable to query firmware for sensor info\n",
		    device_xname(&sc->sc_dev));
		kthread_exit(0);
	}

	/* one sensor per volume reported by the firmware */
	sc->sc_nsensors = bi.bi_novol;
	/*
	 * There's no point to continue if there are no drives connected...
	 */
	if (!sc->sc_nsensors)
		kthread_exit(0);

	sc->sc_sme = sysmon_envsys_create();
	slen = sizeof(envsys_data_t) * sc->sc_nsensors;
	sc->sc_sensors = kmem_zalloc(slen, KM_SLEEP);

	for (i = 0; i < sc->sc_nsensors; i++) {
		memset(&bv, 0, sizeof(bv));
		bv.bv_volid = i;
		if (arc_bio_vol(sc, &bv) != 0)
			goto bad;

		/* monitor for drive state changes; named after the volume */
		sc->sc_sensors[i].units = ENVSYS_DRIVE;
		sc->sc_sensors[i].monitor = true;
		sc->sc_sensors[i].flags = ENVSYS_FMONSTCHANGED;
		strlcpy(sc->sc_sensors[i].desc, bv.bv_dev,
		    sizeof(sc->sc_sensors[i].desc));
		if (sysmon_envsys_sensor_attach(sc->sc_sme, &sc->sc_sensors[i]))
			goto bad;
	}

	sc->sc_sme->sme_name = device_xname(&sc->sc_dev);
	sc->sc_sme->sme_cookie = sc;
	sc->sc_sme->sme_refresh = arc_refresh_sensors;
	if (sysmon_envsys_register(sc->sc_sme)) {
		aprint_debug("%s: unable to register with sysmon\n",
		    device_xname(&sc->sc_dev));
		goto bad;
	}
	kthread_exit(0);

bad:
	/* undo the allocations made above before leaving the thread */
	kmem_free(sc->sc_sensors, slen);
	sysmon_envsys_destroy(sc->sc_sme);
	kthread_exit(0);
}
1264
1265 static void
1266 arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
1267 {
1268 struct arc_softc *sc = sme->sme_cookie;
1269 struct bioc_vol bv;
1270
1271 memset(&bv, 0, sizeof(bv));
1272 bv.bv_volid = edata->sensor;
1273
1274 if (arc_bio_vol(sc, &bv)) {
1275 edata->value_cur = ENVSYS_DRIVE_EMPTY;
1276 edata->state = ENVSYS_SINVALID;
1277 return;
1278 }
1279
1280 switch (bv.bv_status) {
1281 case BIOC_SVOFFLINE:
1282 edata->value_cur = ENVSYS_DRIVE_FAIL;
1283 edata->state = ENVSYS_SCRITICAL;
1284 break;
1285 case BIOC_SVDEGRADED:
1286 edata->value_cur = ENVSYS_DRIVE_PFAIL;
1287 edata->state = ENVSYS_SCRITICAL;
1288 break;
1289 case BIOC_SVBUILDING:
1290 edata->value_cur = ENVSYS_DRIVE_REBUILD;
1291 edata->state = ENVSYS_SVALID;
1292 break;
1293 case BIOC_SVSCRUB:
1294 case BIOC_SVONLINE:
1295 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1296 edata->state = ENVSYS_SVALID;
1297 break;
1298 case BIOC_SVINVALID:
1299 /* FALLTRHOUGH */
1300 default:
1301 edata->value_cur = ENVSYS_DRIVE_EMPTY; /* unknown state */
1302 edata->state = ENVSYS_SINVALID;
1303 break;
1304 }
1305 }
1306 #endif /* NBIO > 0 */
1307
1308 uint32_t
1309 arc_read(struct arc_softc *sc, bus_size_t r)
1310 {
1311 uint32_t v;
1312
1313 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1314 BUS_SPACE_BARRIER_READ);
1315 v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1316
1317 DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
1318 device_xname(&sc->sc_dev), r, v);
1319
1320 return v;
1321 }
1322
/*
 * Read len bytes of controller register space starting at offset r
 * into buf, as 32-bit words (len >> 2 words are transferred, so len
 * is assumed to be a multiple of 4 and buf 4-byte aligned — callers
 * pass sizeof of word-sized buffers).  A read barrier over the whole
 * region is issued first.
 */
void
arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (uint32_t *)buf, len >> 2);
}
1331
1332 void
1333 arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
1334 {
1335 DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
1336 device_xname(&sc->sc_dev), r, v);
1337
1338 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1339 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1340 BUS_SPACE_BARRIER_WRITE);
1341 }
1342
/*
 * Copy len bytes from buf into controller register space at offset r,
 * as 32-bit words (len >> 2 words, so len is assumed to be a multiple
 * of 4 and buf 4-byte aligned).  A write barrier over the region is
 * issued afterwards.
 */
void
arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (const uint32_t *)buf, len >> 2);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_WRITE);
}
1351
1352 int
1353 arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1354 uint32_t target)
1355 {
1356 int i;
1357
1358 DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
1359 device_xname(&sc->sc_dev), r, mask, target);
1360
1361 for (i = 0; i < 10000; i++) {
1362 if ((arc_read(sc, r) & mask) == target)
1363 return 0;
1364 delay(1000);
1365 }
1366
1367 return 1;
1368 }
1369
1370 int
1371 arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1372 uint32_t target)
1373 {
1374 int i;
1375
1376 DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
1377 device_xname(&sc->sc_dev), r, mask, target);
1378
1379 for (i = 0; i < 10000; i++) {
1380 if ((arc_read(sc, r) & mask) != target)
1381 return 0;
1382 delay(1000);
1383 }
1384
1385 return 1;
1386 }
1387
1388 int
1389 arc_msg0(struct arc_softc *sc, uint32_t m)
1390 {
1391 /* post message */
1392 arc_write(sc, ARC_REG_INB_MSG0, m);
1393 /* wait for the fw to do it */
1394 if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
1395 ARC_REG_INTRSTAT_MSG0) != 0)
1396 return 1;
1397
1398 /* ack it */
1399 arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);
1400
1401 return 0;
1402 }
1403
/*
 * Allocate a zeroed, DMA-safe memory region of the given size in a
 * single segment, together with a loaded DMA map for it.
 *
 * Returns the bookkeeping structure on success, or NULL on failure
 * (allocation is KM_NOSLEEP / BUS_DMA_NOWAIT throughout, so this may
 * be called from contexts that cannot sleep).  On failure every step
 * already taken is unwound via the goto chain below, in reverse order.
 * Freed with arc_dmamem_free().
 */
struct arc_dmamem *
arc_dmamem_alloc(struct arc_softc *sc, size_t size)
{
	struct arc_dmamem		*adm;
	int				nsegs;

	adm = kmem_zalloc(sizeof(*adm), KM_NOSLEEP);
	if (adm == NULL)
		return NULL;

	adm->adm_size = size;

	/* one contiguous segment, mappable up front */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
		goto admfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
	    &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	/* hand back a zeroed region */
	memset(adm->adm_kva, 0, size);

	return adm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
admfree:
	kmem_free(adm, sizeof(*adm));

	return NULL;
}
1447
/*
 * Release a region obtained from arc_dmamem_alloc(): unload the DMA
 * map, unmap and free the backing memory, destroy the map, and free
 * the bookkeeping structure — the exact reverse of the setup order.
 */
void
arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
{
	bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
	kmem_free(adm, sizeof(*adm));
}
1457
1458 int
1459 arc_alloc_ccbs(struct arc_softc *sc)
1460 {
1461 struct arc_ccb *ccb;
1462 uint8_t *cmd;
1463 int i;
1464 size_t ccbslen;
1465
1466 TAILQ_INIT(&sc->sc_ccb_free);
1467
1468 ccbslen = sizeof(struct arc_ccb) * sc->sc_req_count;
1469 sc->sc_ccbs = kmem_zalloc(ccbslen, KM_SLEEP);
1470
1471 sc->sc_requests = arc_dmamem_alloc(sc,
1472 ARC_MAX_IOCMDLEN * sc->sc_req_count);
1473 if (sc->sc_requests == NULL) {
1474 aprint_error("%s: unable to allocate ccb dmamem\n",
1475 device_xname(&sc->sc_dev));
1476 goto free_ccbs;
1477 }
1478 cmd = ARC_DMA_KVA(sc->sc_requests);
1479
1480 for (i = 0; i < sc->sc_req_count; i++) {
1481 ccb = &sc->sc_ccbs[i];
1482
1483 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
1484 MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
1485 aprint_error("%s: unable to create dmamap for ccb %d\n",
1486 device_xname(&sc->sc_dev), i);
1487 goto free_maps;
1488 }
1489
1490 ccb->ccb_sc = sc;
1491 ccb->ccb_id = i;
1492 ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;
1493
1494 ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
1495 ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
1496 ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;
1497
1498 arc_put_ccb(sc, ccb);
1499 }
1500
1501 return 0;
1502
1503 free_maps:
1504 while ((ccb = arc_get_ccb(sc)) != NULL)
1505 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1506 arc_dmamem_free(sc, sc->sc_requests);
1507
1508 free_ccbs:
1509 kmem_free(sc->sc_ccbs, ccbslen);
1510
1511 return 1;
1512 }
1513
1514 struct arc_ccb *
1515 arc_get_ccb(struct arc_softc *sc)
1516 {
1517 struct arc_ccb *ccb;
1518
1519 ccb = TAILQ_FIRST(&sc->sc_ccb_free);
1520 if (ccb != NULL)
1521 TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
1522
1523 return ccb;
1524 }
1525
1526 void
1527 arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
1528 {
1529 ccb->ccb_xs = NULL;
1530 memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
1531 TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
1532 }
1533