arcmsr.c revision 1.8 1 /* $NetBSD: arcmsr.c,v 1.8 2007/12/07 11:51:21 xtraeme Exp $ */
2 /* $OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */
3
4 /*
5 * Copyright (c) 2006 David Gwynne <dlg (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include "bio.h"
21
22 #include <sys/cdefs.h>
23 __KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.8 2007/12/07 11:51:21 xtraeme Exp $");
24
25 #include <sys/param.h>
26 #include <sys/buf.h>
27 #include <sys/kernel.h>
28 #include <sys/malloc.h>
29 #include <sys/device.h>
30 #include <sys/kmem.h>
31 #include <sys/kthread.h>
32 #include <sys/mutex.h>
33 #include <sys/condvar.h>
34 #include <sys/rwlock.h>
35
36 #if NBIO > 0
37 #include <sys/ioctl.h>
38 #include <dev/biovar.h>
39 #endif
40
41 #include <dev/pci/pcireg.h>
42 #include <dev/pci/pcivar.h>
43 #include <dev/pci/pcidevs.h>
44
45 #include <dev/scsipi/scsipi_all.h>
46 #include <dev/scsipi/scsi_all.h>
47 #include <dev/scsipi/scsiconf.h>
48
49 #include <dev/sysmon/sysmonvar.h>
50
51 #include <sys/bus.h>
52
53 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
54
55 #include <dev/pci/arcmsrvar.h>
56
57 /* #define ARC_DEBUG */
58 #ifdef ARC_DEBUG
59 #define ARC_D_INIT (1<<0)
60 #define ARC_D_RW (1<<1)
61 #define ARC_D_DB (1<<2)
62
63 int arcdebug = 0;
64
65 #define DPRINTF(p...) do { if (arcdebug) printf(p); } while (0)
66 #define DNPRINTF(n, p...) do { if ((n) & arcdebug) printf(p); } while (0)
67
68 #else
69 #define DPRINTF(p...) /* p */
70 #define DNPRINTF(n, p...) /* n, p */
71 #endif
72
73 /*
74 * the fw header must always equal this.
75 */
76 static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };
77
78 /*
79 * autoconf(9) glue.
80 */
81 static int arc_match(device_t, struct cfdata *, void *);
82 static void arc_attach(device_t, device_t, void *);
83 static int arc_detach(device_t, int);
84 static void arc_shutdown(void *);
85 static int arc_intr(void *);
86 static void arc_minphys(struct buf *);
87
88 CFATTACH_DECL(arcmsr, sizeof(struct arc_softc),
89 arc_match, arc_attach, arc_detach, NULL);
90
91 /*
92 * bio(4) and sysmon_envsys(9) glue.
93 */
94 #if NBIO > 0
95 static int arc_bioctl(struct device *, u_long, void *);
96 static int arc_bio_inq(struct arc_softc *, struct bioc_inq *);
97 static int arc_bio_vol(struct arc_softc *, struct bioc_vol *);
98 static int arc_bio_disk(struct arc_softc *, struct bioc_disk *);
99 static int arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
100 static int arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
101 static int arc_bio_getvol(struct arc_softc *, int,
102 struct arc_fw_volinfo *);
103 static void arc_create_sensors(void *);
104 static void arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
105 #endif
106
107 static int
108 arc_match(device_t parent, struct cfdata *match, void *aux)
109 {
110 struct pci_attach_args *pa = aux;
111
112 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
113 switch (PCI_PRODUCT(pa->pa_id)) {
114 case PCI_PRODUCT_ARECA_ARC1110:
115 case PCI_PRODUCT_ARECA_ARC1120:
116 case PCI_PRODUCT_ARECA_ARC1130:
117 case PCI_PRODUCT_ARECA_ARC1160:
118 case PCI_PRODUCT_ARECA_ARC1170:
119 case PCI_PRODUCT_ARECA_ARC1200:
120 case PCI_PRODUCT_ARECA_ARC1202:
121 case PCI_PRODUCT_ARECA_ARC1210:
122 case PCI_PRODUCT_ARECA_ARC1220:
123 case PCI_PRODUCT_ARECA_ARC1230:
124 case PCI_PRODUCT_ARECA_ARC1260:
125 case PCI_PRODUCT_ARECA_ARC1270:
126 case PCI_PRODUCT_ARECA_ARC1280:
127 case PCI_PRODUCT_ARECA_ARC1380:
128 case PCI_PRODUCT_ARECA_ARC1381:
129 case PCI_PRODUCT_ARECA_ARC1680:
130 case PCI_PRODUCT_ARECA_ARC1681:
131 return 1;
132 default:
133 break;
134 }
135 }
136
137 return 0;
138 }
139
/*
 * arc_attach: bring the controller up.
 *
 * Maps the PCI registers and interrupt, asks the firmware for its
 * request-queue geometry, allocates the ccb pool, hooks a shutdown
 * handler, attaches the scsipi bus and (with bio(4) configured)
 * registers the ioctl interface and spawns a thread to create the
 * environmental sensors.
 */
static void
arc_attach(device_t parent, device_t self, void *aux)
{
	struct arc_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_chan;

	/* firmware-transaction state used by arc_msgbuf()/arc_intr() */
	sc->sc_talking = 0;
	rw_init(&sc->sc_rwlock);
	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_condvar, "arcdb");

	if (arc_map_pci_resources(sc, pa) != 0) {
		/* error message printed by arc_map_pci_resources */
		return;
	}

	if (arc_query_firmware(sc) != 0) {
		/* error message printed by arc_query_firmware */
		goto unmap_pci;
	}

	if (arc_alloc_ccbs(sc) != 0) {
		/* error message printed by arc_alloc_ccbs */
		goto unmap_pci;
	}

	sc->sc_shutdownhook = shutdownhook_establish(arc_shutdown, sc);
	if (sc->sc_shutdownhook == NULL)
		panic("unable to establish arc powerhook");

	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = self;
	adapt->adapt_nchannels = 1;
	/* spread the firmware's request slots evenly across the targets */
	adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_minphys = arc_minphys;
	adapt->adapt_request = arc_scsi_cmd;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_nluns = ARC_MAX_LUN;
	chan->chan_ntargets = ARC_MAX_TARGET;
	chan->chan_id = ARC_MAX_TARGET;	/* adapter sits past the last target */
	chan->chan_channel = 0;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	(void)config_found(self, &sc->sc_chan, scsiprint);

	/*
	 * enable interrupts (post queue and doorbell only).
	 * NOTE(review): this mixes ARC_REG_INTRSTAT_DOORBELL into an
	 * INTRMASK write; arc_unlock() uses ARC_REG_INTRMASK_DOORBELL for
	 * the same purpose — confirm both constants carry the same bit.
	 */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));

#if NBIO > 0
	/*
	 * Register the driver to bio(4) and setup the sensors.
	 */
	if (bio_register(self, arc_bioctl) != 0)
		panic("%s: bioctl registration failed\n", device_xname(self));

	/*
	 * you need to talk to the firmware to get volume info. our firmware
	 * interface relies on being able to sleep, so we need to use a thread
	 * to do the work.
	 */
	if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
		panic("%s: unable to create a kernel thread for sensors\n",
		    device_xname(self));
#endif

	return;

unmap_pci:
	arc_unmap_pci_resources(sc);
}
218
219 static int
220 arc_detach(device_t self, int flags)
221 {
222 struct arc_softc *sc = device_private(self);
223
224 shutdownhook_disestablish(sc->sc_shutdownhook);
225
226 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
227 aprint_error("%s: timeout waiting to stop bg rebuild\n",
228 device_xname(&sc->sc_dev));
229
230 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
231 aprint_error("%s: timeout waiting to flush cache\n",
232 device_xname(&sc->sc_dev));
233
234 return 0;
235 }
236
237 static void
238 arc_shutdown(void *xsc)
239 {
240 struct arc_softc *sc = xsc;
241
242 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
243 aprint_error("%s: timeout waiting to stop bg rebuild\n",
244 device_xname(&sc->sc_dev));
245
246 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
247 aprint_error("%s: timeout waiting to flush cache\n",
248 device_xname(&sc->sc_dev));
249 }
250
/*
 * scsipi minphys hook: clamp a transfer to MAXPHYS before handing it
 * to the generic minphys().
 */
static void
arc_minphys(struct buf *bp)
{
	if (bp->b_bcount > MAXPHYS)
		bp->b_bcount = MAXPHYS;
	minphys(bp);
}
258
/*
 * Interrupt handler.  Acknowledges doorbell and post-queue interrupts:
 * doorbell traffic either wakes a sleeping arc_msgbuf() transaction
 * (sc_talking set) or is discarded, and completed commands are popped
 * off the reply queue and finished.  Returns 1 if the interrupt was
 * ours, 0 otherwise.
 */
static int
arc_intr(void *arg)
{
	struct arc_softc *sc = arg;
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg, intrstat;

	mutex_spin_enter(&sc->sc_mutex);
	intrstat = arc_read(sc, ARC_REG_INTRSTAT);
	if (intrstat == 0x0) {
		/* not our interrupt */
		mutex_spin_exit(&sc->sc_mutex);
		return 0;
	}

	/* ack only the sources we service */
	intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
	arc_write(sc, ARC_REG_INTRSTAT, intrstat);

	if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
		if (sc->sc_talking) {
			/* if an ioctl is talking, wake it up */
			arc_write(sc, ARC_REG_INTRMASK,
			    ~ARC_REG_INTRMASK_POSTQUEUE);
			cv_broadcast(&sc->sc_condvar);
		} else {
			/* otherwise drop it */
			reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
			arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
			if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
				arc_write(sc, ARC_REG_INB_DOORBELL,
				    ARC_REG_INB_DOORBELL_READ_OK);
		}
	}
	mutex_spin_exit(&sc->sc_mutex);

	/* drain the reply queue; 0xffffffff means empty */
	while ((reg = arc_pop(sc)) != 0xffffffff) {
		/*
		 * the reply carries the bus address of the command frame;
		 * turn it back into an offset into the request kva.
		 */
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
		/* context was stored little-endian; the swap is symmetric */
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	}

	return 1;
}
310
/*
 * scsipi request entry point: translate an xfer into an Areca firmware
 * command frame and post it to the controller.  Runs asynchronously
 * unless XS_CTL_POLL is set, in which case the reply queue is polled
 * until this command completes.
 */
void
arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct arc_softc *sc = device_private(adapt->adapt_dev);
	struct arc_ccb *ccb;
	struct arc_msg_scsicmd *cmd;
	uint32_t reg;
	uint8_t target;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* Not supported. */
		return;
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	mutex_spin_enter(&sc->sc_mutex);

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	/*
	 * the firmware frame only carries ARC_MSG_CDBLEN bytes of cdb;
	 * fail larger commands with ILLEGAL REQUEST / invalid opcode sense.
	 */
	if (xs->cmdlen > ARC_MSG_CDBLEN) {
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.scsi_sense.asc = 0x20;
		xs->error = XS_SENSE;
		xs->status = SCSI_CHECK;
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	ccb = arc_get_ccb(sc);
	if (ccb == NULL) {
		/* out of command slots; ask scsipi to retry later */
		xs->error = XS_RESOURCE_SHORTAGE;
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	if (arc_load_xs(ccb) != 0) {
		/* couldn't map the data buffer for DMA */
		xs->error = XS_DRIVER_STUFFUP;
		arc_put_ccb(sc, ccb);
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	cmd = &ccb->ccb_cmd->cmd;
	reg = ccb->ccb_cmd_post;

	/* bus is always 0 */
	cmd->target = target;
	cmd->lun = periph->periph_lun;
	cmd->function = 1; /* XXX magic number */

	cmd->cdb_len = xs->cmdlen;
	cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
	/*
	 * NOTE(review): cmd->flags is only assigned on the write path;
	 * reads rely on the command memory holding no stale flag bits —
	 * confirm the ccb command frame is zeroed between uses.
	 */
	if (xs->xs_control & XS_CTL_DATA_OUT)
		cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
	if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
		/* long SG lists need the big-frame flavour of the post */
		cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
		reg |= ARC_REG_POST_QUEUE_BIGFRAME;
	}

	cmd->context = htole32(ccb->ccb_id);
	cmd->data_len = htole32(xs->datalen);

	memcpy(cmd->cdb, xs->cmd, xs->cmdlen);

	/* we've built the command, let's put it on the hw */
	bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	arc_push(sc, reg);
	if (xs->xs_control & XS_CTL_POLL) {
		/*
		 * NOTE(review): on a poll timeout the ccb is not returned
		 * to the free list before reporting the error — confirm
		 * whether this leak is intentional (command still owned by
		 * the firmware).
		 */
		if (arc_complete(sc, ccb, xs->timeout) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			mutex_spin_exit(&sc->sc_mutex);
			scsipi_done(xs);
			return;
		}
	}

	mutex_spin_exit(&sc->sc_mutex);
}
409
410 int
411 arc_load_xs(struct arc_ccb *ccb)
412 {
413 struct arc_softc *sc = ccb->ccb_sc;
414 struct scsipi_xfer *xs = ccb->ccb_xs;
415 bus_dmamap_t dmap = ccb->ccb_dmamap;
416 struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
417 uint64_t addr;
418 int i, error;
419
420 if (xs->datalen == 0)
421 return 0;
422
423 error = bus_dmamap_load(sc->sc_dmat, dmap,
424 xs->data, xs->datalen, NULL,
425 (xs->xs_control & XS_CTL_NOSLEEP) ?
426 BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
427 if (error != 0) {
428 aprint_error("%s: error %d loading dmamap\n",
429 device_xname(&sc->sc_dev), error);
430 return 1;
431 }
432
433 for (i = 0; i < dmap->dm_nsegs; i++) {
434 sge = &sgl[i];
435
436 sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
437 addr = dmap->dm_segs[i].ds_addr;
438 sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
439 sge->sg_lo_addr = htole32((uint32_t)addr);
440 }
441
442 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
443 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
444 BUS_DMASYNC_PREWRITE);
445
446 return 0;
447 }
448
/*
 * Finish a command popped off the reply queue: unload its DMA map,
 * translate the firmware status into scsipi status/error codes and
 * hand the xfer back to the midlayer.
 */
void
arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct arc_msg_scsicmd *cmd;

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	/* timeout_del */
	xs->status |= XS_STS_DONE;

	if (reg & ARC_REG_REPLY_QUEUE_ERR) {
		/* firmware flagged an error; map its device status */
		cmd = &ccb->ccb_cmd->cmd;

		switch (cmd->status) {
		case ARC_MSG_STATUS_SELTIMEOUT:
		case ARC_MSG_STATUS_ABORTED:
		case ARC_MSG_STATUS_INIT_FAIL:
			/* report all three as a selection timeout */
			xs->status = SCSI_OK;
			xs->error = XS_SELTIMEOUT;
			break;

		case SCSI_CHECK:
			/* copy the firmware-supplied sense data back */
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, cmd->sense_data,
			    min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | 0x70;
			xs->status = SCSI_CHECK;
			xs->error = XS_SENSE;
			xs->resid = 0;
			break;

		default:
			/* unknown device status */
			xs->error = XS_BUSY; /* try again later? */
			xs->status = SCSI_BUSY;
			break;
		}
	} else {
		xs->status = SCSI_OK;
		xs->error = XS_NOERROR;
		xs->resid = 0;
	}

	arc_put_ccb(sc, ccb);
	scsipi_done(xs);
}
503
/*
 * Poll the reply queue until the given ccb completes or the timeout
 * (roughly in milliseconds; one delay(1000) per empty poll) expires.
 * Any other commands found on the queue along the way are completed
 * too.  Returns 0 on completion, 1 on timeout.
 */
int
arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
{
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg;

	do {
		reg = arc_pop(sc);
		if (reg == 0xffffffff) {
			/* queue empty: burn a tick of the timeout and spin */
			if (timeout-- == 0)
				return 1;

			delay(1000);
			continue;
		}

		/* recover the command frame from its bus address */
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    ARC_DMA_DVA(sc->sc_requests)));
		/* context was stored little-endian; the swap is symmetric */
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	} while (nccb != ccb);

	return 0;
}
536
/*
 * Map the controller's register BAR and wire up its interrupt.
 * Returns 0 on success, 1 on failure (error already printed).
 */
int
arc_map_pci_resources(struct arc_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t memtype;
	pci_intr_handle_t ih;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
	if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
		aprint_error(": unable to map system interface register\n");
		return 1;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error(": unable to map interrupt\n");
		goto unmap;
	}

	/* the handler takes sc_mutex (spin, IPL_BIO), so interrupt at BIO */
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    arc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error(": unable to map interrupt [2]\n");
		goto unmap;
	}
	aprint_normal(": interrupting at %s\n",
	    pci_intr_string(pa->pa_pc, ih));

	return 0;

unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
	return 1;
}
575
/*
 * Tear down what arc_map_pci_resources() set up: disestablish the
 * interrupt first so the handler can no longer touch the registers,
 * then unmap them.
 */
void
arc_unmap_pci_resources(struct arc_softc *sc)
{
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}
583
/*
 * Wait for the firmware to come up, fetch its configuration block and
 * start background rebuild.  Validates the request frame size and
 * records the request queue depth in sc_req_count.  Returns 0 on
 * success, 1 on any failure.
 *
 * The fwinfo fields are little-endian; htole32() is used to convert
 * them to host order (the byte swap is its own inverse, so this is
 * equivalent to le32toh()).
 */
int
arc_query_firmware(struct arc_softc *sc)
{
	struct arc_msg_firmware_info fwinfo;
	char string[81]; /* sizeof(vendor)*2+1 */

	if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
	    ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
		aprint_debug("%s: timeout waiting for firmware ok\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
		aprint_debug("%s: timeout waiting for get config\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
		aprint_debug("%s: timeout waiting to start bg rebuild\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	/* GET_CONFIG leaves the firmware info in the message buffer */
	arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));

	DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
	    device_xname(&sc->sc_dev), htole32(fwinfo.signature));

	if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
		aprint_error("%s: invalid firmware info from iop\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.request_len));
	DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.queue_len));
	DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.sdram_size));
	DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.sata_ports));

	/* firmware strings are space padded; make them printable */
	scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
	DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
	    device_xname(&sc->sc_dev), string);

	scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));

	aprint_normal("%s: Areca %s Host Adapter RAID controller\n",
	    device_xname(&sc->sc_dev), string);

	scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
	DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
	    device_xname(&sc->sc_dev), string);

	/* the driver's frame layout must match what the firmware expects */
	if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
		aprint_error("%s: unexpected request frame size (%d != %d)\n",
		    device_xname(&sc->sc_dev),
		    htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
		return 1;
	}

	sc->sc_req_count = htole32(fwinfo.queue_len);

	aprint_normal("%s: %d ports, %dMB SDRAM, firmware <%s>\n",
	    device_xname(&sc->sc_dev), htole32(fwinfo.sata_ports),
	    htole32(fwinfo.sdram_size), string);

	return 0;
}
661
662 #if NBIO > 0
663 static int
664 arc_bioctl(struct device *self, u_long cmd, void *addr)
665 {
666 struct arc_softc *sc = device_private(self);
667 int error = 0;
668
669 switch (cmd) {
670 case BIOCINQ:
671 error = arc_bio_inq(sc, (struct bioc_inq *)addr);
672 break;
673
674 case BIOCVOL:
675 error = arc_bio_vol(sc, (struct bioc_vol *)addr);
676 break;
677
678 case BIOCDISK:
679 error = arc_bio_disk(sc, (struct bioc_disk *)addr);
680 break;
681
682 case BIOCALARM:
683 error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
684 break;
685
686 default:
687 error = ENOTTY;
688 break;
689 }
690
691 return error;
692 }
693
/*
 * BIOCALARM handler: enable, disable, silence or query the controller
 * beeper via the firmware message buffer.
 */
static int
arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
{
	uint8_t request[2], reply[1];
	size_t len;
	int error = 0;

	switch (ba->ba_opcode) {
	case BIOC_SAENABLE:
	case BIOC_SADISABLE:
		/* two-byte request: opcode plus desired state */
		request[0] = ARC_FW_SET_ALARM;
		request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
		    ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
		len = sizeof(request);

		break;

	case BIOC_SASILENCE:
		/* mute takes no argument; send the opcode byte only */
		request[0] = ARC_FW_MUTE_ALARM;
		len = 1;

		break;

	case BIOC_GASTATUS:
		/* system info is too big/ugly to deal with here */
		return arc_bio_alarm_state(sc, ba);

	default:
		return EOPNOTSUPP;
	}

	error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
	if (error != 0)
		return error;

	/* single status byte comes back from the firmware */
	switch (reply[0]) {
	case ARC_FW_CMD_OK:
		return 0;
	case ARC_FW_CMD_PASS_REQD:
		return EPERM;
	default:
		return EIO;
	}
}
738
739 static int
740 arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
741 {
742 uint8_t request = ARC_FW_SYSINFO;
743 struct arc_fw_sysinfo *sysinfo;
744 int error = 0;
745
746 sysinfo = kmem_zalloc(sizeof(struct arc_fw_sysinfo), KM_SLEEP);
747
748 request = ARC_FW_SYSINFO;
749 error = arc_msgbuf(sc, &request, sizeof(request),
750 sysinfo, sizeof(struct arc_fw_sysinfo));
751
752 if (error != 0)
753 goto out;
754
755 ba->ba_status = sysinfo->alarm;
756
757 out:
758 kmem_free(sysinfo, sizeof(*sysinfo));
759 return error;
760 }
761
762
/*
 * BIOCINQ handler: report the device name and the number of usable
 * (non-empty, non-passthru) volumes on the controller.
 */
static int
arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
{
	uint8_t request[2];
	struct arc_fw_sysinfo *sysinfo;
	struct arc_fw_volinfo *volinfo;
	int maxvols, nvols = 0, i;
	int error = 0;

	sysinfo = kmem_zalloc(sizeof(struct arc_fw_sysinfo), KM_SLEEP);
	volinfo = kmem_zalloc(sizeof(struct arc_fw_volinfo), KM_SLEEP);

	/* SYSINFO is a one byte request */
	request[0] = ARC_FW_SYSINFO;
	error = arc_msgbuf(sc, request, 1, sysinfo,
	    sizeof(struct arc_fw_sysinfo));
	if (error != 0)
		goto out;

	maxvols = sysinfo->max_volume_set;

	/* walk every volume slot and count the real ones */
	request[0] = ARC_FW_VOLINFO;
	for (i = 0; i < maxvols; i++) {
		request[1] = i;
		error = arc_msgbuf(sc, request, sizeof(request), volinfo,
		    sizeof(struct arc_fw_volinfo));
		if (error != 0)
			goto out;

		/*
		 * I can't find an easy way to see if the volume exists or not
		 * except to say that if it has no capacity then it isn't there.
		 * Ignore passthru volumes, bioc_vol doesn't understand them.
		 */
		if ((volinfo->capacity != 0 || volinfo->capacity2 != 0) &&
		    volinfo->raid_level != ARC_FW_VOL_RAIDLEVEL_PASSTHRU)
			nvols++;
	}

	strlcpy(bi->bi_dev, device_xname(&sc->sc_dev), sizeof(bi->bi_dev));
	bi->bi_novol = nvols;
out:
	kmem_free(volinfo, sizeof(*volinfo));
	kmem_free(sysinfo, sizeof(*sysinfo));
	return error;
}
808
/*
 * Resolve a bio(4) volume index to firmware volume info: skip empty
 * and passthru slots so that "vol" counts only the volumes BIOCINQ
 * reported.  On success *volinfo holds the matching volume; returns
 * ENODEV if no such volume exists.
 */
static int
arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
{
	uint8_t request[2];
	struct arc_fw_sysinfo *sysinfo;
	int error = 0;
	int maxvols, nvols = 0, i;

	sysinfo = kmem_zalloc(sizeof(struct arc_fw_sysinfo), KM_SLEEP);

	request[0] = ARC_FW_SYSINFO;

	error = arc_msgbuf(sc, request, 1, sysinfo,
	    sizeof(struct arc_fw_sysinfo));
	if (error != 0)
		goto out;

	maxvols = sysinfo->max_volume_set;

	request[0] = ARC_FW_VOLINFO;
	for (i = 0; i < maxvols; i++) {
		request[1] = i;
		error = arc_msgbuf(sc, request, sizeof(request), volinfo,
		    sizeof(struct arc_fw_volinfo));
		if (error != 0)
			goto out;

		/* empty or passthru slots don't count towards "vol" */
		if ((volinfo->capacity == 0 && volinfo->capacity2 == 0) ||
		    volinfo->raid_level == ARC_FW_VOL_RAIDLEVEL_PASSTHRU)
			continue;

		/* found the vol'th usable volume; volinfo already holds it */
		if (nvols == vol)
			break;

		nvols++;
	}

	/*
	 * falling out of the loop without a match leaves volinfo holding
	 * the last slot read (or the caller's zeroed buffer if maxvols is
	 * 0), so re-check it before declaring success.
	 */
	if (nvols != vol ||
	    (volinfo->capacity == 0 && volinfo->capacity2 == 0) ||
	    volinfo->raid_level == ARC_FW_VOL_RAIDLEVEL_PASSTHRU) {
		error = ENODEV;
		goto out;
	}

out:
	kmem_free(sysinfo, sizeof(*sysinfo));
	return error;
}
857
/*
 * BIOCVOL handler: report status, size, RAID level, member count and
 * name for one volume.  Volume fields are little-endian; htole32() is
 * used for the host conversion (the swap is its own inverse).
 */
static int
arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
{
	struct arc_fw_volinfo *volinfo;
	uint64_t blocks;
	uint32_t status;
	int error = 0;

	volinfo = kmem_zalloc(sizeof(struct arc_fw_volinfo), KM_SLEEP);

	error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
	if (error != 0)
		goto out;

	bv->bv_percent = -1;
	bv->bv_seconds = 0;

	status = htole32(volinfo->volume_status);
	if (status == 0x0) {
		/* no activity in progress: online unless members failed */
		if (htole32(volinfo->fail_mask) == 0x0)
			bv->bv_status = BIOC_SVONLINE;
		else
			bv->bv_status = BIOC_SVDEGRADED;
	} else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
		bv->bv_status = BIOC_SVDEGRADED;
	} else if (status & ARC_FW_VOL_STATUS_FAILED) {
		bv->bv_status = BIOC_SVOFFLINE;
	} else if (status & ARC_FW_VOL_STATUS_INITTING) {
		/* firmware reports progress in tenths of a percent */
		bv->bv_status = BIOC_SVBUILDING;
		bv->bv_percent = htole32(volinfo->progress) / 10;
	} else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
		bv->bv_status = BIOC_SVREBUILD;
		bv->bv_percent = htole32(volinfo->progress) / 10;
	} else if (status & ARC_FW_VOL_STATUS_MIGRATING) {
		bv->bv_status = BIOC_SVMIGRATING;
		bv->bv_percent = htole32(volinfo->progress) / 10;
	}

	/* capacity is a 64bit block count split over two 32bit fields */
	blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
	blocks += (uint64_t)htole32(volinfo->capacity);
	bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */

	switch (volinfo->raid_level) {
	case ARC_FW_VOL_RAIDLEVEL_0:
		bv->bv_level = 0;
		break;
	case ARC_FW_VOL_RAIDLEVEL_1:
		bv->bv_level = 1;
		break;
	case ARC_FW_VOL_RAIDLEVEL_3:
		bv->bv_level = 3;
		break;
	case ARC_FW_VOL_RAIDLEVEL_5:
		bv->bv_level = 5;
		break;
	case ARC_FW_VOL_RAIDLEVEL_6:
		bv->bv_level = 6;
		break;
	case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
	default:
		bv->bv_level = -1;
		break;
	}

	bv->bv_nodisk = volinfo->member_disks;
	strlcpy(bv->bv_dev, volinfo->set_name, sizeof(bv->bv_dev));

out:
	kmem_free(volinfo, sizeof(*volinfo));
	return error;
}
929
/*
 * BIOCDISK handler: report one member disk of a volume.  Resolves the
 * volume, looks up its raid set, then fetches per-disk info for the
 * requested member.
 */
static int
arc_bio_disk(struct arc_softc *sc, struct bioc_disk *bd)
{
	uint8_t request[2];
	struct arc_fw_volinfo *volinfo;
	struct arc_fw_raidinfo *raidinfo;
	struct arc_fw_diskinfo *diskinfo;
	int error = 0;
	uint64_t blocks;
	char model[81];
	char serial[41];
	char rev[17];

	volinfo = kmem_zalloc(sizeof(struct arc_fw_volinfo), KM_SLEEP);
	raidinfo = kmem_zalloc(sizeof(struct arc_fw_raidinfo), KM_SLEEP);
	diskinfo = kmem_zalloc(sizeof(struct arc_fw_diskinfo), KM_SLEEP);

	error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
	if (error != 0)
		goto out;

	request[0] = ARC_FW_RAIDINFO;
	request[1] = volinfo->raid_set_number;

	error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
	    sizeof(struct arc_fw_raidinfo));
	if (error != 0)
		goto out;

	/*
	 * NOTE(review): this admits bd_diskid == member_devices, which
	 * then indexes device_array one past the last member — confirm
	 * whether the check should be ">=".
	 */
	if (bd->bd_diskid > raidinfo->member_devices) {
		error = ENODEV;
		goto out;
	}

	if (raidinfo->device_array[bd->bd_diskid] == 0xff) {
		/*
		 * the disk doesn't exist anymore. bio is too dumb to be
		 * able to display that, so put it on another bus
		 */
		bd->bd_channel = 1;
		bd->bd_target = 0;
		bd->bd_lun = 0;
		bd->bd_status = BIOC_SDOFFLINE;
		strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
		goto out;
	}

	request[0] = ARC_FW_DISKINFO;
	request[1] = raidinfo->device_array[bd->bd_diskid];
	error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
	    sizeof(struct arc_fw_diskinfo));
	if (error != 0)
		goto out;

#if 0
	bd->bd_channel = diskinfo->scsi_attr.channel;
	bd->bd_target = diskinfo->scsi_attr.target;
	bd->bd_lun = diskinfo->scsi_attr.lun;
#endif
	/*
	 * the firwmare doesnt seem to fill scsi_attr in, so fake it with
	 * the diskid.
	 */
	bd->bd_channel = 0;
	bd->bd_target = raidinfo->device_array[bd->bd_diskid];
	bd->bd_lun = 0;

	bd->bd_status = BIOC_SDONLINE;
	/* 64bit block count split over two little-endian 32bit fields */
	blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
	blocks += (uint64_t)htole32(diskinfo->capacity);
	bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */

	/* firmware strings are space padded; make them printable */
	scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
	scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
	scsipi_strvis(rev, 17, diskinfo->firmware_rev,
	    sizeof(diskinfo->firmware_rev));

	snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
	strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));

out:
	kmem_free(diskinfo, sizeof(*diskinfo));
	kmem_free(raidinfo, sizeof(*raidinfo));
	kmem_free(volinfo, sizeof(*volinfo));
	return error;
}
1016 #endif /* NBIO > 0 */
1017
/*
 * Firmware message checksum: the two bytes of the length plus every
 * payload byte, summed into a uint8_t (mod 256).
 */
uint8_t
arc_msg_cksum(void *cmd, uint16_t len)
{
	const uint8_t *p = cmd;
	uint8_t sum;
	uint16_t i;

	/* seed with both halves of the length field */
	sum = (uint8_t)(len >> 8);
	sum += (uint8_t)len;

	for (i = 0; i < len; i++)
		sum += p[i];

	return sum;
}
1031
1032
/*
 * Run one firmware message-buffer transaction: frame the request with
 * an arc_fw_bufhdr and trailing checksum, feed it to the hardware in
 * doorbell-paced chunks, then collect and validate the framed reply.
 *
 * wptr/wbuflen is the request payload, rptr/rbuflen the expected reply
 * payload size.  Sleeps (via arc_wait) waiting for doorbells, so it
 * must not be called from interrupt context.  Serialized by arc_lock.
 * Returns 0 on success or an errno (EBUSY, EIO).
 */
int
arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
	   size_t rbuflen)
{
	uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
	uint8_t *wbuf, *rbuf;
	int wlen, wdone = 0, rlen, rdone = 0;
	struct arc_fw_bufhdr *bufhdr;
	uint32_t reg, rwlen;
	int error = 0;
#ifdef ARC_DEBUG
	int i;
#endif

	wbuf = rbuf = NULL;

	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
	    device_xname(&sc->sc_dev), wbuflen, rbuflen);

	/* framed sizes: header + payload + trailing checksum byte */
	wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
	wbuf = kmem_alloc(wlen, KM_SLEEP);

	rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
	rbuf = kmem_alloc(rlen, KM_SLEEP);

	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
	    device_xname(&sc->sc_dev), wlen, rlen);

	/* build the framed request */
	bufhdr = (struct arc_fw_bufhdr *)wbuf;
	bufhdr->hdr = arc_fw_hdr;
	bufhdr->len = htole16(wbuflen);
	memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
	wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);

	arc_lock(sc);
	if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0) {
		/* another transaction is pending on the hardware */
		error = EBUSY;
		goto out;
	}

	/* pretend the hw already acked a read so the first chunk goes out */
	reg = ARC_REG_OUTB_DOORBELL_READ_OK;

	do {
		if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
			memset(rwbuf, 0, sizeof(rwbuf));
			/*
			 * NOTE(review): "%" only equals the remaining byte
			 * count while it is smaller than sizeof(rwbuf); a
			 * message of >= ARC_REG_IOC_RWBUF_MAXLEN framed
			 * bytes would send a short (or zero) chunk here.
			 * Current requests are tiny, but confirm before
			 * sending anything larger.
			 */
			rwlen = (wlen - wdone) % sizeof(rwbuf);
			memcpy(rwbuf, &wbuf[wdone], rwlen);

#ifdef ARC_DEBUG
			if (arcdebug & ARC_D_DB) {
				printf("%s: write %d:",
				    device_xname(&sc->sc_dev), rwlen);
				for (i = 0; i < rwlen; i++)
					printf(" 0x%02x", rwbuf[i]);
				printf("\n");
			}
#endif

			/* copy the chunk to the hw */
			arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
			arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
			    sizeof(rwbuf));

			/* say we have a buffer for the hw */
			arc_write(sc, ARC_REG_INB_DOORBELL,
			    ARC_REG_INB_DOORBELL_WRITE_OK);

			wdone += rwlen;
		}

		/* sleep until the hardware rings its doorbell, then ack it */
		while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
			arc_wait(sc);
		arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);

		DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
		    device_xname(&sc->sc_dev), reg);

		if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
			rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
			if (rwlen > sizeof(rwbuf)) {
				DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
				    device_xname(&sc->sc_dev));
				error = EIO;
				goto out;
			}

			arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
			    sizeof(rwbuf));

			/* tell the hw we consumed its buffer */
			arc_write(sc, ARC_REG_INB_DOORBELL,
			    ARC_REG_INB_DOORBELL_READ_OK);

#ifdef ARC_DEBUG
			printf("%s: len: %d+%d=%d/%d\n",
			    device_xname(&sc->sc_dev),
			    rwlen, rdone, rwlen + rdone, rlen);
			if (arcdebug & ARC_D_DB) {
				printf("%s: read:",
				    device_xname(&sc->sc_dev));
				for (i = 0; i < rwlen; i++)
					printf(" 0x%02x", rwbuf[i]);
				printf("\n");
			}
#endif

			if ((rdone + rwlen) > rlen) {
				DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
				    device_xname(&sc->sc_dev));
				error = EIO;
				goto out;
			}

			memcpy(&rbuf[rdone], rwbuf, rwlen);
			rdone += rwlen;
		}
	} while (rdone != rlen);

	/* validate the reply framing, then the payload checksum */
	bufhdr = (struct arc_fw_bufhdr *)rbuf;
	if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
	    bufhdr->len != htole16(rbuflen)) {
		DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
		    device_xname(&sc->sc_dev));
		error = EIO;
		goto out;
	}

	memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);

	if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
		DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
		    device_xname(&sc->sc_dev));
		error = EIO;
		goto out;
	}

out:
	arc_unlock(sc);
	kmem_free(wbuf, wlen);
	kmem_free(rbuf, rlen);

	return error;
}
1175
/*
 * arc_lock: serialize access to the firmware message interface.
 *
 * Takes the per-softc rwlock as writer (excludes other talkers) and then
 * the spin mutex, and marks the controller as "talking".  The interrupt
 * mask is rewritten so that only the post-queue source stays enabled
 * while we converse with the firmware; arc_unlock() restores the
 * doorbell source.  Released by arc_unlock().
 */
void
arc_lock(struct arc_softc *sc)
{
	rw_enter(&sc->sc_rwlock, RW_WRITER);
	mutex_spin_enter(&sc->sc_mutex);
	/* mask everything but the post queue while sc_talking is set */
	arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
	sc->sc_talking = 1;
}
1184
/*
 * arc_unlock: release the firmware message interface taken by arc_lock().
 *
 * Clears sc_talking, re-enables the doorbell interrupt source in addition
 * to the post queue, and drops the spin mutex and rwlock in the reverse
 * of the acquisition order used by arc_lock().
 */
void
arc_unlock(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	sc->sc_talking = 0;
	/* unmask both the post queue and doorbell sources again */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	mutex_spin_exit(&sc->sc_mutex);
	rw_exit(&sc->sc_rwlock);
}
1196
/*
 * arc_wait: sleep until the controller signals us via the doorbell
 * interrupt, or for at most one second.
 *
 * Temporarily unmasks the doorbell interrupt source so the interrupt
 * handler can cv_signal() us, then waits on sc_condvar (the spin mutex
 * is dropped while sleeping and re-taken on wakeup).  On timeout
 * (EWOULDBLOCK) the doorbell source is masked again, matching the state
 * arc_lock() established; callers then poll the doorbell register.
 */
void
arc_wait(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	if (cv_timedwait_sig(&sc->sc_condvar, &sc->sc_mutex, hz) ==
	    EWOULDBLOCK)
		arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
}
1208
1209 #if NBIO > 0
1210 static void
1211 arc_create_sensors(void *arg)
1212 {
1213 struct arc_softc *sc = arg;
1214 struct bioc_inq bi;
1215 struct bioc_vol bv;
1216 int i;
1217 size_t slen;
1218
1219 memset(&bi, 0, sizeof(bi));
1220 if (arc_bio_inq(sc, &bi) != 0) {
1221 aprint_error("%s: unable to query firmware for sensor info\n",
1222 device_xname(&sc->sc_dev));
1223 kthread_exit(0);
1224 }
1225
1226 sc->sc_nsensors = bi.bi_novol;
1227 /*
1228 * There's no point to continue if there are no drives connected...
1229 */
1230 if (!sc->sc_nsensors)
1231 kthread_exit(0);
1232
1233 sc->sc_sme = sysmon_envsys_create();
1234 slen = sizeof(envsys_data_t) * sc->sc_nsensors;
1235 sc->sc_sensors = kmem_zalloc(slen, KM_SLEEP);
1236
1237 for (i = 0; i < sc->sc_nsensors; i++) {
1238 memset(&bv, 0, sizeof(bv));
1239 bv.bv_volid = i;
1240 if (arc_bio_vol(sc, &bv) != 0)
1241 goto bad;
1242
1243 sc->sc_sensors[i].units = ENVSYS_DRIVE;
1244 sc->sc_sensors[i].monitor = true;
1245 sc->sc_sensors[i].flags = ENVSYS_FMONSTCHANGED;
1246 strlcpy(sc->sc_sensors[i].desc, bv.bv_dev,
1247 sizeof(sc->sc_sensors[i].desc));
1248 if (sysmon_envsys_sensor_attach(sc->sc_sme, &sc->sc_sensors[i]))
1249 goto bad;
1250 }
1251
1252 sc->sc_sme->sme_name = device_xname(&sc->sc_dev);
1253 sc->sc_sme->sme_cookie = sc;
1254 sc->sc_sme->sme_refresh = arc_refresh_sensors;
1255 if (sysmon_envsys_register(sc->sc_sme)) {
1256 aprint_debug("%s: unable to register with sysmon\n",
1257 device_xname(&sc->sc_dev));
1258 goto bad;
1259 }
1260 kthread_exit(0);
1261
1262 bad:
1263 kmem_free(sc->sc_sensors, slen);
1264 sysmon_envsys_destroy(sc->sc_sme);
1265 kthread_exit(0);
1266 }
1267
/*
 * arc_refresh_sensors: sysmon_envsys refresh callback.  Maps the BIOC
 * volume status of the volume matching this sensor onto an ENVSYS drive
 * state.  If the firmware query fails the sensor is marked invalid.
 */
static void
arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
{
	struct arc_softc *sc = sme->sme_cookie;
	struct bioc_vol bv;

	memset(&bv, 0, sizeof(bv));
	/* sensor index was assigned 1:1 with volume id at creation time */
	bv.bv_volid = edata->sensor;

	if (arc_bio_vol(sc, &bv)) {
		edata->value_cur = ENVSYS_DRIVE_EMPTY;
		edata->state = ENVSYS_SINVALID;
		return;
	}

	switch (bv.bv_status) {
	case BIOC_SVOFFLINE:
		edata->value_cur = ENVSYS_DRIVE_FAIL;
		edata->state = ENVSYS_SCRITICAL;
		break;
	case BIOC_SVDEGRADED:
		edata->value_cur = ENVSYS_DRIVE_PFAIL;
		edata->state = ENVSYS_SCRITICAL;
		break;
	case BIOC_SVBUILDING:
		edata->value_cur = ENVSYS_DRIVE_REBUILD;
		edata->state = ENVSYS_SVALID;
		break;
	case BIOC_SVMIGRATING:
		edata->value_cur = ENVSYS_DRIVE_MIGRATING;
		edata->state = ENVSYS_SVALID;
		break;
	case BIOC_SVSCRUB:
	case BIOC_SVONLINE:
		edata->value_cur = ENVSYS_DRIVE_ONLINE;
		edata->state = ENVSYS_SVALID;
		break;
	case BIOC_SVINVALID:
		/* FALLTHROUGH */
	default:
		edata->value_cur = ENVSYS_DRIVE_EMPTY;	/* unknown state */
		edata->state = ENVSYS_SINVALID;
		break;
	}
}
1313 #endif /* NBIO > 0 */
1314
1315 uint32_t
1316 arc_read(struct arc_softc *sc, bus_size_t r)
1317 {
1318 uint32_t v;
1319
1320 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1321 BUS_SPACE_BARRIER_READ);
1322 v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1323
1324 DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
1325 device_xname(&sc->sc_dev), r, v);
1326
1327 return v;
1328 }
1329
/*
 * arc_read_region: copy len bytes out of the controller register window
 * starting at r into buf, as 32bit reads, preceded by a read barrier.
 * NOTE(review): len is truncated to a multiple of 4 by the >> 2 — callers
 * appear to always pass 4-byte-multiple sizes; confirm before reusing.
 */
void
arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (uint32_t *)buf, len >> 2);
}
1338
/*
 * arc_write: write the 32bit value v to the controller register at
 * offset r, followed by a write barrier so the store is pushed out
 * before any later accesses.
 */
void
arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
	    device_xname(&sc->sc_dev), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1349
/*
 * arc_write_region: copy len bytes from buf into the controller register
 * window starting at r, as 32bit writes, followed by a write barrier.
 * NOTE(review): as in arc_read_region, len >> 2 drops any remainder —
 * callers are assumed to pass multiples of 4.
 */
void
arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (const uint32_t *)buf, len >> 2);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_WRITE);
}
1358
1359 int
1360 arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1361 uint32_t target)
1362 {
1363 int i;
1364
1365 DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
1366 device_xname(&sc->sc_dev), r, mask, target);
1367
1368 for (i = 0; i < 10000; i++) {
1369 if ((arc_read(sc, r) & mask) == target)
1370 return 0;
1371 delay(1000);
1372 }
1373
1374 return 1;
1375 }
1376
1377 int
1378 arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1379 uint32_t target)
1380 {
1381 int i;
1382
1383 DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
1384 device_xname(&sc->sc_dev), r, mask, target);
1385
1386 for (i = 0; i < 10000; i++) {
1387 if ((arc_read(sc, r) & mask) != target)
1388 return 0;
1389 delay(1000);
1390 }
1391
1392 return 1;
1393 }
1394
1395 int
1396 arc_msg0(struct arc_softc *sc, uint32_t m)
1397 {
1398 /* post message */
1399 arc_write(sc, ARC_REG_INB_MSG0, m);
1400 /* wait for the fw to do it */
1401 if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
1402 ARC_REG_INTRSTAT_MSG0) != 0)
1403 return 1;
1404
1405 /* ack it */
1406 arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);
1407
1408 return 0;
1409 }
1410
/*
 * arc_dmamem_alloc: allocate a zeroed, mapped and loaded chunk of DMA
 * safe memory of the given size in a single contiguous segment.
 *
 * On failure each acquired resource is released in reverse order via the
 * goto-cleanup chain below.  Returns the wrapper descriptor, or NULL.
 * Freed with arc_dmamem_free().
 */
struct arc_dmamem *
arc_dmamem_alloc(struct arc_softc *sc, size_t size)
{
	struct arc_dmamem *adm;
	int nsegs;

	adm = kmem_zalloc(sizeof(*adm), KM_NOSLEEP);
	if (adm == NULL)
		return NULL;

	adm->adm_size = size;

	/* one map, one segment: the memory must be physically contiguous */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
		goto admfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
	    &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(adm->adm_kva, 0, size);

	return adm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
admfree:
	kmem_free(adm, sizeof(*adm));

	return NULL;
}
1454
/*
 * arc_dmamem_free: tear down a descriptor from arc_dmamem_alloc(),
 * releasing its resources in the reverse order of acquisition.
 */
void
arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
{
	bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
	kmem_free(adm, sizeof(*adm));
}
1464
/*
 * arc_alloc_ccbs: allocate the ccb array and the single DMA region that
 * backs all per-ccb command frames, create a dmamap per ccb, and push
 * every ccb onto the free list.
 *
 * On a dmamap creation failure the ccbs already queued are popped back
 * off the free list so their maps can be destroyed; the failing ccb
 * itself never got a map.  Returns 0 on success, 1 on failure.
 */
int
arc_alloc_ccbs(struct arc_softc *sc)
{
	struct arc_ccb *ccb;
	uint8_t *cmd;
	int i;
	size_t ccbslen;

	TAILQ_INIT(&sc->sc_ccb_free);

	ccbslen = sizeof(struct arc_ccb) * sc->sc_req_count;
	sc->sc_ccbs = kmem_zalloc(ccbslen, KM_SLEEP);

	sc->sc_requests = arc_dmamem_alloc(sc,
	    ARC_MAX_IOCMDLEN * sc->sc_req_count);
	if (sc->sc_requests == NULL) {
		aprint_error("%s: unable to allocate ccb dmamem\n",
		    device_xname(&sc->sc_dev));
		goto free_ccbs;
	}
	cmd = ARC_DMA_KVA(sc->sc_requests);

	for (i = 0; i < sc->sc_req_count; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
		    MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
			aprint_error("%s: unable to create dmamap for ccb %d\n",
			    device_xname(&sc->sc_dev), i);
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		ccb->ccb_id = i;
		/* each ccb owns one ARC_MAX_IOCMDLEN slice of the region */
		ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;

		ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
		/* post address handed to the hw, pre-shifted for the queue reg */
		ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;

		arc_put_ccb(sc, ccb);
	}

	return 0;

free_maps:
	while ((ccb = arc_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	arc_dmamem_free(sc, sc->sc_requests);

free_ccbs:
	kmem_free(sc->sc_ccbs, ccbslen);

	return 1;
}
1520
1521 struct arc_ccb *
1522 arc_get_ccb(struct arc_softc *sc)
1523 {
1524 struct arc_ccb *ccb;
1525
1526 ccb = TAILQ_FIRST(&sc->sc_ccb_free);
1527 if (ccb != NULL)
1528 TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
1529
1530 return ccb;
1531 }
1532
/*
 * arc_put_ccb: scrub a ccb (detach its xfer, zero its command frame) and
 * return it to the tail of the free list.
 */
void
arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
{
	ccb->ccb_xs = NULL;
	memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
	TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
}
1540