arcmsr.c revision 1.5 1 /* $NetBSD: arcmsr.c,v 1.5 2007/12/05 18:58:00 xtraeme Exp $ */
2 /* $OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */
3
4 /*
5 * Copyright (c) 2006 David Gwynne <dlg (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include "bio.h"
21
22 #include <sys/cdefs.h>
23 __KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.5 2007/12/05 18:58:00 xtraeme Exp $");
24
25 #include <sys/param.h>
26 #include <sys/buf.h>
27 #include <sys/kernel.h>
28 #include <sys/malloc.h>
29 #include <sys/device.h>
30 #include <sys/kthread.h>
31 #include <sys/mutex.h>
32 #include <sys/condvar.h>
33 #include <sys/rwlock.h>
34
35 #if NBIO > 0
36 #include <sys/ioctl.h>
37 #include <dev/biovar.h>
38 #endif
39
40 #include <dev/pci/pcireg.h>
41 #include <dev/pci/pcivar.h>
42 #include <dev/pci/pcidevs.h>
43
44 #include <dev/scsipi/scsipi_all.h>
45 #include <dev/scsipi/scsi_all.h>
46 #include <dev/scsipi/scsiconf.h>
47
48 #include <dev/sysmon/sysmonvar.h>
49
50 #include <sys/bus.h>
51
52 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
53
54 #include <dev/pci/arcmsrvar.h>
55
56 /* #define ARC_DEBUG */
57 #ifdef ARC_DEBUG
58 #define ARC_D_INIT (1<<0)
59 #define ARC_D_RW (1<<1)
60 #define ARC_D_DB (1<<2)
61
62 int arcdebug = 0;
63
64 #define DPRINTF(p...) do { if (arcdebug) printf(p); } while (0)
65 #define DNPRINTF(n, p...) do { if ((n) & arcdebug) printf(p); } while (0)
66
67 #else
68 #define DPRINTF(p...) /* p */
69 #define DNPRINTF(n, p...) /* n, p */
70 #endif
71
72 /*
73 * the fw header must always equal this.
74 */
75 static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };
76
77 /*
78 * autoconf(9) glue.
79 */
80 static int arc_match(device_t, struct cfdata *, void *);
81 static void arc_attach(device_t, device_t, void *);
82 static int arc_detach(device_t, int);
83 static void arc_shutdown(void *);
84 static int arc_intr(void *);
85 static void arc_minphys(struct buf *);
86
87 CFATTACH_DECL(arcmsr, sizeof(struct arc_softc),
88 arc_match, arc_attach, arc_detach, NULL);
89
90 /*
91 * bio(4) and sysmon_envsys(9) glue.
92 */
93 #if NBIO > 0
94 static int arc_bioctl(struct device *, u_long, void *);
95 static int arc_bio_inq(struct arc_softc *, struct bioc_inq *);
96 static int arc_bio_vol(struct arc_softc *, struct bioc_vol *);
97 static int arc_bio_disk(struct arc_softc *, struct bioc_disk *);
98 static int arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
99 static int arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
100 static int arc_bio_getvol(struct arc_softc *, int,
101 struct arc_fw_volinfo *);
102 static void arc_create_sensors(void *);
103 static void arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
104 #endif
105
106 static int
107 arc_match(device_t parent, struct cfdata *match, void *aux)
108 {
109 struct pci_attach_args *pa = aux;
110
111 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
112 switch (PCI_PRODUCT(pa->pa_id)) {
113 case PCI_PRODUCT_ARECA_ARC1110:
114 case PCI_PRODUCT_ARECA_ARC1120:
115 case PCI_PRODUCT_ARECA_ARC1130:
116 case PCI_PRODUCT_ARECA_ARC1160:
117 case PCI_PRODUCT_ARECA_ARC1170:
118 case PCI_PRODUCT_ARECA_ARC1200:
119 case PCI_PRODUCT_ARECA_ARC1202:
120 case PCI_PRODUCT_ARECA_ARC1210:
121 case PCI_PRODUCT_ARECA_ARC1220:
122 case PCI_PRODUCT_ARECA_ARC1230:
123 case PCI_PRODUCT_ARECA_ARC1260:
124 case PCI_PRODUCT_ARECA_ARC1270:
125 case PCI_PRODUCT_ARECA_ARC1280:
126 case PCI_PRODUCT_ARECA_ARC1380:
127 case PCI_PRODUCT_ARECA_ARC1381:
128 case PCI_PRODUCT_ARECA_ARC1680:
129 case PCI_PRODUCT_ARECA_ARC1681:
130 return 1;
131 default:
132 break;
133 }
134 }
135
136 return 0;
137 }
138
/*
 * Attach: map PCI resources, bring up the firmware, allocate command
 * CCBs, register the scsipi channel, then enable interrupts and hook
 * up bio(4) plus the sensor kthread.
 */
static void
arc_attach(device_t parent, device_t self, void *aux)
{
	struct arc_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_chan;

	sc->sc_talking = 0;
	rw_init(&sc->sc_rwlock);
	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_condvar, "arcdb");

	if (arc_map_pci_resources(sc, pa) != 0) {
		/* error message printed by arc_map_pci_resources */
		return;
	}

	if (arc_query_firmware(sc) != 0) {
		/* error message printed by arc_query_firmware */
		goto unmap_pci;
	}

	if (arc_alloc_ccbs(sc) != 0) {
		/* error message printed by arc_alloc_ccbs */
		goto unmap_pci;
	}

	sc->sc_shutdownhook = shutdownhook_establish(arc_shutdown, sc);
	if (sc->sc_shutdownhook == NULL)
		panic("unable to establish arc powerhook");

	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = self;
	adapt->adapt_nchannels = 1;
	/* share the firmware's queue depth evenly across the targets */
	adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_minphys = arc_minphys;
	adapt->adapt_request = arc_scsi_cmd;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_nluns = ARC_MAX_LUN;
	chan->chan_ntargets = ARC_MAX_TARGET;
	/* put the host adapter id one past the last real target */
	chan->chan_id = ARC_MAX_TARGET;
	chan->chan_channel = 0;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	(void)config_found(self, &sc->sc_chan, scsiprint);

	/*
	 * enable interrupts.
	 * NOTE(review): this mixes an ARC_REG_INTRSTAT_* constant into an
	 * INTRMASK write — presumably the bit layouts match; confirm
	 * against arcmsrvar.h.
	 */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));

#if NBIO > 0
	if (bio_register(self, arc_bioctl) != 0)
		panic("%s: bioctl registration failed\n", device_xname(self));
	/*
	 * you need to talk to the firmware to get volume info. our firmware
	 * interface relies on being able to sleep, so we need to use a thread
	 * to do the work.
	 */
	if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
		panic("%s: unable to create a kernel thread for sensors\n",
		    device_xname(self));
#endif

	return;

unmap_pci:
	arc_unmap_pci_resources(sc);
}
213
/*
 * Detach: remove the shutdown hook and quiesce the firmware (stop
 * background rebuild, flush the write cache).
 * NOTE(review): resources set up in arc_attach (interrupt handler,
 * register mapping, CCBs) are not released here — confirm intended.
 */
static int
arc_detach(device_t self, int flags)
{
	struct arc_softc *sc = device_private(self);

	shutdownhook_disestablish(sc->sc_shutdownhook);

	mutex_enter(&sc->sc_mutex);
	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
		aprint_error("%s: timeout waiting to stop bg rebuild\n",
		    device_xname(&sc->sc_dev));

	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
		aprint_error("%s: timeout waiting to flush cache\n",
		    device_xname(&sc->sc_dev));
	mutex_exit(&sc->sc_mutex);

	return 0;
}
233
/*
 * Shutdown hook: quiesce the controller before the machine goes down
 * (stop background rebuild, flush the controller's write cache).
 */
static void
arc_shutdown(void *xsc)
{
	struct arc_softc *sc = xsc;

	mutex_enter(&sc->sc_mutex);
	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
		aprint_error("%s: timeout waiting to stop bg rebuild\n",
		    device_xname(&sc->sc_dev));

	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
		aprint_error("%s: timeout waiting to flush cache\n",
		    device_xname(&sc->sc_dev));
	mutex_exit(&sc->sc_mutex);
}
249
/*
 * Clamp a transfer to MAXPHYS, then apply the generic minphys().
 */
static void
arc_minphys(struct buf *bp)
{
	if (bp->b_bcount > MAXPHYS)
		bp->b_bcount = MAXPHYS;
	minphys(bp);
}
257
/*
 * Interrupt handler: acknowledge doorbell/post-queue interrupts, wake
 * any ioctl sleeping on the doorbell, then drain the reply queue and
 * complete finished commands.  Returns 1 if the interrupt was ours,
 * 0 otherwise.
 */
static int
arc_intr(void *arg)
{
	struct arc_softc *sc = arg;
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg, intrstat;

	mutex_enter(&sc->sc_mutex);
	intrstat = arc_read(sc, ARC_REG_INTRSTAT);
	if (intrstat == 0x0) {
		/* not our interrupt */
		mutex_exit(&sc->sc_mutex);
		return 0;
	}

	/* ack only the bits we service */
	intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
	arc_write(sc, ARC_REG_INTRSTAT, intrstat);

	if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
		if (sc->sc_talking) {
			/* if an ioctl is talking, wake it up */
			arc_write(sc, ARC_REG_INTRMASK,
			    ~ARC_REG_INTRMASK_POSTQUEUE);
			cv_broadcast(&sc->sc_condvar);
		} else {
			/* otherwise drop it */
			reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
			arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
			if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
				arc_write(sc, ARC_REG_INB_DOORBELL,
				    ARC_REG_INB_DOORBELL_READ_OK);
		}
	}
	mutex_exit(&sc->sc_mutex);

	/* drain the reply queue; 0xffffffff means it is empty */
	while ((reg = arc_pop(sc)) != 0xffffffff) {
		/* turn the posted bus address back into our kva frame */
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	}

	return 1;
}
309
/*
 * scsipi request entry point: build a firmware I/O command frame for
 * an xfer, map its data for DMA and post it to the controller.  Polled
 * xfers (XS_CTL_POLL) are completed synchronously via arc_complete().
 */
void
arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct arc_softc *sc = device_private(adapt->adapt_dev);
	struct arc_ccb *ccb;
	struct arc_msg_scsicmd *cmd;
	uint32_t reg;
	uint8_t target;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* Not supported. */
		return;
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	if (xs->cmdlen > ARC_MSG_CDBLEN) {
		/* CDB too long for the firmware frame: fail the xfer
		 * with ILLEGAL REQUEST / invalid opcode sense. */
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.scsi_sense.asc = 0x20;
		xs->error = XS_SENSE;
		xs->status = SCSI_CHECK;
		scsipi_done(xs);
		return;
	}

	ccb = arc_get_ccb(sc);
	if (ccb == NULL) {
		/* out of command slots; scsipi will retry later */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	if (arc_load_xs(ccb) != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		arc_put_ccb(sc, ccb);
		scsipi_done(xs);
		return;
	}

	cmd = &ccb->ccb_cmd->cmd;
	reg = ccb->ccb_cmd_post;

	/* bus is always 0 */
	cmd->target = target;
	cmd->lun = periph->periph_lun;
	cmd->function = 1; /* XXX magic number */

	cmd->cdb_len = xs->cmdlen;
	cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
	/*
	 * NOTE(review): flags is only assigned on writes — presumably the
	 * frame is zeroed when the ccb is recycled; confirm in arc_put_ccb.
	 */
	if (xs->xs_control & XS_CTL_DATA_OUT)
		cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
	if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
		/* large s/g lists need the big-frame variant */
		cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
		reg |= ARC_REG_POST_QUEUE_BIGFRAME;
	}

	/* context comes back in the reply so we can find the ccb again */
	cmd->context = htole32(ccb->ccb_id);
	cmd->data_len = htole32(xs->datalen);

	memcpy(cmd->cdb, xs->cmd, xs->cmdlen);

	/* we've built the command, let's put it on the hw */
	bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	arc_push(sc, reg);
	if (xs->xs_control & XS_CTL_POLL) {
		/* spin until this command comes back off the reply queue */
		if (arc_complete(sc, ccb, xs->timeout) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
		}
	}
}
399
/*
 * Map the xfer's data buffer for DMA and fill the command's 64-bit
 * scatter/gather list.  Returns 0 on success, 1 if the dmamap load
 * failed.  A zero-length xfer is a no-op.
 */
int
arc_load_xs(struct arc_ccb *ccb)
{
	struct arc_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
	uint64_t addr;
	int i, error;

	/* no data phase, nothing to map */
	if (xs->datalen == 0)
		return 0;

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error != 0) {
		aprint_error("%s: error %d loading dmamap\n",
		    device_xname(&sc->sc_dev), error);
		return 1;
	}

	/* one 64-bit sge per DMA segment, little-endian for the fw */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sge = &sgl[i];

		sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
		addr = dmap->dm_segs[i].ds_addr;
		sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
		sge->sg_lo_addr = htole32((uint32_t)addr);
	}

	/* flush the buffer for device access before posting */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
438
/*
 * Finish a command that came back on the reply queue: sync/unload its
 * data DMA map, translate the firmware status into scsipi status and
 * sense data, then release the ccb and complete the xfer.
 */
void
arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct arc_msg_scsicmd *cmd;

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	/* timeout_del */
	xs->status |= XS_STS_DONE;

	if (reg & ARC_REG_REPLY_QUEUE_ERR) {
		/* firmware flagged an error; inspect the frame status */
		cmd = &ccb->ccb_cmd->cmd;

		switch (cmd->status) {
		case ARC_MSG_STATUS_SELTIMEOUT:
		case ARC_MSG_STATUS_ABORTED:
		case ARC_MSG_STATUS_INIT_FAIL:
			xs->status = SCSI_OK;
			xs->error = XS_SELTIMEOUT;
			break;

		case SCSI_CHECK:
			/* copy the firmware's sense bytes back to scsipi */
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, cmd->sense_data,
			    min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | 0x70;
			xs->status = SCSI_CHECK;
			xs->error = XS_SENSE;
			xs->resid = 0;
			break;

		default:
			/* unknown device status */
			xs->error = XS_BUSY; /* try again later? */
			xs->status = SCSI_BUSY;
			break;
		}
	} else {
		xs->status = SCSI_OK;
		xs->error = XS_NOERROR;
		xs->resid = 0;
	}

	arc_put_ccb(sc, ccb);
	scsipi_done(xs);
}
493
/*
 * Polled completion: spin on the reply queue (delaying 1ms per tick of
 * "timeout") and complete commands until the given ccb comes back.
 * Returns 1 on timeout, 0 once nccb has completed.  Any other commands
 * that finish while we poll are completed here too.
 */
int
arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
{
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg;

	do {
		reg = arc_pop(sc);
		if (reg == 0xffffffff) {
			/* queue empty; burn a millisecond and retry */
			if (timeout-- == 0)
				return 1;

			delay(1000);
			continue;
		}

		/*
		 * NOTE(review): unlike arc_intr(), the DVA is not cast to
		 * uint32_t before the subtraction here — confirm the two
		 * paths agree on LP64 platforms.
		 */
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	} while (nccb != ccb);

	return 0;
}
526
/*
 * Map the controller's register BAR and establish the interrupt
 * handler at IPL_BIO.  Returns 0 on success, 1 on failure (with the
 * BAR unmapped again).
 */
int
arc_map_pci_resources(struct arc_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t memtype;
	pci_intr_handle_t ih;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
	if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
		aprint_error(": unable to map system interface register\n");
		return 1;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error(": unable to map interrupt\n");
		goto unmap;
	}

	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    arc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error(": unable to map interrupt [2]\n");
		goto unmap;
	}
	aprint_normal(": interrupting at %s\n",
	    pci_intr_string(pa->pa_pc, ih));

	return 0;

unmap:
	/* undo the register mapping; sc_ios == 0 marks it unmapped */
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
	return 1;
}
565
/*
 * Tear down what arc_map_pci_resources() set up: the interrupt
 * handler and the register mapping.
 */
void
arc_unmap_pci_resources(struct arc_softc *sc)
{
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}
573
574 int
575 arc_query_firmware(struct arc_softc *sc)
576 {
577 struct arc_msg_firmware_info fwinfo;
578 char string[81]; /* sizeof(vendor)*2+1 */
579
580 mutex_enter(&sc->sc_mutex);
581 if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
582 ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
583 aprint_debug("%s: timeout waiting for firmware ok\n",
584 device_xname(&sc->sc_dev));
585 mutex_enter(&sc->sc_mutex);
586 return 1;
587 }
588
589 if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
590 aprint_debug("%s: timeout waiting for get config\n",
591 device_xname(&sc->sc_dev));
592 mutex_exit(&sc->sc_mutex);
593 return 1;
594 }
595
596 if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
597 aprint_debug("%s: timeout waiting to start bg rebuild\n",
598 device_xname(&sc->sc_dev));
599 mutex_exit(&sc->sc_mutex);
600 return 1;
601 }
602 mutex_exit(&sc->sc_mutex);
603
604 arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));
605
606 DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
607 device_xname(&sc->sc_dev), htole32(fwinfo.signature));
608
609 if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
610 aprint_error("%s: invalid firmware info from iop\n",
611 device_xname(&sc->sc_dev));
612 return 1;
613 }
614
615 DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
616 device_xname(&sc->sc_dev),
617 htole32(fwinfo.request_len));
618 DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
619 device_xname(&sc->sc_dev),
620 htole32(fwinfo.queue_len));
621 DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
622 device_xname(&sc->sc_dev),
623 htole32(fwinfo.sdram_size));
624 DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
625 device_xname(&sc->sc_dev),
626 htole32(fwinfo.sata_ports));
627
628 scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
629 DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
630 device_xname(&sc->sc_dev), string);
631
632 scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));
633
634 aprint_normal("%s: Areca %s Host Adapter RAID controller\n",
635 device_xname(&sc->sc_dev), string);
636
637 scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
638 DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
639 device_xname(&sc->sc_dev), string);
640
641 if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
642 aprint_error("%s: unexpected request frame size (%d != %d)\n",
643 device_xname(&sc->sc_dev),
644 htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
645 return 1;
646 }
647
648 sc->sc_req_count = htole32(fwinfo.queue_len);
649
650 aprint_normal("%s: %d ports, %dMB SDRAM, firmware <%s>\n",
651 device_xname(&sc->sc_dev), htole32(fwinfo.sata_ports),
652 htole32(fwinfo.sdram_size), string);
653
654 return 0;
655 }
656
657 #if NBIO > 0
658 static int
659 arc_bioctl(struct device *self, u_long cmd, void *addr)
660 {
661 struct arc_softc *sc = device_private(self);
662 int error = 0;
663
664 switch (cmd) {
665 case BIOCINQ:
666 error = arc_bio_inq(sc, (struct bioc_inq *)addr);
667 break;
668
669 case BIOCVOL:
670 error = arc_bio_vol(sc, (struct bioc_vol *)addr);
671 break;
672
673 case BIOCDISK:
674 error = arc_bio_disk(sc, (struct bioc_disk *)addr);
675 break;
676
677 case BIOCALARM:
678 error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
679 break;
680
681 default:
682 error = ENOTTY;
683 break;
684 }
685
686 return error;
687 }
688
/*
 * Handle BIOCALARM: enable, disable or silence the controller alarm
 * via a firmware message, or report its current state.  Returns 0 on
 * success, EPERM if the firmware wants a password, EIO on firmware
 * error, EOPNOTSUPP for unknown opcodes.
 */
static int
arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
{
	uint8_t request[2], reply[1];
	size_t len;
	int error = 0;

	switch (ba->ba_opcode) {
	case BIOC_SAENABLE:
	case BIOC_SADISABLE:
		request[0] = ARC_FW_SET_ALARM;
		request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
		    ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
		len = sizeof(request);

		break;

	case BIOC_SASILENCE:
		/* mute takes no argument byte */
		request[0] = ARC_FW_MUTE_ALARM;
		len = 1;

		break;

	case BIOC_GASTATUS:
		/* system info is too big/ugly to deal with here */
		return arc_bio_alarm_state(sc, ba);

	default:
		return EOPNOTSUPP;
	}

	arc_lock(sc);
	error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
	arc_unlock(sc);

	if (error != 0)
		return error;

	/* single status byte comes back from the firmware */
	switch (reply[0]) {
	case ARC_FW_CMD_OK:
		return 0;
	case ARC_FW_CMD_PASS_REQD:
		return EPERM;
	default:
		return EIO;
	}
}
736
737 static int
738 arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
739 {
740 uint8_t request = ARC_FW_SYSINFO;
741 struct arc_fw_sysinfo *sysinfo;
742 int error = 0;
743
744 sysinfo = malloc(sizeof(struct arc_fw_sysinfo), M_DEVBUF,
745 M_WAITOK|M_ZERO);
746
747 request = ARC_FW_SYSINFO;
748
749 arc_lock(sc);
750 error = arc_msgbuf(sc, &request, sizeof(request),
751 sysinfo, sizeof(struct arc_fw_sysinfo));
752 arc_unlock(sc);
753
754 if (error != 0)
755 goto out;
756
757 ba->ba_status = sysinfo->alarm;
758
759 out:
760 free(sysinfo, M_DEVBUF);
761 return error;
762 }
763
764
/*
 * Handle BIOCINQ: count the usable volumes (non-empty, non-passthru)
 * by walking every possible volume set and fill in the bioc_inq.
 */
static int
arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
{
	uint8_t request[2];
	struct arc_fw_sysinfo *sysinfo;
	struct arc_fw_volinfo *volinfo;
	int maxvols, nvols = 0, i;
	int error = 0;

	sysinfo = malloc(sizeof(struct arc_fw_sysinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);
	volinfo = malloc(sizeof(struct arc_fw_volinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	arc_lock(sc);

	request[0] = ARC_FW_SYSINFO;
	error = arc_msgbuf(sc, request, 1, sysinfo,
	    sizeof(struct arc_fw_sysinfo));
	if (error != 0)
		goto out;

	maxvols = sysinfo->max_volume_set;

	request[0] = ARC_FW_VOLINFO;
	for (i = 0; i < maxvols; i++) {
		request[1] = i;
		error = arc_msgbuf(sc, request, sizeof(request), volinfo,
		    sizeof(struct arc_fw_volinfo));
		if (error != 0)
			goto out;

		/*
		 * I can't find an easy way to see if the volume exists or not
		 * except to say that if it has no capacity then it isn't there.
		 * Ignore passthru volumes, bioc_vol doesn't understand them.
		 */
		if ((volinfo->capacity != 0 || volinfo->capacity2 != 0) &&
		    volinfo->raid_level != ARC_FW_VOL_RAIDLEVEL_PASSTHRU)
			nvols++;
	}

	strlcpy(bi->bi_dev, device_xname(&sc->sc_dev), sizeof(bi->bi_dev));
	bi->bi_novol = nvols;
out:
	arc_unlock(sc);
	free(volinfo, M_DEVBUF);
	free(sysinfo, M_DEVBUF);
	return error;
}
815
/*
 * Translate a bio(4) volume index ("vol" counts only real, non-passthru
 * volumes) into the matching firmware volinfo.  Returns ENODEV if no
 * such volume exists.  Must be called with the arc lock held (all
 * callers wrap this in arc_lock()/arc_unlock()).
 */
static int
arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
{
	uint8_t request[2];
	struct arc_fw_sysinfo *sysinfo;
	int error = 0;
	int maxvols, nvols = 0, i;

	sysinfo = malloc(sizeof(struct arc_fw_sysinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	request[0] = ARC_FW_SYSINFO;
	error = arc_msgbuf(sc, request, 1, sysinfo,
	    sizeof(struct arc_fw_sysinfo));
	if (error != 0)
		goto out;

	maxvols = sysinfo->max_volume_set;

	request[0] = ARC_FW_VOLINFO;
	for (i = 0; i < maxvols; i++) {
		request[1] = i;
		error = arc_msgbuf(sc, request, sizeof(request), volinfo,
		    sizeof(struct arc_fw_volinfo));
		if (error != 0)
			goto out;

		/* skip empty slots and passthru volumes when counting */
		if ((volinfo->capacity == 0 && volinfo->capacity2 == 0) ||
		    volinfo->raid_level == ARC_FW_VOL_RAIDLEVEL_PASSTHRU)
			continue;

		if (nvols == vol)
			break;

		nvols++;
	}

	/* re-check in case the loop ran out without finding a match */
	if (nvols != vol ||
	    (volinfo->capacity == 0 && volinfo->capacity2 == 0) ||
	    volinfo->raid_level == ARC_FW_VOL_RAIDLEVEL_PASSTHRU) {
		error = ENODEV;
		goto out;
	}

out:
	free(sysinfo, M_DEVBUF);
	return error;
}
864
/*
 * Handle BIOCVOL: look up the requested volume and translate the
 * firmware's status, capacity and RAID level into bioc_vol fields.
 */
static int
arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
{
	struct arc_fw_volinfo *volinfo;
	uint64_t blocks;
	uint32_t status;
	int error = 0;

	volinfo = malloc(sizeof(struct arc_fw_volinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	arc_lock(sc);
	error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
	arc_unlock(sc);

	if (error != 0)
		goto out;

	/* defaults: no progress info unless a build/rebuild is running */
	bv->bv_percent = -1;
	bv->bv_seconds = 0;

	status = htole32(volinfo->volume_status);
	if (status == 0x0) {
		/* no status bits set: online unless members have failed */
		if (htole32(volinfo->fail_mask) == 0x0)
			bv->bv_status = BIOC_SVONLINE;
		else
			bv->bv_status = BIOC_SVDEGRADED;
	} else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
		bv->bv_status = BIOC_SVDEGRADED;
	} else if (status & ARC_FW_VOL_STATUS_FAILED) {
		bv->bv_status = BIOC_SVOFFLINE;
	} else if (status & ARC_FW_VOL_STATUS_INITTING) {
		bv->bv_status = BIOC_SVBUILDING;
		/* firmware reports progress in tenths of a percent */
		bv->bv_percent = htole32(volinfo->progress) / 10;
	} else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
		bv->bv_status = BIOC_SVREBUILD;
		bv->bv_percent = htole32(volinfo->progress) / 10;
	}

	/* capacity is split over two 32-bit little-endian fields */
	blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
	blocks += (uint64_t)htole32(volinfo->capacity);
	bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */

	switch (volinfo->raid_level) {
	case ARC_FW_VOL_RAIDLEVEL_0:
		bv->bv_level = 0;
		break;
	case ARC_FW_VOL_RAIDLEVEL_1:
		bv->bv_level = 1;
		break;
	case ARC_FW_VOL_RAIDLEVEL_3:
		bv->bv_level = 3;
		break;
	case ARC_FW_VOL_RAIDLEVEL_5:
		bv->bv_level = 5;
		break;
	case ARC_FW_VOL_RAIDLEVEL_6:
		bv->bv_level = 6;
		break;
	case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
	default:
		bv->bv_level = -1;
		break;
	}

	bv->bv_nodisk = volinfo->member_disks;
	strlcpy(bv->bv_dev, volinfo->set_name, sizeof(bv->bv_dev));

out:
	free(volinfo, M_DEVBUF);
	return error;
}
937
/*
 * Handle BIOCDISK: resolve the volume, fetch its raid set info, then
 * the member disk's info, and fill in the bioc_disk (status, fake
 * channel/target, size, model/serial/firmware strings).
 */
static int
arc_bio_disk(struct arc_softc *sc, struct bioc_disk *bd)
{
	uint8_t request[2];
	struct arc_fw_volinfo *volinfo;
	struct arc_fw_raidinfo *raidinfo;
	struct arc_fw_diskinfo *diskinfo;
	int error = 0;
	uint64_t blocks;
	char model[81];
	char serial[41];
	char rev[17];

	volinfo = malloc(sizeof(struct arc_fw_volinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);
	raidinfo = malloc(sizeof(struct arc_fw_raidinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);
	diskinfo = malloc(sizeof(struct arc_fw_diskinfo), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	arc_lock(sc);

	error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
	if (error != 0)
		goto out;

	request[0] = ARC_FW_RAIDINFO;
	request[1] = volinfo->raid_set_number;
	error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
	    sizeof(struct arc_fw_raidinfo));
	if (error != 0)
		goto out;

	/*
	 * NOTE(review): ">" lets bd_diskid == member_devices through into
	 * device_array[] below — confirm whether this should be ">=".
	 */
	if (bd->bd_diskid > raidinfo->member_devices) {
		error = ENODEV;
		goto out;
	}

	if (raidinfo->device_array[bd->bd_diskid] == 0xff) {
		/*
		 * the disk doesn't exist anymore. bio is too dumb to be
		 * able to display that, so put it on another bus
		 */
		bd->bd_channel = 1;
		bd->bd_target = 0;
		bd->bd_lun = 0;
		bd->bd_status = BIOC_SDOFFLINE;
		strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
		goto out;
	}

	request[0] = ARC_FW_DISKINFO;
	request[1] = raidinfo->device_array[bd->bd_diskid];
	error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
	    sizeof(struct arc_fw_diskinfo));
	if (error != 0)
		goto out;

#if 0
	bd->bd_channel = diskinfo->scsi_attr.channel;
	bd->bd_target = diskinfo->scsi_attr.target;
	bd->bd_lun = diskinfo->scsi_attr.lun;
#endif
	/*
	 * the firwmare doesnt seem to fill scsi_attr in, so fake it with
	 * the diskid.
	 */
	bd->bd_channel = 0;
	bd->bd_target = raidinfo->device_array[bd->bd_diskid];
	bd->bd_lun = 0;

	bd->bd_status = BIOC_SDONLINE;
	/* capacity is split over two 32-bit little-endian fields */
	blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
	blocks += (uint64_t)htole32(diskinfo->capacity);
	bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */

	scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
	scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
	scsipi_strvis(rev, 17, diskinfo->firmware_rev,
	    sizeof(diskinfo->firmware_rev));

	snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
	strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));

out:
	arc_unlock(sc);
	free(diskinfo, M_DEVBUF);
	free(raidinfo, M_DEVBUF);
	free(volinfo, M_DEVBUF);
	return error;
}
1029 #endif /* NBIO > 0 */
1030
/*
 * Compute the firmware message checksum: the sum (mod 256) of both
 * bytes of the length followed by every payload byte.
 */
uint8_t
arc_msg_cksum(void *cmd, uint16_t len)
{
	const uint8_t *p = cmd;
	const uint8_t *end = p + len;
	uint8_t sum;

	/* seed with the two length bytes, then fold in the payload */
	sum = (uint8_t)(len >> 8) + (uint8_t)len;
	while (p < end)
		sum += *p++;

	return sum;
}
1044
1045
/*
 * Exchange a framed message with the firmware over the doorbell/rwbuf
 * registers: write the request (header + payload + checksum) in
 * register-window sized chunks, then read back the reply the same way,
 * validating its header and checksum.  The caller must hold the arc
 * lock (arc_lock()), which routes doorbell interrupts to arc_wait().
 * Returns 0 on success, EBUSY if the doorbell is already in use, EIO
 * on a malformed reply.
 */
int
arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
	   size_t rbuflen)
{
	uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
	uint8_t *wbuf, *rbuf;
	int wlen, wdone = 0, rlen, rdone = 0;
	struct arc_fw_bufhdr *bufhdr;
	uint32_t reg, rwlen;
	int error = 0;
#ifdef ARC_DEBUG
	int i;
#endif

	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
	    device_xname(&sc->sc_dev), wbuflen, rbuflen);

	if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0)
		return EBUSY;

	wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
	wbuf = malloc(wlen, M_TEMP, M_WAITOK);

	rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
	rbuf = malloc(rlen, M_TEMP, M_WAITOK);

	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
	    device_xname(&sc->sc_dev), wlen, rlen);

	/* frame the request: fixed header, length, payload, checksum */
	bufhdr = (struct arc_fw_bufhdr *)wbuf;
	bufhdr->hdr = arc_fw_hdr;
	bufhdr->len = htole16(wbuflen);
	memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
	wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);

	/* pretend the hw is ready for the first chunk */
	reg = ARC_REG_OUTB_DOORBELL_READ_OK;

	do {
		if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
			memset(rwbuf, 0, sizeof(rwbuf));
			/*
			 * NOTE(review): if (wlen - wdone) is an exact
			 * multiple of sizeof(rwbuf) this modulo yields 0
			 * and nothing gets written — presumably wlen can
			 * never be such a multiple; confirm.
			 */
			rwlen = (wlen - wdone) % sizeof(rwbuf);
			memcpy(rwbuf, &wbuf[wdone], rwlen);

#ifdef ARC_DEBUG
			if (arcdebug & ARC_D_DB) {
				printf("%s: write %d:",
				    device_xname(&sc->sc_dev), rwlen);
				for (i = 0; i < rwlen; i++)
					printf(" 0x%02x", rwbuf[i]);
				printf("\n");
			}
#endif

			/* copy the chunk to the hw */
			arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
			arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
			    sizeof(rwbuf));

			/* say we have a buffer for the hw */
			arc_write(sc, ARC_REG_INB_DOORBELL,
			    ARC_REG_INB_DOORBELL_WRITE_OK);

			wdone += rwlen;
		}

		/* sleep until the hw rings the outbound doorbell */
		while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
			arc_wait(sc);
		arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);

		DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
		    device_xname(&sc->sc_dev), reg);

		if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
			rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
			if (rwlen > sizeof(rwbuf)) {
				DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
				    device_xname(&sc->sc_dev));
				error = EIO;
				goto out;
			}

			arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
			    sizeof(rwbuf));

			/* tell the hw we consumed its chunk */
			arc_write(sc, ARC_REG_INB_DOORBELL,
			    ARC_REG_INB_DOORBELL_READ_OK);

#ifdef ARC_DEBUG
			printf("%s: len: %d+%d=%d/%d\n",
			    device_xname(&sc->sc_dev),
			    rwlen, rdone, rwlen + rdone, rlen);
			if (arcdebug & ARC_D_DB) {
				printf("%s: read:",
				    device_xname(&sc->sc_dev));
				for (i = 0; i < rwlen; i++)
					printf(" 0x%02x", rwbuf[i]);
				printf("\n");
			}
#endif

			if ((rdone + rwlen) > rlen) {
				DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
				    device_xname(&sc->sc_dev));
				error = EIO;
				goto out;
			}

			memcpy(&rbuf[rdone], rwbuf, rwlen);
			rdone += rwlen;
		}
	} while (rdone != rlen);

	/* validate the reply: fixed header and the advertised length */
	bufhdr = (struct arc_fw_bufhdr *)rbuf;
	if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
	    bufhdr->len != htole16(rbuflen)) {
		DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
		    device_xname(&sc->sc_dev));
		error = EIO;
		goto out;
	}

	memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);

	/* trailing byte is a checksum over the payload */
	if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
		DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
		    device_xname(&sc->sc_dev));
		error = EIO;
		goto out;
	}

out:
	free(wbuf, M_TEMP);
	free(rbuf, M_TEMP);

	return error;
}
1182
/*
 * Take exclusive ownership of the firmware message interface: grab the
 * writer lock and the hardware mutex, then mask every interrupt except
 * the post queue so doorbell traffic is handled synchronously (via
 * arc_wait()) while sc_talking is set.  Paired with arc_unlock().
 */
void
arc_lock(struct arc_softc *sc)
{
	rw_enter(&sc->sc_rwlock, RW_WRITER);
	mutex_enter(&sc->sc_mutex);
	/* mask everything but the post queue while we talk to the fw */
	arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
	sc->sc_talking = 1;
}
1191
/*
 * Release the firmware message interface taken by arc_lock(): restore
 * the normal interrupt mask (post queue and doorbell both unmasked) and
 * drop the hardware mutex and the writer lock.
 */
void
arc_unlock(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	sc->sc_talking = 0;
	/* back to normal operation: doorbell interrupts re-enabled */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	mutex_exit(&sc->sc_mutex);
	rw_exit(&sc->sc_rwlock);
}
1203
/*
 * Sleep (up to one second) for a doorbell interrupt from the controller.
 * Called with the hardware mutex held, between arc_lock() and
 * arc_unlock().  The doorbell interrupt is unmasked for the duration of
 * the wait; if we time out it is masked again so polling resumes.
 */
void
arc_wait(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	/* unmask the doorbell so the interrupt handler can wake us */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	if (cv_timedwait_sig(&sc->sc_condvar, &sc->sc_mutex, hz) ==
	    EWOULDBLOCK)
		/* timed out: go back to masking the doorbell */
		arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
}
1215
1216 #if NBIO > 0
1217 static void
1218 arc_create_sensors(void *arg)
1219 {
1220 struct arc_softc *sc = arg;
1221 struct bioc_inq bi;
1222 struct bioc_vol bv;
1223 int i;
1224
1225 memset(&bi, 0, sizeof(bi));
1226 if (arc_bio_inq(sc, &bi) != 0) {
1227 aprint_error("%s: unable to query firmware for sensor info\n",
1228 device_xname(&sc->sc_dev));
1229 kthread_exit(0);
1230 }
1231
1232 sc->sc_nsensors = bi.bi_novol;
1233 /*
1234 * There's no point to continue if there are no drives connected...
1235 */
1236 if (!sc->sc_nsensors)
1237 kthread_exit(0);
1238
1239 sc->sc_sme = sysmon_envsys_create();
1240 sc->sc_sensors = malloc(sizeof(envsys_data_t) * sc->sc_nsensors,
1241 M_DEVBUF, M_WAITOK | M_ZERO);
1242
1243 for (i = 0; i < sc->sc_nsensors; i++) {
1244 memset(&bv, 0, sizeof(bv));
1245 bv.bv_volid = i;
1246 if (arc_bio_vol(sc, &bv) != 0)
1247 goto bad;
1248
1249 sc->sc_sensors[i].units = ENVSYS_DRIVE;
1250 sc->sc_sensors[i].monitor = true;
1251 sc->sc_sensors[i].flags = ENVSYS_FMONSTCHANGED;
1252 strlcpy(sc->sc_sensors[i].desc, bv.bv_dev,
1253 sizeof(sc->sc_sensors[i].desc));
1254 if (sysmon_envsys_sensor_attach(sc->sc_sme, &sc->sc_sensors[i]))
1255 goto bad;
1256 }
1257
1258 sc->sc_sme->sme_name = device_xname(&sc->sc_dev);
1259 sc->sc_sme->sme_cookie = sc;
1260 sc->sc_sme->sme_refresh = arc_refresh_sensors;
1261 if (sysmon_envsys_register(sc->sc_sme)) {
1262 aprint_debug("%s: unable to register with sysmon\n",
1263 device_xname(&sc->sc_dev));
1264 goto bad;
1265 }
1266 kthread_exit(0);
1267
1268 bad:
1269 free(sc->sc_sensors, M_DEVBUF);
1270 sysmon_envsys_destroy(sc->sc_sme);
1271 kthread_exit(0);
1272 }
1273
1274 static void
1275 arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
1276 {
1277 struct arc_softc *sc = sme->sme_cookie;
1278 struct bioc_vol bv;
1279
1280 memset(&bv, 0, sizeof(bv));
1281 bv.bv_volid = edata->sensor;
1282
1283 if (arc_bio_vol(sc, &bv)) {
1284 edata->value_cur = ENVSYS_DRIVE_EMPTY;
1285 edata->state = ENVSYS_SINVALID;
1286 return;
1287 }
1288
1289 switch (bv.bv_status) {
1290 case BIOC_SVOFFLINE:
1291 edata->value_cur = ENVSYS_DRIVE_FAIL;
1292 edata->state = ENVSYS_SCRITICAL;
1293 break;
1294 case BIOC_SVDEGRADED:
1295 edata->value_cur = ENVSYS_DRIVE_PFAIL;
1296 edata->state = ENVSYS_SCRITICAL;
1297 break;
1298 case BIOC_SVBUILDING:
1299 edata->value_cur = ENVSYS_DRIVE_REBUILD;
1300 edata->state = ENVSYS_SVALID;
1301 break;
1302 case BIOC_SVSCRUB:
1303 case BIOC_SVONLINE:
1304 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1305 edata->state = ENVSYS_SVALID;
1306 break;
1307 case BIOC_SVINVALID:
1308 /* FALLTRHOUGH */
1309 default:
1310 edata->value_cur = ENVSYS_DRIVE_EMPTY; /* unknown state */
1311 edata->state = ENVSYS_SINVALID;
1312 break;
1313 }
1314 }
1315 #endif /* NBIO > 0 */
1316
1317 uint32_t
1318 arc_read(struct arc_softc *sc, bus_size_t r)
1319 {
1320 uint32_t v;
1321
1322 KASSERT(mutex_owned(&sc->sc_mutex));
1323
1324 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1325 BUS_SPACE_BARRIER_READ);
1326 v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1327
1328 DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
1329 device_xname(&sc->sc_dev), r, v);
1330
1331 return v;
1332 }
1333
1334 void
1335 arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1336 {
1337 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1338 BUS_SPACE_BARRIER_READ);
1339 bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
1340 (uint32_t *)buf, len >> 2);
1341 }
1342
/*
 * Write the 32bit value v to the register at offset r, followed by a
 * write barrier so the store is pushed to the device before any later
 * access.  Must be called with the hardware mutex held.
 */
void
arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
	    device_xname(&sc->sc_dev), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1355
1356 void
1357 arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1358 {
1359 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
1360 (const uint32_t *)buf, len >> 2);
1361 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1362 BUS_SPACE_BARRIER_WRITE);
1363 }
1364
1365 int
1366 arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1367 uint32_t target)
1368 {
1369 int i;
1370
1371 KASSERT(mutex_owned(&sc->sc_mutex));
1372
1373 DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
1374 device_xname(&sc->sc_dev), r, mask, target);
1375
1376 for (i = 0; i < 10000; i++) {
1377 if ((arc_read(sc, r) & mask) == target)
1378 return 0;
1379 delay(1000);
1380 }
1381
1382 return 1;
1383 }
1384
1385 int
1386 arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1387 uint32_t target)
1388 {
1389 int i;
1390
1391 DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
1392 device_xname(&sc->sc_dev), r, mask, target);
1393
1394 for (i = 0; i < 10000; i++) {
1395 if ((arc_read(sc, r) & mask) != target)
1396 return 0;
1397 delay(1000);
1398 }
1399
1400 return 1;
1401 }
1402
1403 int
1404 arc_msg0(struct arc_softc *sc, uint32_t m)
1405 {
1406 KASSERT(mutex_owned(&sc->sc_mutex));
1407
1408 /* post message */
1409 arc_write(sc, ARC_REG_INB_MSG0, m);
1410 /* wait for the fw to do it */
1411 if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
1412 ARC_REG_INTRSTAT_MSG0) != 0)
1413 return 1;
1414
1415 /* ack it */
1416 arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);
1417
1418 return 0;
1419 }
1420
1421 struct arc_dmamem *
1422 arc_dmamem_alloc(struct arc_softc *sc, size_t size)
1423 {
1424 struct arc_dmamem *adm;
1425 int nsegs;
1426
1427 adm = malloc(sizeof(*adm), M_DEVBUF, M_NOWAIT|M_ZERO);
1428 if (adm == NULL)
1429 return NULL;
1430
1431 adm->adm_size = size;
1432
1433 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1434 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
1435 goto admfree;
1436
1437 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
1438 1, &nsegs, BUS_DMA_NOWAIT) != 0)
1439 goto destroy;
1440
1441 if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
1442 &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
1443 goto free;
1444
1445 if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
1446 NULL, BUS_DMA_NOWAIT) != 0)
1447 goto unmap;
1448
1449 memset(adm->adm_kva, 0, size);
1450
1451 return adm;
1452
1453 unmap:
1454 bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
1455 free:
1456 bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
1457 destroy:
1458 bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
1459 admfree:
1460 free(adm, M_DEVBUF);
1461
1462 return NULL;
1463 }
1464
/*
 * Release a DMA buffer obtained from arc_dmamem_alloc().  The teardown
 * mirrors the setup in reverse: unload the map, unmap the kva, free the
 * memory, destroy the map, then free the descriptor itself.
 */
void
arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
{
	bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
	free(adm, M_DEVBUF);
}
1474
1475 int
1476 arc_alloc_ccbs(struct arc_softc *sc)
1477 {
1478 struct arc_ccb *ccb;
1479 uint8_t *cmd;
1480 int i;
1481
1482 TAILQ_INIT(&sc->sc_ccb_free);
1483
1484 sc->sc_ccbs = malloc(sizeof(struct arc_ccb) * sc->sc_req_count,
1485 M_DEVBUF, M_WAITOK|M_ZERO);
1486
1487 sc->sc_requests = arc_dmamem_alloc(sc,
1488 ARC_MAX_IOCMDLEN * sc->sc_req_count);
1489 if (sc->sc_requests == NULL) {
1490 aprint_error("%s: unable to allocate ccb dmamem\n",
1491 device_xname(&sc->sc_dev));
1492 goto free_ccbs;
1493 }
1494 cmd = ARC_DMA_KVA(sc->sc_requests);
1495
1496 for (i = 0; i < sc->sc_req_count; i++) {
1497 ccb = &sc->sc_ccbs[i];
1498
1499 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
1500 MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
1501 aprint_error("%s: unable to create dmamap for ccb %d\n",
1502 device_xname(&sc->sc_dev), i);
1503 goto free_maps;
1504 }
1505
1506 ccb->ccb_sc = sc;
1507 ccb->ccb_id = i;
1508 ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;
1509
1510 ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
1511 ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
1512 ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;
1513
1514 arc_put_ccb(sc, ccb);
1515 }
1516
1517 return 0;
1518
1519 free_maps:
1520 while ((ccb = arc_get_ccb(sc)) != NULL)
1521 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1522 arc_dmamem_free(sc, sc->sc_requests);
1523
1524 free_ccbs:
1525 free(sc->sc_ccbs, M_DEVBUF);
1526
1527 return 1;
1528 }
1529
1530 struct arc_ccb *
1531 arc_get_ccb(struct arc_softc *sc)
1532 {
1533 struct arc_ccb *ccb;
1534
1535 mutex_enter(&sc->sc_mutex);
1536 ccb = TAILQ_FIRST(&sc->sc_ccb_free);
1537 if (ccb != NULL)
1538 TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
1539 mutex_exit(&sc->sc_mutex);
1540
1541 return ccb;
1542 }
1543
1544 void
1545 arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
1546 {
1547 mutex_enter(&sc->sc_mutex);
1548 ccb->ccb_xs = NULL;
1549 memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
1550 TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
1551 mutex_exit(&sc->sc_mutex);
1552 }
1553