arcmsr.c revision 1.18

1 /* $NetBSD: arcmsr.c,v 1.18 2008/03/03 14:57:22 xtraeme Exp $ */
2 /* $OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */
3
4 /*
5 * Copyright (c) 2007, 2008 Juan Romero Pardines <xtraeme (at) netbsd.org>
6 * Copyright (c) 2006 David Gwynne <dlg (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include "bio.h"
22
23 #include <sys/cdefs.h>
24 __KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.18 2008/03/03 14:57:22 xtraeme Exp $");
25
26 #include <sys/param.h>
27 #include <sys/buf.h>
28 #include <sys/kernel.h>
29 #include <sys/malloc.h>
30 #include <sys/device.h>
31 #include <sys/kmem.h>
32 #include <sys/kthread.h>
33 #include <sys/mutex.h>
34 #include <sys/condvar.h>
35 #include <sys/rwlock.h>
36
37 #if NBIO > 0
38 #include <sys/ioctl.h>
39 #include <dev/biovar.h>
40 #endif
41
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 #include <dev/pci/pcidevs.h>
45
46 #include <dev/scsipi/scsipi_all.h>
47 #include <dev/scsipi/scsi_all.h>
48 #include <dev/scsipi/scsiconf.h>
49
50 #include <dev/sysmon/sysmonvar.h>
51
52 #include <sys/bus.h>
53
54 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
55
56 #include <dev/pci/arcmsrvar.h>
57
58 /* #define ARC_DEBUG */
59 #ifdef ARC_DEBUG
60 #define ARC_D_INIT (1<<0)
61 #define ARC_D_RW (1<<1)
62 #define ARC_D_DB (1<<2)
63
64 int arcdebug = 0;
65
66 #define DPRINTF(p...) do { if (arcdebug) printf(p); } while (0)
67 #define DNPRINTF(n, p...) do { if ((n) & arcdebug) printf(p); } while (0)
68
69 #else
70 #define DPRINTF(p...) /* p */
71 #define DNPRINTF(n, p...) /* n, p */
72 #endif
73
74 /*
75 * the fw header must always equal this.
76 */
77 static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };
78
79 /*
80 * autoconf(9) glue.
81 */
82 static int arc_match(device_t, struct cfdata *, void *);
83 static void arc_attach(device_t, device_t, void *);
84 static int arc_detach(device_t, int);
85 static bool arc_shutdown(device_t, int);
86 static int arc_intr(void *);
87 static void arc_minphys(struct buf *);
88
89 CFATTACH_DECL(arcmsr, sizeof(struct arc_softc),
90 arc_match, arc_attach, arc_detach, NULL);
91
92 /*
93 * bio(4) and sysmon_envsys(9) glue.
94 */
95 #if NBIO > 0
96 static int arc_bioctl(struct device *, u_long, void *);
97 static int arc_bio_inq(struct arc_softc *, struct bioc_inq *);
98 static int arc_bio_vol(struct arc_softc *, struct bioc_vol *);
99 static int arc_bio_disk_volume(struct arc_softc *, struct bioc_disk *);
100 static int arc_bio_disk_novol(struct arc_softc *, struct bioc_disk *);
101 static void arc_bio_disk_filldata(struct arc_softc *, struct bioc_disk *,
102 struct arc_fw_diskinfo *, int);
103 static int arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
104 static int arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
105 static int arc_bio_getvol(struct arc_softc *, int,
106 struct arc_fw_volinfo *);
107 static int arc_bio_setstate(struct arc_softc *, struct bioc_setstate *);
108 static int arc_bio_volops(struct arc_softc *, struct bioc_volops *);
109 static void arc_create_sensors(void *);
110 static void arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
111 static int arc_fw_parse_status_code(struct arc_softc *, uint8_t *);
112 #endif
113
114 static int
115 arc_match(device_t parent, struct cfdata *match, void *aux)
116 {
117 struct pci_attach_args *pa = aux;
118
119 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
120 switch (PCI_PRODUCT(pa->pa_id)) {
121 case PCI_PRODUCT_ARECA_ARC1110:
122 case PCI_PRODUCT_ARECA_ARC1120:
123 case PCI_PRODUCT_ARECA_ARC1130:
124 case PCI_PRODUCT_ARECA_ARC1160:
125 case PCI_PRODUCT_ARECA_ARC1170:
126 case PCI_PRODUCT_ARECA_ARC1200:
127 case PCI_PRODUCT_ARECA_ARC1202:
128 case PCI_PRODUCT_ARECA_ARC1210:
129 case PCI_PRODUCT_ARECA_ARC1220:
130 case PCI_PRODUCT_ARECA_ARC1230:
131 case PCI_PRODUCT_ARECA_ARC1260:
132 case PCI_PRODUCT_ARECA_ARC1270:
133 case PCI_PRODUCT_ARECA_ARC1280:
134 case PCI_PRODUCT_ARECA_ARC1380:
135 case PCI_PRODUCT_ARECA_ARC1381:
136 case PCI_PRODUCT_ARECA_ARC1680:
137 case PCI_PRODUCT_ARECA_ARC1681:
138 return 1;
139 default:
140 break;
141 }
142 }
143
144 return 0;
145 }
146
147 static void
148 arc_attach(device_t parent, device_t self, void *aux)
149 {
150 struct arc_softc *sc = device_private(self);
151 struct pci_attach_args *pa = aux;
152 struct scsipi_adapter *adapt = &sc->sc_adapter;
153 struct scsipi_channel *chan = &sc->sc_chan;
154
155 sc->sc_talking = 0;
156 rw_init(&sc->sc_rwlock);
157 mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
158 cv_init(&sc->sc_condvar, "arcdb");
159
160 if (arc_map_pci_resources(sc, pa) != 0) {
161 /* error message printed by arc_map_pci_resources */
162 return;
163 }
164
165 if (arc_query_firmware(sc) != 0) {
166 /* error message printed by arc_query_firmware */
167 goto unmap_pci;
168 }
169
170 if (arc_alloc_ccbs(sc) != 0) {
171 /* error message printed by arc_alloc_ccbs */
172 goto unmap_pci;
173 }
174
175 if (!pmf_device_register1(self, NULL, NULL, arc_shutdown))
176 panic("%s: couldn't establish shutdown handler\n",
177 device_xname(self));
178
179 memset(adapt, 0, sizeof(*adapt));
180 adapt->adapt_dev = self;
181 adapt->adapt_nchannels = 1;
182 adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
183 adapt->adapt_max_periph = adapt->adapt_openings;
184 adapt->adapt_minphys = arc_minphys;
185 adapt->adapt_request = arc_scsi_cmd;
186
187 memset(chan, 0, sizeof(*chan));
188 chan->chan_adapter = adapt;
189 chan->chan_bustype = &scsi_bustype;
190 chan->chan_nluns = ARC_MAX_LUN;
191 chan->chan_ntargets = ARC_MAX_TARGET;
192 chan->chan_id = ARC_MAX_TARGET;
193 chan->chan_channel = 0;
194 chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
195
196 /*
197 * Save the device_t returned, because we may need to attach
198 * devices later via the management interface.
199 */
200 sc->sc_scsibus_dv = config_found(self, &sc->sc_chan, scsiprint);
201
202 /* enable interrupts */
203 arc_write(sc, ARC_REG_INTRMASK,
204 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));
205
206 #if NBIO > 0
207 /*
208 * Register the driver to bio(4) and setup the sensors.
209 */
210 if (bio_register(self, arc_bioctl) != 0)
211 panic("%s: bioctl registration failed\n", device_xname(self));
212
213 /*
214 * you need to talk to the firmware to get volume info. our firmware
215 * interface relies on being able to sleep, so we need to use a thread
216 * to do the work.
217 */
218 if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
219 arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
220 panic("%s: unable to create a kernel thread for sensors\n",
221 device_xname(self));
222 #endif
223
224 return;
225
226 unmap_pci:
227 arc_unmap_pci_resources(sc);
228 }
229
230 static int
231 arc_detach(device_t self, int flags)
232 {
233 struct arc_softc *sc = device_private(self);
234
235 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
236 aprint_error("%s: timeout waiting to stop bg rebuild\n",
237 device_xname(&sc->sc_dev));
238
239 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
240 aprint_error("%s: timeout waiting to flush cache\n",
241 device_xname(&sc->sc_dev));
242
243 return 0;
244 }
245
246 static bool
247 arc_shutdown(device_t self, int how)
248 {
249 struct arc_softc *sc = device_private(self);
250
251 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
252 aprint_error("%s: timeout waiting to stop bg rebuild\n",
253 device_xname(&sc->sc_dev));
254
255 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
256 aprint_error("%s: timeout waiting to flush cache\n",
257 device_xname(&sc->sc_dev));
258
259 return true;
260 }
261
262 static void
263 arc_minphys(struct buf *bp)
264 {
265 if (bp->b_bcount > MAXPHYS)
266 bp->b_bcount = MAXPHYS;
267 minphys(bp);
268 }
269
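/*
 * Interrupt handler: acknowledge the post-queue and doorbell interrupt
 * sources, wake up a sleeping arc_msgbuf() conversation on a doorbell
 * (or simply drop the doorbell if nobody is talking), then complete
 * every command found on the reply queue.
 */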
270 static int
271 arc_intr(void *arg)
272 {
273 struct arc_softc *sc = arg;
274 struct arc_ccb *ccb = NULL;
275 char *kva = ARC_DMA_KVA(sc->sc_requests);
276 struct arc_io_cmd *cmd;
277 uint32_t reg, intrstat;
278
279 mutex_spin_enter(&sc->sc_mutex);
280 intrstat = arc_read(sc, ARC_REG_INTRSTAT);
281 if (intrstat == 0x0) {
282 mutex_spin_exit(&sc->sc_mutex);
283 return 0;
284 }
285
286 intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
287 arc_write(sc, ARC_REG_INTRSTAT, intrstat);
288
289 if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
290 if (sc->sc_talking) {
291 arc_write(sc, ARC_REG_INTRMASK,
292 ~ARC_REG_INTRMASK_POSTQUEUE);
293 cv_broadcast(&sc->sc_condvar);
294 } else {
295 /* otherwise drop it */
296 reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
297 arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
298 if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
299 arc_write(sc, ARC_REG_INB_DOORBELL,
300 ARC_REG_INB_DOORBELL_READ_OK);
301 }
302 }
303 mutex_spin_exit(&sc->sc_mutex);
304
305 while ((reg = arc_pop(sc)) != 0xffffffff) {
306 cmd = (struct arc_io_cmd *)(kva +
307 ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
308 (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
309 ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
310
311 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
312 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
313 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
314
315 arc_scsi_cmd_done(sc, ccb, reg);
316 }
317
318
319 return 1;
320 }
321
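/*
 * scsipi_adapter request entry point.  Builds an Areca SCSI command
 * frame in the ccb's slice of the shared request DMA memory and posts
 * its bus address to the controller; polled transfers (XS_CTL_POLL)
 * spin in arc_complete() until the command returns on the reply queue.
 */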
322 void
323 arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
324 {
325 struct scsipi_periph *periph;
326 struct scsipi_xfer *xs;
327 struct scsipi_adapter *adapt = chan->chan_adapter;
328 struct arc_softc *sc = device_private(adapt->adapt_dev);
329 struct arc_ccb *ccb;
330 struct arc_msg_scsicmd *cmd;
331 uint32_t reg;
332 uint8_t target;
333
334 switch (req) {
335 case ADAPTER_REQ_GROW_RESOURCES:
336 /* Not supported. */
337 return;
338 case ADAPTER_REQ_SET_XFER_MODE:
339 /* Not supported. */
340 return;
341 case ADAPTER_REQ_RUN_XFER:
342 break;
343 }
344
345 mutex_spin_enter(&sc->sc_mutex);
346
347 xs = arg;
348 periph = xs->xs_periph;
349 target = periph->periph_target;
350
351 if (xs->cmdlen > ARC_MSG_CDBLEN) {
352 memset(&xs->sense, 0, sizeof(xs->sense));
353 xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
354 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
355 xs->sense.scsi_sense.asc = 0x20;
356 xs->error = XS_SENSE;
357 xs->status = SCSI_CHECK;
358 mutex_spin_exit(&sc->sc_mutex);
359 scsipi_done(xs);
360 return;
361 }
362
363 ccb = arc_get_ccb(sc);
364 if (ccb == NULL) {
365 xs->error = XS_RESOURCE_SHORTAGE;
366 mutex_spin_exit(&sc->sc_mutex);
367 scsipi_done(xs);
368 return;
369 }
370
371 ccb->ccb_xs = xs;
372
373 if (arc_load_xs(ccb) != 0) {
374 xs->error = XS_DRIVER_STUFFUP;
375 arc_put_ccb(sc, ccb);
376 mutex_spin_exit(&sc->sc_mutex);
377 scsipi_done(xs);
378 return;
379 }
380
381 cmd = &ccb->ccb_cmd->cmd;
382 reg = ccb->ccb_cmd_post;
383
384 /* bus is always 0 */
385 cmd->target = target;
386 cmd->lun = periph->periph_lun;
387 cmd->function = 1; /* XXX magic number */
388
389 cmd->cdb_len = xs->cmdlen;
390 cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
391 if (xs->xs_control & XS_CTL_DATA_OUT)
392 cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
393 if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
394 cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
395 reg |= ARC_REG_POST_QUEUE_BIGFRAME;
396 }
397
398 cmd->context = htole32(ccb->ccb_id);
399 cmd->data_len = htole32(xs->datalen);
400
401 memcpy(cmd->cdb, xs->cmd, xs->cmdlen);
402
403 /* we've built the command, let's put it on the hw */
404 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
405 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
406 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
407
408 arc_push(sc, reg);
409 if (xs->xs_control & XS_CTL_POLL) {
410 if (arc_complete(sc, ccb, xs->timeout) != 0) {
411 xs->error = XS_DRIVER_STUFFUP;
412 mutex_spin_exit(&sc->sc_mutex);
413 scsipi_done(xs);
414 return;
415 }
416 }
417
418 mutex_spin_exit(&sc->sc_mutex);
419 }
420
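/*
 * Load the xfer's data buffer into the ccb's dmamap and fill in the
 * command's scatter/gather list with 64-bit segment descriptors.
 * Returns non-zero if the dmamap could not be loaded.
 */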
421 int
422 arc_load_xs(struct arc_ccb *ccb)
423 {
424 struct arc_softc *sc = ccb->ccb_sc;
425 struct scsipi_xfer *xs = ccb->ccb_xs;
426 bus_dmamap_t dmap = ccb->ccb_dmamap;
427 struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
428 uint64_t addr;
429 int i, error;
430
431 if (xs->datalen == 0)
432 return 0;
433
434 error = bus_dmamap_load(sc->sc_dmat, dmap,
435 xs->data, xs->datalen, NULL,
436 (xs->xs_control & XS_CTL_NOSLEEP) ?
437 BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
438 if (error != 0) {
439 aprint_error("%s: error %d loading dmamap\n",
440 device_xname(&sc->sc_dev), error);
441 return 1;
442 }
443
444 for (i = 0; i < dmap->dm_nsegs; i++) {
445 sge = &sgl[i];
446
447 sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
448 addr = dmap->dm_segs[i].ds_addr;
449 sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
450 sge->sg_lo_addr = htole32((uint32_t)addr);
451 }
452
453 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
454 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
455 BUS_DMASYNC_PREWRITE);
456
457 return 0;
458 }
459
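/*
 * Complete a command: sync and unload the data dmamap, translate the
 * firmware status into scsipi status/error values (copying out sense
 * data on a check condition), and hand the xfer back to scsipi.
 */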
460 void
461 arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
462 {
463 struct scsipi_xfer *xs = ccb->ccb_xs;
464 struct arc_msg_scsicmd *cmd;
465
466 if (xs->datalen != 0) {
467 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
468 ccb->ccb_dmamap->dm_mapsize,
469 (xs->xs_control & XS_CTL_DATA_IN) ?
470 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
471 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
472 }
473
474 /* timeout_del */
475 xs->status |= XS_STS_DONE;
476
477 if (reg & ARC_REG_REPLY_QUEUE_ERR) {
478 cmd = &ccb->ccb_cmd->cmd;
479
480 switch (cmd->status) {
481 case ARC_MSG_STATUS_SELTIMEOUT:
482 case ARC_MSG_STATUS_ABORTED:
483 case ARC_MSG_STATUS_INIT_FAIL:
484 xs->status = SCSI_OK;
485 xs->error = XS_SELTIMEOUT;
486 break;
487
488 case SCSI_CHECK:
489 memset(&xs->sense, 0, sizeof(xs->sense));
490 memcpy(&xs->sense, cmd->sense_data,
491 min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
492 xs->sense.scsi_sense.response_code =
493 SSD_RCODE_VALID | 0x70;
494 xs->status = SCSI_CHECK;
495 xs->error = XS_SENSE;
496 xs->resid = 0;
497 break;
498
499 default:
500 /* unknown device status */
501 xs->error = XS_BUSY; /* try again later? */
502 xs->status = SCSI_BUSY;
503 break;
504 }
505 } else {
506 xs->status = SCSI_OK;
507 xs->error = XS_NOERROR;
508 xs->resid = 0;
509 }
510
511 arc_put_ccb(sc, ccb);
512 scsipi_done(xs);
513 }
514
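/*
 * Poll the reply queue until the given ccb completes or the timeout
 * (in milliseconds) expires.  Other commands popped off the queue in
 * the meantime are completed as they are found.
 */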
515 int
516 arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
517 {
518 struct arc_ccb *ccb = NULL;
519 char *kva = ARC_DMA_KVA(sc->sc_requests);
520 struct arc_io_cmd *cmd;
521 uint32_t reg;
522
523 do {
524 reg = arc_pop(sc);
525 if (reg == 0xffffffff) {
526 if (timeout-- == 0)
527 return 1;
528
529 delay(1000);
530 continue;
531 }
532
533 cmd = (struct arc_io_cmd *)(kva +
534 ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
535 ARC_DMA_DVA(sc->sc_requests)));
536 ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
537
538 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
539 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
540 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
541
542 arc_scsi_cmd_done(sc, ccb, reg);
543 } while (nccb != ccb);
544
545 return 0;
546 }
547
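/*
 * Map the controller's register BAR and establish the interrupt
 * handler at IPL_BIO.
 */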
548 int
549 arc_map_pci_resources(struct arc_softc *sc, struct pci_attach_args *pa)
550 {
551 pcireg_t memtype;
552 pci_intr_handle_t ih;
553
554 sc->sc_pc = pa->pa_pc;
555 sc->sc_tag = pa->pa_tag;
556 sc->sc_dmat = pa->pa_dmat;
557
558 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
559 if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
560 &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
561 aprint_error(": unable to map system interface register\n");
562 return 1;
563 }
564
565 if (pci_intr_map(pa, &ih) != 0) {
566 aprint_error(": unable to map interrupt\n");
567 goto unmap;
568 }
569
570 sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
571 arc_intr, sc);
572 if (sc->sc_ih == NULL) {
573 aprint_error(": unable to map interrupt [2]\n");
574 goto unmap;
575 }
576
577 aprint_normal("\n");
578 aprint_normal("%s: interrupting at %s\n",
579 device_xname(&sc->sc_dev), pci_intr_string(pa->pa_pc, ih));
580
581 return 0;
582
583 unmap:
584 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
585 sc->sc_ios = 0;
586 return 1;
587 }
588
589 void
590 arc_unmap_pci_resources(struct arc_softc *sc)
591 {
592 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
593 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
594 sc->sc_ios = 0;
595 }
596
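/*
 * Wait for the firmware to report itself ready, issue the GET_CONFIG
 * and START_BGRB messages, then read and sanity-check the firmware
 * info block to learn the controller model, request frame size and
 * command queue depth.
 */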
597 int
598 arc_query_firmware(struct arc_softc *sc)
599 {
600 struct arc_msg_firmware_info fwinfo;
601 char string[81]; /* sizeof(vendor)*2+1 */
602
603 if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
604 ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
605 aprint_debug("%s: timeout waiting for firmware ok\n",
606 device_xname(&sc->sc_dev));
607 return 1;
608 }
609
610 if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
611 aprint_debug("%s: timeout waiting for get config\n",
612 device_xname(&sc->sc_dev));
613 return 1;
614 }
615
616 if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
617 aprint_debug("%s: timeout waiting to start bg rebuild\n",
618 device_xname(&sc->sc_dev));
619 return 1;
620 }
621
622 arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));
623
624 DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
625 device_xname(&sc->sc_dev), htole32(fwinfo.signature));
626
627 if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
628 aprint_error("%s: invalid firmware info from iop\n",
629 device_xname(&sc->sc_dev));
630 return 1;
631 }
632
633 DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
634 device_xname(&sc->sc_dev),
635 htole32(fwinfo.request_len));
636 DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
637 device_xname(&sc->sc_dev),
638 htole32(fwinfo.queue_len));
639 DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
640 device_xname(&sc->sc_dev),
641 htole32(fwinfo.sdram_size));
642 DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
643 device_xname(&sc->sc_dev),
644 htole32(fwinfo.sata_ports));
645
646 scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
647 DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
648 device_xname(&sc->sc_dev), string);
649
650 scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));
651 aprint_normal("%s: Areca %s Host Adapter RAID controller\n",
652 device_xname(&sc->sc_dev), string);
653
654 scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
655 DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
656 device_xname(&sc->sc_dev), string);
657
658 aprint_normal("%s: %d ports, %dMB SDRAM, firmware <%s>\n",
659 device_xname(&sc->sc_dev), htole32(fwinfo.sata_ports),
660 htole32(fwinfo.sdram_size), string);
661
662 if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
663 aprint_error("%s: unexpected request frame size (%d != %d)\n",
664 device_xname(&sc->sc_dev),
665 htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
666 return 1;
667 }
668
669 sc->sc_req_count = htole32(fwinfo.queue_len);
670
671 return 0;
672 }
673
674 #if NBIO > 0
675 static int
676 arc_bioctl(struct device *self, u_long cmd, void *addr)
677 {
678 struct arc_softc *sc = device_private(self);
679 int error = 0;
680
681 switch (cmd) {
682 case BIOCINQ:
683 error = arc_bio_inq(sc, (struct bioc_inq *)addr);
684 break;
685
686 case BIOCVOL:
687 error = arc_bio_vol(sc, (struct bioc_vol *)addr);
688 break;
689
690 case BIOCDISK:
691 error = arc_bio_disk_volume(sc, (struct bioc_disk *)addr);
692 break;
693
694 case BIOCDISK_NOVOL:
695 error = arc_bio_disk_novol(sc, (struct bioc_disk *)addr);
696 break;
697
698 case BIOCALARM:
699 error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
700 break;
701
702 case BIOCSETSTATE:
703 error = arc_bio_setstate(sc, (struct bioc_setstate *)addr);
704 break;
705
706 case BIOCVOLOPS:
707 error = arc_bio_volops(sc, (struct bioc_volops *)addr);
708 break;
709
710 default:
711 error = ENOTTY;
712 break;
713 }
714
715 return error;
716 }
717
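/*
 * Translate the one-byte status code returned by a firmware command
 * into an errno, printing a diagnostic for anything other than
 * ARC_FW_CMD_OK.
 */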
718 static int
719 arc_fw_parse_status_code(struct arc_softc *sc, uint8_t *reply)
720 {
721 switch (*reply) {
722 case ARC_FW_CMD_RAIDINVAL:
723 printf("%s: firmware error (invalid raid set)\n",
724 device_xname(&sc->sc_dev));
725 return EINVAL;
726 case ARC_FW_CMD_VOLINVAL:
727 printf("%s: firmware error (invalid volume set)\n",
728 device_xname(&sc->sc_dev));
729 return EINVAL;
730 case ARC_FW_CMD_NORAID:
731 printf("%s: firmware error (nonexistent raid set)\n",
732 device_xname(&sc->sc_dev));
733 return ENODEV;
734 case ARC_FW_CMD_NOVOLUME:
735 printf("%s: firmware error (nonexistent volume set)\n",
736 device_xname(&sc->sc_dev));
737 return ENODEV;
738 case ARC_FW_CMD_NOPHYSDRV:
739 printf("%s: firmware error (nonexistent physical drive)\n",
740 device_xname(&sc->sc_dev));
741 return ENODEV;
742 case ARC_FW_CMD_PARAM_ERR:
743 printf("%s: firmware error (parameter error)\n",
744 device_xname(&sc->sc_dev));
745 return EINVAL;
746 case ARC_FW_CMD_UNSUPPORTED:
747 printf("%s: firmware error (unsupported command)\n",
748 device_xname(&sc->sc_dev));
749 return EOPNOTSUPP;
750 case ARC_FW_CMD_DISKCFG_CHGD:
751 printf("%s: firmware error (disk configuration changed)\n",
752 device_xname(&sc->sc_dev));
753 return EINVAL;
754 case ARC_FW_CMD_PASS_INVAL:
755 printf("%s: firmware error (invalid password)\n",
756 device_xname(&sc->sc_dev));
757 return EINVAL;
758 case ARC_FW_CMD_NODISKSPACE:
759 printf("%s: firmware error (no disk space available)\n",
760 device_xname(&sc->sc_dev));
761 return EOPNOTSUPP;
762 case ARC_FW_CMD_CHECKSUM_ERR:
763 printf("%s: firmware error (checksum error)\n",
764 device_xname(&sc->sc_dev));
765 return EINVAL;
766 case ARC_FW_CMD_PASS_REQD:
767 printf("%s: firmware error (password required)\n",
768 device_xname(&sc->sc_dev));
769 return EPERM;
770 case ARC_FW_CMD_OK:
771 default:
772 return 0;
773 }
774 }
775
776 static int
777 arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
778 {
779 uint8_t request[2], reply[1];
780 size_t len;
781 int error = 0;
782
783 switch (ba->ba_opcode) {
784 case BIOC_SAENABLE:
785 case BIOC_SADISABLE:
786 request[0] = ARC_FW_SET_ALARM;
787 request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
788 ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
789 len = sizeof(request);
790
791 break;
792
793 case BIOC_SASILENCE:
794 request[0] = ARC_FW_MUTE_ALARM;
795 len = 1;
796
797 break;
798
799 case BIOC_GASTATUS:
800 /* system info is too big/ugly to deal with here */
801 return arc_bio_alarm_state(sc, ba);
802
803 default:
804 return EOPNOTSUPP;
805 }
806
807 error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
808 if (error != 0)
809 return error;
810
811 return arc_fw_parse_status_code(sc, &reply[0]);
812 }
813
814 static int
815 arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
816 {
817 struct arc_fw_sysinfo *sysinfo;
818 uint8_t request;
819 int error = 0;
820
821 sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
822
823 request = ARC_FW_SYSINFO;
824 error = arc_msgbuf(sc, &request, sizeof(request),
825 sysinfo, sizeof(struct arc_fw_sysinfo));
826
827 if (error != 0)
828 goto out;
829
830 ba->ba_status = sysinfo->alarm;
831
832 out:
833 kmem_free(sysinfo, sizeof(*sysinfo));
834 return error;
835 }
836
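/*
 * BIOCVOLOPS handler: create a raid set and a volume set on top of it,
 * or delete a volume set and its raid set, rescanning or detaching the
 * corresponding sd(4) target as appropriate.
 */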
837 static int
838 arc_bio_volops(struct arc_softc *sc, struct bioc_volops *bc)
839 {
840 /* to create a raid set */
841 struct req_craidset {
842 uint8_t cmdcode;
843 uint32_t devmask;
844 uint8_t raidset_name[16];
845 } __packed;
846
847 /* to create a volume set */
848 struct req_cvolset {
849 uint8_t cmdcode;
850 uint8_t raidset;
851 uint8_t volset_name[16];
852 uint64_t capacity;
853 uint8_t raidlevel;
854 uint8_t stripe;
855 uint8_t scsi_chan;
856 uint8_t scsi_target;
857 uint8_t scsi_lun;
858 uint8_t tagqueue;
859 uint8_t cache;
860 uint8_t speed;
861 uint8_t quick_init;
862 } __packed;
863
864 struct scsibus_softc *scsibus_sc = NULL;
865 struct req_craidset req_craidset;
866 struct req_cvolset req_cvolset;
867 uint8_t request[2];
868 uint8_t reply[1];
869 int error = 0;
870
871 switch (bc->bc_opcode) {
872 case BIOC_VCREATE_VOLUME:
873 {
874 /*
875 * Zero out the structs so that we use some defaults
876 * in raid and volume sets.
877 */
878 memset(&req_craidset, 0, sizeof(req_craidset));
879 memset(&req_cvolset, 0, sizeof(req_cvolset));
880
881 /*
882 * First we have to create the raid set,
883 * using the default name for all of them.
884 */
885 req_craidset.cmdcode = ARC_FW_CREATE_RAIDSET;
886 req_craidset.devmask = bc->bc_devmask;
887 error = arc_msgbuf(sc, &req_craidset, sizeof(req_craidset),
888 reply, sizeof(reply));
889 if (error != 0)
890 return error;
891
892 error = arc_fw_parse_status_code(sc, &reply[0]);
893 if (error) {
894 printf("%s: create raidset%d failed\n",
895 device_xname(&sc->sc_dev), bc->bc_volid);
896 return error;
897 }
898
899 /*
900 * At this point the raid set has been created, so it's
901 * time to create the volume set.
902 */
903 req_cvolset.cmdcode = ARC_FW_CREATE_VOLUME;
904 req_cvolset.raidset = bc->bc_volid;
905 req_cvolset.capacity = bc->bc_size * ARC_BLOCKSIZE;
906
907 /*
908 * Set the RAID level.
909 */
910 switch (bc->bc_level) {
911 case 0:
912 case 1:
913 req_cvolset.raidlevel = bc->bc_level;
914 break;
915 case BIOC_SVOL_RAID10:
916 req_cvolset.raidlevel = 1;
917 break;
918 case 3:
919 req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_3;
920 break;
921 case 5:
922 req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_5;
923 break;
924 case 6:
925 req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_6;
926 break;
927 default:
928 return EOPNOTSUPP;
929 }
930
931 /*
932 * Set the stripe size.
933 */
934 switch (bc->bc_stripe) {
935 case 4:
936 req_cvolset.stripe = 0;
937 break;
938 case 8:
939 req_cvolset.stripe = 1;
940 break;
941 case 16:
942 req_cvolset.stripe = 2;
943 break;
944 case 32:
945 req_cvolset.stripe = 3;
946 break;
947 case 64:
948 req_cvolset.stripe = 4;
949 break;
950 case 128:
951 req_cvolset.stripe = 5;
952 break;
953 default:
954 req_cvolset.stripe = 4; /* by default 64K */
955 break;
956 }
957
958 req_cvolset.scsi_chan = bc->bc_channel;
959 req_cvolset.scsi_target = bc->bc_target;
960 req_cvolset.scsi_lun = bc->bc_lun;
961 req_cvolset.tagqueue = 1; /* always enabled */
962 req_cvolset.cache = 1; /* always enabled */
963 req_cvolset.speed = 4; /* always max speed */
964
965 /* RAID 1 and 1+0 levels need foreground initialization */
966 if (bc->bc_level == 1 || bc->bc_level == BIOC_SVOL_RAID10)
967 req_cvolset.quick_init = 1; /* foreground init */
968
969 error = arc_msgbuf(sc, &req_cvolset, sizeof(req_cvolset),
970 reply, sizeof(reply));
971 if (error != 0)
972 return error;
973
974 error = arc_fw_parse_status_code(sc, &reply[0]);
975 if (error) {
976 printf("%s: create volumeset%d failed\n",
977 device_xname(&sc->sc_dev), bc->bc_volid);
978 return error;
979 }
980
981 /*
982 * If we are creating a RAID 1 or RAID 1+0 volume,
983 * the volume will be created immediately but it won't
984 * be available until the initialization is done... so
985 * don't bother attaching the sd(4) device.
986 */
987 if (bc->bc_level == 1 || bc->bc_level == BIOC_SVOL_RAID10)
988 break;
989
990 /*
991 * Do a rescan on the bus to attach the device associated
992 * with the new volume.
993 */
994 scsibus_sc = device_private(sc->sc_scsibus_dv);
995 (void)scsi_probe_bus(scsibus_sc, bc->bc_target, bc->bc_lun);
996
997 break;
998 }
999 case BIOC_VREMOVE_VOLUME:
1000 {
1001 /*
1002 * Remove the volume set specified in bc_volid.
1003 */
1004 request[0] = ARC_FW_DELETE_VOLUME;
1005 request[1] = bc->bc_volid;
1006 error = arc_msgbuf(sc, request, sizeof(request),
1007 reply, sizeof(reply));
1008 if (error != 0)
1009 return error;
1010
1011 error = arc_fw_parse_status_code(sc, &reply[0]);
1012 if (error) {
1013 printf("%s: delete volumeset%d failed\n",
1014 device_xname(&sc->sc_dev), bc->bc_volid);
1015 return error;
1016 }
1017
1018 /*
1019 * Detach the sd(4) device associated with the volume;
1020 * a failure here is only reported, not treated as fatal.
1021 */
1022 error = scsipi_target_detach(&sc->sc_chan, bc->bc_target,
1023 bc->bc_lun, 0);
1024 if (error)
1025 printf("%s: couldn't detach sd device for volume %d "
1026 "at %u:%u.%u (error=%d)\n",
1027 device_xname(&sc->sc_dev), bc->bc_volid,
1028 bc->bc_channel, bc->bc_target, bc->bc_lun, error);
1029
1030 /*
1031 * Then remove the raid set specified in bc_volid;
1032 * we only care about volumes.
1033 */
1034 request[0] = ARC_FW_DELETE_RAIDSET;
1035 request[1] = bc->bc_volid;
1036 error = arc_msgbuf(sc, request, sizeof(request),
1037 reply, sizeof(reply));
1038 if (error != 0)
1039 return error;
1040
1041 error = arc_fw_parse_status_code(sc, &reply[0]);
1042 if (error) {
1043 printf("%s: delete raidset%d failed\n",
1044 device_xname(&sc->sc_dev), bc->bc_volid);
1045 return error;
1046 }
1047
1048 break;
1049 }
1050 default:
1051 return EOPNOTSUPP;
1052 }
1053
1054 return error;
1055 }
1056
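/*
 * BIOCSETSTATE handler: add or remove hotspares, create or delete
 * pass-through disks (rescanning or detaching the sd(4) target as
 * needed), and start or stop volume consistency checks.
 */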
1057 static int
1058 arc_bio_setstate(struct arc_softc *sc, struct bioc_setstate *bs)
1059 {
1060 /* for a hotspare disk */
1061 struct request_hs {
1062 uint8_t cmdcode;
1063 uint32_t devmask;
1064 } __packed;
1065
1066 /* for a pass-through disk */
1067 struct request_pt {
1068 uint8_t cmdcode;
1069 uint8_t devid;
1070 uint8_t scsi_chan;
1071 uint8_t scsi_id;
1072 uint8_t scsi_lun;
1073 uint8_t tagged_queue;
1074 uint8_t cache_mode;
1075 uint8_t max_speed;
1076 } __packed;
1077
1078 struct scsibus_softc *scsibus_sc = NULL;
1079 struct request_hs req_hs; /* to add/remove hotspare */
1080 struct request_pt req_pt; /* to add a pass-through */
1081 uint8_t req_gen[2];
1082 uint8_t reply[1];
1083 int error = 0;
1084
1085 switch (bs->bs_status) {
1086 case BIOC_SSHOTSPARE:
1087 {
1088 req_hs.cmdcode = ARC_FW_CREATE_HOTSPARE;
1089 req_hs.devmask = (1 << bs->bs_target);
1090 goto hotspare;
1091 }
1092 case BIOC_SSDELHOTSPARE:
1093 {
1094 req_hs.cmdcode = ARC_FW_DELETE_HOTSPARE;
1095 req_hs.devmask = (1 << bs->bs_target);
1096 goto hotspare;
1097 }
1098 case BIOC_SSPASSTHRU:
1099 {
1100 req_pt.cmdcode = ARC_FW_CREATE_PASSTHRU;
1101 req_pt.devid = bs->bs_other_id; /* this wants device# */
1102 req_pt.scsi_chan = bs->bs_channel;
1103 req_pt.scsi_id = bs->bs_target;
1104 req_pt.scsi_lun = bs->bs_lun;
1105 req_pt.tagged_queue = 1; /* always enabled */
1106 req_pt.cache_mode = 1; /* always enabled */
1107 req_pt.max_speed = 4; /* always max speed */
1108
1109 error = arc_msgbuf(sc, &req_pt, sizeof(req_pt),
1110 reply, sizeof(reply));
1111 if (error != 0)
1112 return error;
1113
1114 /*
1115 * Do a rescan on the bus to attach the new device
1116 * associated with the pass-through disk.
1117 */
1118 scsibus_sc = device_private(sc->sc_scsibus_dv);
1119 (void)scsi_probe_bus(scsibus_sc, bs->bs_target, bs->bs_lun);
1120
1121 goto out;
1122 }
1123 case BIOC_SSDELPASSTHRU:
1124 {
1125 req_gen[0] = ARC_FW_DELETE_PASSTHRU;
1126 req_gen[1] = bs->bs_target;
1127 error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
1128 reply, sizeof(reply));
1129 if (error != 0)
1130 return error;
1131
1132 /*
1133 * Detach the sd device associated with this pass-through disk.
1134 */
1135 error = scsipi_target_detach(&sc->sc_chan, bs->bs_target,
1136 bs->bs_lun, 0);
1137 if (error)
1138 printf("%s: couldn't detach sd device for the "
1139 "pass-through disk at %u:%u.%u (error=%d)\n",
1140 device_xname(&sc->sc_dev),
1141 bs->bs_channel, bs->bs_target, bs->bs_lun, error);
1142
1143 goto out;
1144 }
1145 case BIOC_SSCHECKSTART_VOL:
1146 {
1147 req_gen[0] = ARC_FW_START_CHECKVOL;
1148 req_gen[1] = bs->bs_volid;
1149 error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
1150 reply, sizeof(reply));
1151 if (error != 0)
1152 return error;
1153
1154 goto out;
1155 }
1156 case BIOC_SSCHECKSTOP_VOL:
1157 {
1158 uint8_t req = ARC_FW_STOP_CHECKVOL;
1159 error = arc_msgbuf(sc, &req, 1, reply, sizeof(reply));
1160 if (error != 0)
1161 return error;
1162
1163 goto out;
1164 }
1165 default:
1166 return EOPNOTSUPP;
1167 }
1168
1169 hotspare:
1170 error = arc_msgbuf(sc, &req_hs, sizeof(req_hs),
1171 reply, sizeof(reply));
1172 if (error != 0)
1173 return error;
1174
1175 out:
1176 return arc_fw_parse_status_code(sc, &reply[0]);
1177 }
1178
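/*
 * BIOCINQ handler: cache the controller limits (max raid sets, max
 * volume sets, channels) from the firmware system info on first use,
 * then count how many raid sets actually contain volumes.
 */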
1179 static int
1180 arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
1181 {
1182 uint8_t request[2];
1183 struct arc_fw_sysinfo *sysinfo = NULL;
1184 struct arc_fw_raidinfo *raidinfo;
1185 int nvols = 0, i;
1186 int error = 0;
1187
1188 raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1189
1190 if (!sc->sc_maxraidset || !sc->sc_maxvolset || !sc->sc_cchans) {
1191 sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
1192
1193 request[0] = ARC_FW_SYSINFO;
1194 error = arc_msgbuf(sc, request, 1, sysinfo,
1195 sizeof(struct arc_fw_sysinfo));
1196 if (error != 0)
1197 goto out;
1198
1199 sc->sc_maxraidset = sysinfo->max_raid_set;
1200 sc->sc_maxvolset = sysinfo->max_volume_set;
1201 sc->sc_cchans = sysinfo->ide_channels;
1202 }
1203
1204 request[0] = ARC_FW_RAIDINFO;
1205 for (i = 0; i < sc->sc_maxraidset; i++) {
1206 request[1] = i;
1207 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1208 sizeof(struct arc_fw_raidinfo));
1209 if (error != 0)
1210 goto out;
1211
1212 if (raidinfo->volumes)
1213 nvols++;
1214 }
1215
1216 strlcpy(bi->bi_dev, device_xname(&sc->sc_dev), sizeof(bi->bi_dev));
1217 bi->bi_novol = nvols;
1218 bi->bi_nodisk = sc->sc_cchans;
1219
1220 out:
1221 if (sysinfo)
1222 kmem_free(sysinfo, sizeof(*sysinfo));
1223 kmem_free(raidinfo, sizeof(*raidinfo));
1224 return error;
1225 }
1226
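/*
 * Find the "vol"th configured volume set, skipping empty slots, and
 * copy its firmware volume info into *volinfo.  Returns ENODEV if no
 * such volume exists.
 */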
1227 static int
1228 arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
1229 {
1230 uint8_t request[2];
1231 int error = 0;
1232 int nvols = 0, i;
1233
1234 request[0] = ARC_FW_VOLINFO;
1235 for (i = 0; i < sc->sc_maxvolset; i++) {
1236 request[1] = i;
1237 error = arc_msgbuf(sc, request, sizeof(request), volinfo,
1238 sizeof(struct arc_fw_volinfo));
1239 if (error != 0)
1240 goto out;
1241
1242 if (volinfo->capacity == 0 && volinfo->capacity2 == 0)
1243 continue;
1244
1245 if (nvols == vol)
1246 break;
1247
1248 nvols++;
1249 }
1250
1251 if (nvols != vol ||
1252 (volinfo->capacity == 0 && volinfo->capacity2 == 0)) {
1253 error = ENODEV;
1254 goto out;
1255 }
1256
1257 out:
1258 return error;
1259 }
1260
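/*
 * BIOCVOL handler: fetch the volume info and translate the firmware
 * status bits, capacity, raid level and stripe size into bioc_vol
 * fields.
 */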
1261 static int
1262 arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
1263 {
1264 struct arc_fw_volinfo *volinfo;
1265 uint64_t blocks;
1266 uint32_t status;
1267 int error = 0;
1268
1269 volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1270
1271 error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
1272 if (error != 0)
1273 goto out;
1274
1275 bv->bv_percent = -1;
1276 bv->bv_seconds = 0;
1277
1278 status = htole32(volinfo->volume_status);
1279 if (status == 0x0) {
1280 if (htole32(volinfo->fail_mask) == 0x0)
1281 bv->bv_status = BIOC_SVONLINE;
1282 else
1283 bv->bv_status = BIOC_SVDEGRADED;
1284 } else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
1285 bv->bv_status = BIOC_SVDEGRADED;
1286 } else if (status & ARC_FW_VOL_STATUS_FAILED) {
1287 bv->bv_status = BIOC_SVOFFLINE;
1288 } else if (status & ARC_FW_VOL_STATUS_INITTING) {
1289 bv->bv_status = BIOC_SVBUILDING;
1290 bv->bv_percent = htole32(volinfo->progress);
1291 } else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
1292 bv->bv_status = BIOC_SVREBUILD;
1293 bv->bv_percent = htole32(volinfo->progress);
1294 } else if (status & ARC_FW_VOL_STATUS_MIGRATING) {
1295 bv->bv_status = BIOC_SVMIGRATING;
1296 bv->bv_percent = htole32(volinfo->progress);
1297 } else if (status & ARC_FW_VOL_STATUS_CHECKING) {
1298 bv->bv_status = BIOC_SVCHECKING;
1299 bv->bv_percent = htole32(volinfo->progress);
1300 } else if (status & ARC_FW_VOL_STATUS_NEED_INIT) {
1301 bv->bv_status = BIOC_SVOFFLINE;
1302 } else {
1303 printf("%s: volume %d status 0x%x\n",
1304 device_xname(&sc->sc_dev), bv->bv_volid, status);
1305 }
1306
1307 blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
1308 blocks += (uint64_t)htole32(volinfo->capacity);
1309 bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */
1310
1311 switch (volinfo->raid_level) {
1312 case ARC_FW_VOL_RAIDLEVEL_0:
1313 bv->bv_level = 0;
1314 break;
1315 case ARC_FW_VOL_RAIDLEVEL_1:
1316 if (volinfo->member_disks > 2)
1317 bv->bv_level = BIOC_SVOL_RAID10;
1318 else
1319 bv->bv_level = 1;
1320 break;
1321 case ARC_FW_VOL_RAIDLEVEL_3:
1322 bv->bv_level = 3;
1323 break;
1324 case ARC_FW_VOL_RAIDLEVEL_5:
1325 bv->bv_level = 5;
1326 break;
1327 case ARC_FW_VOL_RAIDLEVEL_6:
1328 bv->bv_level = 6;
1329 break;
1330 case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
1331 bv->bv_level = BIOC_SVOL_PASSTHRU;
1332 break;
1333 default:
1334 bv->bv_level = -1;
1335 break;
1336 }
1337
1338 bv->bv_nodisk = volinfo->member_disks;
1339 bv->bv_stripe_size = volinfo->stripe_size / 2;
1340 snprintf(bv->bv_dev, sizeof(bv->bv_dev), "sd%d", bv->bv_volid);
1341 scsipi_strvis(bv->bv_vendor, sizeof(bv->bv_vendor), volinfo->set_name,
1342 sizeof(volinfo->set_name));
1343
1344 out:
1345 kmem_free(volinfo, sizeof(*volinfo));
1346 return error;
1347 }
1348
1349 static int
1350 arc_bio_disk_novol(struct arc_softc *sc, struct bioc_disk *bd)
1351 {
1352 struct arc_fw_diskinfo *diskinfo;
1353 uint8_t request[2];
1354 int error = 0;
1355
1356 diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1357
1358 if (bd->bd_diskid >= sc->sc_cchans) {
1359 error = ENODEV;
1360 goto out;
1361 }
1362
1363 request[0] = ARC_FW_DISKINFO;
1364 request[1] = bd->bd_diskid;
1365 error = arc_msgbuf(sc, request, sizeof(request),
1366 diskinfo, sizeof(struct arc_fw_diskinfo));
1367 if (error != 0)
1368 goto out;
1369
1370 /* skip disks with no capacity */
1371 if (htole32(diskinfo->capacity) == 0 &&
1372 htole32(diskinfo->capacity2) == 0)
1373 goto out;
1374
1375 bd->bd_disknovol = true;
1376 arc_bio_disk_filldata(sc, bd, diskinfo, bd->bd_diskid);
1377
1378 out:
1379 kmem_free(diskinfo, sizeof(*diskinfo));
1380 return error;
1381 }
1382
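/*
 * Translate a firmware arc_fw_diskinfo record into bioc_disk fields:
 * state, size, model/revision and serial strings, and a channel/target
 * address faked up from the disk id.
 */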
1383 static void
1384 arc_bio_disk_filldata(struct arc_softc *sc, struct bioc_disk *bd,
1385 struct arc_fw_diskinfo *diskinfo, int diskid)
1386 {
1387 uint64_t blocks;
1388 char model[81];
1389 char serial[41];
1390 char rev[17];
1391
1392 switch (htole32(diskinfo->device_state)) {
1393 case ARC_FW_DISK_PASSTHRU:
1394 bd->bd_status = BIOC_SDPASSTHRU;
1395 break;
1396 case ARC_FW_DISK_RAIDMEMBER:
1397 bd->bd_status = BIOC_SDONLINE;
1398 break;
1399 case ARC_FW_DISK_HOTSPARE:
1400 bd->bd_status = BIOC_SDHOTSPARE;
1401 break;
1402 case ARC_FW_DISK_UNUSED:
1403 bd->bd_status = BIOC_SDUNUSED;
1404 break;
1405 case 0:
1406 /* disk has been disconnected */
1407 bd->bd_status = BIOC_SDOFFLINE;
1408 bd->bd_channel = 1;
1409 bd->bd_target = 0;
1410 bd->bd_lun = 0;
1411 strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
1412 break;
1413 default:
1414 printf("%s: unknown disk device_state: 0x%x\n", __func__,
1415 htole32(diskinfo->device_state));
1416 bd->bd_status = BIOC_SDINVALID;
1417 return;
1418 }
1419
1420 blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
1421 blocks += (uint64_t)htole32(diskinfo->capacity);
1422 bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */
1423
1424 scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
1425 scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
1426 scsipi_strvis(rev, 17, diskinfo->firmware_rev,
1427 sizeof(diskinfo->firmware_rev));
1428
1429 snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
1430 strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));
1431
1432 #if 0
1433 bd->bd_channel = diskinfo->scsi_attr.channel;
1434 bd->bd_target = diskinfo->scsi_attr.target;
1435 bd->bd_lun = diskinfo->scsi_attr.lun;
1436 #endif
1437
1438 /*
1439 * the firmware doesn't seem to fill scsi_attr in, so fake it with
1440 * the diskid.
1441 */
1442 bd->bd_channel = 0;
1443 bd->bd_target = diskid;
1444 bd->bd_lun = 0;
1445 }
1446
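/*
 * BIOCDISK handler: resolve a (volume, disk index) pair through the
 * owning raid set to a physical device and fill in its details.
 */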
1447 static int
1448 arc_bio_disk_volume(struct arc_softc *sc, struct bioc_disk *bd)
1449 {
1450 struct arc_fw_raidinfo *raidinfo;
1451 struct arc_fw_volinfo *volinfo;
1452 struct arc_fw_diskinfo *diskinfo;
1453 uint8_t request[2];
1454 int error = 0;
1455
1456 volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1457 raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1458 diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1459
1460 error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
1461 if (error != 0)
1462 goto out;
1463
1464 request[0] = ARC_FW_RAIDINFO;
1465 request[1] = volinfo->raid_set_number;
1466
1467 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1468 sizeof(struct arc_fw_raidinfo));
1469 if (error != 0)
1470 goto out;
1471
1472 if (bd->bd_diskid >= sc->sc_cchans ||
1473 bd->bd_diskid >= raidinfo->member_devices) {
1474 error = ENODEV;
1475 goto out;
1476 }
1477
1478 if (raidinfo->device_array[bd->bd_diskid] == 0xff) {
1479 /*
1480 * The disk has been disconnected, mark it offline
1481 * and put it on another bus.
1482 */
1483 bd->bd_channel = 1;
1484 bd->bd_target = 0;
1485 bd->bd_lun = 0;
1486 bd->bd_status = BIOC_SDOFFLINE;
1487 strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
1488 goto out;
1489 }
1490
1491 request[0] = ARC_FW_DISKINFO;
1492 request[1] = raidinfo->device_array[bd->bd_diskid];
1493 error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
1494 sizeof(struct arc_fw_diskinfo));
1495 if (error != 0)
1496 goto out;
1497
1498 /* now fill our bio disk with data from the firmware */
1499 arc_bio_disk_filldata(sc, bd, diskinfo,
1500 raidinfo->device_array[bd->bd_diskid]);
1501
1502 out:
1503 kmem_free(raidinfo, sizeof(*raidinfo));
1504 kmem_free(volinfo, sizeof(*volinfo));
1505 kmem_free(diskinfo, sizeof(*diskinfo));
1506 return error;
1507 }
1508 #endif /* NBIO > 0 */
1509
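/*
 * Compute the simple 8-bit checksum used by the firmware message
 * buffer protocol: the sum of both length bytes and every payload
 * byte.
 */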
1510 uint8_t
1511 arc_msg_cksum(void *cmd, uint16_t len)
1512 {
1513 uint8_t *buf = cmd;
1514 uint8_t cksum;
1515 int i;
1516
1517 cksum = (uint8_t)(len >> 8) + (uint8_t)len;
1518 for (i = 0; i < len; i++)
1519 cksum += buf[i];
1520
1521 return cksum;
1522 }
1523
1524
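/*
 * Exchange a request/reply pair with the firmware over the doorbell
 * message interface.  Both buffers are wrapped in an arc_fw_bufhdr
 * plus a trailing checksum and shuttled through the IOC read/write
 * buffers in chunks, with the doorbell registers providing the flow
 * control.  The routine takes the firmware lock itself and may sleep.
 */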
1525 int
1526 arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
1527 size_t rbuflen)
1528 {
1529 uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
1530 uint8_t *wbuf, *rbuf;
1531 int wlen, wdone = 0, rlen, rdone = 0;
1532 struct arc_fw_bufhdr *bufhdr;
1533 uint32_t reg, rwlen;
1534 int error = 0;
1535 #ifdef ARC_DEBUG
1536 int i;
1537 #endif
1538
1539 wbuf = rbuf = NULL;
1540
1541 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
1542 device_xname(&sc->sc_dev), wbuflen, rbuflen);
1543
1544 wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
1545 wbuf = kmem_alloc(wlen, KM_SLEEP);
1546
1547 rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
1548 rbuf = kmem_alloc(rlen, KM_SLEEP);
1549
1550 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
1551 device_xname(&sc->sc_dev), wlen, rlen);
1552
1553 bufhdr = (struct arc_fw_bufhdr *)wbuf;
1554 bufhdr->hdr = arc_fw_hdr;
1555 bufhdr->len = htole16(wbuflen);
1556 memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
1557 wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);
1558
1559 arc_lock(sc);
1560 if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0) {
1561 error = EBUSY;
1562 goto out;
1563 }
1564
1565 reg = ARC_REG_OUTB_DOORBELL_READ_OK;
1566
1567 do {
1568 if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
1569 memset(rwbuf, 0, sizeof(rwbuf));
1570 rwlen = (wlen - wdone) % sizeof(rwbuf);
1571 memcpy(rwbuf, &wbuf[wdone], rwlen);
1572
1573 #ifdef ARC_DEBUG
1574 if (arcdebug & ARC_D_DB) {
1575 printf("%s: write %d:",
1576 device_xname(&sc->sc_dev), rwlen);
1577 for (i = 0; i < rwlen; i++)
1578 printf(" 0x%02x", rwbuf[i]);
1579 printf("\n");
1580 }
1581 #endif
1582
1583 /* copy the chunk to the hw */
1584 arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
1585 arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
1586 sizeof(rwbuf));
1587
1588 /* say we have a buffer for the hw */
1589 arc_write(sc, ARC_REG_INB_DOORBELL,
1590 ARC_REG_INB_DOORBELL_WRITE_OK);
1591
1592 wdone += rwlen;
1593 }
1594
1595 while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
1596 arc_wait(sc);
1597
1598 arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
1599
1600 DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
1601 device_xname(&sc->sc_dev), reg);
1602
1603 if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
1604 rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
1605 if (rwlen > sizeof(rwbuf)) {
1606 DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
1607 device_xname(&sc->sc_dev));
1608 error = EIO;
1609 goto out;
1610 }
1611
1612 arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
1613 sizeof(rwbuf));
1614
1615 arc_write(sc, ARC_REG_INB_DOORBELL,
1616 ARC_REG_INB_DOORBELL_READ_OK);
1617
1618 #ifdef ARC_DEBUG
1619 printf("%s: len: %d+%d=%d/%d\n",
1620 device_xname(&sc->sc_dev),
1621 rwlen, rdone, rwlen + rdone, rlen);
1622 if (arcdebug & ARC_D_DB) {
1623 printf("%s: read:",
1624 device_xname(&sc->sc_dev));
1625 for (i = 0; i < rwlen; i++)
1626 printf(" 0x%02x", rwbuf[i]);
1627 printf("\n");
1628 }
1629 #endif
1630
1631 if ((rdone + rwlen) > rlen) {
1632 DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
1633 device_xname(&sc->sc_dev));
1634 error = EIO;
1635 goto out;
1636 }
1637
1638 memcpy(&rbuf[rdone], rwbuf, rwlen);
1639 rdone += rwlen;
1640 }
1641 } while (rdone != rlen);
1642
1643 bufhdr = (struct arc_fw_bufhdr *)rbuf;
1644 if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
1645 bufhdr->len != htole16(rbuflen)) {
1646 DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
1647 device_xname(&sc->sc_dev));
1648 error = EIO;
1649 goto out;
1650 }
1651
1652 memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);
1653
1654 if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
1655 DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
1656 device_xname(&sc->sc_dev));
1657 error = EIO;
1658 goto out;
1659 }
1660
1661 out:
1662 arc_unlock(sc);
1663 kmem_free(wbuf, wlen);
1664 kmem_free(rbuf, rlen);
1665
1666 return error;
1667 }
1668
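/*
 * arc_lock()/arc_unlock() bracket a firmware conversation: they take
 * the rwlock and the spin mutex, mask the doorbell interrupt and set
 * sc_talking so arc_intr() wakes the waiter instead of discarding
 * doorbell events.  arc_wait() re-enables the doorbell interrupt and
 * sleeps up to a second for it.
 */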
1669 void
1670 arc_lock(struct arc_softc *sc)
1671 {
1672 rw_enter(&sc->sc_rwlock, RW_WRITER);
1673 mutex_spin_enter(&sc->sc_mutex);
1674 arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1675 sc->sc_talking = 1;
1676 }
1677
1678 void
1679 arc_unlock(struct arc_softc *sc)
1680 {
1681 KASSERT(mutex_owned(&sc->sc_mutex));
1682
1683 arc_write(sc, ARC_REG_INTRMASK,
1684 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1685 sc->sc_talking = 0;
1686 mutex_spin_exit(&sc->sc_mutex);
1687 rw_exit(&sc->sc_rwlock);
1688 }
1689
1690 void
1691 arc_wait(struct arc_softc *sc)
1692 {
1693 KASSERT(mutex_owned(&sc->sc_mutex));
1694
1695 arc_write(sc, ARC_REG_INTRMASK,
1696 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1697 if (cv_timedwait(&sc->sc_condvar, &sc->sc_mutex, hz) == EWOULDBLOCK)
1698 arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1699 }
1700
1701 #if NBIO > 0
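/*
 * Kernel thread entry point: query the firmware for the volume and
 * disk layout, create one ENVSYS_DRIVE sensor per volume and per
 * member disk, and register them with sysmon_envsys(9).  The volume id
 * is stored in value_max and the disk index (offset by 10) in
 * value_avg so the refresh routine can map a sensor back to its
 * device.
 */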
1702 static void
1703 arc_create_sensors(void *arg)
1704 {
1705 struct arc_softc *sc = arg;
1706 struct bioc_inq bi;
1707 struct bioc_vol bv;
1708 int i, j;
1709 size_t slen, count = 0;
1710
1711 memset(&bi, 0, sizeof(bi));
1712 if (arc_bio_inq(sc, &bi) != 0) {
1713 aprint_error("%s: unable to query firmware for sensor info\n",
1714 device_xname(&sc->sc_dev));
1715 kthread_exit(0);
1716 }
1717
1718 /* There's no point in continuing if there are no volumes */
1719 if (!bi.bi_novol)
1720 kthread_exit(0);
1721
1722 for (i = 0; i < bi.bi_novol; i++) {
1723 memset(&bv, 0, sizeof(bv));
1724 bv.bv_volid = i;
1725 if (arc_bio_vol(sc, &bv) != 0)
1726 kthread_exit(0);
1727
1728 /* Skip passthrough volumes */
1729 if (bv.bv_level == BIOC_SVOL_PASSTHRU)
1730 continue;
1731
1732 /* one sensor for the volume itself */
1733 sc->sc_nsensors++;
1734 /* plus one sensor per member disk in the volume */
1735 sc->sc_nsensors += bv.bv_nodisk;
1736 }
1737
1738 /* No valid volumes */
1739 if (!sc->sc_nsensors)
1740 kthread_exit(0);
1741
1742 sc->sc_sme = sysmon_envsys_create();
1743 slen = sizeof(envsys_data_t) * sc->sc_nsensors;
1744 sc->sc_sensors = kmem_zalloc(slen, KM_SLEEP);
1745
1746 /* Attach sensors for volumes and disks */
1747 for (i = 0; i < bi.bi_novol; i++) {
1748 memset(&bv, 0, sizeof(bv));
1749 bv.bv_volid = i;
1750 if (arc_bio_vol(sc, &bv) != 0)
1751 goto bad;
1752
1753 sc->sc_sensors[count].units = ENVSYS_DRIVE;
1754 sc->sc_sensors[count].monitor = true;
1755 sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;
1756
1757 /* Skip passthrough volumes */
1758 if (bv.bv_level == BIOC_SVOL_PASSTHRU)
1759 continue;
1760
1761 if (bv.bv_level == BIOC_SVOL_RAID10)
1762 snprintf(sc->sc_sensors[count].desc,
1763 sizeof(sc->sc_sensors[count].desc),
1764 "RAID 1+0 volume%d (%s)", i, bv.bv_dev);
1765 else
1766 snprintf(sc->sc_sensors[count].desc,
1767 sizeof(sc->sc_sensors[count].desc),
1768 "RAID %d volume%d (%s)", bv.bv_level, i,
1769 bv.bv_dev);
1770
1771 sc->sc_sensors[count].value_max = i;
1772
1773 if (sysmon_envsys_sensor_attach(sc->sc_sme,
1774 &sc->sc_sensors[count]))
1775 goto bad;
1776
1777 count++;
1778
1779 /* Attach disk sensors for this volume */
1780 for (j = 0; j < bv.bv_nodisk; j++) {
1781 sc->sc_sensors[count].units = ENVSYS_DRIVE;
1782 sc->sc_sensors[count].monitor = true;
1783 sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;
1784
1785 snprintf(sc->sc_sensors[count].desc,
1786 sizeof(sc->sc_sensors[count].desc),
1787 "disk%d volume%d (%s)", j, i, bv.bv_dev);
1788 sc->sc_sensors[count].value_max = i;
1789 sc->sc_sensors[count].value_avg = j + 10;
1790
1791 if (sysmon_envsys_sensor_attach(sc->sc_sme,
1792 &sc->sc_sensors[count]))
1793 goto bad;
1794
1795 count++;
1796 }
1797 }
1798
1799 /*
1800 * Register our envsys driver with the framework now that the
1801 * sensors have all been attached.
1802 */
1803 sc->sc_sme->sme_name = device_xname(&sc->sc_dev);
1804 sc->sc_sme->sme_cookie = sc;
1805 sc->sc_sme->sme_refresh = arc_refresh_sensors;
1806
1807 if (sysmon_envsys_register(sc->sc_sme)) {
1808 aprint_debug("%s: unable to register with sysmon\n",
1809 device_xname(&sc->sc_dev));
1810 goto bad;
1811 }
1812 kthread_exit(0);
1813
1814 bad:
1815 kmem_free(sc->sc_sensors, slen);
1816 sysmon_envsys_destroy(sc->sc_sme);
1817 kthread_exit(0);
1818 }
1819
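/*
 * sysmon refresh callback: map the sensor back to its volume (and
 * possibly member disk) via value_max/value_avg and translate the
 * bio(4) status into an envsys drive state.
 */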
1820 static void
1821 arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
1822 {
1823 struct arc_softc *sc = sme->sme_cookie;
1824 struct bioc_vol bv;
1825 struct bioc_disk bd;
1826
1827 /* sanity check */
1828 if (edata->units != ENVSYS_DRIVE)
1829 return;
1830
1831 memset(&bv, 0, sizeof(bv));
1832 bv.bv_volid = edata->value_max;
1833
1834 if (arc_bio_vol(sc, &bv)) {
1835 edata->value_cur = ENVSYS_DRIVE_EMPTY;
1836 edata->state = ENVSYS_SINVALID;
1837 return;
1838 }
1839
1840 /* Current sensor is handling a volume member disk */
1841 if (edata->value_avg) {
1842 memset(&bd, 0, sizeof(bd));
1843 bd.bd_volid = edata->value_max;
1844 bd.bd_diskid = edata->value_avg - 10;
1845
1846 if (arc_bio_disk_volume(sc, &bd)) {
1847 edata->value_cur = ENVSYS_DRIVE_OFFLINE;
1848 edata->state = ENVSYS_SCRITICAL;
1849 return;
1850 }
1851
1852 switch (bd.bd_status) {
1853 case BIOC_SDONLINE:
1854 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1855 edata->state = ENVSYS_SVALID;
1856 break;
1857 case BIOC_SDOFFLINE:
1858 edata->value_cur = ENVSYS_DRIVE_OFFLINE;
1859 edata->state = ENVSYS_SCRITICAL;
1860 break;
1861 default:
1862 edata->value_cur = ENVSYS_DRIVE_FAIL;
1863 edata->state = ENVSYS_SCRITICAL;
1864 break;
1865 }
1866
1867 return;
1868 }
1869
1870 /* Current sensor is handling a volume */
1871 switch (bv.bv_status) {
1872 case BIOC_SVOFFLINE:
1873 edata->value_cur = ENVSYS_DRIVE_OFFLINE;
1874 edata->state = ENVSYS_SCRITICAL;
1875 break;
1876 case BIOC_SVDEGRADED:
1877 edata->value_cur = ENVSYS_DRIVE_PFAIL;
1878 edata->state = ENVSYS_SCRITICAL;
1879 break;
1880 case BIOC_SVBUILDING:
1881 edata->value_cur = ENVSYS_DRIVE_BUILD;
1882 edata->state = ENVSYS_SVALID;
1883 break;
1884 case BIOC_SVMIGRATING:
1885 edata->value_cur = ENVSYS_DRIVE_MIGRATING;
1886 edata->state = ENVSYS_SVALID;
1887 break;
1888 case BIOC_SVCHECKING:
1889 edata->value_cur = ENVSYS_DRIVE_CHECK;
1890 edata->state = ENVSYS_SVALID;
1891 break;
1892 case BIOC_SVREBUILD:
1893 edata->value_cur = ENVSYS_DRIVE_REBUILD;
1894 edata->state = ENVSYS_SCRITICAL;
1895 break;
1896 case BIOC_SVSCRUB:
1897 case BIOC_SVONLINE:
1898 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1899 edata->state = ENVSYS_SVALID;
1900 break;
1901 case BIOC_SVINVALID:
1902 /* FALLTHROUGH */
1903 default:
1904 edata->value_cur = ENVSYS_DRIVE_EMPTY; /* unknown state */
1905 edata->state = ENVSYS_SINVALID;
1906 break;
1907 }
1908 }
1909 #endif /* NBIO > 0 */
1910
1911 uint32_t
1912 arc_read(struct arc_softc *sc, bus_size_t r)
1913 {
1914 uint32_t v;
1915
1916 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1917 BUS_SPACE_BARRIER_READ);
1918 v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1919
1920 DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
1921 device_xname(&sc->sc_dev), r, v);
1922
1923 return v;
1924 }
1925
1926 void
1927 arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1928 {
1929 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1930 BUS_SPACE_BARRIER_READ);
1931 bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
1932 (uint32_t *)buf, len >> 2);
1933 }
1934
1935 void
1936 arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
1937 {
1938 DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
1939 device_xname(&sc->sc_dev), r, v);
1940
1941 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1942 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1943 BUS_SPACE_BARRIER_WRITE);
1944 }
1945
1946 void
1947 arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1948 {
1949 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
1950 (const uint32_t *)buf, len >> 2);
1951 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1952 BUS_SPACE_BARRIER_WRITE);
1953 }
1954
1955 int
1956 arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1957 uint32_t target)
1958 {
1959 int i;
1960
1961 DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
1962 device_xname(&sc->sc_dev), r, mask, target);
1963
1964 for (i = 0; i < 10000; i++) {
1965 if ((arc_read(sc, r) & mask) == target)
1966 return 0;
1967 delay(1000);
1968 }
1969
1970 return 1;
1971 }
1972
1973 int
1974 arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1975 uint32_t target)
1976 {
1977 int i;
1978
1979 DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
1980 device_xname(&sc->sc_dev), r, mask, target);
1981
1982 for (i = 0; i < 10000; i++) {
1983 if ((arc_read(sc, r) & mask) != target)
1984 return 0;
1985 delay(1000);
1986 }
1987
1988 return 1;
1989 }
1990
1991 int
1992 arc_msg0(struct arc_softc *sc, uint32_t m)
1993 {
1994 /* post message */
1995 arc_write(sc, ARC_REG_INB_MSG0, m);
1996 /* wait for the fw to do it */
1997 if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
1998 ARC_REG_INTRSTAT_MSG0) != 0)
1999 return 1;
2000
2001 /* ack it */
2002 arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);
2003
2004 return 0;
2005 }
2006
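/*
 * Allocate a zeroed, physically contiguous chunk of DMA-safe memory,
 * map it into kernel virtual address space and load it into a dmamap;
 * used for the request frames shared with the controller.
 */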
2007 struct arc_dmamem *
2008 arc_dmamem_alloc(struct arc_softc *sc, size_t size)
2009 {
2010 struct arc_dmamem *adm;
2011 int nsegs;
2012
2013 adm = kmem_zalloc(sizeof(*adm), KM_NOSLEEP);
2014 if (adm == NULL)
2015 return NULL;
2016
2017 adm->adm_size = size;
2018
2019 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2020 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
2021 goto admfree;
2022
2023 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
2024 1, &nsegs, BUS_DMA_NOWAIT) != 0)
2025 goto destroy;
2026
2027 if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
2028 &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
2029 goto free;
2030
2031 if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
2032 NULL, BUS_DMA_NOWAIT) != 0)
2033 goto unmap;
2034
2035 memset(adm->adm_kva, 0, size);
2036
2037 return adm;
2038
2039 unmap:
2040 bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
2041 free:
2042 bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
2043 destroy:
2044 bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
2045 admfree:
2046 kmem_free(adm, sizeof(*adm));
2047
2048 return NULL;
2049 }
2050
2051 void
2052 arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
2053 {
2054 bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
2055 bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
2056 bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
2057 bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
2058 kmem_free(adm, sizeof(*adm));
2059 }
2060
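/*
 * Allocate the ccb array and the DMA memory backing the request
 * frames, create a dmamap for each ccb and precompute its post-queue
 * address, then put every ccb on the free list.
 */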
2061 int
2062 arc_alloc_ccbs(struct arc_softc *sc)
2063 {
2064 struct arc_ccb *ccb;
2065 uint8_t *cmd;
2066 int i;
2067 size_t ccbslen;
2068
2069 TAILQ_INIT(&sc->sc_ccb_free);
2070
2071 ccbslen = sizeof(struct arc_ccb) * sc->sc_req_count;
2072 sc->sc_ccbs = kmem_zalloc(ccbslen, KM_SLEEP);
2073
2074 sc->sc_requests = arc_dmamem_alloc(sc,
2075 ARC_MAX_IOCMDLEN * sc->sc_req_count);
2076 if (sc->sc_requests == NULL) {
2077 aprint_error("%s: unable to allocate ccb dmamem\n",
2078 device_xname(&sc->sc_dev));
2079 goto free_ccbs;
2080 }
2081 cmd = ARC_DMA_KVA(sc->sc_requests);
2082
2083 for (i = 0; i < sc->sc_req_count; i++) {
2084 ccb = &sc->sc_ccbs[i];
2085
2086 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
2087 MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
2088 aprint_error("%s: unable to create dmamap for ccb %d\n",
2089 device_xname(&sc->sc_dev), i);
2090 goto free_maps;
2091 }
2092
2093 ccb->ccb_sc = sc;
2094 ccb->ccb_id = i;
2095 ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;
2096
2097 ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
2098 ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
2099 ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;
2100
2101 arc_put_ccb(sc, ccb);
2102 }
2103
2104 return 0;
2105
2106 free_maps:
2107 while ((ccb = arc_get_ccb(sc)) != NULL)
2108 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2109 arc_dmamem_free(sc, sc->sc_requests);
2110
2111 free_ccbs:
2112 kmem_free(sc->sc_ccbs, ccbslen);
2113
2114 return 1;
2115 }
2116
2117 struct arc_ccb *
2118 arc_get_ccb(struct arc_softc *sc)
2119 {
2120 struct arc_ccb *ccb;
2121
2122 ccb = TAILQ_FIRST(&sc->sc_ccb_free);
2123 if (ccb != NULL)
2124 TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
2125
2126 return ccb;
2127 }
2128
2129 void
2130 arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
2131 {
2132 ccb->ccb_xs = NULL;
2133 memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
2134 TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
2135 }
2136