/* $NetBSD: arcmsr.c,v 1.35 2016/06/19 06:58:17 dholland Exp $ */
/* $OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */

/*
 * Copyright (c) 2007, 2008 Juan Romero Pardines <xtraeme@netbsd.org>
 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.35 2016/06/19 06:58:17 dholland Exp $");

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/rwlock.h>

#if NBIO > 0
#include <sys/ioctl.h>
#include <dev/biovar.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/sysmon/sysmonvar.h>

#include <sys/bus.h>

#include <dev/pci/arcmsrvar.h>

/* #define ARC_DEBUG */
#ifdef ARC_DEBUG
#define ARC_D_INIT	(1<<0)
#define ARC_D_RW	(1<<1)
#define ARC_D_DB	(1<<2)

int arcdebug = 0;

#define DPRINTF(p...)		do { if (arcdebug) printf(p); } while (0)
#define DNPRINTF(n, p...)	do { if ((n) & arcdebug) printf(p); } while (0)

#else
#define DPRINTF(p, ...)		/* p */
#define DNPRINTF(n, p, ...)	/* n, p */
#endif

/*
 * the fw header must always equal this.
 */
static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };

/*
 * autoconf(9) glue.
 */
static int arc_match(device_t, cfdata_t, void *);
static void arc_attach(device_t, device_t, void *);
static int arc_detach(device_t, int);
static bool arc_shutdown(device_t, int);
static int arc_intr(void *);
static void arc_minphys(struct buf *);

CFATTACH_DECL_NEW(arcmsr, sizeof(struct arc_softc),
    arc_match, arc_attach, arc_detach, NULL);

/*
 * bio(4) and sysmon_envsys(9) glue.
 */
#if NBIO > 0
static int arc_bioctl(device_t, u_long, void *);
static int arc_bio_inq(struct arc_softc *, struct bioc_inq *);
static int arc_bio_vol(struct arc_softc *, struct bioc_vol *);
static int arc_bio_disk_volume(struct arc_softc *, struct bioc_disk *);
static int arc_bio_disk_novol(struct arc_softc *, struct bioc_disk *);
static void arc_bio_disk_filldata(struct arc_softc *, struct bioc_disk *,
    struct arc_fw_diskinfo *, int);
static int arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
static int arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
static int arc_bio_getvol(struct arc_softc *, int,
    struct arc_fw_volinfo *);
static int arc_bio_setstate(struct arc_softc *, struct bioc_setstate *);
static int arc_bio_volops(struct arc_softc *, struct bioc_volops *);
static void arc_create_sensors(void *);
static void arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
static int arc_fw_parse_status_code(struct arc_softc *, uint8_t *);
#endif

/*
 * interface for scsi midlayer to talk to.
 */
static void arc_scsi_cmd(struct scsipi_channel *, scsipi_adapter_req_t,
    void *);

/*
 * code to deal with getting bits in and out of the bus space.
 */
static uint32_t arc_read(struct arc_softc *, bus_size_t);
static void arc_read_region(struct arc_softc *, bus_size_t, void *,
    size_t);
static void arc_write(struct arc_softc *, bus_size_t, uint32_t);
static void arc_write_region(struct arc_softc *, bus_size_t, void *,
    size_t);
static int arc_wait_eq(struct arc_softc *, bus_size_t, uint32_t,
    uint32_t);
#ifdef unused
static int arc_wait_ne(struct arc_softc *, bus_size_t, uint32_t,
    uint32_t);
#endif
static int arc_msg0(struct arc_softc *, uint32_t);
static struct arc_dmamem *arc_dmamem_alloc(struct arc_softc *, size_t);
static void arc_dmamem_free(struct arc_softc *,
    struct arc_dmamem *);

static int arc_alloc_ccbs(device_t);
static struct arc_ccb *arc_get_ccb(struct arc_softc *);
static void arc_put_ccb(struct arc_softc *, struct arc_ccb *);
static int arc_load_xs(struct arc_ccb *);
static int arc_complete(struct arc_softc *, struct arc_ccb *, int);
static void arc_scsi_cmd_done(struct arc_softc *, struct arc_ccb *,
    uint32_t);

/*
 * real stuff for dealing with the hardware.
 */
static int arc_map_pci_resources(device_t, struct pci_attach_args *);
static void arc_unmap_pci_resources(struct arc_softc *);
static int arc_query_firmware(device_t);

/*
 * stuff to do messaging via the doorbells.
 */
#if NBIO > 0
static void arc_lock(struct arc_softc *);
static void arc_unlock(struct arc_softc *);
static void arc_wait(struct arc_softc *);
static uint8_t arc_msg_cksum(void *, uint16_t);
static int arc_msgbuf(struct arc_softc *, void *, size_t, void *, size_t);
#endif

#define arc_push(_s, _r) arc_write((_s), ARC_REG_POST_QUEUE, (_r))
#define arc_pop(_s) arc_read((_s), ARC_REG_REPLY_QUEUE)

static int
arc_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
		switch (PCI_PRODUCT(pa->pa_id)) {
		case PCI_PRODUCT_ARECA_ARC1110:
		case PCI_PRODUCT_ARECA_ARC1120:
		case PCI_PRODUCT_ARECA_ARC1130:
		case PCI_PRODUCT_ARECA_ARC1160:
		case PCI_PRODUCT_ARECA_ARC1170:
		case PCI_PRODUCT_ARECA_ARC1200:
		case PCI_PRODUCT_ARECA_ARC1202:
		case PCI_PRODUCT_ARECA_ARC1210:
		case PCI_PRODUCT_ARECA_ARC1220:
		case PCI_PRODUCT_ARECA_ARC1230:
		case PCI_PRODUCT_ARECA_ARC1260:
		case PCI_PRODUCT_ARECA_ARC1270:
		case PCI_PRODUCT_ARECA_ARC1280:
		case PCI_PRODUCT_ARECA_ARC1380:
		case PCI_PRODUCT_ARECA_ARC1381:
		case PCI_PRODUCT_ARECA_ARC1680:
		case PCI_PRODUCT_ARECA_ARC1681:
			return 1;
		default:
			break;
		}
	}

	return 0;
}

static void
arc_attach(device_t parent, device_t self, void *aux)
{
	struct arc_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_chan;

	sc->sc_dev = self;
	sc->sc_talking = 0;
	rw_init(&sc->sc_rwlock);
	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_condvar, "arcdb");

	if (arc_map_pci_resources(self, pa) != 0) {
		/* error message printed by arc_map_pci_resources */
		return;
	}

	if (arc_query_firmware(self) != 0) {
		/* error message printed by arc_query_firmware */
		goto unmap_pci;
	}

	if (arc_alloc_ccbs(self) != 0) {
		/* error message printed by arc_alloc_ccbs */
		goto unmap_pci;
	}

	if (!pmf_device_register1(self, NULL, NULL, arc_shutdown))
		panic("%s: couldn't establish shutdown handler\n",
		    device_xname(self));

	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = self;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_minphys = arc_minphys;
	adapt->adapt_request = arc_scsi_cmd;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_nluns = ARC_MAX_LUN;
	chan->chan_ntargets = ARC_MAX_TARGET;
	chan->chan_id = ARC_MAX_TARGET;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	/*
	 * Save the device_t returned, because we may need to attach
	 * devices via the management interface later.
	 */
	sc->sc_scsibus_dv = config_found(self, &sc->sc_chan, scsiprint);

	/* enable interrupts */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));

#if NBIO > 0
	/*
	 * Register the driver to bio(4) and setup the sensors.
	 */
	if (bio_register(self, arc_bioctl) != 0)
		panic("%s: bioctl registration failed\n", device_xname(self));

	/*
	 * you need to talk to the firmware to get volume info. our firmware
	 * interface relies on being able to sleep, so we need to use a thread
	 * to do the work.
	 */
	if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
		panic("%s: unable to create a kernel thread for sensors\n",
		    device_xname(self));
#endif

	return;

unmap_pci:
	arc_unmap_pci_resources(sc);
}

static int
arc_detach(device_t self, int flags)
{
	struct arc_softc *sc = device_private(self);

	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
		aprint_error_dev(self, "timeout waiting to stop bg rebuild\n");

	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
		aprint_error_dev(self, "timeout waiting to flush cache\n");

	if (sc->sc_sme != NULL)
		sysmon_envsys_unregister(sc->sc_sme);

	return 0;
}

static bool
arc_shutdown(device_t self, int how)
{
	struct arc_softc *sc = device_private(self);

	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
		aprint_error_dev(self, "timeout waiting to stop bg rebuild\n");

	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
		aprint_error_dev(self, "timeout waiting to flush cache\n");

	return true;
}

static void
arc_minphys(struct buf *bp)
{
	if (bp->b_bcount > MAXPHYS)
		bp->b_bcount = MAXPHYS;
	minphys(bp);
}

static int
arc_intr(void *arg)
{
	struct arc_softc *sc = arg;
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg, intrstat;

	mutex_spin_enter(&sc->sc_mutex);
	intrstat = arc_read(sc, ARC_REG_INTRSTAT);
	if (intrstat == 0x0) {
		mutex_spin_exit(&sc->sc_mutex);
		return 0;
	}

	intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
	arc_write(sc, ARC_REG_INTRSTAT, intrstat);

	if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
		if (sc->sc_talking) {
			arc_write(sc, ARC_REG_INTRMASK,
			    ~ARC_REG_INTRMASK_POSTQUEUE);
			cv_broadcast(&sc->sc_condvar);
		} else {
			/* otherwise drop it */
			reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
			arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
			if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
				arc_write(sc, ARC_REG_INB_DOORBELL,
				    ARC_REG_INB_DOORBELL_READ_OK);
		}
	}
	mutex_spin_exit(&sc->sc_mutex);

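	/*
	 * Drain the reply queue: each entry encodes the DMA address of a
	 * completed command frame, which maps back to its ccb through the
	 * context field stored in the command.
	 */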
	while ((reg = arc_pop(sc)) != 0xffffffff) {
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	}


	return 1;
}

void
arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct arc_softc *sc = device_private(adapt->adapt_dev);
	struct arc_ccb *ccb;
	struct arc_msg_scsicmd *cmd;
	uint32_t reg;
	uint8_t target;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* Not supported. */
		return;
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	mutex_spin_enter(&sc->sc_mutex);

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

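	/*
	 * The firmware command frame only has room for ARC_MSG_CDBLEN CDB
	 * bytes; fail anything larger with ILLEGAL REQUEST sense.
	 */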
	if (xs->cmdlen > ARC_MSG_CDBLEN) {
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.scsi_sense.asc = 0x20;
		xs->error = XS_SENSE;
		xs->status = SCSI_CHECK;
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	ccb = arc_get_ccb(sc);
	if (ccb == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	if (arc_load_xs(ccb) != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		arc_put_ccb(sc, ccb);
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	cmd = &ccb->ccb_cmd->cmd;
	reg = ccb->ccb_cmd_post;

	/* bus is always 0 */
	cmd->target = target;
	cmd->lun = periph->periph_lun;
	cmd->function = 1; /* XXX magic number */

	cmd->cdb_len = xs->cmdlen;
	cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
	if (xs->xs_control & XS_CTL_DATA_OUT)
		cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
	if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
		cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
		reg |= ARC_REG_POST_QUEUE_BIGFRAME;
	}

	cmd->context = htole32(ccb->ccb_id);
	cmd->data_len = htole32(xs->datalen);

	memcpy(cmd->cdb, xs->cmd, xs->cmdlen);

	/* we've built the command, let's put it on the hw */
	bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	arc_push(sc, reg);
	if (xs->xs_control & XS_CTL_POLL) {
		if (arc_complete(sc, ccb, xs->timeout) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			mutex_spin_exit(&sc->sc_mutex);
			scsipi_done(xs);
			return;
		}
	}

	mutex_spin_exit(&sc->sc_mutex);
}

int
arc_load_xs(struct arc_ccb *ccb)
{
	struct arc_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
	uint64_t addr;
	int i, error;

	if (xs->datalen == 0)
		return 0;

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error != 0) {
		aprint_error("%s: error %d loading dmamap\n",
		    device_xname(sc->sc_dev), error);
		return 1;
	}

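	/* translate the dmamap segments into the 64-bit SGL in the frame */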
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sge = &sgl[i];

		sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
		addr = dmap->dm_segs[i].ds_addr;
		sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
		sge->sg_lo_addr = htole32((uint32_t)addr);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct arc_msg_scsicmd *cmd;

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	/* timeout_del */
	xs->status |= XS_STS_DONE;

	if (reg & ARC_REG_REPLY_QUEUE_ERR) {
		cmd = &ccb->ccb_cmd->cmd;

		switch (cmd->status) {
		case ARC_MSG_STATUS_SELTIMEOUT:
		case ARC_MSG_STATUS_ABORTED:
		case ARC_MSG_STATUS_INIT_FAIL:
			xs->status = SCSI_OK;
			xs->error = XS_SELTIMEOUT;
			break;

		case SCSI_CHECK:
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, cmd->sense_data,
			    min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | 0x70;
			xs->status = SCSI_CHECK;
			xs->error = XS_SENSE;
			xs->resid = 0;
			break;

		default:
			/* unknown device status */
			xs->error = XS_BUSY; /* try again later? */
			xs->status = SCSI_BUSY;
			break;
		}
	} else {
		xs->status = SCSI_OK;
		xs->error = XS_NOERROR;
		xs->resid = 0;
	}

	arc_put_ccb(sc, ccb);
	scsipi_done(xs);
}

int
arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
{
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg;

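	/*
	 * Poll the reply queue roughly once per millisecond of the caller's
	 * timeout, completing any other ccbs that finish in the meantime,
	 * until the ccb we were asked for comes back.
	 */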
	do {
		reg = arc_pop(sc);
		if (reg == 0xffffffff) {
			if (timeout-- == 0)
				return 1;

			delay(1000);
			continue;
		}

		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	} while (nccb != ccb);

	return 0;
}

int
arc_map_pci_resources(device_t self, struct pci_attach_args *pa)
{
	struct arc_softc *sc = device_private(self);
	pcireg_t memtype;
	pci_intr_handle_t ih;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
	if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
		aprint_error(": unable to map system interface register\n");
		return 1;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error(": unable to map interrupt\n");
		goto unmap;
	}

	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    arc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error(": unable to map interrupt [2]\n");
		goto unmap;
	}

	aprint_normal("\n");
	aprint_normal_dev(self, "interrupting at %s\n",
	    pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf)));

	return 0;

unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
	return 1;
}

void
arc_unmap_pci_resources(struct arc_softc *sc)
{
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}

int
arc_query_firmware(device_t self)
{
	struct arc_softc *sc = device_private(self);
	struct arc_msg_firmware_info fwinfo;
	char string[81]; /* sizeof(vendor)*2+1 */

	if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
	    ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
		aprint_debug_dev(self, "timeout waiting for firmware ok\n");
		return 1;
	}

	if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
		aprint_debug_dev(self, "timeout waiting for get config\n");
		return 1;
	}

	if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
		aprint_debug_dev(self, "timeout waiting to start bg rebuild\n");
		return 1;
	}

	arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));

	DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
	    device_xname(self), htole32(fwinfo.signature));

	if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
		aprint_error_dev(self, "invalid firmware info from iop\n");
		return 1;
	}

	DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
	    device_xname(self), htole32(fwinfo.request_len));
	DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
	    device_xname(self), htole32(fwinfo.queue_len));
	DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
	    device_xname(self), htole32(fwinfo.sdram_size));
	DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
	    device_xname(self), htole32(fwinfo.sata_ports));

	strnvisx(string, sizeof(string), fwinfo.vendor, sizeof(fwinfo.vendor),
	    VIS_TRIM|VIS_SAFE|VIS_OCTAL);
	DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
	    device_xname(self), string);

	strnvisx(string, sizeof(string), fwinfo.model, sizeof(fwinfo.model),
	    VIS_TRIM|VIS_SAFE|VIS_OCTAL);
	aprint_normal_dev(self, "Areca %s Host Adapter RAID controller\n",
	    string);

	strnvisx(string, sizeof(string), fwinfo.fw_version,
	    sizeof(fwinfo.fw_version), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
	DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
	    device_xname(self), string);

	aprint_normal_dev(self, "%d ports, %dMB SDRAM, firmware <%s>\n",
	    htole32(fwinfo.sata_ports), htole32(fwinfo.sdram_size), string);

	if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
		aprint_error_dev(self,
		    "unexpected request frame size (%d != %d)\n",
		    htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
		return 1;
	}

	sc->sc_req_count = htole32(fwinfo.queue_len);

	return 0;
}

#if NBIO > 0
static int
arc_bioctl(device_t self, u_long cmd, void *addr)
{
	struct arc_softc *sc = device_private(self);
	int error = 0;

	switch (cmd) {
	case BIOCINQ:
		error = arc_bio_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		error = arc_bio_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		error = arc_bio_disk_volume(sc, (struct bioc_disk *)addr);
		break;

	case BIOCDISK_NOVOL:
		error = arc_bio_disk_novol(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCSETSTATE:
		error = arc_bio_setstate(sc, (struct bioc_setstate *)addr);
		break;

	case BIOCVOLOPS:
		error = arc_bio_volops(sc, (struct bioc_volops *)addr);
		break;

	default:
		error = ENOTTY;
		break;
	}

	return error;
}

static int
arc_fw_parse_status_code(struct arc_softc *sc, uint8_t *reply)
{
	switch (*reply) {
	case ARC_FW_CMD_RAIDINVAL:
		printf("%s: firmware error (invalid raid set)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_VOLINVAL:
		printf("%s: firmware error (invalid volume set)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_NORAID:
		printf("%s: firmware error (nonexistent raid set)\n",
		    device_xname(sc->sc_dev));
		return ENODEV;
	case ARC_FW_CMD_NOVOLUME:
		printf("%s: firmware error (nonexistent volume set)\n",
		    device_xname(sc->sc_dev));
		return ENODEV;
	case ARC_FW_CMD_NOPHYSDRV:
		printf("%s: firmware error (nonexistent physical drive)\n",
		    device_xname(sc->sc_dev));
		return ENODEV;
	case ARC_FW_CMD_PARAM_ERR:
		printf("%s: firmware error (parameter error)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_UNSUPPORTED:
		printf("%s: firmware error (unsupported command)\n",
		    device_xname(sc->sc_dev));
		return EOPNOTSUPP;
	case ARC_FW_CMD_DISKCFG_CHGD:
		printf("%s: firmware error (disk configuration changed)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_PASS_INVAL:
		printf("%s: firmware error (invalid password)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_NODISKSPACE:
		printf("%s: firmware error (no disk space available)\n",
		    device_xname(sc->sc_dev));
		return EOPNOTSUPP;
	case ARC_FW_CMD_CHECKSUM_ERR:
		printf("%s: firmware error (checksum error)\n",
		    device_xname(sc->sc_dev));
		return EINVAL;
	case ARC_FW_CMD_PASS_REQD:
		printf("%s: firmware error (password required)\n",
		    device_xname(sc->sc_dev));
		return EPERM;
	case ARC_FW_CMD_OK:
	default:
		return 0;
	}
}

static int
arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
{
	uint8_t request[2], reply[1];
	size_t len;
	int error = 0;

	switch (ba->ba_opcode) {
	case BIOC_SAENABLE:
	case BIOC_SADISABLE:
		request[0] = ARC_FW_SET_ALARM;
		request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
		    ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
		len = sizeof(request);

		break;

	case BIOC_SASILENCE:
		request[0] = ARC_FW_MUTE_ALARM;
		len = 1;

		break;

	case BIOC_GASTATUS:
		/* system info is too big/ugly to deal with here */
		return arc_bio_alarm_state(sc, ba);

	default:
		return EOPNOTSUPP;
	}

	error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
	if (error != 0)
		return error;

	return arc_fw_parse_status_code(sc, &reply[0]);
}

static int
arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
{
	struct arc_fw_sysinfo *sysinfo;
	uint8_t request;
	int error = 0;

	sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);

	request = ARC_FW_SYSINFO;
	error = arc_msgbuf(sc, &request, sizeof(request),
	    sysinfo, sizeof(struct arc_fw_sysinfo));

	if (error != 0)
		goto out;

	ba->ba_status = sysinfo->alarm;

out:
	kmem_free(sysinfo, sizeof(*sysinfo));
	return error;
}

static int
arc_bio_volops(struct arc_softc *sc, struct bioc_volops *bc)
{
	/* to create a raid set */
	struct req_craidset {
		uint8_t cmdcode;
		uint32_t devmask;
		uint8_t raidset_name[16];
	} __packed;

	/* to create a volume set */
	struct req_cvolset {
		uint8_t cmdcode;
		uint8_t raidset;
		uint8_t volset_name[16];
		uint64_t capacity;
		uint8_t raidlevel;
		uint8_t stripe;
		uint8_t scsi_chan;
		uint8_t scsi_target;
		uint8_t scsi_lun;
		uint8_t tagqueue;
		uint8_t cache;
		uint8_t speed;
		uint8_t quick_init;
	} __packed;

	struct scsibus_softc *scsibus_sc = NULL;
	struct req_craidset req_craidset;
	struct req_cvolset req_cvolset;
	uint8_t request[2];
	uint8_t reply[1];
	int error = 0;

	switch (bc->bc_opcode) {
	case BIOC_VCREATE_VOLUME:
	{
		/*
		 * Zero out the structs so that we use some defaults
		 * in raid and volume sets.
		 */
		memset(&req_craidset, 0, sizeof(req_craidset));
		memset(&req_cvolset, 0, sizeof(req_cvolset));

		/*
		 * First we have to create the raid set, using the
		 * default name for all of them.
		 */
		req_craidset.cmdcode = ARC_FW_CREATE_RAIDSET;
		req_craidset.devmask = bc->bc_devmask;
		error = arc_msgbuf(sc, &req_craidset, sizeof(req_craidset),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: create raidset%d failed\n",
			    device_xname(sc->sc_dev), bc->bc_volid);
			return error;
		}

		/*
		 * At this point the raid set was created, so it's
		 * time to create the volume set.
		 */
		req_cvolset.cmdcode = ARC_FW_CREATE_VOLUME;
		req_cvolset.raidset = bc->bc_volid;
		req_cvolset.capacity = bc->bc_size * ARC_BLOCKSIZE;

		/*
		 * Set the RAID level.
		 */
		switch (bc->bc_level) {
		case 0:
		case 1:
			req_cvolset.raidlevel = bc->bc_level;
			break;
		case BIOC_SVOL_RAID10:
			req_cvolset.raidlevel = 1;
			break;
		case 3:
			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_3;
			break;
		case 5:
			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_5;
			break;
		case 6:
			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_6;
			break;
		default:
			return EOPNOTSUPP;
		}

		/*
		 * Set the stripe size.
		 */
		switch (bc->bc_stripe) {
		case 4:
			req_cvolset.stripe = 0;
			break;
		case 8:
			req_cvolset.stripe = 1;
			break;
		case 16:
			req_cvolset.stripe = 2;
			break;
		case 32:
			req_cvolset.stripe = 3;
			break;
		case 64:
			req_cvolset.stripe = 4;
			break;
		case 128:
			req_cvolset.stripe = 5;
			break;
		default:
			req_cvolset.stripe = 4; /* by default 64K */
			break;
		}

		req_cvolset.scsi_chan = bc->bc_channel;
		req_cvolset.scsi_target = bc->bc_target;
		req_cvolset.scsi_lun = bc->bc_lun;
		req_cvolset.tagqueue = 1; /* always enabled */
		req_cvolset.cache = 1; /* always enabled */
		req_cvolset.speed = 4; /* always max speed */

		/* RAID 1 and 1+0 levels need foreground initialization */
		if (bc->bc_level == 1 || bc->bc_level == BIOC_SVOL_RAID10)
			req_cvolset.quick_init = 1; /* foreground init */

		error = arc_msgbuf(sc, &req_cvolset, sizeof(req_cvolset),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: create volumeset%d failed\n",
			    device_xname(sc->sc_dev), bc->bc_volid);
			return error;
		}

		/*
		 * If we are creating a RAID 1 or RAID 1+0 volume,
		 * the volume will be created immediately but it won't
		 * be available until the initialization is done... so
		 * don't bother attaching the sd(4) device.
		 */
		if (bc->bc_level == 1 || bc->bc_level == BIOC_SVOL_RAID10)
			break;

		/*
		 * Do a rescan on the bus to attach the device associated
		 * with the new volume.
		 */
		scsibus_sc = device_private(sc->sc_scsibus_dv);
		(void)scsi_probe_bus(scsibus_sc, bc->bc_target, bc->bc_lun);

		break;
	}
	case BIOC_VREMOVE_VOLUME:
	{
		/*
		 * Remove the volume set specified in bc_volid.
		 */
		request[0] = ARC_FW_DELETE_VOLUME;
		request[1] = bc->bc_volid;
		error = arc_msgbuf(sc, request, sizeof(request),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: delete volumeset%d failed\n",
			    device_xname(sc->sc_dev), bc->bc_volid);
			return error;
		}

		/*
		 * Detach the sd(4) device associated with the volume,
		 * but if there's an error don't make it a priority.
		 */
		error = scsipi_target_detach(&sc->sc_chan, bc->bc_target,
		    bc->bc_lun, 0);
		if (error)
			printf("%s: couldn't detach sd device for volume %d "
			    "at %u:%u.%u (error=%d)\n",
			    device_xname(sc->sc_dev), bc->bc_volid,
			    bc->bc_channel, bc->bc_target, bc->bc_lun, error);

		/*
		 * and remove the raid set specified in bc_volid,
		 * we only care about volumes.
		 */
		request[0] = ARC_FW_DELETE_RAIDSET;
		request[1] = bc->bc_volid;
		error = arc_msgbuf(sc, request, sizeof(request),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: delete raidset%d failed\n",
			    device_xname(sc->sc_dev), bc->bc_volid);
			return error;
		}

		break;
	}
	default:
		return EOPNOTSUPP;
	}

	return error;
}

static int
arc_bio_setstate(struct arc_softc *sc, struct bioc_setstate *bs)
{
	/* for a hotspare disk */
	struct request_hs {
		uint8_t cmdcode;
		uint32_t devmask;
	} __packed;

	/* for a pass-through disk */
	struct request_pt {
		uint8_t cmdcode;
		uint8_t devid;
		uint8_t scsi_chan;
		uint8_t scsi_id;
		uint8_t scsi_lun;
		uint8_t tagged_queue;
		uint8_t cache_mode;
		uint8_t max_speed;
	} __packed;

	struct scsibus_softc *scsibus_sc = NULL;
	struct request_hs req_hs; /* to add/remove hotspare */
	struct request_pt req_pt; /* to add a pass-through */
	uint8_t req_gen[2];
	uint8_t reply[1];
	int error = 0;

	switch (bs->bs_status) {
	case BIOC_SSHOTSPARE:
	{
		req_hs.cmdcode = ARC_FW_CREATE_HOTSPARE;
		req_hs.devmask = (1 << bs->bs_target);
		goto hotspare;
	}
	case BIOC_SSDELHOTSPARE:
	{
		req_hs.cmdcode = ARC_FW_DELETE_HOTSPARE;
		req_hs.devmask = (1 << bs->bs_target);
		goto hotspare;
	}
	case BIOC_SSPASSTHRU:
	{
		req_pt.cmdcode = ARC_FW_CREATE_PASSTHRU;
		req_pt.devid = bs->bs_other_id; /* this wants device# */
		req_pt.scsi_chan = bs->bs_channel;
		req_pt.scsi_id = bs->bs_target;
		req_pt.scsi_lun = bs->bs_lun;
		req_pt.tagged_queue = 1; /* always enabled */
		req_pt.cache_mode = 1; /* always enabled */
		req_pt.max_speed = 4; /* always max speed */

		error = arc_msgbuf(sc, &req_pt, sizeof(req_pt),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		/*
		 * Do a rescan on the bus to attach the new device
		 * associated with the pass-through disk.
		 */
		scsibus_sc = device_private(sc->sc_scsibus_dv);
		(void)scsi_probe_bus(scsibus_sc, bs->bs_target, bs->bs_lun);

		goto out;
	}
	case BIOC_SSDELPASSTHRU:
	{
		req_gen[0] = ARC_FW_DELETE_PASSTHRU;
		req_gen[1] = bs->bs_target;
		error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		/*
		 * Detach the sd device associated with this pass-through disk.
		 */
		error = scsipi_target_detach(&sc->sc_chan, bs->bs_target,
		    bs->bs_lun, 0);
		if (error)
			printf("%s: couldn't detach sd device for the "
			    "pass-through disk at %u:%u.%u (error=%d)\n",
			    device_xname(sc->sc_dev),
			    bs->bs_channel, bs->bs_target, bs->bs_lun, error);

		goto out;
	}
	case BIOC_SSCHECKSTART_VOL:
	{
		req_gen[0] = ARC_FW_START_CHECKVOL;
		req_gen[1] = bs->bs_volid;
		error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		goto out;
	}
	case BIOC_SSCHECKSTOP_VOL:
	{
		uint8_t req = ARC_FW_STOP_CHECKVOL;
		error = arc_msgbuf(sc, &req, 1, reply, sizeof(reply));
		if (error != 0)
			return error;

		goto out;
	}
	default:
		return EOPNOTSUPP;
	}

hotspare:
	error = arc_msgbuf(sc, &req_hs, sizeof(req_hs),
	    reply, sizeof(reply));
	if (error != 0)
		return error;

out:
	return arc_fw_parse_status_code(sc, &reply[0]);
}

static int
arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
{
	uint8_t request[2];
	struct arc_fw_sysinfo *sysinfo = NULL;
	struct arc_fw_raidinfo *raidinfo;
	int nvols = 0, i;
	int error = 0;

	raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);

	if (!sc->sc_maxraidset || !sc->sc_maxvolset || !sc->sc_cchans) {
		sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);

		request[0] = ARC_FW_SYSINFO;
		error = arc_msgbuf(sc, request, 1, sysinfo,
		    sizeof(struct arc_fw_sysinfo));
		if (error != 0)
			goto out;

		sc->sc_maxraidset = sysinfo->max_raid_set;
		sc->sc_maxvolset = sysinfo->max_volume_set;
		sc->sc_cchans = sysinfo->ide_channels;
	}

	request[0] = ARC_FW_RAIDINFO;
	for (i = 0; i < sc->sc_maxraidset; i++) {
		request[1] = i;
		error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
		    sizeof(struct arc_fw_raidinfo));
		if (error != 0)
			goto out;

		nvols += raidinfo->volumes;
	}

	strlcpy(bi->bi_dev, device_xname(sc->sc_dev), sizeof(bi->bi_dev));
	bi->bi_novol = nvols;
	bi->bi_nodisk = sc->sc_cchans;

out:
	if (sysinfo)
		kmem_free(sysinfo, sizeof(*sysinfo));
	kmem_free(raidinfo, sizeof(*raidinfo));
	return error;
}

static int
arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
{
	uint8_t request[2];
	int error = 0;
	int nvols = 0, i;

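	/*
	 * The firmware keeps volumes in a sparse table; walk it, skipping
	 * empty slots, until the vol'th populated entry is found.
	 */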
	request[0] = ARC_FW_VOLINFO;
	for (i = 0; i < sc->sc_maxvolset; i++) {
		request[1] = i;
		error = arc_msgbuf(sc, request, sizeof(request), volinfo,
		    sizeof(struct arc_fw_volinfo));
		if (error != 0)
			goto out;

		if (volinfo->capacity == 0 && volinfo->capacity2 == 0)
			continue;

		if (nvols == vol)
			break;

		nvols++;
	}

	if (nvols != vol ||
	    (volinfo->capacity == 0 && volinfo->capacity2 == 0)) {
		error = ENODEV;
		goto out;
	}

out:
	return error;
}

static int
arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
{
	struct arc_fw_volinfo *volinfo;
	uint64_t blocks;
	uint32_t status;
	int error = 0;

	volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);

	error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
	if (error != 0)
		goto out;

	bv->bv_percent = -1;
	bv->bv_seconds = 0;

	status = htole32(volinfo->volume_status);
	if (status == 0x0) {
		if (htole32(volinfo->fail_mask) == 0x0)
			bv->bv_status = BIOC_SVONLINE;
		else
			bv->bv_status = BIOC_SVDEGRADED;
	} else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
		bv->bv_status = BIOC_SVDEGRADED;
	} else if (status & ARC_FW_VOL_STATUS_FAILED) {
		bv->bv_status = BIOC_SVOFFLINE;
	} else if (status & ARC_FW_VOL_STATUS_INITTING) {
		bv->bv_status = BIOC_SVBUILDING;
		bv->bv_percent = htole32(volinfo->progress);
	} else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
		bv->bv_status = BIOC_SVREBUILD;
		bv->bv_percent = htole32(volinfo->progress);
	} else if (status & ARC_FW_VOL_STATUS_MIGRATING) {
		bv->bv_status = BIOC_SVMIGRATING;
		bv->bv_percent = htole32(volinfo->progress);
	} else if (status & ARC_FW_VOL_STATUS_CHECKING) {
		bv->bv_status = BIOC_SVCHECKING;
		bv->bv_percent = htole32(volinfo->progress);
	} else if (status & ARC_FW_VOL_STATUS_NEED_INIT) {
		bv->bv_status = BIOC_SVOFFLINE;
	} else {
		printf("%s: volume %d status 0x%x\n",
		    device_xname(sc->sc_dev), bv->bv_volid, status);
	}

	blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
	blocks += (uint64_t)htole32(volinfo->capacity);
	bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */

	switch (volinfo->raid_level) {
	case ARC_FW_VOL_RAIDLEVEL_0:
		bv->bv_level = 0;
		break;
	case ARC_FW_VOL_RAIDLEVEL_1:
		if (volinfo->member_disks > 2)
			bv->bv_level = BIOC_SVOL_RAID10;
		else
			bv->bv_level = 1;
		break;
	case ARC_FW_VOL_RAIDLEVEL_3:
		bv->bv_level = 3;
		break;
	case ARC_FW_VOL_RAIDLEVEL_5:
		bv->bv_level = 5;
		break;
	case ARC_FW_VOL_RAIDLEVEL_6:
		bv->bv_level = 6;
		break;
	case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
		bv->bv_level = BIOC_SVOL_PASSTHRU;
		break;
	default:
		bv->bv_level = -1;
		break;
	}

	bv->bv_nodisk = volinfo->member_disks;
	bv->bv_stripe_size = volinfo->stripe_size / 2;
	snprintf(bv->bv_dev, sizeof(bv->bv_dev), "sd%d", bv->bv_volid);
	strnvisx(bv->bv_vendor, sizeof(bv->bv_vendor), volinfo->set_name,
	    sizeof(volinfo->set_name), VIS_TRIM|VIS_SAFE|VIS_OCTAL);

out:
	kmem_free(volinfo, sizeof(*volinfo));
	return error;
}

static int
arc_bio_disk_novol(struct arc_softc *sc, struct bioc_disk *bd)
{
	struct arc_fw_diskinfo *diskinfo;
	uint8_t request[2];
	int error = 0;

	diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);

	if (bd->bd_diskid >= sc->sc_cchans) {
		error = ENODEV;
		goto out;
	}

	request[0] = ARC_FW_DISKINFO;
	request[1] = bd->bd_diskid;
	error = arc_msgbuf(sc, request, sizeof(request),
	    diskinfo, sizeof(struct arc_fw_diskinfo));
	if (error != 0)
		goto out;

	/* skip disks with no capacity */
	if (htole32(diskinfo->capacity) == 0 &&
	    htole32(diskinfo->capacity2) == 0)
		goto out;

	bd->bd_disknovol = true;
	arc_bio_disk_filldata(sc, bd, diskinfo, bd->bd_diskid);

out:
	kmem_free(diskinfo, sizeof(*diskinfo));
	return error;
}

static void
arc_bio_disk_filldata(struct arc_softc *sc, struct bioc_disk *bd,
    struct arc_fw_diskinfo *diskinfo, int diskid)
{
	uint64_t blocks;
	char model[81];
	char serial[41];
	char rev[17];

	/* Ignore bit zero for now, we don't know what it means */
	diskinfo->device_state &= ~0x1;

	switch (diskinfo->device_state) {
	case ARC_FW_DISK_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;
	case ARC_FW_DISK_PASSTHRU:
		bd->bd_status = BIOC_SDPASSTHRU;
		break;
	case ARC_FW_DISK_NORMAL:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case ARC_FW_DISK_HOTSPARE:
		bd->bd_status = BIOC_SDHOTSPARE;
		break;
	case ARC_FW_DISK_UNUSED:
		bd->bd_status = BIOC_SDUNUSED;
		break;
	case 0:
		/* disk has been disconnected */
		bd->bd_status = BIOC_SDOFFLINE;
		bd->bd_channel = 1;
		bd->bd_target = 0;
		bd->bd_lun = 0;
		strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
		break;
	default:
		printf("%s: unknown disk device_state: 0x%x\n", __func__,
		    diskinfo->device_state);
		bd->bd_status = BIOC_SDINVALID;
		return;
	}

	blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
	blocks += (uint64_t)htole32(diskinfo->capacity);
	bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */

	strnvisx(model, sizeof(model), diskinfo->model,
	    sizeof(diskinfo->model), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
	strnvisx(serial, sizeof(serial), diskinfo->serial,
	    sizeof(diskinfo->serial), VIS_TRIM|VIS_SAFE|VIS_OCTAL);
	strnvisx(rev, sizeof(rev), diskinfo->firmware_rev,
	    sizeof(diskinfo->firmware_rev), VIS_TRIM|VIS_SAFE|VIS_OCTAL);

	snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
	strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));

#if 0
	bd->bd_channel = diskinfo->scsi_attr.channel;
	bd->bd_target = diskinfo->scsi_attr.target;
	bd->bd_lun = diskinfo->scsi_attr.lun;
#endif

	/*
	 * the firmware doesn't seem to fill scsi_attr in, so fake it with
	 * the diskid.
	 */
	bd->bd_channel = 0;
	bd->bd_target = diskid;
	bd->bd_lun = 0;
}

static int
arc_bio_disk_volume(struct arc_softc *sc, struct bioc_disk *bd)
{
	struct arc_fw_raidinfo *raidinfo;
	struct arc_fw_volinfo *volinfo;
	struct arc_fw_diskinfo *diskinfo;
	uint8_t request[2];
	int error = 0;

	volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
	raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
	diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);

	error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
	if (error != 0)
		goto out;

	request[0] = ARC_FW_RAIDINFO;
	request[1] = volinfo->raid_set_number;

	error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
	    sizeof(struct arc_fw_raidinfo));
	if (error != 0)
		goto out;

	if (bd->bd_diskid >= sc->sc_cchans ||
	    bd->bd_diskid >= raidinfo->member_devices) {
		error = ENODEV;
		goto out;
	}

	if (raidinfo->device_array[bd->bd_diskid] == 0xff) {
		/*
		 * The disk has been disconnected, mark it offline
		 * and put it on another bus.
		 */
		bd->bd_channel = 1;
		bd->bd_target = 0;
		bd->bd_lun = 0;
		bd->bd_status = BIOC_SDOFFLINE;
		strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
		goto out;
	}

	request[0] = ARC_FW_DISKINFO;
	request[1] = raidinfo->device_array[bd->bd_diskid];
	error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
	    sizeof(struct arc_fw_diskinfo));
	if (error != 0)
		goto out;

	/* now fill our bio disk with data from the firmware */
	arc_bio_disk_filldata(sc, bd, diskinfo,
	    raidinfo->device_array[bd->bd_diskid]);

out:
	kmem_free(raidinfo, sizeof(*raidinfo));
	kmem_free(volinfo, sizeof(*volinfo));
	kmem_free(diskinfo, sizeof(*diskinfo));
	return error;
}

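/*
 * The firmware message checksum covers the two bytes of the 16-bit length
 * plus every payload byte, summed and truncated to 8 bits.
 */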
static uint8_t
arc_msg_cksum(void *cmd, uint16_t len)
{
	uint8_t *buf = cmd;
	uint8_t cksum;
	int i;

	cksum = (uint8_t)(len >> 8) + (uint8_t)len;
	for (i = 0; i < len; i++)
		cksum += buf[i];

	return cksum;
}


static int
arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
    size_t rbuflen)
{
	uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
	uint8_t *wbuf, *rbuf;
	int wlen, wdone = 0, rlen, rdone = 0;
	struct arc_fw_bufhdr *bufhdr;
	uint32_t reg, rwlen;
	int error = 0;
#ifdef ARC_DEBUG
	int i;
#endif

	wbuf = rbuf = NULL;

	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
	    device_xname(sc->sc_dev), wbuflen, rbuflen);

	wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
	wbuf = kmem_alloc(wlen, KM_SLEEP);

	rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
	rbuf = kmem_alloc(rlen, KM_SLEEP);

	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
	    device_xname(sc->sc_dev), wlen, rlen);

	bufhdr = (struct arc_fw_bufhdr *)wbuf;
	bufhdr->hdr = arc_fw_hdr;
	bufhdr->len = htole16(wbuflen);
	memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
	wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);

	arc_lock(sc);
	if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0) {
		error = EBUSY;
		goto out;
	}

	reg = ARC_REG_OUTB_DOORBELL_READ_OK;

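	/*
	 * Ship the framed request to the controller in rwbuf-sized chunks
	 * and collect the reply the same way, using the inbound and
	 * outbound doorbells for flow control.
	 */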
	do {
		if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
			memset(rwbuf, 0, sizeof(rwbuf));
			rwlen = (wlen - wdone) % sizeof(rwbuf);
			memcpy(rwbuf, &wbuf[wdone], rwlen);

#ifdef ARC_DEBUG
			if (arcdebug & ARC_D_DB) {
				printf("%s: write %d:",
				    device_xname(sc->sc_dev), rwlen);
				for (i = 0; i < rwlen; i++)
					printf(" 0x%02x", rwbuf[i]);
				printf("\n");
			}
#endif

			/* copy the chunk to the hw */
			arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
			arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
			    sizeof(rwbuf));

			/* say we have a buffer for the hw */
			arc_write(sc, ARC_REG_INB_DOORBELL,
			    ARC_REG_INB_DOORBELL_WRITE_OK);

			wdone += rwlen;
		}

		while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
			arc_wait(sc);

		arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);

		DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
		    device_xname(sc->sc_dev), reg);

		if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
			rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
			if (rwlen > sizeof(rwbuf)) {
				DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
				    device_xname(sc->sc_dev));
				error = EIO;
				goto out;
			}

			arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
			    sizeof(rwbuf));

			arc_write(sc, ARC_REG_INB_DOORBELL,
			    ARC_REG_INB_DOORBELL_READ_OK);

#ifdef ARC_DEBUG
			printf("%s: len: %d+%d=%d/%d\n",
			    device_xname(sc->sc_dev),
			    rwlen, rdone, rwlen + rdone, rlen);
			if (arcdebug & ARC_D_DB) {
				printf("%s: read:",
				    device_xname(sc->sc_dev));
				for (i = 0; i < rwlen; i++)
					printf(" 0x%02x", rwbuf[i]);
				printf("\n");
			}
#endif

			if ((rdone + rwlen) > rlen) {
				DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
				    device_xname(sc->sc_dev));
				error = EIO;
				goto out;
			}

			memcpy(&rbuf[rdone], rwbuf, rwlen);
			rdone += rwlen;
		}
	} while (rdone != rlen);

	bufhdr = (struct arc_fw_bufhdr *)rbuf;
	if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
	    bufhdr->len != htole16(rbuflen)) {
		DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
		    device_xname(sc->sc_dev));
		error = EIO;
		goto out;
	}

	memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);

	if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
		DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
		    device_xname(sc->sc_dev));
		error = EIO;
		goto out;
	}

out:
	arc_unlock(sc);
	kmem_free(wbuf, wlen);
	kmem_free(rbuf, rlen);

	return error;
}

static void
arc_lock(struct arc_softc *sc)
{
	rw_enter(&sc->sc_rwlock, RW_WRITER);
	mutex_spin_enter(&sc->sc_mutex);
	arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
	sc->sc_talking = 1;
}

static void
arc_unlock(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	sc->sc_talking = 0;
	mutex_spin_exit(&sc->sc_mutex);
	rw_exit(&sc->sc_rwlock);
}

static void
arc_wait(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	if (cv_timedwait(&sc->sc_condvar, &sc->sc_mutex, hz) == EWOULDBLOCK)
		arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
}


static void
arc_create_sensors(void *arg)
{
	struct arc_softc *sc = arg;
	struct bioc_inq bi;
	struct bioc_vol bv;
	int i, j;
	size_t slen, count = 0;

	memset(&bi, 0, sizeof(bi));
	if (arc_bio_inq(sc, &bi) != 0) {
		aprint_error("%s: unable to query firmware for sensor info\n",
		    device_xname(sc->sc_dev));
		kthread_exit(0);
	}

	/* There's no point to continue if there are no volumes */
	if (!bi.bi_novol)
		kthread_exit(0);

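	/*
	 * Count the sensors we need: one per volume plus one per member
	 * disk, skipping pass-through volumes.
	 */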
	for (i = 0; i < bi.bi_novol; i++) {
		memset(&bv, 0, sizeof(bv));
		bv.bv_volid = i;
		if (arc_bio_vol(sc, &bv) != 0)
			kthread_exit(0);

		/* Skip passthrough volumes */
		if (bv.bv_level == BIOC_SVOL_PASSTHRU)
			continue;

		/* new volume found */
		sc->sc_nsensors++;
		/* new disk in a volume found */
		sc->sc_nsensors+= bv.bv_nodisk;
	}

	/* No valid volumes */
	if (!sc->sc_nsensors)
		kthread_exit(0);

	sc->sc_sme = sysmon_envsys_create();
	slen = sizeof(arc_edata_t) * sc->sc_nsensors;
	sc->sc_arc_sensors = kmem_zalloc(slen, KM_SLEEP);

	/* Attach sensors for volumes and disks */
	for (i = 0; i < bi.bi_novol; i++) {
		memset(&bv, 0, sizeof(bv));
		bv.bv_volid = i;
		if (arc_bio_vol(sc, &bv) != 0)
			goto bad;

		sc->sc_arc_sensors[count].arc_sensor.units = ENVSYS_DRIVE;
		sc->sc_arc_sensors[count].arc_sensor.state = ENVSYS_SINVALID;
		sc->sc_arc_sensors[count].arc_sensor.value_cur =
		    ENVSYS_DRIVE_EMPTY;
		sc->sc_arc_sensors[count].arc_sensor.flags =
		    ENVSYS_FMONSTCHANGED;

		/* Skip passthrough volumes */
		if (bv.bv_level == BIOC_SVOL_PASSTHRU)
			continue;

		if (bv.bv_level == BIOC_SVOL_RAID10)
			snprintf(sc->sc_arc_sensors[count].arc_sensor.desc,
			    sizeof(sc->sc_arc_sensors[count].arc_sensor.desc),
			    "RAID 1+0 volume%d (%s)", i, bv.bv_dev);
		else
			snprintf(sc->sc_arc_sensors[count].arc_sensor.desc,
			    sizeof(sc->sc_arc_sensors[count].arc_sensor.desc),
			    "RAID %d volume%d (%s)", bv.bv_level, i,
			    bv.bv_dev);

		sc->sc_arc_sensors[count].arc_volid = i;

		if (sysmon_envsys_sensor_attach(sc->sc_sme,
		    &sc->sc_arc_sensors[count].arc_sensor))
			goto bad;

		count++;

		/* Attach disk sensors for this volume */
		for (j = 0; j < bv.bv_nodisk; j++) {
			sc->sc_arc_sensors[count].arc_sensor.state =
			    ENVSYS_SINVALID;
			sc->sc_arc_sensors[count].arc_sensor.units =
			    ENVSYS_DRIVE;
			sc->sc_arc_sensors[count].arc_sensor.value_cur =
			    ENVSYS_DRIVE_EMPTY;
			sc->sc_arc_sensors[count].arc_sensor.flags =
			    ENVSYS_FMONSTCHANGED;

			snprintf(sc->sc_arc_sensors[count].arc_sensor.desc,
			    sizeof(sc->sc_arc_sensors[count].arc_sensor.desc),
			    "disk%d volume%d (%s)", j, i, bv.bv_dev);
			sc->sc_arc_sensors[count].arc_volid = i;
			sc->sc_arc_sensors[count].arc_diskid = j + 10;

			if (sysmon_envsys_sensor_attach(sc->sc_sme,
			    &sc->sc_arc_sensors[count].arc_sensor))
				goto bad;

			count++;
		}
	}

	/*
	 * Register our envsys driver with the framework now that the
	 * sensors were all attached.
	 */
	sc->sc_sme->sme_name = device_xname(sc->sc_dev);
	sc->sc_sme->sme_cookie = sc;
	sc->sc_sme->sme_refresh = arc_refresh_sensors;

	if (sysmon_envsys_register(sc->sc_sme)) {
		aprint_debug("%s: unable to register with sysmon\n",
		    device_xname(sc->sc_dev));
		goto bad;
	}
	kthread_exit(0);

bad:
	sysmon_envsys_destroy(sc->sc_sme);
	kmem_free(sc->sc_arc_sensors, slen);

	sc->sc_sme = NULL;
	sc->sc_arc_sensors = NULL;

	kthread_exit(0);
}

static void
arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
{
	struct arc_softc *sc = sme->sme_cookie;
	struct bioc_vol bv;
	struct bioc_disk bd;
	arc_edata_t *arcdata = (arc_edata_t *)edata;

	/* sanity check */
	if (edata->units != ENVSYS_DRIVE)
		return;

	memset(&bv, 0, sizeof(bv));
	bv.bv_volid = arcdata->arc_volid;

	if (arc_bio_vol(sc, &bv)) {
		bv.bv_status = BIOC_SVINVALID;
		bio_vol_to_envsys(edata, &bv);
		return;
	}

	if (arcdata->arc_diskid) {
		/* Current sensor is handling a disk volume member */
		memset(&bd, 0, sizeof(bd));
		bd.bd_volid = arcdata->arc_volid;
		bd.bd_diskid = arcdata->arc_diskid - 10;

		if (arc_bio_disk_volume(sc, &bd))
			bd.bd_status = BIOC_SDOFFLINE;
		bio_disk_to_envsys(edata, &bd);
	} else {
		/* Current sensor is handling a volume */
		bio_vol_to_envsys(edata, &bv);
	}
}
#endif /* NBIO > 0 */

static uint32_t
arc_read(struct arc_softc *sc, bus_size_t r)
{
	uint32_t v;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
	    device_xname(sc->sc_dev), r, v);

	return v;
}

static void
arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (uint32_t *)buf, len >> 2);
}

static void
arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
	    device_xname(sc->sc_dev), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static void
arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (const uint32_t *)buf, len >> 2);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_WRITE);
}

static int
arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
    uint32_t target)
{
	int i;

	DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
	    device_xname(sc->sc_dev), r, mask, target);

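	/* poll the register for up to 10 seconds (10000 x 1ms) */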
	for (i = 0; i < 10000; i++) {
		if ((arc_read(sc, r) & mask) == target)
			return 0;
		delay(1000);
	}

	return 1;
}

#ifdef unused
static int
arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
    uint32_t target)
{
	int i;

	DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
	    device_xname(sc->sc_dev), r, mask, target);

	for (i = 0; i < 10000; i++) {
		if ((arc_read(sc, r) & mask) != target)
			return 0;
		delay(1000);
	}

	return 1;
}
#endif

static int
arc_msg0(struct arc_softc *sc, uint32_t m)
{
	/* post message */
	arc_write(sc, ARC_REG_INB_MSG0, m);
	/* wait for the fw to do it */
	if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
	    ARC_REG_INTRSTAT_MSG0) != 0)
		return 1;

	/* ack it */
	arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);

	return 0;
}

static struct arc_dmamem *
arc_dmamem_alloc(struct arc_softc *sc, size_t size)
{
	struct arc_dmamem *adm;
	int nsegs;

	adm = kmem_zalloc(sizeof(*adm), KM_NOSLEEP);
	if (adm == NULL)
		return NULL;

	adm->adm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
		goto admfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
	    &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(adm->adm_kva, 0, size);

	return adm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
admfree:
	kmem_free(adm, sizeof(*adm));

	return NULL;
}

static void
arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
{
	bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
	kmem_free(adm, sizeof(*adm));
}

static int
arc_alloc_ccbs(device_t self)
{
	struct arc_softc *sc = device_private(self);
	struct arc_ccb *ccb;
	uint8_t *cmd;
	int i;
	size_t ccbslen;

	TAILQ_INIT(&sc->sc_ccb_free);

	ccbslen = sizeof(struct arc_ccb) * sc->sc_req_count;
	sc->sc_ccbs = kmem_zalloc(ccbslen, KM_SLEEP);

	sc->sc_requests = arc_dmamem_alloc(sc,
	    ARC_MAX_IOCMDLEN * sc->sc_req_count);
	if (sc->sc_requests == NULL) {
		aprint_error_dev(self, "unable to allocate ccb dmamem\n");
		goto free_ccbs;
	}
	cmd = ARC_DMA_KVA(sc->sc_requests);

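	/* carve the request dmamem into per-ccb command frames */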
	for (i = 0; i < sc->sc_req_count; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
		    MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
			aprint_error_dev(self,
			    "unable to create dmamap for ccb %d\n", i);
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		ccb->ccb_id = i;
		ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;

		ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
		ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;

		arc_put_ccb(sc, ccb);
	}

	return 0;

free_maps:
	while ((ccb = arc_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	arc_dmamem_free(sc, sc->sc_requests);

free_ccbs:
	kmem_free(sc->sc_ccbs, ccbslen);

	return 1;
}

static struct arc_ccb *
arc_get_ccb(struct arc_softc *sc)
{
	struct arc_ccb *ccb;

	ccb = TAILQ_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL)
		TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);

	return ccb;
}

static void
arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
{
	ccb->ccb_xs = NULL;
	memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
	TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
}