1 /* $NetBSD: arcmsr.c,v 1.15 2008/02/29 18:15:41 xtraeme Exp $ */
2 /* $OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */
3
4 /*
5 * Copyright (c) 2007, 2008 Juan Romero Pardines <xtraeme (at) netbsd.org>
6 * Copyright (c) 2006 David Gwynne <dlg (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include "bio.h"
22
23 #include <sys/cdefs.h>
24 __KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.15 2008/02/29 18:15:41 xtraeme Exp $");
25
26 #include <sys/param.h>
27 #include <sys/buf.h>
28 #include <sys/kernel.h>
29 #include <sys/malloc.h>
30 #include <sys/device.h>
31 #include <sys/kmem.h>
32 #include <sys/kthread.h>
33 #include <sys/mutex.h>
34 #include <sys/condvar.h>
35 #include <sys/rwlock.h>
36
37 #if NBIO > 0
38 #include <sys/ioctl.h>
39 #include <dev/biovar.h>
40 #endif
41
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 #include <dev/pci/pcidevs.h>
45
46 #include <dev/scsipi/scsipi_all.h>
47 #include <dev/scsipi/scsi_all.h>
48 #include <dev/scsipi/scsiconf.h>
49
50 #include <dev/sysmon/sysmonvar.h>
51
52 #include <sys/bus.h>
53
54 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
55
56 #include <dev/pci/arcmsrvar.h>
57
58 /* #define ARC_DEBUG */
59 #ifdef ARC_DEBUG
60 #define ARC_D_INIT (1<<0)
61 #define ARC_D_RW (1<<1)
62 #define ARC_D_DB (1<<2)
63
64 int arcdebug = 0;
65
66 #define DPRINTF(p...) do { if (arcdebug) printf(p); } while (0)
67 #define DNPRINTF(n, p...) do { if ((n) & arcdebug) printf(p); } while (0)
68
69 #else
70 #define DPRINTF(p...) /* p */
71 #define DNPRINTF(n, p...) /* n, p */
72 #endif
73
74 /*
75 * the fw header must always equal this.
76 */
77 static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };
78
79 /*
80 * autoconf(9) glue.
81 */
82 static int arc_match(device_t, struct cfdata *, void *);
83 static void arc_attach(device_t, device_t, void *);
84 static int arc_detach(device_t, int);
85 static bool arc_shutdown(device_t, int);
86 static int arc_intr(void *);
87 static void arc_minphys(struct buf *);
88
89 CFATTACH_DECL(arcmsr, sizeof(struct arc_softc),
90 arc_match, arc_attach, arc_detach, NULL);
91
92 /*
93 * bio(4) and sysmon_envsys(9) glue.
94 */
95 #if NBIO > 0
96 static int arc_bioctl(struct device *, u_long, void *);
97 static int arc_bio_inq(struct arc_softc *, struct bioc_inq *);
98 static int arc_bio_vol(struct arc_softc *, struct bioc_vol *);
99 static int arc_bio_disk_volume(struct arc_softc *, struct bioc_disk *);
100 static int arc_bio_disk_novol(struct arc_softc *, struct bioc_disk *);
101 static void arc_bio_disk_filldata(struct arc_softc *, struct bioc_disk *,
102 struct arc_fw_diskinfo *, int);
103 static int arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
104 static int arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
105 static int arc_bio_getvol(struct arc_softc *, int,
106 struct arc_fw_volinfo *);
107 static int arc_bio_setstate(struct arc_softc *, struct bioc_setstate *);
108 static int arc_bio_volops(struct arc_softc *, struct bioc_volops *);
109 static void arc_create_sensors(void *);
110 static void arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
111 static int arc_fw_parse_status_code(struct arc_softc *, uint8_t *);
112 #endif
113
114 static int
115 arc_match(device_t parent, struct cfdata *match, void *aux)
116 {
117 struct pci_attach_args *pa = aux;
118
119 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
120 switch (PCI_PRODUCT(pa->pa_id)) {
121 case PCI_PRODUCT_ARECA_ARC1110:
122 case PCI_PRODUCT_ARECA_ARC1120:
123 case PCI_PRODUCT_ARECA_ARC1130:
124 case PCI_PRODUCT_ARECA_ARC1160:
125 case PCI_PRODUCT_ARECA_ARC1170:
126 case PCI_PRODUCT_ARECA_ARC1200:
127 case PCI_PRODUCT_ARECA_ARC1202:
128 case PCI_PRODUCT_ARECA_ARC1210:
129 case PCI_PRODUCT_ARECA_ARC1220:
130 case PCI_PRODUCT_ARECA_ARC1230:
131 case PCI_PRODUCT_ARECA_ARC1260:
132 case PCI_PRODUCT_ARECA_ARC1270:
133 case PCI_PRODUCT_ARECA_ARC1280:
134 case PCI_PRODUCT_ARECA_ARC1380:
135 case PCI_PRODUCT_ARECA_ARC1381:
136 case PCI_PRODUCT_ARECA_ARC1680:
137 case PCI_PRODUCT_ARECA_ARC1681:
138 return 1;
139 default:
140 break;
141 }
142 }
143
144 return 0;
145 }
146
147 static void
148 arc_attach(device_t parent, device_t self, void *aux)
149 {
150 struct arc_softc *sc = device_private(self);
151 struct pci_attach_args *pa = aux;
152 struct scsipi_adapter *adapt = &sc->sc_adapter;
153 struct scsipi_channel *chan = &sc->sc_chan;
154
155 sc->sc_talking = 0;
156 rw_init(&sc->sc_rwlock);
157 mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
158 cv_init(&sc->sc_condvar, "arcdb");
159
160 if (arc_map_pci_resources(sc, pa) != 0) {
161 /* error message printed by arc_map_pci_resources */
162 return;
163 }
164
165 if (arc_query_firmware(sc) != 0) {
166 /* error message printed by arc_query_firmware */
167 goto unmap_pci;
168 }
169
170 if (arc_alloc_ccbs(sc) != 0) {
171 /* error message printed by arc_alloc_ccbs */
172 goto unmap_pci;
173 }
174
175 if (!pmf_device_register1(self, NULL, NULL, arc_shutdown))
176 panic("%s: couldn't establish shutdown handler\n",
177 device_xname(self));
178
179 memset(adapt, 0, sizeof(*adapt));
180 adapt->adapt_dev = self;
181 adapt->adapt_nchannels = 1;
182 adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
183 adapt->adapt_max_periph = adapt->adapt_openings;
184 adapt->adapt_minphys = arc_minphys;
185 adapt->adapt_request = arc_scsi_cmd;
186
187 memset(chan, 0, sizeof(*chan));
188 chan->chan_adapter = adapt;
189 chan->chan_bustype = &scsi_bustype;
190 chan->chan_nluns = ARC_MAX_LUN;
191 chan->chan_ntargets = ARC_MAX_TARGET;
192 chan->chan_id = ARC_MAX_TARGET;
193 chan->chan_channel = 0;
194 chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
195
196 /*
197 * Save the device_t returned, because we may need to attach
198 * devices via the management interface.
199 */
200 sc->sc_scsibus_dv = config_found(self, &sc->sc_chan, scsiprint);
201
202 /* enable interrupts */
203 arc_write(sc, ARC_REG_INTRMASK,
204 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));
205
206 #if NBIO > 0
207 /*
208 * Register the driver to bio(4) and setup the sensors.
209 */
210 if (bio_register(self, arc_bioctl) != 0)
211 panic("%s: bioctl registration failed\n", device_xname(self));
212
213 /*
214 * you need to talk to the firmware to get volume info. our firmware
215 * interface relies on being able to sleep, so we need to use a thread
216 * to do the work.
217 */
218 if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
219 arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
220 panic("%s: unable to create a kernel thread for sensors\n",
221 device_xname(self));
222 #endif
223
224 return;
225
226 unmap_pci:
227 arc_unmap_pci_resources(sc);
228 }
229
230 static int
231 arc_detach(device_t self, int flags)
232 {
233 struct arc_softc *sc = device_private(self);
234
235 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
236 aprint_error("%s: timeout waiting to stop bg rebuild\n",
237 device_xname(&sc->sc_dev));
238
239 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
240 aprint_error("%s: timeout waiting to flush cache\n",
241 device_xname(&sc->sc_dev));
242
243 return 0;
244 }
245
246 static bool
247 arc_shutdown(device_t self, int how)
248 {
249 struct arc_softc *sc = device_private(self);
250
251 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
252 aprint_error("%s: timeout waiting to stop bg rebuild\n",
253 device_xname(&sc->sc_dev));
254
255 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
256 aprint_error("%s: timeout waiting to flush cache\n",
257 device_xname(&sc->sc_dev));
258
259 return true;
260 }
261
262 static void
263 arc_minphys(struct buf *bp)
264 {
265 if (bp->b_bcount > MAXPHYS)
266 bp->b_bcount = MAXPHYS;
267 minphys(bp);
268 }
269
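/*
 * Interrupt handler: acknowledge the doorbell and post-queue interrupt
 * bits, wake anyone talking to the firmware through the message buffer,
 * and complete every CCB found on the reply queue.
 */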
270 static int
271 arc_intr(void *arg)
272 {
273 struct arc_softc *sc = arg;
274 struct arc_ccb *ccb = NULL;
275 char *kva = ARC_DMA_KVA(sc->sc_requests);
276 struct arc_io_cmd *cmd;
277 uint32_t reg, intrstat;
278
279 mutex_spin_enter(&sc->sc_mutex);
280 intrstat = arc_read(sc, ARC_REG_INTRSTAT);
281 if (intrstat == 0x0) {
282 mutex_spin_exit(&sc->sc_mutex);
283 return 0;
284 }
285
286 intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
287 arc_write(sc, ARC_REG_INTRSTAT, intrstat);
288
289 if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
290 if (sc->sc_talking) {
291 arc_write(sc, ARC_REG_INTRMASK,
292 ~ARC_REG_INTRMASK_POSTQUEUE);
293 cv_broadcast(&sc->sc_condvar);
294 } else {
295 /* otherwise drop it */
296 reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
297 arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
298 if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
299 arc_write(sc, ARC_REG_INB_DOORBELL,
300 ARC_REG_INB_DOORBELL_READ_OK);
301 }
302 }
303 mutex_spin_exit(&sc->sc_mutex);
304
305 while ((reg = arc_pop(sc)) != 0xffffffff) {
306 cmd = (struct arc_io_cmd *)(kva +
307 ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
308 (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
309 ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
310
311 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
312 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
313 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
314
315 arc_scsi_cmd_done(sc, ccb, reg);
316 }
317
318
319 return 1;
320 }
321
322 void
323 arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
324 {
325 struct scsipi_periph *periph;
326 struct scsipi_xfer *xs;
327 struct scsipi_adapter *adapt = chan->chan_adapter;
328 struct arc_softc *sc = device_private(adapt->adapt_dev);
329 struct arc_ccb *ccb;
330 struct arc_msg_scsicmd *cmd;
331 uint32_t reg;
332 uint8_t target;
333
334 switch (req) {
335 case ADAPTER_REQ_GROW_RESOURCES:
336 /* Not supported. */
337 return;
338 case ADAPTER_REQ_SET_XFER_MODE:
339 /* Not supported. */
340 return;
341 case ADAPTER_REQ_RUN_XFER:
342 break;
343 }
344
345 mutex_spin_enter(&sc->sc_mutex);
346
347 xs = arg;
348 periph = xs->xs_periph;
349 target = periph->periph_target;
350
351 if (xs->cmdlen > ARC_MSG_CDBLEN) {
352 memset(&xs->sense, 0, sizeof(xs->sense));
353 xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
354 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
355 xs->sense.scsi_sense.asc = 0x20;
356 xs->error = XS_SENSE;
357 xs->status = SCSI_CHECK;
358 mutex_spin_exit(&sc->sc_mutex);
359 scsipi_done(xs);
360 return;
361 }
362
363 ccb = arc_get_ccb(sc);
364 if (ccb == NULL) {
365 xs->error = XS_RESOURCE_SHORTAGE;
366 mutex_spin_exit(&sc->sc_mutex);
367 scsipi_done(xs);
368 return;
369 }
370
371 ccb->ccb_xs = xs;
372
373 if (arc_load_xs(ccb) != 0) {
374 xs->error = XS_DRIVER_STUFFUP;
375 arc_put_ccb(sc, ccb);
376 mutex_spin_exit(&sc->sc_mutex);
377 scsipi_done(xs);
378 return;
379 }
380
381 cmd = &ccb->ccb_cmd->cmd;
382 reg = ccb->ccb_cmd_post;
383
384 /* bus is always 0 */
385 cmd->target = target;
386 cmd->lun = periph->periph_lun;
387 cmd->function = 1; /* XXX magic number */
388
389 cmd->cdb_len = xs->cmdlen;
390 cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
391 if (xs->xs_control & XS_CTL_DATA_OUT)
392 cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
393 if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
394 cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
395 reg |= ARC_REG_POST_QUEUE_BIGFRAME;
396 }
397
398 cmd->context = htole32(ccb->ccb_id);
399 cmd->data_len = htole32(xs->datalen);
400
401 memcpy(cmd->cdb, xs->cmd, xs->cmdlen);
402
403 /* we've built the command, let's put it on the hw */
404 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
405 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
406 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
407
408 arc_push(sc, reg);
409 if (xs->xs_control & XS_CTL_POLL) {
410 if (arc_complete(sc, ccb, xs->timeout) != 0) {
411 xs->error = XS_DRIVER_STUFFUP;
412 mutex_spin_exit(&sc->sc_mutex);
413 scsipi_done(xs);
414 return;
415 }
416 }
417
418 mutex_spin_exit(&sc->sc_mutex);
419 }
420
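/*
 * Map the xfer data for DMA and translate the resulting segments into
 * the 64-bit scatter/gather list inside the command frame.
 */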
421 int
422 arc_load_xs(struct arc_ccb *ccb)
423 {
424 struct arc_softc *sc = ccb->ccb_sc;
425 struct scsipi_xfer *xs = ccb->ccb_xs;
426 bus_dmamap_t dmap = ccb->ccb_dmamap;
427 struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
428 uint64_t addr;
429 int i, error;
430
431 if (xs->datalen == 0)
432 return 0;
433
434 error = bus_dmamap_load(sc->sc_dmat, dmap,
435 xs->data, xs->datalen, NULL,
436 (xs->xs_control & XS_CTL_NOSLEEP) ?
437 BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
438 if (error != 0) {
439 aprint_error("%s: error %d loading dmamap\n",
440 device_xname(&sc->sc_dev), error);
441 return 1;
442 }
443
444 for (i = 0; i < dmap->dm_nsegs; i++) {
445 sge = &sgl[i];
446
447 sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
448 addr = dmap->dm_segs[i].ds_addr;
449 sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
450 sge->sg_lo_addr = htole32((uint32_t)addr);
451 }
452
453 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
454 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
455 BUS_DMASYNC_PREWRITE);
456
457 return 0;
458 }
459
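/*
 * Finish a command: sync and unload the data DMA map, translate the
 * firmware status into scsipi status/sense and hand the CCB back to
 * the free list.
 */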
460 void
461 arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
462 {
463 struct scsipi_xfer *xs = ccb->ccb_xs;
464 struct arc_msg_scsicmd *cmd;
465
466 if (xs->datalen != 0) {
467 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
468 ccb->ccb_dmamap->dm_mapsize,
469 (xs->xs_control & XS_CTL_DATA_IN) ?
470 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
471 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
472 }
473
474 /* timeout_del */
475 xs->status |= XS_STS_DONE;
476
477 if (reg & ARC_REG_REPLY_QUEUE_ERR) {
478 cmd = &ccb->ccb_cmd->cmd;
479
480 switch (cmd->status) {
481 case ARC_MSG_STATUS_SELTIMEOUT:
482 case ARC_MSG_STATUS_ABORTED:
483 case ARC_MSG_STATUS_INIT_FAIL:
484 xs->status = SCSI_OK;
485 xs->error = XS_SELTIMEOUT;
486 break;
487
488 case SCSI_CHECK:
489 memset(&xs->sense, 0, sizeof(xs->sense));
490 memcpy(&xs->sense, cmd->sense_data,
491 min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
492 xs->sense.scsi_sense.response_code =
493 SSD_RCODE_VALID | 0x70;
494 xs->status = SCSI_CHECK;
495 xs->error = XS_SENSE;
496 xs->resid = 0;
497 break;
498
499 default:
500 /* unknown device status */
501 xs->error = XS_BUSY; /* try again later? */
502 xs->status = SCSI_BUSY;
503 break;
504 }
505 } else {
506 xs->status = SCSI_OK;
507 xs->error = XS_NOERROR;
508 xs->resid = 0;
509 }
510
511 arc_put_ccb(sc, ccb);
512 scsipi_done(xs);
513 }
514
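/*
 * Poll-mode completion for XS_CTL_POLL commands: spin on the reply
 * queue, completing whatever comes off it, until the nominated CCB is
 * done or the timeout (in milliseconds) expires.
 */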
515 int
516 arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
517 {
518 struct arc_ccb *ccb = NULL;
519 char *kva = ARC_DMA_KVA(sc->sc_requests);
520 struct arc_io_cmd *cmd;
521 uint32_t reg;
522
523 do {
524 reg = arc_pop(sc);
525 if (reg == 0xffffffff) {
526 if (timeout-- == 0)
527 return 1;
528
529 delay(1000);
530 continue;
531 }
532
533 cmd = (struct arc_io_cmd *)(kva +
534 ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
535 ARC_DMA_DVA(sc->sc_requests)));
536 ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
537
538 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
539 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
540 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
541
542 arc_scsi_cmd_done(sc, ccb, reg);
543 } while (nccb != ccb);
544
545 return 0;
546 }
547
548 int
549 arc_map_pci_resources(struct arc_softc *sc, struct pci_attach_args *pa)
550 {
551 pcireg_t memtype;
552 pci_intr_handle_t ih;
553
554 sc->sc_pc = pa->pa_pc;
555 sc->sc_tag = pa->pa_tag;
556 sc->sc_dmat = pa->pa_dmat;
557
558 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
559 if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
560 &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
561 aprint_error(": unable to map system interface register\n");
562 return 1;
563 }
564
565 if (pci_intr_map(pa, &ih) != 0) {
566 aprint_error(": unable to map interrupt\n");
567 goto unmap;
568 }
569
570 sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
571 arc_intr, sc);
572 if (sc->sc_ih == NULL) {
573 aprint_error(": unable to map interrupt [2]\n");
574 goto unmap;
575 }
576
577 aprint_normal("\n");
578 aprint_normal("%s: interrupting at %s\n",
579 device_xname(&sc->sc_dev), pci_intr_string(pa->pa_pc, ih));
580
581 return 0;
582
583 unmap:
584 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
585 sc->sc_ios = 0;
586 return 1;
587 }
588
589 void
590 arc_unmap_pci_resources(struct arc_softc *sc)
591 {
592 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
593 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
594 sc->sc_ios = 0;
595 }
596
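/*
 * Wait for the firmware to come up, fetch its configuration block and
 * start background rebuild.  The queue length reported here determines
 * how many CCBs are allocated later.
 */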
597 int
598 arc_query_firmware(struct arc_softc *sc)
599 {
600 struct arc_msg_firmware_info fwinfo;
601 char string[81]; /* sizeof(vendor)*2+1 */
602
603 if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
604 ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
605 aprint_debug("%s: timeout waiting for firmware ok\n",
606 device_xname(&sc->sc_dev));
607 return 1;
608 }
609
610 if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
611 aprint_debug("%s: timeout waiting for get config\n",
612 device_xname(&sc->sc_dev));
613 return 1;
614 }
615
616 if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
617 aprint_debug("%s: timeout waiting to start bg rebuild\n",
618 device_xname(&sc->sc_dev));
619 return 1;
620 }
621
622 arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));
623
624 DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
625 device_xname(&sc->sc_dev), htole32(fwinfo.signature));
626
627 if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
628 aprint_error("%s: invalid firmware info from iop\n",
629 device_xname(&sc->sc_dev));
630 return 1;
631 }
632
633 DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
634 device_xname(&sc->sc_dev),
635 htole32(fwinfo.request_len));
636 DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
637 device_xname(&sc->sc_dev),
638 htole32(fwinfo.queue_len));
639 DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
640 device_xname(&sc->sc_dev),
641 htole32(fwinfo.sdram_size));
642 DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
643 device_xname(&sc->sc_dev),
644 htole32(fwinfo.sata_ports));
645
646 scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
647 DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
648 device_xname(&sc->sc_dev), string);
649
650 scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));
651 aprint_normal("%s: Areca %s Host Adapter RAID controller\n",
652 device_xname(&sc->sc_dev), string);
653
654 scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
655 DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
656 device_xname(&sc->sc_dev), string);
657
658 aprint_normal("%s: %d ports, %dMB SDRAM, firmware <%s>\n",
659 device_xname(&sc->sc_dev), htole32(fwinfo.sata_ports),
660 htole32(fwinfo.sdram_size), string);
661
662 if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
663 aprint_error("%s: unexpected request frame size (%d != %d)\n",
664 device_xname(&sc->sc_dev),
665 htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
666 return 1;
667 }
668
669 sc->sc_req_count = htole32(fwinfo.queue_len);
670
671 return 0;
672 }
673
674 #if NBIO > 0
675 static int
676 arc_bioctl(struct device *self, u_long cmd, void *addr)
677 {
678 struct arc_softc *sc = device_private(self);
679 int error = 0;
680
681 switch (cmd) {
682 case BIOCINQ:
683 error = arc_bio_inq(sc, (struct bioc_inq *)addr);
684 break;
685
686 case BIOCVOL:
687 error = arc_bio_vol(sc, (struct bioc_vol *)addr);
688 break;
689
690 case BIOCDISK:
691 error = arc_bio_disk_volume(sc, (struct bioc_disk *)addr);
692 break;
693
694 case BIOCDISK_NOVOL:
695 error = arc_bio_disk_novol(sc, (struct bioc_disk *)addr);
696 break;
697
698 case BIOCALARM:
699 error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
700 break;
701
702 case BIOCSETSTATE:
703 error = arc_bio_setstate(sc, (struct bioc_setstate *)addr);
704 break;
705
706 case BIOCVOLOPS:
707 error = arc_bio_volops(sc, (struct bioc_volops *)addr);
708 break;
709
710 default:
711 error = ENOTTY;
712 break;
713 }
714
715 return error;
716 }
717
718 static int
719 arc_fw_parse_status_code(struct arc_softc *sc, uint8_t *reply)
720 {
721 switch (*reply) {
722 case ARC_FW_CMD_RAIDINVAL:
723 printf("%s: firmware error (invalid raid set)\n",
724 device_xname(&sc->sc_dev));
725 return EINVAL;
726 case ARC_FW_CMD_VOLINVAL:
727 printf("%s: firmware error (invalid volume set)\n",
728 device_xname(&sc->sc_dev));
729 return EINVAL;
730 case ARC_FW_CMD_NORAID:
731 printf("%s: firmware error (nonexistent raid set)\n",
732 device_xname(&sc->sc_dev));
733 return ENODEV;
734 case ARC_FW_CMD_NOVOLUME:
735 printf("%s: firmware error (nonexistent volume set)\n",
736 device_xname(&sc->sc_dev));
737 return ENODEV;
738 case ARC_FW_CMD_NOPHYSDRV:
739 printf("%s: firmware error (nonexistent physical drive)\n",
740 device_xname(&sc->sc_dev));
741 return ENODEV;
742 case ARC_FW_CMD_PARAM_ERR:
743 printf("%s: firmware error (parameter error)\n",
744 device_xname(&sc->sc_dev));
745 return EINVAL;
746 case ARC_FW_CMD_UNSUPPORTED:
747 printf("%s: firmware error (unsupported command)\n",
748 device_xname(&sc->sc_dev));
749 return EOPNOTSUPP;
750 case ARC_FW_CMD_DISKCFG_CHGD:
751 printf("%s: firmware error (disk configuration changed)\n",
752 device_xname(&sc->sc_dev));
753 return EINVAL;
754 case ARC_FW_CMD_PASS_INVAL:
755 printf("%s: firmware error (invalid password)\n",
756 device_xname(&sc->sc_dev));
757 return EINVAL;
758 case ARC_FW_CMD_NODISKSPACE:
759 printf("%s: firmware error (no disk space available)\n",
760 device_xname(&sc->sc_dev));
761 return EOPNOTSUPP;
762 case ARC_FW_CMD_CHECKSUM_ERR:
763 printf("%s: firmware error (checksum error)\n",
764 device_xname(&sc->sc_dev));
765 return EINVAL;
766 case ARC_FW_CMD_PASS_REQD:
767 printf("%s: firmware error (password required)\n",
768 device_xname(&sc->sc_dev));
769 return EPERM;
770 case ARC_FW_CMD_OK:
771 default:
772 return 0;
773 }
774 }
775
776 static int
777 arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
778 {
779 uint8_t request[2], reply[1];
780 size_t len;
781 int error = 0;
782
783 switch (ba->ba_opcode) {
784 case BIOC_SAENABLE:
785 case BIOC_SADISABLE:
786 request[0] = ARC_FW_SET_ALARM;
787 request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
788 ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
789 len = sizeof(request);
790
791 break;
792
793 case BIOC_SASILENCE:
794 request[0] = ARC_FW_MUTE_ALARM;
795 len = 1;
796
797 break;
798
799 case BIOC_GASTATUS:
800 /* system info is too big/ugly to deal with here */
801 return arc_bio_alarm_state(sc, ba);
802
803 default:
804 return EOPNOTSUPP;
805 }
806
807 error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
808 if (error != 0)
809 return error;
810
811 return arc_fw_parse_status_code(sc, &reply[0]);
812 }
813
814 static int
815 arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
816 {
817 struct arc_fw_sysinfo *sysinfo;
818 uint8_t request;
819 int error = 0;
820
821 sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
822
823 request = ARC_FW_SYSINFO;
824 error = arc_msgbuf(sc, &request, sizeof(request),
825 sysinfo, sizeof(struct arc_fw_sysinfo));
826
827 if (error != 0)
828 goto out;
829
830 ba->ba_status = sysinfo->alarm;
831
832 out:
833 kmem_free(sysinfo, sizeof(*sysinfo));
834 return error;
835 }
836
837 static int
838 arc_bio_volops(struct arc_softc *sc, struct bioc_volops *bc)
839 {
840 /* to create a raid set */
841 struct req_craidset {
842 uint8_t cmdcode;
843 uint32_t devmask;
844 uint8_t raidset_name[16];
845 } __packed;
846
847 /* to create a volume set */
848 struct req_cvolset {
849 uint8_t cmdcode;
850 uint8_t raidset;
851 uint8_t volset_name[16];
852 uint64_t capacity;
853 uint8_t raidlevel;
854 uint8_t stripe;
855 uint8_t scsi_chan;
856 uint8_t scsi_target;
857 uint8_t scsi_lun;
858 uint8_t tagqueue;
859 uint8_t cache;
860 uint8_t speed;
861 uint8_t quick_init;
862 } __packed;
863
864 struct scsibus_softc *scsibus_sc = NULL;
865 struct req_craidset req_craidset;
866 struct req_cvolset req_cvolset;
867 uint8_t request[2];
868 uint8_t reply[1];
869 int error = 0;
870
871 switch (bc->bc_opcode) {
872 case BIOC_VCREATE_VOLUME:
873 {
874 /*
875 * Zero out the structs so that we use some defaults
876 * in raid and volume sets.
877 */
878 memset(&req_craidset, 0, sizeof(req_craidset));
879 memset(&req_cvolset, 0, sizeof(req_cvolset));
880
881 /*
882 * First we have to create the raid set and
883 * use the default name for all of them.
884 */
885 req_craidset.cmdcode = ARC_FW_CREATE_RAIDSET;
886 req_craidset.devmask = bc->bc_devmask;
887 error = arc_msgbuf(sc, &req_craidset, sizeof(req_craidset),
888 reply, sizeof(reply));
889 if (error != 0)
890 return error;
891
892 error = arc_fw_parse_status_code(sc, &reply[0]);
893 if (error) {
894 printf("%s: create raidset%d failed\n",
895 device_xname(&sc->sc_dev), bc->bc_volid);
896 return error;
897 }
898
899 /*
900 * At this point the raid set was created, so it's
901 * time to create the volume set.
902 */
903 req_cvolset.cmdcode = ARC_FW_CREATE_VOLUME;
904 req_cvolset.raidset = bc->bc_volid;
905 req_cvolset.capacity = bc->bc_size * ARC_BLOCKSIZE;
906
907 /*
908 * Set the RAID level.
909 */
910 switch (bc->bc_level) {
911 case 0:
912 case 1:
913 req_cvolset.raidlevel = bc->bc_level;
914 break;
915 case 3:
916 req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_3;
917 break;
918 case 5:
919 req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_5;
920 break;
921 case 6:
922 req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_6;
923 break;
924 default:
925 return EOPNOTSUPP;
926 }
927
928 /*
929 * Set the stripe size.
930 */
931 switch (bc->bc_stripe) {
932 case 4:
933 req_cvolset.stripe = 0;
934 break;
935 case 8:
936 req_cvolset.stripe = 1;
937 break;
938 case 16:
939 req_cvolset.stripe = 2;
940 break;
941 case 32:
942 req_cvolset.stripe = 3;
943 break;
944 case 64:
945 req_cvolset.stripe = 4;
946 break;
947 case 128:
948 req_cvolset.stripe = 5;
949 break;
950 default:
951 req_cvolset.stripe = 4; /* by default 64K */
952 break;
953 }
954
955 req_cvolset.scsi_chan = bc->bc_channel;
956 req_cvolset.scsi_target = bc->bc_target;
957 req_cvolset.scsi_lun = bc->bc_lun;
958 req_cvolset.tagqueue = 1; /* always enabled */
959 req_cvolset.cache = 1; /* always enabled */
960 req_cvolset.speed = 4; /* always max speed */
961
962 error = arc_msgbuf(sc, &req_cvolset, sizeof(req_cvolset),
963 reply, sizeof(reply));
964 if (error != 0)
965 return error;
966
967 error = arc_fw_parse_status_code(sc, &reply[0]);
968 if (error) {
969 printf("%s: create volumeset%d failed\n",
970 device_xname(&sc->sc_dev), bc->bc_volid);
971 return error;
972 }
973
974 /*
975 * Do a rescan on the bus to attach the device associated
976 * with the new volume.
977 */
978 scsibus_sc = device_private(sc->sc_scsibus_dv);
979 (void)scsi_probe_bus(scsibus_sc, bc->bc_target, bc->bc_lun);
980
981 break;
982 }
983 case BIOC_VREMOVE_VOLUME:
984 {
985 /*
986 * Remove the volume set specified in bc_volid.
987 */
988 request[0] = ARC_FW_DELETE_VOLUME;
989 request[1] = bc->bc_volid;
990 error = arc_msgbuf(sc, request, sizeof(request),
991 reply, sizeof(reply));
992 if (error != 0)
993 return error;
994
995 error = arc_fw_parse_status_code(sc, &reply[0]);
996 if (error) {
997 printf("%s: delete volumeset%d failed\n",
998 device_xname(&sc->sc_dev), bc->bc_volid);
999 return error;
1000 }
1001
1002 /*
1003 * Detach the sd(4) device associated with the volume,
1004 * but don't treat a failure there as fatal, just report it.
1005 */
1006 error = scsipi_target_detach(&sc->sc_chan, bc->bc_target,
1007 bc->bc_lun, 0);
1008 if (error)
1009 printf("%s: couldn't detach sd device for volume %d "
1010 "at %u:%u.%u (error=%d)\n",
1011 device_xname(&sc->sc_dev), bc->bc_volid,
1012 bc->bc_channel, bc->bc_target, bc->bc_lun, error);
1013
1014 /*
1015 * Now remove the raid set specified in bc_volid;
1016 * we only care about volumes.
1017 */
1018 request[0] = ARC_FW_DELETE_RAIDSET;
1019 request[1] = bc->bc_volid;
1020 error = arc_msgbuf(sc, request, sizeof(request),
1021 reply, sizeof(reply));
1022 if (error != 0)
1023 return error;
1024
1025 error = arc_fw_parse_status_code(sc, &reply[0]);
1026 if (error) {
1027 printf("%s: delete raidset%d failed\n",
1028 device_xname(&sc->sc_dev), bc->bc_volid);
1029 return error;
1030 }
1031
1032 break;
1033 }
1034 default:
1035 return EOPNOTSUPP;
1036 }
1037
1038 return error;
1039 }
1040
1041 static int
1042 arc_bio_setstate(struct arc_softc *sc, struct bioc_setstate *bs)
1043 {
1044 /* for a hotspare disk */
1045 struct request_hs {
1046 uint8_t cmdcode;
1047 uint32_t devmask;
1048 } __packed;
1049
1050 /* for a pass-through disk */
1051 struct request_pt {
1052 uint8_t cmdcode;
1053 uint8_t devid;
1054 uint8_t scsi_chan;
1055 uint8_t scsi_id;
1056 uint8_t scsi_lun;
1057 uint8_t tagged_queue;
1058 uint8_t cache_mode;
1059 uint8_t max_speed;
1060 } __packed;
1061
1062 struct scsibus_softc *scsibus_sc = NULL;
1063 struct request_hs req_hs; /* to add/remove hotspare */
1064 struct request_pt req_pt; /* to add a pass-through */
1065 uint8_t req_gen[2];
1066 uint8_t reply[1];
1067 int error = 0;
1068
1069 switch (bs->bs_status) {
1070 case BIOC_SSHOTSPARE:
1071 {
1072 req_hs.cmdcode = ARC_FW_CREATE_HOTSPARE;
1073 req_hs.devmask = (1 << bs->bs_target);
1074 goto hotspare;
1075 }
1076 case BIOC_SSDELHOTSPARE:
1077 {
1078 req_hs.cmdcode = ARC_FW_DELETE_HOTSPARE;
1079 req_hs.devmask = (1 << bs->bs_target);
1080 goto hotspare;
1081 }
1082 case BIOC_SSPASSTHRU:
1083 {
1084 req_pt.cmdcode = ARC_FW_CREATE_PASSTHRU;
1085 req_pt.devid = bs->bs_other_id; /* this wants device# */
1086 req_pt.scsi_chan = bs->bs_channel;
1087 req_pt.scsi_id = bs->bs_target;
1088 req_pt.scsi_lun = bs->bs_lun;
1089 req_pt.tagged_queue = 1; /* always enabled */
1090 req_pt.cache_mode = 1; /* always enabled */
1091 req_pt.max_speed = 4; /* always max speed */
1092
1093 error = arc_msgbuf(sc, &req_pt, sizeof(req_pt),
1094 reply, sizeof(reply));
1095 if (error != 0)
1096 return error;
1097
1098 /*
1099 * Do a rescan on the bus to attach the new device
1100 * associated with the pass-through disk.
1101 */
1102 scsibus_sc = device_private(sc->sc_scsibus_dv);
1103 (void)scsi_probe_bus(scsibus_sc, bs->bs_target, bs->bs_lun);
1104
1105 goto out;
1106 }
1107 case BIOC_SSDELPASSTHRU:
1108 {
1109 req_gen[0] = ARC_FW_DELETE_PASSTHRU;
1110 req_gen[1] = bs->bs_target;
1111 error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
1112 reply, sizeof(reply));
1113 if (error != 0)
1114 return error;
1115
1116 /*
1117 * Detach the sd device associated with this pass-through disk.
1118 */
1119 error = scsipi_target_detach(&sc->sc_chan, bs->bs_target,
1120 bs->bs_lun, 0);
1121 if (error)
1122 printf("%s: couldn't detach sd device for the "
1123 "pass-through disk at %u:%u.%u (error=%d)\n",
1124 device_xname(&sc->sc_dev),
1125 bs->bs_channel, bs->bs_target, bs->bs_lun, error);
1126
1127 goto out;
1128 }
1129 case BIOC_SSCHECKSTART_VOL:
1130 {
1131 req_gen[0] = ARC_FW_START_CHECKVOL;
1132 req_gen[1] = bs->bs_volid;
1133 error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
1134 reply, sizeof(reply));
1135 if (error != 0)
1136 return error;
1137
1138 goto out;
1139 }
1140 case BIOC_SSCHECKSTOP_VOL:
1141 {
1142 uint8_t req = ARC_FW_STOP_CHECKVOL;
1143 error = arc_msgbuf(sc, &req, 1, reply, sizeof(reply));
1144 if (error != 0)
1145 return error;
1146
1147 goto out;
1148 }
1149 default:
1150 return EOPNOTSUPP;
1151 }
1152
1153 hotspare:
1154 error = arc_msgbuf(sc, &req_hs, sizeof(req_hs),
1155 reply, sizeof(reply));
1156 if (error != 0)
1157 return error;
1158
1159 out:
1160 return arc_fw_parse_status_code(sc, &reply[0]);
1161 }
1162
1163 static int
1164 arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
1165 {
1166 uint8_t request[2];
1167 struct arc_fw_sysinfo *sysinfo = NULL;
1168 struct arc_fw_raidinfo *raidinfo;
1169 int nvols = 0, i;
1170 int error = 0;
1171
1172 raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1173
1174 if (!sc->sc_maxraidset || !sc->sc_maxvolset || !sc->sc_cchans) {
1175 sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
1176
1177 request[0] = ARC_FW_SYSINFO;
1178 error = arc_msgbuf(sc, request, 1, sysinfo,
1179 sizeof(struct arc_fw_sysinfo));
1180 if (error != 0)
1181 goto out;
1182
1183 sc->sc_maxraidset = sysinfo->max_raid_set;
1184 sc->sc_maxvolset = sysinfo->max_volume_set;
1185 sc->sc_cchans = sysinfo->ide_channels;
1186 }
1187
1188 request[0] = ARC_FW_RAIDINFO;
1189 for (i = 0; i < sc->sc_maxraidset; i++) {
1190 request[1] = i;
1191 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1192 sizeof(struct arc_fw_raidinfo));
1193 if (error != 0)
1194 goto out;
1195
1196 if (raidinfo->volumes)
1197 nvols++;
1198 }
1199
1200 strlcpy(bi->bi_dev, device_xname(&sc->sc_dev), sizeof(bi->bi_dev));
1201 bi->bi_novol = nvols;
1202 bi->bi_nodisk = sc->sc_cchans;
1203
1204 out:
1205 if (sysinfo)
1206 kmem_free(sysinfo, sizeof(*sysinfo));
1207 kmem_free(raidinfo, sizeof(*raidinfo));
1208 return error;
1209 }
1210
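/*
 * bio(4) volume ids are logical: walk the firmware volume sets,
 * skipping empty slots, until the vol'th populated one is found.
 */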
1211 static int
1212 arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
1213 {
1214 uint8_t request[2];
1215 int error = 0;
1216 int nvols = 0, i;
1217
1218 request[0] = ARC_FW_VOLINFO;
1219 for (i = 0; i < sc->sc_maxvolset; i++) {
1220 request[1] = i;
1221 error = arc_msgbuf(sc, request, sizeof(request), volinfo,
1222 sizeof(struct arc_fw_volinfo));
1223 if (error != 0)
1224 goto out;
1225
1226 if (volinfo->capacity == 0 && volinfo->capacity2 == 0)
1227 continue;
1228
1229 if (nvols == vol)
1230 break;
1231
1232 nvols++;
1233 }
1234
1235 if (nvols != vol ||
1236 (volinfo->capacity == 0 && volinfo->capacity2 == 0)) {
1237 error = ENODEV;
1238 goto out;
1239 }
1240
1241 out:
1242 return error;
1243 }
1244
1245 static int
1246 arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
1247 {
1248 struct arc_fw_volinfo *volinfo;
1249 uint64_t blocks;
1250 uint32_t status;
1251 int error = 0;
1252
1253 volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1254
1255 error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
1256 if (error != 0)
1257 goto out;
1258
1259 bv->bv_percent = -1;
1260 bv->bv_seconds = 0;
1261
1262 status = htole32(volinfo->volume_status);
1263 if (status == 0x0) {
1264 if (htole32(volinfo->fail_mask) == 0x0)
1265 bv->bv_status = BIOC_SVONLINE;
1266 else
1267 bv->bv_status = BIOC_SVDEGRADED;
1268 } else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
1269 bv->bv_status = BIOC_SVDEGRADED;
1270 } else if (status & ARC_FW_VOL_STATUS_FAILED) {
1271 bv->bv_status = BIOC_SVOFFLINE;
1272 } else if (status & ARC_FW_VOL_STATUS_INITTING) {
1273 bv->bv_status = BIOC_SVBUILDING;
1274 bv->bv_percent = htole32(volinfo->progress);
1275 } else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
1276 bv->bv_status = BIOC_SVREBUILD;
1277 bv->bv_percent = htole32(volinfo->progress);
1278 } else if (status & ARC_FW_VOL_STATUS_MIGRATING) {
1279 bv->bv_status = BIOC_SVMIGRATING;
1280 bv->bv_percent = htole32(volinfo->progress);
1281 } else if (status & ARC_FW_VOL_STATUS_CHECKING) {
1282 bv->bv_status = BIOC_SVCHECKING;
1283 bv->bv_percent = htole32(volinfo->progress);
1284 }
1285
1286 blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
1287 blocks += (uint64_t)htole32(volinfo->capacity);
1288 bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */
1289
1290 switch (volinfo->raid_level) {
1291 case ARC_FW_VOL_RAIDLEVEL_0:
1292 bv->bv_level = 0;
1293 break;
1294 case ARC_FW_VOL_RAIDLEVEL_1:
1295 bv->bv_level = 1;
1296 break;
1297 case ARC_FW_VOL_RAIDLEVEL_3:
1298 bv->bv_level = 3;
1299 break;
1300 case ARC_FW_VOL_RAIDLEVEL_5:
1301 bv->bv_level = 5;
1302 break;
1303 case ARC_FW_VOL_RAIDLEVEL_6:
1304 bv->bv_level = 6;
1305 break;
1306 case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
1307 bv->bv_level = BIOC_SVOL_PASSTHRU;
1308 break;
1309 default:
1310 bv->bv_level = -1;
1311 break;
1312 }
1313
1314 bv->bv_nodisk = volinfo->member_disks;
1315 bv->bv_stripe_size = volinfo->stripe_size / 2;
1316 snprintf(bv->bv_dev, sizeof(bv->bv_dev), "sd%d", bv->bv_volid);
1317 scsipi_strvis(bv->bv_vendor, sizeof(bv->bv_vendor), volinfo->set_name,
1318 sizeof(volinfo->set_name));
1319
1320 out:
1321 kmem_free(volinfo, sizeof(*volinfo));
1322 return error;
1323 }
1324
1325 static int
1326 arc_bio_disk_novol(struct arc_softc *sc, struct bioc_disk *bd)
1327 {
1328 struct arc_fw_diskinfo *diskinfo;
1329 uint8_t request[2];
1330 int error = 0;
1331
1332 diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1333
1334 if (bd->bd_diskid >= sc->sc_cchans) {
1335 error = ENODEV;
1336 goto out;
1337 }
1338
1339 request[0] = ARC_FW_DISKINFO;
1340 request[1] = bd->bd_diskid;
1341 error = arc_msgbuf(sc, request, sizeof(request),
1342 diskinfo, sizeof(struct arc_fw_diskinfo));
1343 if (error != 0)
1344 goto out;
1345
1346 /* skip disks with no capacity */
1347 if (htole32(diskinfo->capacity) == 0 &&
1348 htole32(diskinfo->capacity2) == 0)
1349 goto out;
1350
1351 bd->bd_disknovol = true;
1352 arc_bio_disk_filldata(sc, bd, diskinfo, bd->bd_diskid);
1353
1354 out:
1355 kmem_free(diskinfo, sizeof(*diskinfo));
1356 return error;
1357 }
1358
1359 static void
1360 arc_bio_disk_filldata(struct arc_softc *sc, struct bioc_disk *bd,
1361 struct arc_fw_diskinfo *diskinfo, int diskid)
1362 {
1363 uint64_t blocks;
1364 char model[81];
1365 char serial[41];
1366 char rev[17];
1367
1368 switch (htole32(diskinfo->device_state)) {
1369 case ARC_FW_DISK_PASSTHRU:
1370 bd->bd_status = BIOC_SDPASSTHRU;
1371 break;
1372 case ARC_FW_DISK_RAIDMEMBER:
1373 bd->bd_status = BIOC_SDONLINE;
1374 break;
1375 case ARC_FW_DISK_HOTSPARE:
1376 bd->bd_status = BIOC_SDHOTSPARE;
1377 break;
1378 case ARC_FW_DISK_UNUSED:
1379 bd->bd_status = BIOC_SDUNUSED;
1380 break;
1381 case 0:
1382 /* disk has been disconnected */
1383 bd->bd_status = BIOC_SDOFFLINE;
1384 bd->bd_channel = 1;
1385 bd->bd_target = 0;
1386 bd->bd_lun = 0;
1387 strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
1388 break;
1389 default:
1390 printf("%s: unknown disk device_state: 0x%x\n", __func__,
1391 htole32(diskinfo->device_state));
1392 bd->bd_status = BIOC_SDINVALID;
1393 return;
1394 }
1395
1396 blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
1397 blocks += (uint64_t)htole32(diskinfo->capacity);
1398 bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */
1399
1400 scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
1401 scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
1402 scsipi_strvis(rev, 17, diskinfo->firmware_rev,
1403 sizeof(diskinfo->firmware_rev));
1404
1405 snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
1406 strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));
1407
1408 #if 0
1409 bd->bd_channel = diskinfo->scsi_attr.channel;
1410 bd->bd_target = diskinfo->scsi_attr.target;
1411 bd->bd_lun = diskinfo->scsi_attr.lun;
1412 #endif
1413
1414 /*
1415 * the firmware doesn't seem to fill scsi_attr in, so fake it with
1416 * the diskid.
1417 */
1418 bd->bd_channel = 0;
1419 bd->bd_target = diskid;
1420 bd->bd_lun = 0;
1421 }
1422
1423 static int
1424 arc_bio_disk_volume(struct arc_softc *sc, struct bioc_disk *bd)
1425 {
1426 struct arc_fw_raidinfo *raidinfo;
1427 struct arc_fw_volinfo *volinfo;
1428 struct arc_fw_diskinfo *diskinfo;
1429 uint8_t request[2];
1430 int error = 0;
1431
1432 volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1433 raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1434 diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1435
1436 error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
1437 if (error != 0)
1438 goto out;
1439
1440 request[0] = ARC_FW_RAIDINFO;
1441 request[1] = volinfo->raid_set_number;
1442
1443 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1444 sizeof(struct arc_fw_raidinfo));
1445 if (error != 0)
1446 goto out;
1447
1448 if (bd->bd_diskid >= sc->sc_cchans ||
1449 bd->bd_diskid >= raidinfo->member_devices) {
1450 error = ENODEV;
1451 goto out;
1452 }
1453
1454 if (raidinfo->device_array[bd->bd_diskid] == 0xff) {
1455 /*
1456 * The disk has been disconnected; mark it offline
1457 * and put it on another bus.
1458 */
1459 bd->bd_channel = 1;
1460 bd->bd_target = 0;
1461 bd->bd_lun = 0;
1462 bd->bd_status = BIOC_SDOFFLINE;
1463 strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
1464 goto out;
1465 }
1466
1467 request[0] = ARC_FW_DISKINFO;
1468 request[1] = raidinfo->device_array[bd->bd_diskid];
1469 error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
1470 sizeof(struct arc_fw_diskinfo));
1471 if (error != 0)
1472 goto out;
1473
1474 /* now fill our bio disk with data from the firmware */
1475 arc_bio_disk_filldata(sc, bd, diskinfo,
1476 raidinfo->device_array[bd->bd_diskid]);
1477
1478 out:
1479 kmem_free(raidinfo, sizeof(*raidinfo));
1480 kmem_free(volinfo, sizeof(*volinfo));
1481 kmem_free(diskinfo, sizeof(*diskinfo));
1482 return error;
1483 }
1484 #endif /* NBIO > 0 */
1485
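/*
 * The firmware message checksum is the 8-bit sum of both length bytes
 * plus every byte of the payload.
 */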
1486 uint8_t
1487 arc_msg_cksum(void *cmd, uint16_t len)
1488 {
1489 uint8_t *buf = cmd;
1490 uint8_t cksum;
1491 int i;
1492
1493 cksum = (uint8_t)(len >> 8) + (uint8_t)len;
1494 for (i = 0; i < len; i++)
1495 cksum += buf[i];
1496
1497 return cksum;
1498 }
1499
1500
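/*
 * Exchange a management request/reply with the firmware over the
 * doorbell message buffer.  The request is wrapped in an arc_fw_bufhdr
 * plus a trailing checksum and pushed through ARC_REG_IOC_WBUF in
 * chunks; the reply is collected the same way from ARC_REG_IOC_RBUF and
 * its header and checksum are verified before it is copied out.
 */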
1501 int
1502 arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
1503 size_t rbuflen)
1504 {
1505 uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
1506 uint8_t *wbuf, *rbuf;
1507 int wlen, wdone = 0, rlen, rdone = 0;
1508 struct arc_fw_bufhdr *bufhdr;
1509 uint32_t reg, rwlen;
1510 int error = 0;
1511 #ifdef ARC_DEBUG
1512 int i;
1513 #endif
1514
1515 wbuf = rbuf = NULL;
1516
1517 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
1518 device_xname(&sc->sc_dev), wbuflen, rbuflen);
1519
1520 wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
1521 wbuf = kmem_alloc(wlen, KM_SLEEP);
1522
1523 rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
1524 rbuf = kmem_alloc(rlen, KM_SLEEP);
1525
1526 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
1527 device_xname(&sc->sc_dev), wlen, rlen);
1528
1529 bufhdr = (struct arc_fw_bufhdr *)wbuf;
1530 bufhdr->hdr = arc_fw_hdr;
1531 bufhdr->len = htole16(wbuflen);
1532 memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
1533 wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);
1534
1535 arc_lock(sc);
1536 if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0) {
1537 error = EBUSY;
1538 goto out;
1539 }
1540
1541 reg = ARC_REG_OUTB_DOORBELL_READ_OK;
1542
1543 do {
1544 if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
1545 memset(rwbuf, 0, sizeof(rwbuf));
1546 rwlen = (wlen - wdone) % sizeof(rwbuf);
1547 memcpy(rwbuf, &wbuf[wdone], rwlen);
1548
1549 #ifdef ARC_DEBUG
1550 if (arcdebug & ARC_D_DB) {
1551 printf("%s: write %d:",
1552 device_xname(&sc->sc_dev), rwlen);
1553 for (i = 0; i < rwlen; i++)
1554 printf(" 0x%02x", rwbuf[i]);
1555 printf("\n");
1556 }
1557 #endif
1558
1559 /* copy the chunk to the hw */
1560 arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
1561 arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
1562 sizeof(rwbuf));
1563
1564 /* say we have a buffer for the hw */
1565 arc_write(sc, ARC_REG_INB_DOORBELL,
1566 ARC_REG_INB_DOORBELL_WRITE_OK);
1567
1568 wdone += rwlen;
1569 }
1570
1571 while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
1572 arc_wait(sc);
1573
1574 arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
1575
1576 DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
1577 device_xname(&sc->sc_dev), reg);
1578
1579 if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
1580 rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
1581 if (rwlen > sizeof(rwbuf)) {
1582 DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
1583 device_xname(&sc->sc_dev));
1584 error = EIO;
1585 goto out;
1586 }
1587
1588 arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
1589 sizeof(rwbuf));
1590
1591 arc_write(sc, ARC_REG_INB_DOORBELL,
1592 ARC_REG_INB_DOORBELL_READ_OK);
1593
1594 #ifdef ARC_DEBUG
1595 printf("%s: len: %d+%d=%d/%d\n",
1596 device_xname(&sc->sc_dev),
1597 rwlen, rdone, rwlen + rdone, rlen);
1598 if (arcdebug & ARC_D_DB) {
1599 printf("%s: read:",
1600 device_xname(&sc->sc_dev));
1601 for (i = 0; i < rwlen; i++)
1602 printf(" 0x%02x", rwbuf[i]);
1603 printf("\n");
1604 }
1605 #endif
1606
1607 if ((rdone + rwlen) > rlen) {
1608 DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
1609 device_xname(&sc->sc_dev));
1610 error = EIO;
1611 goto out;
1612 }
1613
1614 memcpy(&rbuf[rdone], rwbuf, rwlen);
1615 rdone += rwlen;
1616 }
1617 } while (rdone != rlen);
1618
1619 bufhdr = (struct arc_fw_bufhdr *)rbuf;
1620 if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
1621 bufhdr->len != htole16(rbuflen)) {
1622 DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
1623 device_xname(&sc->sc_dev));
1624 error = EIO;
1625 goto out;
1626 }
1627
1628 memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);
1629
1630 if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
1631 DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
1632 device_xname(&sc->sc_dev));
1633 error = EIO;
1634 goto out;
1635 }
1636
1637 out:
1638 arc_unlock(sc);
1639 kmem_free(wbuf, wlen);
1640 kmem_free(rbuf, rlen);
1641
1642 return error;
1643 }
1644
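/*
 * arc_lock/arc_unlock bracket a conversation with the firmware: the
 * rwlock serializes callers and the spin mutex protects register
 * access.  While talking, the doorbell interrupt is only unmasked from
 * arc_wait(), so replies wake us through sc_condvar instead of the
 * normal interrupt path.
 */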
1645 void
1646 arc_lock(struct arc_softc *sc)
1647 {
1648 rw_enter(&sc->sc_rwlock, RW_WRITER);
1649 mutex_spin_enter(&sc->sc_mutex);
1650 arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1651 sc->sc_talking = 1;
1652 }
1653
1654 void
1655 arc_unlock(struct arc_softc *sc)
1656 {
1657 KASSERT(mutex_owned(&sc->sc_mutex));
1658
1659 arc_write(sc, ARC_REG_INTRMASK,
1660 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1661 sc->sc_talking = 0;
1662 mutex_spin_exit(&sc->sc_mutex);
1663 rw_exit(&sc->sc_rwlock);
1664 }
1665
1666 void
1667 arc_wait(struct arc_softc *sc)
1668 {
1669 KASSERT(mutex_owned(&sc->sc_mutex));
1670
1671 arc_write(sc, ARC_REG_INTRMASK,
1672 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1673 if (cv_timedwait(&sc->sc_condvar, &sc->sc_mutex, hz) == EWOULDBLOCK)
1674 arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1675 }
1676
1677 #if NBIO > 0
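/*
 * Sensor thread: one ENVSYS_DRIVE sensor is created per volume plus one
 * per member disk.  The volume id is stashed in value_max and the disk
 * index (offset by 10, so that 0 means "volume sensor") in value_avg;
 * arc_refresh_sensors() uses these to find its way back to the firmware
 * data.
 */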
1678 static void
1679 arc_create_sensors(void *arg)
1680 {
1681 struct arc_softc *sc = arg;
1682 struct bioc_inq bi;
1683 struct bioc_vol bv;
1684 int i, j;
1685 size_t slen, count = 0;
1686
1687 memset(&bi, 0, sizeof(bi));
1688 if (arc_bio_inq(sc, &bi) != 0) {
1689 aprint_error("%s: unable to query firmware for sensor info\n",
1690 device_xname(&sc->sc_dev));
1691 kthread_exit(0);
1692 }
1693
1694 /* There's no point in continuing if there are no volumes */
1695 if (!bi.bi_novol)
1696 kthread_exit(0);
1697
1698 for (i = 0; i < bi.bi_novol; i++) {
1699 memset(&bv, 0, sizeof(bv));
1700 bv.bv_volid = i;
1701 if (arc_bio_vol(sc, &bv) != 0)
1702 kthread_exit(0);
1703
1704 /* Skip passthrough volumes */
1705 if (bv.bv_level == BIOC_SVOL_PASSTHRU)
1706 continue;
1707
1708 /* new volume found */
1709 sc->sc_nsensors++;
1710 /* new disk in a volume found */
1711 sc->sc_nsensors+= bv.bv_nodisk;
1712 }
1713
1714 sc->sc_sme = sysmon_envsys_create();
1715 slen = sizeof(envsys_data_t) * sc->sc_nsensors;
1716 sc->sc_sensors = kmem_zalloc(slen, KM_SLEEP);
1717
1718 /* Attach sensors for volumes and disks */
1719 for (i = 0; i < bi.bi_novol; i++) {
1720 memset(&bv, 0, sizeof(bv));
1721 bv.bv_volid = i;
1722 if (arc_bio_vol(sc, &bv) != 0)
1723 goto bad;
1724
1725 sc->sc_sensors[count].units = ENVSYS_DRIVE;
1726 sc->sc_sensors[count].monitor = true;
1727 sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;
1728
1729 /* Skip passthrough volumes */
1730 if (bv.bv_level == BIOC_SVOL_PASSTHRU)
1731 continue;
1732
1733 snprintf(sc->sc_sensors[count].desc,
1734 sizeof(sc->sc_sensors[count].desc),
1735 "RAID %d volume%d (%s)", bv.bv_level, i, bv.bv_dev);
1736 sc->sc_sensors[count].value_max = i;
1737
1738 if (sysmon_envsys_sensor_attach(sc->sc_sme,
1739 &sc->sc_sensors[count]))
1740 goto bad;
1741
1742 count++;
1743
1744 /* Attach disk sensors for this volume */
1745 for (j = 0; j < bv.bv_nodisk; j++) {
1746 sc->sc_sensors[count].units = ENVSYS_DRIVE;
1747 sc->sc_sensors[count].monitor = true;
1748 sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;
1749
1750 snprintf(sc->sc_sensors[count].desc,
1751 sizeof(sc->sc_sensors[count].desc),
1752 "disk%d volume%d (%s)", j, i, bv.bv_dev);
1753 sc->sc_sensors[count].value_max = i;
1754 sc->sc_sensors[count].value_avg = j + 10;
1755
1756 if (sysmon_envsys_sensor_attach(sc->sc_sme,
1757 &sc->sc_sensors[count]))
1758 goto bad;
1759
1760 count++;
1761 }
1762 }
1763
1764 /*
1765 * Register our envsys driver with the framework now that the
1766 * sensors were all attached.
1767 */
1768 sc->sc_sme->sme_name = device_xname(&sc->sc_dev);
1769 sc->sc_sme->sme_cookie = sc;
1770 sc->sc_sme->sme_refresh = arc_refresh_sensors;
1771
1772 if (sysmon_envsys_register(sc->sc_sme)) {
1773 aprint_debug("%s: unable to register with sysmon\n",
1774 device_xname(&sc->sc_dev));
1775 goto bad;
1776 }
1777 kthread_exit(0);
1778
1779 bad:
1780 kmem_free(sc->sc_sensors, slen);
1781 sysmon_envsys_destroy(sc->sc_sme);
1782 kthread_exit(0);
1783 }
1784
1785 static void
1786 arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
1787 {
1788 struct arc_softc *sc = sme->sme_cookie;
1789 struct bioc_vol bv;
1790 struct bioc_disk bd;
1791
1792 /* sanity check */
1793 if (edata->units != ENVSYS_DRIVE)
1794 return;
1795
1796 memset(&bv, 0, sizeof(bv));
1797 bv.bv_volid = edata->value_max;
1798
1799 if (arc_bio_vol(sc, &bv)) {
1800 edata->value_cur = ENVSYS_DRIVE_EMPTY;
1801 edata->state = ENVSYS_SINVALID;
1802 return;
1803 }
1804
1805 /* Current sensor is handling a disk volume member */
1806 if (edata->value_avg) {
1807 memset(&bd, 0, sizeof(bd));
1808 bd.bd_volid = edata->value_max;
1809 bd.bd_diskid = edata->value_avg - 10;
1810
1811 if (arc_bio_disk_volume(sc, &bd)) {
1812 edata->value_cur = ENVSYS_DRIVE_OFFLINE;
1813 edata->state = ENVSYS_SCRITICAL;
1814 return;
1815 }
1816
1817 switch (bd.bd_status) {
1818 case BIOC_SDONLINE:
1819 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1820 edata->state = ENVSYS_SVALID;
1821 break;
1822 case BIOC_SDOFFLINE:
1823 edata->value_cur = ENVSYS_DRIVE_OFFLINE;
1824 edata->state = ENVSYS_SCRITICAL;
1825 break;
1826 default:
1827 edata->value_cur = ENVSYS_DRIVE_FAIL;
1828 edata->state = ENVSYS_SCRITICAL;
1829 break;
1830 }
1831
1832 return;
1833 }
1834
1835 /* Current sensor is handling a volume */
1836 switch (bv.bv_status) {
1837 case BIOC_SVOFFLINE:
1838 edata->value_cur = ENVSYS_DRIVE_FAIL;
1839 edata->state = ENVSYS_SCRITICAL;
1840 break;
1841 case BIOC_SVDEGRADED:
1842 edata->value_cur = ENVSYS_DRIVE_PFAIL;
1843 edata->state = ENVSYS_SCRITICAL;
1844 break;
1845 case BIOC_SVBUILDING:
1846 edata->value_cur = ENVSYS_DRIVE_BUILD;
1847 edata->state = ENVSYS_SVALID;
1848 break;
1849 case BIOC_SVMIGRATING:
1850 edata->value_cur = ENVSYS_DRIVE_MIGRATING;
1851 edata->state = ENVSYS_SVALID;
1852 break;
1853 case BIOC_SVCHECKING:
1854 edata->value_cur = ENVSYS_DRIVE_CHECK;
1855 edata->state = ENVSYS_SVALID;
1856 break;
1857 case BIOC_SVREBUILD:
1858 edata->value_cur = ENVSYS_DRIVE_REBUILD;
1859 edata->state = ENVSYS_SCRITICAL;
1860 break;
1861 case BIOC_SVSCRUB:
1862 case BIOC_SVONLINE:
1863 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1864 edata->state = ENVSYS_SVALID;
1865 break;
1866 case BIOC_SVINVALID:
1867 /* FALLTHROUGH */
1868 default:
1869 edata->value_cur = ENVSYS_DRIVE_EMPTY; /* unknown state */
1870 edata->state = ENVSYS_SINVALID;
1871 break;
1872 }
1873 }
1874 #endif /* NBIO > 0 */
1875
1876 uint32_t
1877 arc_read(struct arc_softc *sc, bus_size_t r)
1878 {
1879 uint32_t v;
1880
1881 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1882 BUS_SPACE_BARRIER_READ);
1883 v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1884
1885 DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
1886 device_xname(&sc->sc_dev), r, v);
1887
1888 return v;
1889 }
1890
1891 void
1892 arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1893 {
1894 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1895 BUS_SPACE_BARRIER_READ);
1896 bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
1897 (uint32_t *)buf, len >> 2);
1898 }
1899
1900 void
1901 arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
1902 {
1903 DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
1904 device_xname(&sc->sc_dev), r, v);
1905
1906 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1907 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1908 BUS_SPACE_BARRIER_WRITE);
1909 }
1910
1911 void
1912 arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1913 {
1914 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
1915 (const uint32_t *)buf, len >> 2);
1916 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1917 BUS_SPACE_BARRIER_WRITE);
1918 }
1919
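/*
 * Poll a register until (value & mask) matches (or, for arc_wait_ne,
 * differs from) the target, giving up after roughly ten seconds.
 */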
1920 int
1921 arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1922 uint32_t target)
1923 {
1924 int i;
1925
1926 DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
1927 device_xname(&sc->sc_dev), r, mask, target);
1928
1929 for (i = 0; i < 10000; i++) {
1930 if ((arc_read(sc, r) & mask) == target)
1931 return 0;
1932 delay(1000);
1933 }
1934
1935 return 1;
1936 }
1937
1938 int
1939 arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1940 uint32_t target)
1941 {
1942 int i;
1943
1944 DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
1945 device_xname(&sc->sc_dev), r, mask, target);
1946
1947 for (i = 0; i < 10000; i++) {
1948 if ((arc_read(sc, r) & mask) != target)
1949 return 0;
1950 delay(1000);
1951 }
1952
1953 return 1;
1954 }
1955
1956 int
1957 arc_msg0(struct arc_softc *sc, uint32_t m)
1958 {
1959 /* post message */
1960 arc_write(sc, ARC_REG_INB_MSG0, m);
1961 /* wait for the fw to do it */
1962 if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
1963 ARC_REG_INTRSTAT_MSG0) != 0)
1964 return 1;
1965
1966 /* ack it */
1967 arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);
1968
1969 return 0;
1970 }
1971
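/*
 * Allocate a single contiguous chunk of bus-coherent DMA memory, map it
 * into the kernel and bundle the map, segment and KVA in an arc_dmamem.
 */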
1972 struct arc_dmamem *
1973 arc_dmamem_alloc(struct arc_softc *sc, size_t size)
1974 {
1975 struct arc_dmamem *adm;
1976 int nsegs;
1977
1978 adm = kmem_zalloc(sizeof(*adm), KM_NOSLEEP);
1979 if (adm == NULL)
1980 return NULL;
1981
1982 adm->adm_size = size;
1983
1984 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1985 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
1986 goto admfree;
1987
1988 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
1989 1, &nsegs, BUS_DMA_NOWAIT) != 0)
1990 goto destroy;
1991
1992 if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
1993 &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
1994 goto free;
1995
1996 if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
1997 NULL, BUS_DMA_NOWAIT) != 0)
1998 goto unmap;
1999
2000 memset(adm->adm_kva, 0, size);
2001
2002 return adm;
2003
2004 unmap:
2005 bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
2006 free:
2007 bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
2008 destroy:
2009 bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
2010 admfree:
2011 kmem_free(adm, sizeof(*adm));
2012
2013 return NULL;
2014 }
2015
2016 void
2017 arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
2018 {
2019 bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
2020 bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
2021 bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
2022 bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
2023 kmem_free(adm, sizeof(*adm));
2024 }
2025
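/*
 * Carve the request DMA memory into one arc_io_cmd frame per CCB.  Each
 * CCB remembers its offset into that region and the pre-shifted bus
 * address (ccb_cmd_post) that is written to the post queue register.
 */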
2026 int
2027 arc_alloc_ccbs(struct arc_softc *sc)
2028 {
2029 struct arc_ccb *ccb;
2030 uint8_t *cmd;
2031 int i;
2032 size_t ccbslen;
2033
2034 TAILQ_INIT(&sc->sc_ccb_free);
2035
2036 ccbslen = sizeof(struct arc_ccb) * sc->sc_req_count;
2037 sc->sc_ccbs = kmem_zalloc(ccbslen, KM_SLEEP);
2038
2039 sc->sc_requests = arc_dmamem_alloc(sc,
2040 ARC_MAX_IOCMDLEN * sc->sc_req_count);
2041 if (sc->sc_requests == NULL) {
2042 aprint_error("%s: unable to allocate ccb dmamem\n",
2043 device_xname(&sc->sc_dev));
2044 goto free_ccbs;
2045 }
2046 cmd = ARC_DMA_KVA(sc->sc_requests);
2047
2048 for (i = 0; i < sc->sc_req_count; i++) {
2049 ccb = &sc->sc_ccbs[i];
2050
2051 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
2052 MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
2053 aprint_error("%s: unable to create dmamap for ccb %d\n",
2054 device_xname(&sc->sc_dev), i);
2055 goto free_maps;
2056 }
2057
2058 ccb->ccb_sc = sc;
2059 ccb->ccb_id = i;
2060 ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;
2061
2062 ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
2063 ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
2064 ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;
2065
2066 arc_put_ccb(sc, ccb);
2067 }
2068
2069 return 0;
2070
2071 free_maps:
2072 while ((ccb = arc_get_ccb(sc)) != NULL)
2073 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2074 arc_dmamem_free(sc, sc->sc_requests);
2075
2076 free_ccbs:
2077 kmem_free(sc->sc_ccbs, ccbslen);
2078
2079 return 1;
2080 }
2081
2082 struct arc_ccb *
2083 arc_get_ccb(struct arc_softc *sc)
2084 {
2085 struct arc_ccb *ccb;
2086
2087 ccb = TAILQ_FIRST(&sc->sc_ccb_free);
2088 if (ccb != NULL)
2089 TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
2090
2091 return ccb;
2092 }
2093
2094 void
2095 arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
2096 {
2097 ccb->ccb_xs = NULL;
2098 memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
2099 TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
2100 }
2101