/*	$NetBSD: arcmsr.c,v 1.10 2008/02/28 16:47:53 xtraeme Exp $ */
2 /* $OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */
3
4 /*
5 * Copyright (c) 2007, 2008 Juan Romero Pardines <xtraeme (at) netbsd.org>
6 * Copyright (c) 2006 David Gwynne <dlg (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include "bio.h"
22
23 #include <sys/cdefs.h>
24 __KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.10 2008/02/28 16:47:53 xtraeme Exp $");
25
26 #include <sys/param.h>
27 #include <sys/buf.h>
28 #include <sys/kernel.h>
29 #include <sys/malloc.h>
30 #include <sys/device.h>
31 #include <sys/kmem.h>
32 #include <sys/kthread.h>
33 #include <sys/mutex.h>
34 #include <sys/condvar.h>
35 #include <sys/rwlock.h>
36
37 #if NBIO > 0
38 #include <sys/ioctl.h>
39 #include <dev/biovar.h>
40 #endif
41
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 #include <dev/pci/pcidevs.h>
45
46 #include <dev/scsipi/scsipi_all.h>
47 #include <dev/scsipi/scsi_all.h>
48 #include <dev/scsipi/scsiconf.h>
49
50 #include <dev/sysmon/sysmonvar.h>
51
52 #include <sys/bus.h>
53
54 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
55
56 #include <dev/pci/arcmsrvar.h>
57
58 /* #define ARC_DEBUG */
59 #ifdef ARC_DEBUG
60 #define ARC_D_INIT (1<<0)
61 #define ARC_D_RW (1<<1)
62 #define ARC_D_DB (1<<2)
63
64 int arcdebug = 0;
65
66 #define DPRINTF(p...) do { if (arcdebug) printf(p); } while (0)
67 #define DNPRINTF(n, p...) do { if ((n) & arcdebug) printf(p); } while (0)
68
69 #else
70 #define DPRINTF(p...) /* p */
71 #define DNPRINTF(n, p...) /* n, p */
72 #endif
73
/*
 * Every message exchanged with the firmware is framed by this fixed
 * 3-byte header; replies whose header does not match are invalid.
 */
static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };
78
79 /*
80 * autoconf(9) glue.
81 */
82 static int arc_match(device_t, struct cfdata *, void *);
83 static void arc_attach(device_t, device_t, void *);
84 static int arc_detach(device_t, int);
85 static bool arc_shutdown(device_t, int);
86 static int arc_intr(void *);
87 static void arc_minphys(struct buf *);
88
89 CFATTACH_DECL(arcmsr, sizeof(struct arc_softc),
90 arc_match, arc_attach, arc_detach, NULL);
91
92 /*
93 * bio(4) and sysmon_envsys(9) glue.
94 */
95 #if NBIO > 0
96 static int arc_bioctl(struct device *, u_long, void *);
97 static int arc_bio_inq(struct arc_softc *, struct bioc_inq *);
98 static int arc_bio_vol(struct arc_softc *, struct bioc_vol *);
99 static int arc_bio_disk_volume(struct arc_softc *, struct bioc_disk *);
100 static int arc_bio_disk_novol(struct arc_softc *, struct bioc_disk *);
101 static void arc_bio_disk_filldata(struct arc_softc *, struct bioc_disk *,
102 struct arc_fw_diskinfo *, int);
103 static int arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
104 static int arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
105 static int arc_bio_getvol(struct arc_softc *, int,
106 struct arc_fw_volinfo *);
107 static int arc_bio_setstate(struct arc_softc *, struct bioc_setstate *);
108 static int arc_bio_volops(struct arc_softc *, struct bioc_volops *);
109 static void arc_create_sensors(void *);
110 static void arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
111 static int arc_fw_parse_status_code(struct arc_softc *, uint8_t *);
112 #endif
113
114 static int
115 arc_match(device_t parent, struct cfdata *match, void *aux)
116 {
117 struct pci_attach_args *pa = aux;
118
119 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
120 switch (PCI_PRODUCT(pa->pa_id)) {
121 case PCI_PRODUCT_ARECA_ARC1110:
122 case PCI_PRODUCT_ARECA_ARC1120:
123 case PCI_PRODUCT_ARECA_ARC1130:
124 case PCI_PRODUCT_ARECA_ARC1160:
125 case PCI_PRODUCT_ARECA_ARC1170:
126 case PCI_PRODUCT_ARECA_ARC1200:
127 case PCI_PRODUCT_ARECA_ARC1202:
128 case PCI_PRODUCT_ARECA_ARC1210:
129 case PCI_PRODUCT_ARECA_ARC1220:
130 case PCI_PRODUCT_ARECA_ARC1230:
131 case PCI_PRODUCT_ARECA_ARC1260:
132 case PCI_PRODUCT_ARECA_ARC1270:
133 case PCI_PRODUCT_ARECA_ARC1280:
134 case PCI_PRODUCT_ARECA_ARC1380:
135 case PCI_PRODUCT_ARECA_ARC1381:
136 case PCI_PRODUCT_ARECA_ARC1680:
137 case PCI_PRODUCT_ARECA_ARC1681:
138 return 1;
139 default:
140 break;
141 }
142 }
143
144 return 0;
145 }
146
/*
 * Attach: map the PCI resources, handshake with the controller
 * firmware, allocate the ccb pool and register a scsipi channel that
 * exposes the RAID volumes as SCSI targets.  When bio(4) is configured
 * it also registers the management ioctl interface and spawns a kernel
 * thread to set up the environmental sensors.
 */
static void
arc_attach(device_t parent, device_t self, void *aux)
{
	struct arc_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_chan;

	/* state and locks used by the firmware doorbell protocol */
	sc->sc_talking = 0;
	rw_init(&sc->sc_rwlock);
	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_condvar, "arcdb");

	if (arc_map_pci_resources(sc, pa) != 0) {
		/* error message printed by arc_map_pci_resources */
		return;
	}

	if (arc_query_firmware(sc) != 0) {
		/* error message printed by arc_query_firmware */
		goto unmap_pci;
	}

	if (arc_alloc_ccbs(sc) != 0) {
		/* error message printed by arc_alloc_ccbs */
		goto unmap_pci;
	}

	if (!pmf_device_register1(self, NULL, NULL, arc_shutdown))
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* divide the firmware request queue depth among the targets */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = self;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_minphys = arc_minphys;
	adapt->adapt_request = arc_scsi_cmd;

	/* the controller itself sits at target ARC_MAX_TARGET */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_nluns = ARC_MAX_LUN;
	chan->chan_ntargets = ARC_MAX_TARGET;
	chan->chan_id = ARC_MAX_TARGET;
	chan->chan_channel = 0;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	/*
	 * Save the device_t returned, so that we can attach/detach
	 * devices later via the management interface.
	 */
	sc->sc_scsibus_dv = config_found(self, &sc->sc_chan, scsiprint);

	/* enable interrupts */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));

#if NBIO > 0
	/*
	 * Register the driver to bio(4) and setup the sensors.
	 */
	if (bio_register(self, arc_bioctl) != 0)
		panic("%s: bioctl registration failed\n", device_xname(self));

	/*
	 * you need to talk to the firmware to get volume info. our firmware
	 * interface relies on being able to sleep, so we need to use a thread
	 * to do the work.
	 */
	if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
		panic("%s: unable to create a kernel thread for sensors\n",
		    device_xname(self));
#endif

	return;

unmap_pci:
	arc_unmap_pci_resources(sc);
}
228
229 static int
230 arc_detach(device_t self, int flags)
231 {
232 struct arc_softc *sc = device_private(self);
233
234 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
235 aprint_error("%s: timeout waiting to stop bg rebuild\n",
236 device_xname(&sc->sc_dev));
237
238 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
239 aprint_error("%s: timeout waiting to flush cache\n",
240 device_xname(&sc->sc_dev));
241
242 return 0;
243 }
244
245 static bool
246 arc_shutdown(device_t self, int how)
247 {
248 struct arc_softc *sc = device_private(self);
249
250 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
251 aprint_error("%s: timeout waiting to stop bg rebuild\n",
252 device_xname(&sc->sc_dev));
253
254 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
255 aprint_error("%s: timeout waiting to flush cache\n",
256 device_xname(&sc->sc_dev));
257
258 return true;
259 }
260
261 static void
262 arc_minphys(struct buf *bp)
263 {
264 if (bp->b_bcount > MAXPHYS)
265 bp->b_bcount = MAXPHYS;
266 minphys(bp);
267 }
268
/*
 * Interrupt handler.  Acknowledges and services two sources:
 * doorbell interrupts (which either wake a sleeping firmware
 * transaction or are dropped) and post-queue interrupts (completed
 * I/O commands, which are popped off the reply queue and finished).
 * Returns 1 if the interrupt was ours, 0 otherwise.
 */
static int
arc_intr(void *arg)
{
	struct arc_softc *sc = arg;
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg, intrstat;

	mutex_spin_enter(&sc->sc_mutex);
	intrstat = arc_read(sc, ARC_REG_INTRSTAT);
	if (intrstat == 0x0) {
		/* not our interrupt */
		mutex_spin_exit(&sc->sc_mutex);
		return 0;
	}

	/* ack only the sources we service */
	intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
	arc_write(sc, ARC_REG_INTRSTAT, intrstat);

	if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
		if (sc->sc_talking) {
			/* a firmware transaction is waiting: mask the
			 * doorbell and wake the sleeper */
			arc_write(sc, ARC_REG_INTRMASK,
			    ~ARC_REG_INTRMASK_POSTQUEUE);
			cv_broadcast(&sc->sc_condvar);
		} else {
			/* otherwise drop it */
			reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
			arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
			if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
				arc_write(sc, ARC_REG_INB_DOORBELL,
				    ARC_REG_INB_DOORBELL_READ_OK);
		}
	}
	mutex_spin_exit(&sc->sc_mutex);

	/*
	 * Drain the reply queue.  The posted value maps back into the
	 * request DMA region; the embedded context is the ccb index.
	 * NOTE(review): htole32() is used to read the device-written
	 * context — symmetric with le32toh() for 32-bit swaps, so the
	 * result is the same, but le32toh() would express the intent.
	 */
	while ((reg = arc_pop(sc)) != 0xffffffff) {
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	}


	return 1;
}
320
/*
 * scsipi request entry point.  Builds a firmware SCSI command frame
 * in the ccb's DMA-visible memory and posts it to the controller.
 * Only ADAPTER_REQ_RUN_XFER is supported; resource growth and
 * transfer-mode requests are ignored.
 */
void
arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct arc_softc *sc = device_private(adapt->adapt_dev);
	struct arc_ccb *ccb;
	struct arc_msg_scsicmd *cmd;
	uint32_t reg;
	uint8_t target;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* Not supported. */
		return;
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	mutex_spin_enter(&sc->sc_mutex);

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	/* CDBs longer than the firmware frame allows: fail with
	 * ILLEGAL REQUEST / invalid opcode sense data */
	if (xs->cmdlen > ARC_MSG_CDBLEN) {
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.scsi_sense.asc = 0x20;
		xs->error = XS_SENSE;
		xs->status = SCSI_CHECK;
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	ccb = arc_get_ccb(sc);
	if (ccb == NULL) {
		/* out of ccbs; scsipi will retry the transfer later */
		xs->error = XS_RESOURCE_SHORTAGE;
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	if (arc_load_xs(ccb) != 0) {
		/* DMA map load failed */
		xs->error = XS_DRIVER_STUFFUP;
		arc_put_ccb(sc, ccb);
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	cmd = &ccb->ccb_cmd->cmd;
	reg = ccb->ccb_cmd_post;

	/* bus is always 0 */
	cmd->target = target;
	cmd->lun = periph->periph_lun;
	cmd->function = 1; /* XXX magic number */

	/*
	 * NOTE(review): cmd->flags is only assigned for DATA_OUT;
	 * presumably the frame memory is zeroed when the ccb is
	 * recycled — confirm against arc_put_ccb(), otherwise a read
	 * could inherit the WRITE flag from a previous command.
	 */
	cmd->cdb_len = xs->cmdlen;
	cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
	if (xs->xs_control & XS_CTL_DATA_OUT)
		cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
	if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
		/* more than 256 SGL entries needs the big frame format */
		cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
		reg |= ARC_REG_POST_QUEUE_BIGFRAME;
	}

	/* context comes back in the reply and identifies the ccb */
	cmd->context = htole32(ccb->ccb_id);
	cmd->data_len = htole32(xs->datalen);

	memcpy(cmd->cdb, xs->cmd, xs->cmdlen);

	/* we've built the command, let's put it on the hw */
	bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	arc_push(sc, reg);
	if (xs->xs_control & XS_CTL_POLL) {
		/* polled command: spin for completion in arc_complete() */
		if (arc_complete(sc, ccb, xs->timeout) != 0) {
			/*
			 * NOTE(review): on timeout the ccb is not
			 * returned to the pool and its dmamap stays
			 * loaded — looks like a leak; confirm.
			 */
			xs->error = XS_DRIVER_STUFFUP;
			mutex_spin_exit(&sc->sc_mutex);
			scsipi_done(xs);
			return;
		}
	}

	mutex_spin_exit(&sc->sc_mutex);
}
419
/*
 * Load the transfer's data buffer into the ccb's DMA map and build
 * the 64-bit scatter/gather list in the command frame.  Returns 0 on
 * success, 1 if the DMA map could not be loaded.  A zero-length
 * transfer needs no SGL and succeeds trivially.
 */
int
arc_load_xs(struct arc_ccb *ccb)
{
	struct arc_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
	uint64_t addr;
	int i, error;

	if (xs->datalen == 0)
		return 0;

	/* don't sleep in the map load if the caller can't */
	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error != 0) {
		aprint_error("%s: error %d loading dmamap\n",
		    device_xname(&sc->sc_dev), error);
		return 1;
	}

	/* one little-endian 64-bit SGL entry per DMA segment */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sge = &sgl[i];

		sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
		addr = dmap->dm_segs[i].ds_addr;
		sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
		sge->sg_lo_addr = htole32((uint32_t)addr);
	}

	/* make the data buffer visible to the device */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
458
/*
 * Finish a completed command: sync and unload its data DMA map,
 * translate the firmware status into scsipi error/status codes, free
 * the ccb and hand the transfer back to scsipi.  "reg" is the value
 * popped from the reply queue; its ERR bit selects the error path.
 */
void
arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct arc_msg_scsicmd *cmd;

	if (xs->datalen != 0) {
		/* give the data buffer back to the CPU */
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	/* timeout_del */
	xs->status |= XS_STS_DONE;

	if (reg & ARC_REG_REPLY_QUEUE_ERR) {
		/* firmware reported an error; inspect the frame status */
		cmd = &ccb->ccb_cmd->cmd;

		switch (cmd->status) {
		case ARC_MSG_STATUS_SELTIMEOUT:
		case ARC_MSG_STATUS_ABORTED:
		case ARC_MSG_STATUS_INIT_FAIL:
			/* treat all three as a selection timeout */
			xs->status = SCSI_OK;
			xs->error = XS_SELTIMEOUT;
			break;

		case SCSI_CHECK:
			/* copy the firmware-provided sense data back */
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, cmd->sense_data,
			    min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | 0x70;
			xs->status = SCSI_CHECK;
			xs->error = XS_SENSE;
			xs->resid = 0;
			break;

		default:
			/* unknown device status */
			xs->error = XS_BUSY; /* try again later? */
			xs->status = SCSI_BUSY;
			break;
		}
	} else {
		/* clean completion */
		xs->status = SCSI_OK;
		xs->error = XS_NOERROR;
		xs->resid = 0;
	}

	arc_put_ccb(sc, ccb);
	scsipi_done(xs);
}
513
/*
 * Poll the reply queue until the given ccb completes or the timeout
 * (in milliseconds, via 1ms delay() steps) expires.  Any OTHER
 * commands that complete while polling are finished along the way.
 * Returns 0 when nccb completed, 1 on timeout.
 */
int
arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
{
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg;

	do {
		reg = arc_pop(sc);
		if (reg == 0xffffffff) {
			/* reply queue empty: burn 1ms and retry */
			if (timeout-- == 0)
				return 1;

			delay(1000);
			continue;
		}

		/* map the posted address back to the command frame;
		 * the embedded context is the ccb index */
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	} while (nccb != ccb);

	return 0;
}
546
/*
 * Map the controller's register window (BAR) and hook up the
 * interrupt handler at IPL_BIO.  Returns 0 on success, 1 on failure
 * (after printing a diagnostic and unwinding the mapping).
 */
int
arc_map_pci_resources(struct arc_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t memtype;
	pci_intr_handle_t ih;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
	if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
		aprint_error(": unable to map system interface register\n");
		return 1;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error(": unable to map interrupt\n");
		goto unmap;
	}

	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    arc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error(": unable to map interrupt [2]\n");
		goto unmap;
	}

	aprint_normal("\n");
	aprint_normal("%s: interrupting at %s\n",
	    device_xname(&sc->sc_dev), pci_intr_string(pa->pa_pc, ih));

	return 0;

unmap:
	/* release the register window mapped above */
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
	return 1;
}
587
/*
 * Undo arc_map_pci_resources(): tear down the interrupt handler and
 * unmap the register window.
 */
void
arc_unmap_pci_resources(struct arc_softc *sc)
{
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}
595
/*
 * Handshake with the IOP firmware: wait for the firmware-ok flag,
 * request its configuration, start background rebuild, then read and
 * validate the firmware info block from the message buffer.  On
 * success records the port count (sc_maxdisks) and request queue
 * depth (sc_req_count).  Returns 0 on success, 1 on failure.
 *
 * NOTE(review): fields read from the firmware are converted with
 * htole32(); for 32-bit values this is the same transformation as
 * le32toh(), so the values are correct, but le32toh() would better
 * reflect the direction of the conversion.
 */
int
arc_query_firmware(struct arc_softc *sc)
{
	struct arc_msg_firmware_info fwinfo;
	char string[81]; /* sizeof(vendor)*2+1 */

	if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
	    ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
		aprint_debug("%s: timeout waiting for firmware ok\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
		aprint_debug("%s: timeout waiting for get config\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
		aprint_debug("%s: timeout waiting to start bg rebuild\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	/* the GET_CONFIG reply is left in the message buffer region */
	arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));

	DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
	    device_xname(&sc->sc_dev), htole32(fwinfo.signature));

	if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
		aprint_error("%s: invalid firmware info from iop\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.request_len));
	DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.queue_len));
	DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.sdram_size));
	DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.sata_ports));

	scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
	DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
	    device_xname(&sc->sc_dev), string);

	scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));
	aprint_normal("%s: Areca %s Host Adapter RAID controller\n",
	    device_xname(&sc->sc_dev), string);

	scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
	DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
	    device_xname(&sc->sc_dev), string);

	aprint_normal("%s: %d ports, %dMB SDRAM, firmware <%s>\n",
	    device_xname(&sc->sc_dev), htole32(fwinfo.sata_ports),
	    htole32(fwinfo.sdram_size), string);

	/* save the number of max disks for future use */
	sc->sc_maxdisks = htole32(fwinfo.sata_ports);

	/* the driver's frame layout must match the firmware's */
	if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
		aprint_error("%s: unexpected request frame size (%d != %d)\n",
		    device_xname(&sc->sc_dev),
		    htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
		return 1;
	}

	sc->sc_req_count = htole32(fwinfo.queue_len);

	return 0;
}
675
676 #if NBIO > 0
677 static int
678 arc_bioctl(struct device *self, u_long cmd, void *addr)
679 {
680 struct arc_softc *sc = device_private(self);
681 int error = 0;
682
683 switch (cmd) {
684 case BIOCINQ:
685 error = arc_bio_inq(sc, (struct bioc_inq *)addr);
686 break;
687
688 case BIOCVOL:
689 error = arc_bio_vol(sc, (struct bioc_vol *)addr);
690 break;
691
692 case BIOCDISK:
693 error = arc_bio_disk_volume(sc, (struct bioc_disk *)addr);
694 break;
695
696 case BIOCDISK_NOVOL:
697 error = arc_bio_disk_novol(sc, (struct bioc_disk *)addr);
698 break;
699
700 case BIOCALARM:
701 error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
702 break;
703
704 case BIOCSETSTATE:
705 error = arc_bio_setstate(sc, (struct bioc_setstate *)addr);
706 break;
707
708 case BIOCVOLOPS:
709 error = arc_bio_volops(sc, (struct bioc_volops *)addr);
710 break;
711
712 default:
713 error = ENOTTY;
714 break;
715 }
716
717 return error;
718 }
719
720 static int
721 arc_fw_parse_status_code(struct arc_softc *sc, uint8_t *reply)
722 {
723 switch (*reply) {
724 case ARC_FW_CMD_RAIDINVAL:
725 printf("%s: firmware error (invalid raid set)\n",
726 device_xname(&sc->sc_dev));
727 return EINVAL;
728 case ARC_FW_CMD_VOLINVAL:
729 printf("%s: firmware error (invalid volume set)\n",
730 device_xname(&sc->sc_dev));
731 return EINVAL;
732 case ARC_FW_CMD_NORAID:
733 printf("%s: firmware error (unexistent raid set)\n",
734 device_xname(&sc->sc_dev));
735 return ENODEV;
736 case ARC_FW_CMD_NOVOLUME:
737 printf("%s: firmware error (unexistent volume set)\n",
738 device_xname(&sc->sc_dev));
739 return ENODEV;
740 case ARC_FW_CMD_NOPHYSDRV:
741 printf("%s: firmware error (unexistent physical drive)\n",
742 device_xname(&sc->sc_dev));
743 return ENODEV;
744 case ARC_FW_CMD_PARAM_ERR:
745 printf("%s: firmware error (parameter error)\n",
746 device_xname(&sc->sc_dev));
747 return EINVAL;
748 case ARC_FW_CMD_UNSUPPORTED:
749 printf("%s: firmware error (unsupported command)\n",
750 device_xname(&sc->sc_dev));
751 return EOPNOTSUPP;
752 case ARC_FW_CMD_DISKCFG_CHGD:
753 printf("%s: firmware error (disk configuration changed)\n",
754 device_xname(&sc->sc_dev));
755 return EINVAL;
756 case ARC_FW_CMD_PASS_INVAL:
757 printf("%s: firmware error (invalid password)\n",
758 device_xname(&sc->sc_dev));
759 return EINVAL;
760 case ARC_FW_CMD_NODISKSPACE:
761 printf("%s: firmware error (no disk space available)\n",
762 device_xname(&sc->sc_dev));
763 return EOPNOTSUPP;
764 case ARC_FW_CMD_CHECKSUM_ERR:
765 printf("%s: firmware error (checksum error)\n",
766 device_xname(&sc->sc_dev));
767 return EINVAL;
768 case ARC_FW_CMD_PASS_REQD:
769 printf("%s: firmware error (password required)\n",
770 device_xname(&sc->sc_dev));
771 return EPERM;
772 case ARC_FW_CMD_OK:
773 default:
774 return 0;
775 }
776 }
777
778 static int
779 arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
780 {
781 uint8_t request[2], reply[1];
782 size_t len;
783 int error = 0;
784
785 switch (ba->ba_opcode) {
786 case BIOC_SAENABLE:
787 case BIOC_SADISABLE:
788 request[0] = ARC_FW_SET_ALARM;
789 request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
790 ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
791 len = sizeof(request);
792
793 break;
794
795 case BIOC_SASILENCE:
796 request[0] = ARC_FW_MUTE_ALARM;
797 len = 1;
798
799 break;
800
801 case BIOC_GASTATUS:
802 /* system info is too big/ugly to deal with here */
803 return arc_bio_alarm_state(sc, ba);
804
805 default:
806 return EOPNOTSUPP;
807 }
808
809 error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
810 if (error != 0)
811 return error;
812
813 return arc_fw_parse_status_code(sc, &reply[0]);
814 }
815
816 static int
817 arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
818 {
819 struct arc_fw_sysinfo *sysinfo;
820 uint8_t request;
821 int error = 0;
822
823 sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
824
825 request = ARC_FW_SYSINFO;
826 error = arc_msgbuf(sc, &request, sizeof(request),
827 sysinfo, sizeof(struct arc_fw_sysinfo));
828
829 if (error != 0)
830 goto out;
831
832 ba->ba_status = sysinfo->alarm;
833
834 out:
835 kmem_free(sysinfo, sizeof(*sysinfo));
836 return error;
837 }
838
/*
 * BIOCVOLOPS handler: create or remove a volume.  Creation first
 * builds a raid set over the requested disks, then a volume set on
 * top of it and rescans the SCSI bus so the new disk attaches.
 * Removal deletes the volume set, detaches the sd(4) device and then
 * deletes the underlying raid set.
 */
static int
arc_bio_volops(struct arc_softc *sc, struct bioc_volops *bc)
{
	/* to create a raid set */
	struct req_craidset {
		uint8_t cmdcode;
		uint32_t devmask;
		uint8_t raidset_name[16];
	} __packed;

	/* to create a volume set */
	struct req_cvolset {
		uint8_t cmdcode;
		uint8_t raidset;
		uint8_t volset_name[16];
		uint64_t capacity;
		uint8_t raidlevel;
		uint8_t stripe;
		uint8_t scsi_chan;
		uint8_t scsi_target;
		uint8_t scsi_lun;
		uint8_t tagqueue;
		uint8_t cache;
		uint8_t speed;
		uint8_t quick_init;
	} __packed;

	struct scsibus_softc *scsibus_sc = NULL;
	struct req_craidset req_craidset;
	struct req_cvolset req_cvolset;
	uint8_t request[2];
	uint8_t reply[1];
	int error = 0;

	switch (bc->bc_opcode) {
	case BIOC_VCREATE_VOLUME:
	    {
		/*
		 * Zero out the structs so that we use some defaults
		 * in raid and volume sets.
		 */
		memset(&req_craidset, 0, sizeof(req_craidset));
		memset(&req_cvolset, 0, sizeof(req_cvolset));

		/*
		 * Firstly we have to create the raid set and
		 * use the default name for all them.
		 */
		req_craidset.cmdcode = ARC_FW_CREATE_RAIDSET;
		req_craidset.devmask = bc->bc_devmask;
		error = arc_msgbuf(sc, &req_craidset, sizeof(req_craidset),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: create raidset%d failed\n",
			    device_xname(&sc->sc_dev), bc->bc_volid);
			return error;
		}

		/*
		 * At this point the raid set was created, so it's
		 * time to create the volume set.
		 */
		req_cvolset.cmdcode = ARC_FW_CREATE_VOLUME;
		req_cvolset.raidset = bc->bc_volid;
		/* capacity is in bytes; bc_size is in blocks */
		req_cvolset.capacity = bc->bc_size * ARC_BLOCKSIZE;

		/*
		 * Set the RAID level.
		 */
		switch (bc->bc_level) {
		case 0:
		case 1:
			req_cvolset.raidlevel = bc->bc_level;
			break;
		case 3:
			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_3;
			break;
		case 5:
			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_5;
			break;
		case 6:
			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_6;
			break;
		default:
			return EOPNOTSUPP;
		}

		/*
		 * Set the stripe size (the firmware encodes it as a
		 * power-of-two index starting at 4K).
		 */
		switch (bc->bc_stripe) {
		case 4:
			req_cvolset.stripe = 0;
			break;
		case 8:
			req_cvolset.stripe = 1;
			break;
		case 16:
			req_cvolset.stripe = 2;
			break;
		case 32:
			req_cvolset.stripe = 3;
			break;
		case 64:
			req_cvolset.stripe = 4;
			break;
		case 128:
			req_cvolset.stripe = 5;
			break;
		default:
			req_cvolset.stripe = 4; /* by default 64K */
			break;
		}

		req_cvolset.scsi_chan = bc->bc_channel;
		req_cvolset.scsi_target = bc->bc_target;
		req_cvolset.scsi_lun = bc->bc_lun;
		req_cvolset.tagqueue = 1; /* always enabled */
		req_cvolset.cache = 1; /* always enabled */
		req_cvolset.speed = 4; /* always max speed */

		error = arc_msgbuf(sc, &req_cvolset, sizeof(req_cvolset),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: create volumeset%d failed\n",
			    device_xname(&sc->sc_dev), bc->bc_volid);
			return error;
		}

		/*
		 * Do a rescan on the bus to attach the device associated
		 * with the new volume.
		 */
		scsibus_sc = device_private(sc->sc_scsibus_dv);
		(void)scsi_probe_bus(scsibus_sc, bc->bc_target, bc->bc_lun);

		break;
	    }
	case BIOC_VREMOVE_VOLUME:
	    {
		/*
		 * Remove the volume set specified in bc_volid.
		 */
		request[0] = ARC_FW_DELETE_VOLUME;
		request[1] = bc->bc_volid;
		error = arc_msgbuf(sc, request, sizeof(request),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: delete volumeset%d failed\n",
			    device_xname(&sc->sc_dev), bc->bc_volid);
			return error;
		}

		/*
		 * Detach the sd(4) device associated with the volume,
		 * but if there's an error don't make it a priority.
		 */
		error = scsipi_target_detach(&sc->sc_chan, bc->bc_target,
		    bc->bc_lun, 0);
		if (error)
			printf("%s: couldn't detach sd device for volume %d "
			    "at %u:%u.%u (error=%d)\n",
			    device_xname(&sc->sc_dev), bc->bc_volid,
			    bc->bc_channel, bc->bc_target, bc->bc_lun, error);

		/*
		 * and remove the raid set specified in bc_volid,
		 * we only care about volumes.
		 */
		request[0] = ARC_FW_DELETE_RAIDSET;
		request[1] = bc->bc_volid;
		error = arc_msgbuf(sc, request, sizeof(request),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: delete raidset%d failed\n",
			    device_xname(&sc->sc_dev), bc->bc_volid);
			return error;
		}

		break;
	    }
	default:
		return EOPNOTSUPP;
	}

	return error;
}
1042
/*
 * BIOCSETSTATE handler: add/remove hotspares and pass-through disks
 * and start/stop volume consistency checks.  Note the control flow:
 * the hotspare cases jump to the shared "hotspare:" tail and the
 * others jump straight to "out:" after their firmware transaction.
 */
static int
arc_bio_setstate(struct arc_softc *sc, struct bioc_setstate *bs)
{
	/* for a hotspare disk */
	struct request_hs {
		uint8_t cmdcode;
		uint32_t devmask;
	} __packed;

	/* for a pass-through disk */
	struct request_pt {
		uint8_t cmdcode;
		uint8_t devid;
		uint8_t scsi_chan;
		uint8_t scsi_id;
		uint8_t scsi_lun;
		uint8_t tagged_queue;
		uint8_t cache_mode;
		uint8_t max_speed;
	} __packed;

	struct scsibus_softc *scsibus_sc = NULL;
	struct request_hs req_hs; /* to add/remove hotspare */
	struct request_pt req_pt; /* to add a pass-through */
	uint8_t req_gen[2];
	uint8_t reply[1];
	int error = 0;

	switch (bs->bs_status) {
	case BIOC_SSHOTSPARE:
	    {
		req_hs.cmdcode = ARC_FW_CREATE_HOTSPARE;
		req_hs.devmask = (1 << bs->bs_target);
		goto hotspare;
	    }
	case BIOC_SSDELHOTSPARE:
	    {
		req_hs.cmdcode = ARC_FW_DELETE_HOTSPARE;
		req_hs.devmask = (1 << bs->bs_target);
		goto hotspare;
	    }
	case BIOC_SSPASSTHRU:
	    {
		req_pt.cmdcode = ARC_FW_CREATE_PASSTHRU;
		req_pt.devid = bs->bs_other_id; /* this wants device# */
		req_pt.scsi_chan = bs->bs_channel;
		req_pt.scsi_id = bs->bs_target;
		req_pt.scsi_lun = bs->bs_lun;
		req_pt.tagged_queue = 1; /* always enabled */
		req_pt.cache_mode = 1; /* always enabled */
		req_pt.max_speed = 4; /* always max speed */

		error = arc_msgbuf(sc, &req_pt, sizeof(req_pt),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		/*
		 * Do a rescan on the bus to attach the new device
		 * associated with the pass-through disk.
		 */
		scsibus_sc = device_private(sc->sc_scsibus_dv);
		(void)scsi_probe_bus(scsibus_sc, bs->bs_target, bs->bs_lun);

		goto out;
	    }
	case BIOC_SSDELPASSTHRU:
	    {
		req_gen[0] = ARC_FW_DELETE_PASSTHRU;
		req_gen[1] = bs->bs_target;
		error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		/*
		 * Detach the sd device associated with this pass-through disk.
		 */
		error = scsipi_target_detach(&sc->sc_chan, bs->bs_target,
		    bs->bs_lun, 0);
		if (error)
			printf("%s: couldn't detach sd device for the "
			    "pass-through disk at %u:%u.%u (error=%d)\n",
			    device_xname(&sc->sc_dev),
			    bs->bs_channel, bs->bs_target, bs->bs_lun, error);

		goto out;
	    }
	case BIOC_SSCHECKSTART_VOL:
	    {
		/* kick off a consistency check of volume bs_volid */
		req_gen[0] = ARC_FW_START_CHECKVOL;
		req_gen[1] = bs->bs_volid;
		error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		goto out;
	    }
	case BIOC_SSCHECKSTOP_VOL:
	    {
		/* single-byte command: stop the running check */
		uint8_t req = ARC_FW_STOP_CHECKVOL;
		error = arc_msgbuf(sc, &req, 1, reply, sizeof(reply));
		if (error != 0)
			return error;

		goto out;
	    }
	default:
		return EOPNOTSUPP;
	}

hotspare:
	/* shared tail for the create/delete hotspare cases */
	error = arc_msgbuf(sc, &req_hs, sizeof(req_hs),
	    reply, sizeof(reply));
	if (error != 0)
		return error;

out:
	return arc_fw_parse_status_code(sc, &reply[0]);
}
1164
1165 static int
1166 arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
1167 {
1168 uint8_t request[2];
1169 struct arc_fw_sysinfo *sysinfo;
1170 struct arc_fw_raidinfo *raidinfo;
1171 int maxraidset, nvols = 0, i;
1172 int error = 0;
1173
1174 sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
1175 raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1176
1177 request[0] = ARC_FW_SYSINFO;
1178 error = arc_msgbuf(sc, request, 1, sysinfo,
1179 sizeof(struct arc_fw_sysinfo));
1180 if (error != 0)
1181 goto out;
1182
1183 maxraidset = sysinfo->max_raid_set;
1184
1185 request[0] = ARC_FW_RAIDINFO;
1186 for (i = 0; i < maxraidset; i++) {
1187 request[1] = i;
1188 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1189 sizeof(struct arc_fw_raidinfo));
1190 if (error != 0)
1191 goto out;
1192
1193 if (raidinfo->volumes)
1194 nvols++;
1195 }
1196
1197 strlcpy(bi->bi_dev, device_xname(&sc->sc_dev), sizeof(bi->bi_dev));
1198 bi->bi_novol = nvols;
1199 bi->bi_nodisk = sc->sc_maxdisks;
1200
1201 out:
1202 kmem_free(raidinfo, sizeof(*raidinfo));
1203 kmem_free(sysinfo, sizeof(*sysinfo));
1204 return error;
1205 }
1206
/*
 * Copy the firmware volume info for the "vol"th usable volume into
 * "volinfo".  Volumes whose capacity is zero are not counted, so "vol"
 * indexes only populated entries.  Returns ENODEV if no such volume
 * exists, otherwise 0 or the error from the firmware exchange.
 */
static int
arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
{
	uint8_t request[2];
	struct arc_fw_sysinfo *sysinfo;
	int error = 0;
	int maxvols, nvols = 0, i;

	sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);

	/* ask the firmware for the upper bound on volume sets */
	request[0] = ARC_FW_SYSINFO;
	error = arc_msgbuf(sc, request, 1, sysinfo,
	    sizeof(struct arc_fw_sysinfo));
	if (error != 0)
		goto out;

	maxvols = sysinfo->max_volume_set;

	request[0] = ARC_FW_VOLINFO;
	for (i = 0; i < maxvols; i++) {
		request[1] = i;
		error = arc_msgbuf(sc, request, sizeof(request), volinfo,
		    sizeof(struct arc_fw_volinfo));
		if (error != 0)
			goto out;

		/* skip unused volume slots */
		if (volinfo->capacity == 0 && volinfo->capacity2 == 0)
			continue;

		/* found it: *volinfo already holds the requested entry */
		if (nvols == vol)
			break;

		nvols++;
	}

	/*
	 * If the loop ran off the end, *volinfo holds either the last
	 * queried (possibly empty) slot; report no such volume.
	 */
	if (nvols != vol ||
	    (volinfo->capacity == 0 && volinfo->capacity2 == 0)) {
		error = ENODEV;
		goto out;
	}

out:
	kmem_free(sysinfo, sizeof(*sysinfo));
	return error;
}
1252
1253 static int
1254 arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
1255 {
1256 struct arc_fw_volinfo *volinfo;
1257 uint64_t blocks;
1258 uint32_t status;
1259 int error = 0;
1260
1261 volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1262
1263 error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
1264 if (error != 0)
1265 goto out;
1266
1267 bv->bv_percent = -1;
1268 bv->bv_seconds = 0;
1269
1270 status = htole32(volinfo->volume_status);
1271 if (status == 0x0) {
1272 if (htole32(volinfo->fail_mask) == 0x0)
1273 bv->bv_status = BIOC_SVONLINE;
1274 else
1275 bv->bv_status = BIOC_SVDEGRADED;
1276 } else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
1277 bv->bv_status = BIOC_SVDEGRADED;
1278 } else if (status & ARC_FW_VOL_STATUS_FAILED) {
1279 bv->bv_status = BIOC_SVOFFLINE;
1280 } else if (status & ARC_FW_VOL_STATUS_INITTING) {
1281 bv->bv_status = BIOC_SVBUILDING;
1282 bv->bv_percent = htole32(volinfo->progress);
1283 } else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
1284 bv->bv_status = BIOC_SVREBUILD;
1285 bv->bv_percent = htole32(volinfo->progress);
1286 } else if (status & ARC_FW_VOL_STATUS_MIGRATING) {
1287 bv->bv_status = BIOC_SVMIGRATING;
1288 bv->bv_percent = htole32(volinfo->progress);
1289 } else if (status & ARC_FW_VOL_STATUS_CHECKING) {
1290 bv->bv_status = BIOC_SVCHECKING;
1291 bv->bv_percent = htole32(volinfo->progress);
1292 }
1293
1294 blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
1295 blocks += (uint64_t)htole32(volinfo->capacity);
1296 bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */
1297
1298 switch (volinfo->raid_level) {
1299 case ARC_FW_VOL_RAIDLEVEL_0:
1300 bv->bv_level = 0;
1301 break;
1302 case ARC_FW_VOL_RAIDLEVEL_1:
1303 bv->bv_level = 1;
1304 break;
1305 case ARC_FW_VOL_RAIDLEVEL_3:
1306 bv->bv_level = 3;
1307 break;
1308 case ARC_FW_VOL_RAIDLEVEL_5:
1309 bv->bv_level = 5;
1310 break;
1311 case ARC_FW_VOL_RAIDLEVEL_6:
1312 bv->bv_level = 6;
1313 break;
1314 case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
1315 bv->bv_level = BIOC_SVOL_PASSTHRU;
1316 break;
1317 default:
1318 bv->bv_level = -1;
1319 break;
1320 }
1321
1322 bv->bv_nodisk = volinfo->member_disks;
1323 bv->bv_stripe_size = volinfo->stripe_size / 2;
1324 snprintf(bv->bv_dev, sizeof(bv->bv_dev), "sd%d", bv->bv_volid);
1325 scsipi_strvis(bv->bv_vendor, sizeof(bv->bv_vendor), volinfo->set_name,
1326 sizeof(volinfo->set_name));
1327
1328 out:
1329 kmem_free(volinfo, sizeof(*volinfo));
1330 return error;
1331 }
1332
1333 static int
1334 arc_bio_disk_novol(struct arc_softc *sc, struct bioc_disk *bd)
1335 {
1336 struct arc_fw_diskinfo *diskinfo;
1337 uint8_t request[2];
1338 int error = 0;
1339
1340 diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1341
1342 if (bd->bd_diskid > sc->sc_maxdisks) {
1343 error = ENODEV;
1344 goto out;
1345 }
1346
1347 request[0] = ARC_FW_DISKINFO;
1348 request[1] = bd->bd_diskid;
1349 error = arc_msgbuf(sc, request, sizeof(request),
1350 diskinfo, sizeof(struct arc_fw_diskinfo));
1351 if (error != 0)
1352 return error;
1353
1354 /* skip disks with no capacity */
1355 if (htole32(diskinfo->capacity) == 0 &&
1356 htole32(diskinfo->capacity2) == 0)
1357 goto out;
1358
1359 bd->bd_disknovol = true;
1360 arc_bio_disk_filldata(sc, bd, diskinfo, bd->bd_diskid);
1361
1362 out:
1363 kmem_free(diskinfo, sizeof(*diskinfo));
1364 return error;
1365 }
1366
1367 static void
1368 arc_bio_disk_filldata(struct arc_softc *sc, struct bioc_disk *bd,
1369 struct arc_fw_diskinfo *diskinfo, int diskid)
1370 {
1371 uint64_t blocks;
1372 char model[81];
1373 char serial[41];
1374 char rev[17];
1375
1376 switch (htole32(diskinfo->device_state)) {
1377 case ARC_FW_DISK_PASSTHRU:
1378 bd->bd_status = BIOC_SDPASSTHRU;
1379 break;
1380 case ARC_FW_DISK_RAIDMEMBER:
1381 bd->bd_status = BIOC_SDONLINE;
1382 break;
1383 case ARC_FW_DISK_HOTSPARE:
1384 bd->bd_status = BIOC_SDHOTSPARE;
1385 break;
1386 case ARC_FW_DISK_UNUSED:
1387 bd->bd_status = BIOC_SDUNUSED;
1388 break;
1389 default:
1390 printf("%s: unknown disk device_state: 0x%x\n", __func__,
1391 htole32(diskinfo->device_state));
1392 bd->bd_status = BIOC_SDINVALID;
1393 return;
1394 }
1395
1396 blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
1397 blocks += (uint64_t)htole32(diskinfo->capacity);
1398 bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */
1399
1400 scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
1401 scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
1402 scsipi_strvis(rev, 17, diskinfo->firmware_rev,
1403 sizeof(diskinfo->firmware_rev));
1404
1405 snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
1406 strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));
1407
1408 #if 0
1409 bd->bd_channel = diskinfo->scsi_attr.channel;
1410 bd->bd_target = diskinfo->scsi_attr.target;
1411 bd->bd_lun = diskinfo->scsi_attr.lun;
1412 #endif
1413
1414 /*
1415 * the firwmare doesnt seem to fill scsi_attr in, so fake it with
1416 * the diskid.
1417 */
1418 bd->bd_channel = 0;
1419 bd->bd_target = diskid;
1420 bd->bd_lun = 0;
1421 }
1422
1423 static int
1424 arc_bio_disk_volume(struct arc_softc *sc, struct bioc_disk *bd)
1425 {
1426 uint8_t request[2];
1427 struct arc_fw_raidinfo *raidinfo;
1428 struct arc_fw_volinfo *volinfo;
1429 struct arc_fw_diskinfo *diskinfo;
1430 int error = 0;
1431
1432 volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1433 raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1434 diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1435
1436 error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
1437 if (error != 0)
1438 goto out;
1439
1440 request[0] = ARC_FW_RAIDINFO;
1441 request[1] = volinfo->raid_set_number;
1442
1443 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1444 sizeof(struct arc_fw_raidinfo));
1445 if (error != 0)
1446 goto out;
1447
1448 if (bd->bd_diskid > raidinfo->member_devices) {
1449 error = ENODEV;
1450 goto out;
1451 }
1452
1453 request[0] = ARC_FW_DISKINFO;
1454 request[1] = raidinfo->device_array[bd->bd_diskid];
1455 error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
1456 sizeof(struct arc_fw_diskinfo));
1457 if (error != 0)
1458 goto out;
1459
1460 /* now fill our bio disk with data from the firmware */
1461 arc_bio_disk_filldata(sc, bd, diskinfo,
1462 raidinfo->device_array[bd->bd_diskid]);
1463
1464 out:
1465 kmem_free(raidinfo, sizeof(*raidinfo));
1466 kmem_free(volinfo, sizeof(*volinfo));
1467 kmem_free(diskinfo, sizeof(*diskinfo));
1468 return error;
1469 }
1470 #endif /* NBIO > 0 */
1471
/*
 * Compute the 8bit checksum the firmware expects on message buffers:
 * the two bytes of the length plus every payload byte, truncated to
 * eight bits.
 */
uint8_t
arc_msg_cksum(void *cmd, uint16_t len)
{
	const uint8_t *p = cmd;
	uint8_t sum;
	int n;

	/* seed with both halves of the 16bit length */
	sum = (uint8_t)(len >> 8) + (uint8_t)len;
	for (n = 0; n < len; n++)
		sum += p[n];

	return sum;
}
1485
1486
/*
 * Exchange a message with the firmware through the IOC read/write
 * buffer registers.  The write payload (wptr/wbuflen) is framed with
 * an arc_fw_bufhdr and a trailing checksum, pushed to the hardware in
 * register-window sized chunks, and the framed reply is collected the
 * same way into rptr/rbuflen.  Progress in each direction is gated by
 * the doorbell handshake bits.  Must not be called from interrupt
 * context (sleeps in kmem_alloc and arc_wait).  Returns 0 on success,
 * EBUSY if the doorbell was already pending, EIO on a framing,
 * length or checksum error from the firmware.
 */
int
arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
    size_t rbuflen)
{
	uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
	uint8_t *wbuf, *rbuf;
	int wlen, wdone = 0, rlen, rdone = 0;
	struct arc_fw_bufhdr *bufhdr;
	uint32_t reg, rwlen;
	int error = 0;
#ifdef ARC_DEBUG
	int i;
#endif

	wbuf = rbuf = NULL;

	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
	    device_xname(&sc->sc_dev), wbuflen, rbuflen);

	/* framed lengths: header + payload + trailing checksum byte */
	wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
	wbuf = kmem_alloc(wlen, KM_SLEEP);

	rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
	rbuf = kmem_alloc(rlen, KM_SLEEP);

	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
	    device_xname(&sc->sc_dev), wlen, rlen);

	/* build the outgoing frame: header, payload, checksum */
	bufhdr = (struct arc_fw_bufhdr *)wbuf;
	bufhdr->hdr = arc_fw_hdr;
	bufhdr->len = htole16(wbuflen);
	memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
	wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);

	arc_lock(sc);
	/* a pending doorbell means someone else is mid-conversation */
	if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0) {
		error = EBUSY;
		goto out;
	}

	/* pretend the hw already said "ok to write" to prime the loop */
	reg = ARC_REG_OUTB_DOORBELL_READ_OK;

	do {
		if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
			memset(rwbuf, 0, sizeof(rwbuf));
			/*
			 * NOTE(review): "%" only equals the remaining
			 * byte count while (wlen - wdone) is smaller
			 * than the window; presumably all commands fit
			 * in one window or are never an exact multiple
			 * of it -- confirm before reusing for larger
			 * writes.
			 */
			rwlen = (wlen - wdone) % sizeof(rwbuf);
			memcpy(rwbuf, &wbuf[wdone], rwlen);

#ifdef ARC_DEBUG
			if (arcdebug & ARC_D_DB) {
				printf("%s: write %d:",
				    device_xname(&sc->sc_dev), rwlen);
				for (i = 0; i < rwlen; i++)
					printf(" 0x%02x", rwbuf[i]);
				printf("\n");
			}
#endif

			/* copy the chunk to the hw */
			arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
			arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
			    sizeof(rwbuf));

			/* say we have a buffer for the hw */
			arc_write(sc, ARC_REG_INB_DOORBELL,
			    ARC_REG_INB_DOORBELL_WRITE_OK);

			wdone += rwlen;
		}

		/* sleep until the firmware rings the doorbell */
		while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
			arc_wait(sc);

		/* ack the doorbell bits we are about to act on */
		arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);

		DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
		    device_xname(&sc->sc_dev), reg);

		if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
			/* the hw tells us how much it put in the window */
			rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
			if (rwlen > sizeof(rwbuf)) {
				DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
				    device_xname(&sc->sc_dev));
				error = EIO;
				goto out;
			}

			arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
			    sizeof(rwbuf));

			/* tell the hw we consumed the window */
			arc_write(sc, ARC_REG_INB_DOORBELL,
			    ARC_REG_INB_DOORBELL_READ_OK);

#ifdef ARC_DEBUG
			printf("%s: len: %d+%d=%d/%d\n",
			    device_xname(&sc->sc_dev),
			    rwlen, rdone, rwlen + rdone, rlen);
			if (arcdebug & ARC_D_DB) {
				printf("%s: read:",
				    device_xname(&sc->sc_dev));
				for (i = 0; i < rwlen; i++)
					printf(" 0x%02x", rwbuf[i]);
				printf("\n");
			}
#endif

			/* refuse replies longer than the caller's buffer */
			if ((rdone + rwlen) > rlen) {
				DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
				    device_xname(&sc->sc_dev));
				error = EIO;
				goto out;
			}

			memcpy(&rbuf[rdone], rwbuf, rwlen);
			rdone += rwlen;
		}
	} while (rdone != rlen);

	/* validate the reply frame: magic header and expected length */
	bufhdr = (struct arc_fw_bufhdr *)rbuf;
	if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
	    bufhdr->len != htole16(rbuflen)) {
		DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
		    device_xname(&sc->sc_dev));
		error = EIO;
		goto out;
	}

	memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);

	/* last byte of the frame is the checksum over the payload */
	if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
		DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
		    device_xname(&sc->sc_dev));
		error = EIO;
		goto out;
	}

out:
	arc_unlock(sc);
	kmem_free(wbuf, wlen);
	kmem_free(rbuf, rlen);

	return error;
}
1630
/*
 * Take exclusive ownership of the firmware message interface.  The
 * rwlock serializes whole conversations between threads; the spin
 * mutex protects the register handshake and pairs with arc_wait().
 */
void
arc_lock(struct arc_softc *sc)
{
	rw_enter(&sc->sc_rwlock, RW_WRITER);
	mutex_spin_enter(&sc->sc_mutex);
	/* mask everything except post queue completions while talking */
	arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
	sc->sc_talking = 1;
}
1639
/*
 * Release the firmware message interface taken by arc_lock() and
 * unmask the doorbell interrupt again.
 */
void
arc_unlock(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	/* re-enable doorbell interrupts now that we are done talking */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	sc->sc_talking = 0;
	mutex_spin_exit(&sc->sc_mutex);
	rw_exit(&sc->sc_rwlock);
}
1651
/*
 * Sleep (holding sc_mutex, which cv_timedwait drops) until the
 * doorbell interrupt wakes us, or for at most one second.  The
 * doorbell interrupt is unmasked for the duration; on timeout the
 * mask is restored to post-queue-only.
 */
void
arc_wait(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	/* let the doorbell interrupt through so it can wake us */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	if (cv_timedwait(&sc->sc_condvar, &sc->sc_mutex, hz) == EWOULDBLOCK)
		arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
}
1662
1663 #if NBIO > 0
1664 static void
1665 arc_create_sensors(void *arg)
1666 {
1667 struct arc_softc *sc = arg;
1668 struct bioc_inq bi;
1669 struct bioc_vol bv;
1670 int i, j;
1671 size_t slen, count = 0;
1672
1673 memset(&bi, 0, sizeof(bi));
1674 if (arc_bio_inq(sc, &bi) != 0) {
1675 aprint_error("%s: unable to query firmware for sensor info\n",
1676 device_xname(&sc->sc_dev));
1677 kthread_exit(0);
1678 }
1679
1680 /* There's no point to continue if there are no volumes */
1681 if (!bi.bi_novol)
1682 kthread_exit(0);
1683
1684 for (i = 0; i < bi.bi_novol; i++) {
1685 memset(&bv, 0, sizeof(bv));
1686 bv.bv_volid = i;
1687 if (arc_bio_vol(sc, &bv) != 0)
1688 kthread_exit(0);
1689
1690 /* Skip passthrough volumes */
1691 if (bv.bv_level == BIOC_SVOL_PASSTHRU)
1692 continue;
1693
1694 /* new volume found */
1695 sc->sc_nsensors++;
1696 /* new disk in a volume found */
1697 sc->sc_nsensors+= bv.bv_nodisk;
1698 }
1699
1700 sc->sc_sme = sysmon_envsys_create();
1701 slen = sizeof(envsys_data_t) * sc->sc_nsensors;
1702 sc->sc_sensors = kmem_zalloc(slen, KM_SLEEP);
1703
1704 /* Attach sensors for volumes and disks */
1705 for (i = 0; i < bi.bi_novol; i++) {
1706 memset(&bv, 0, sizeof(bv));
1707 bv.bv_volid = i;
1708 if (arc_bio_vol(sc, &bv) != 0)
1709 goto bad;
1710
1711 sc->sc_sensors[count].units = ENVSYS_DRIVE;
1712 sc->sc_sensors[count].monitor = true;
1713 sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;
1714
1715 /* Skip passthrough volumes */
1716 if (bv.bv_level == BIOC_SVOL_PASSTHRU)
1717 continue;
1718
1719 snprintf(sc->sc_sensors[count].desc,
1720 sizeof(sc->sc_sensors[count].desc),
1721 "RAID %d volume%d (%s)", bv.bv_level, i, bv.bv_dev);
1722 sc->sc_sensors[count].value_max = i;
1723
1724 if (sysmon_envsys_sensor_attach(sc->sc_sme,
1725 &sc->sc_sensors[count]))
1726 goto bad;
1727
1728 count++;
1729
1730 /* Attach disk sensors for this volume */
1731 for (j = 0; j < bv.bv_nodisk; j++) {
1732 sc->sc_sensors[count].units = ENVSYS_DRIVE;
1733 sc->sc_sensors[count].monitor = true;
1734 sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;
1735
1736 snprintf(sc->sc_sensors[count].desc,
1737 sizeof(sc->sc_sensors[count].desc),
1738 "disk%d volume%d (%s)", j, i, bv.bv_dev);
1739 sc->sc_sensors[count].value_max = i;
1740 sc->sc_sensors[count].value_avg = j + 10;
1741
1742 if (sysmon_envsys_sensor_attach(sc->sc_sme,
1743 &sc->sc_sensors[count]))
1744 goto bad;
1745
1746 count++;
1747 }
1748 }
1749
1750 /*
1751 * Register our envsys driver with the framework now that the
1752 * sensors were all attached.
1753 */
1754 sc->sc_sme->sme_name = device_xname(&sc->sc_dev);
1755 sc->sc_sme->sme_cookie = sc;
1756 sc->sc_sme->sme_refresh = arc_refresh_sensors;
1757
1758 if (sysmon_envsys_register(sc->sc_sme)) {
1759 aprint_debug("%s: unable to register with sysmon\n",
1760 device_xname(&sc->sc_dev));
1761 goto bad;
1762 }
1763 kthread_exit(0);
1764
1765 bad:
1766 kmem_free(sc->sc_sensors, slen);
1767 sysmon_envsys_destroy(sc->sc_sme);
1768 kthread_exit(0);
1769 }
1770
1771 static void
1772 arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
1773 {
1774 struct arc_softc *sc = sme->sme_cookie;
1775 struct bioc_vol bv;
1776 struct bioc_disk bd;
1777
1778 /* sanity check */
1779 if (edata->units != ENVSYS_DRIVE)
1780 return;
1781
1782 memset(&bv, 0, sizeof(bv));
1783 bv.bv_volid = edata->value_max;
1784
1785 if (arc_bio_vol(sc, &bv)) {
1786 edata->value_cur = ENVSYS_DRIVE_EMPTY;
1787 edata->state = ENVSYS_SINVALID;
1788 return;
1789 }
1790
1791 /* Current sensor is handling a disk volume member */
1792 if (edata->value_avg) {
1793 memset(&bd, 0, sizeof(bd));
1794 bd.bd_volid = edata->value_max;
1795 bd.bd_diskid = edata->value_avg - 10;
1796
1797 if (arc_bio_disk_volume(sc, &bd)) {
1798 edata->value_cur = ENVSYS_DRIVE_EMPTY;
1799 edata->state = ENVSYS_SINVALID;
1800 return;
1801 }
1802
1803 switch (bd.bd_status) {
1804 case BIOC_SDONLINE:
1805 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1806 edata->state = ENVSYS_SVALID;
1807 break;
1808 case BIOC_SDOFFLINE:
1809 edata->value_cur = ENVSYS_DRIVE_OFFLINE;
1810 edata->state = ENVSYS_SCRITICAL;
1811 break;
1812 default:
1813 edata->value_cur = ENVSYS_DRIVE_EMPTY;
1814 edata->state = ENVSYS_SCRITICAL;
1815 break;
1816 }
1817
1818 return;
1819 }
1820
1821 /* Current sensor is handling a volume */
1822 switch (bv.bv_status) {
1823 case BIOC_SVOFFLINE:
1824 edata->value_cur = ENVSYS_DRIVE_FAIL;
1825 edata->state = ENVSYS_SCRITICAL;
1826 break;
1827 case BIOC_SVDEGRADED:
1828 edata->value_cur = ENVSYS_DRIVE_PFAIL;
1829 edata->state = ENVSYS_SCRITICAL;
1830 break;
1831 case BIOC_SVBUILDING:
1832 edata->value_cur = ENVSYS_DRIVE_BUILD;
1833 edata->state = ENVSYS_SVALID;
1834 break;
1835 case BIOC_SVMIGRATING:
1836 edata->value_cur = ENVSYS_DRIVE_MIGRATING;
1837 edata->state = ENVSYS_SVALID;
1838 break;
1839 case BIOC_SVCHECKING:
1840 edata->value_cur = ENVSYS_DRIVE_CHECK;
1841 edata->state = ENVSYS_SVALID;
1842 break;
1843 case BIOC_SVSCRUB:
1844 case BIOC_SVONLINE:
1845 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1846 edata->state = ENVSYS_SVALID;
1847 break;
1848 case BIOC_SVINVALID:
1849 /* FALLTHROUGH */
1850 default:
1851 edata->value_cur = ENVSYS_DRIVE_EMPTY; /* unknown state */
1852 edata->state = ENVSYS_SINVALID;
1853 break;
1854 }
1855 }
1856 #endif /* NBIO > 0 */
1857
/*
 * Read a 32bit controller register at offset r, with a read barrier
 * issued before the access.
 */
uint32_t
arc_read(struct arc_softc *sc, bus_size_t r)
{
	uint32_t v;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
	    device_xname(&sc->sc_dev), r, v);

	return v;
}
1872
/*
 * Copy len bytes from the controller register window at offset r into
 * buf, 32 bits at a time, after a read barrier.  len is assumed to be
 * a multiple of 4; any trailing bytes would be dropped by "len >> 2".
 */
void
arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (uint32_t *)buf, len >> 2);
}
1881
/*
 * Write the 32bit value v to the controller register at offset r,
 * followed by a write barrier.
 */
void
arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
	    device_xname(&sc->sc_dev), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1892
/*
 * Copy len bytes from buf into the controller register window at
 * offset r, 32 bits at a time, followed by a write barrier.  len is
 * assumed to be a multiple of 4 (see arc_read_region()).
 */
void
arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (const uint32_t *)buf, len >> 2);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_WRITE);
}
1901
1902 int
1903 arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1904 uint32_t target)
1905 {
1906 int i;
1907
1908 DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
1909 device_xname(&sc->sc_dev), r, mask, target);
1910
1911 for (i = 0; i < 10000; i++) {
1912 if ((arc_read(sc, r) & mask) == target)
1913 return 0;
1914 delay(1000);
1915 }
1916
1917 return 1;
1918 }
1919
1920 int
1921 arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1922 uint32_t target)
1923 {
1924 int i;
1925
1926 DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
1927 device_xname(&sc->sc_dev), r, mask, target);
1928
1929 for (i = 0; i < 10000; i++) {
1930 if ((arc_read(sc, r) & mask) != target)
1931 return 0;
1932 delay(1000);
1933 }
1934
1935 return 1;
1936 }
1937
/*
 * Post the message m to inbound message register 0 and poll for the
 * firmware to acknowledge it via the interrupt status register, then
 * clear the ack.  Returns 0 on success, 1 if the firmware did not
 * respond within arc_wait_eq()'s timeout.
 */
int
arc_msg0(struct arc_softc *sc, uint32_t m)
{
	/* post message */
	arc_write(sc, ARC_REG_INB_MSG0, m);
	/* wait for the fw to do it */
	if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
	    ARC_REG_INTRSTAT_MSG0) != 0)
		return 1;

	/* ack it */
	arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);

	return 0;
}
1953
/*
 * Allocate a size-byte DMA-able buffer in one segment: create a map,
 * allocate and kva-map the memory, load the map and zero the buffer.
 * Everything is non-sleeping (KM_NOSLEEP/BUS_DMA_NOWAIT).  Returns
 * NULL on any failure, unwinding whatever was already acquired via
 * the labels below; free with arc_dmamem_free().
 */
struct arc_dmamem *
arc_dmamem_alloc(struct arc_softc *sc, size_t size)
{
	struct arc_dmamem *adm;
	int nsegs;

	adm = kmem_zalloc(sizeof(*adm), KM_NOSLEEP);
	if (adm == NULL)
		return NULL;

	adm->adm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
		goto admfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
	    &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(adm->adm_kva, 0, size);

	return adm;

	/* unwind in reverse order of acquisition */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
admfree:
	kmem_free(adm, sizeof(*adm));

	return NULL;
}
1997
/*
 * Tear down a buffer obtained from arc_dmamem_alloc(), releasing the
 * resources in the reverse order they were acquired.
 */
void
arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
{
	bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
	kmem_free(adm, sizeof(*adm));
}
2007
/*
 * Allocate the ccb array, one shared DMA region holding every command
 * buffer, and a per-ccb DMA map, then push all ccbs onto the free
 * list.  Returns 0 on success and 1 on failure (after unwinding the
 * maps already created and the DMA region).
 */
int
arc_alloc_ccbs(struct arc_softc *sc)
{
	struct arc_ccb *ccb;
	uint8_t *cmd;
	int i;
	size_t ccbslen;

	TAILQ_INIT(&sc->sc_ccb_free);

	ccbslen = sizeof(struct arc_ccb) * sc->sc_req_count;
	sc->sc_ccbs = kmem_zalloc(ccbslen, KM_SLEEP);

	/* one contiguous region, ARC_MAX_IOCMDLEN bytes per ccb */
	sc->sc_requests = arc_dmamem_alloc(sc,
	    ARC_MAX_IOCMDLEN * sc->sc_req_count);
	if (sc->sc_requests == NULL) {
		aprint_error("%s: unable to allocate ccb dmamem\n",
		    device_xname(&sc->sc_dev));
		goto free_ccbs;
	}
	cmd = ARC_DMA_KVA(sc->sc_requests);

	for (i = 0; i < sc->sc_req_count; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
		    MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
			aprint_error("%s: unable to create dmamap for ccb %d\n",
			    device_xname(&sc->sc_dev), i);
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		ccb->ccb_id = i;
		ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;

		/* kva and post-queue address of this ccb's command slot */
		ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
		ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;

		arc_put_ccb(sc, ccb);
	}

	return 0;

free_maps:
	/* only the ccbs already on the free list have dmamaps */
	while ((ccb = arc_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	arc_dmamem_free(sc, sc->sc_requests);

free_ccbs:
	kmem_free(sc->sc_ccbs, ccbslen);

	return 1;
}
2063
2064 struct arc_ccb *
2065 arc_get_ccb(struct arc_softc *sc)
2066 {
2067 struct arc_ccb *ccb;
2068
2069 ccb = TAILQ_FIRST(&sc->sc_ccb_free);
2070 if (ccb != NULL)
2071 TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
2072
2073 return ccb;
2074 }
2075
/*
 * Return a ccb to the free list, clearing its scsipi request pointer
 * and its command buffer so no stale command data survives reuse.
 */
void
arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
{
	ccb->ccb_xs = NULL;
	memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
	TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
}
2083