1 /*	$NetBSD: arcmsr.c,v 1.14 2008/02/29 17:45:04 xtraeme Exp $ */
2 /* $OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */
3
4 /*
5 * Copyright (c) 2007, 2008 Juan Romero Pardines <xtraeme (at) netbsd.org>
6 * Copyright (c) 2006 David Gwynne <dlg (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include "bio.h"
22
23 #include <sys/cdefs.h>
24 __KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.14 2008/02/29 17:45:04 xtraeme Exp $");
25
26 #include <sys/param.h>
27 #include <sys/buf.h>
28 #include <sys/kernel.h>
29 #include <sys/malloc.h>
30 #include <sys/device.h>
31 #include <sys/kmem.h>
32 #include <sys/kthread.h>
33 #include <sys/mutex.h>
34 #include <sys/condvar.h>
35 #include <sys/rwlock.h>
36
37 #if NBIO > 0
38 #include <sys/ioctl.h>
39 #include <dev/biovar.h>
40 #endif
41
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 #include <dev/pci/pcidevs.h>
45
46 #include <dev/scsipi/scsipi_all.h>
47 #include <dev/scsipi/scsi_all.h>
48 #include <dev/scsipi/scsiconf.h>
49
50 #include <dev/sysmon/sysmonvar.h>
51
52 #include <sys/bus.h>
53
54 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
55
56 #include <dev/pci/arcmsrvar.h>
57
58 /* #define ARC_DEBUG */
59 #ifdef ARC_DEBUG
60 #define ARC_D_INIT (1<<0)
61 #define ARC_D_RW (1<<1)
62 #define ARC_D_DB (1<<2)
63
64 int arcdebug = 0;
65
66 #define DPRINTF(p...) do { if (arcdebug) printf(p); } while (0)
67 #define DNPRINTF(n, p...) do { if ((n) & arcdebug) printf(p); } while (0)
68
69 #else
70 #define DPRINTF(p...) /* p */
71 #define DNPRINTF(n, p...) /* n, p */
72 #endif
73
74 /*
75 * the fw header must always equal this.
76 */
77 static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };
78
79 /*
80 * autoconf(9) glue.
81 */
82 static int arc_match(device_t, struct cfdata *, void *);
83 static void arc_attach(device_t, device_t, void *);
84 static int arc_detach(device_t, int);
85 static bool arc_shutdown(device_t, int);
86 static int arc_intr(void *);
87 static void arc_minphys(struct buf *);
88
89 CFATTACH_DECL(arcmsr, sizeof(struct arc_softc),
90 arc_match, arc_attach, arc_detach, NULL);
91
92 /*
93 * bio(4) and sysmon_envsys(9) glue.
94 */
95 #if NBIO > 0
96 static int arc_bioctl(struct device *, u_long, void *);
97 static int arc_bio_inq(struct arc_softc *, struct bioc_inq *);
98 static int arc_bio_vol(struct arc_softc *, struct bioc_vol *);
99 static int arc_bio_disk_volume(struct arc_softc *, struct bioc_disk *);
100 static int arc_bio_disk_novol(struct arc_softc *, struct bioc_disk *);
101 static void arc_bio_disk_filldata(struct arc_softc *, struct bioc_disk *,
102 struct arc_fw_diskinfo *, int);
103 static int arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
104 static int arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
105 static int arc_bio_getvol(struct arc_softc *, int,
106 struct arc_fw_volinfo *);
107 static int arc_bio_setstate(struct arc_softc *, struct bioc_setstate *);
108 static int arc_bio_volops(struct arc_softc *, struct bioc_volops *);
109 static void arc_create_sensors(void *);
110 static void arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
111 static int arc_fw_parse_status_code(struct arc_softc *, uint8_t *);
112 #endif
113
114 static int
115 arc_match(device_t parent, struct cfdata *match, void *aux)
116 {
117 struct pci_attach_args *pa = aux;
118
119 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
120 switch (PCI_PRODUCT(pa->pa_id)) {
121 case PCI_PRODUCT_ARECA_ARC1110:
122 case PCI_PRODUCT_ARECA_ARC1120:
123 case PCI_PRODUCT_ARECA_ARC1130:
124 case PCI_PRODUCT_ARECA_ARC1160:
125 case PCI_PRODUCT_ARECA_ARC1170:
126 case PCI_PRODUCT_ARECA_ARC1200:
127 case PCI_PRODUCT_ARECA_ARC1202:
128 case PCI_PRODUCT_ARECA_ARC1210:
129 case PCI_PRODUCT_ARECA_ARC1220:
130 case PCI_PRODUCT_ARECA_ARC1230:
131 case PCI_PRODUCT_ARECA_ARC1260:
132 case PCI_PRODUCT_ARECA_ARC1270:
133 case PCI_PRODUCT_ARECA_ARC1280:
134 case PCI_PRODUCT_ARECA_ARC1380:
135 case PCI_PRODUCT_ARECA_ARC1381:
136 case PCI_PRODUCT_ARECA_ARC1680:
137 case PCI_PRODUCT_ARECA_ARC1681:
138 return 1;
139 default:
140 break;
141 }
142 }
143
144 return 0;
145 }
146
147 static void
148 arc_attach(device_t parent, device_t self, void *aux)
149 {
150 struct arc_softc *sc = device_private(self);
151 struct pci_attach_args *pa = aux;
152 struct scsipi_adapter *adapt = &sc->sc_adapter;
153 struct scsipi_channel *chan = &sc->sc_chan;
154
155 sc->sc_talking = 0;
156 rw_init(&sc->sc_rwlock);
157 mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
158 cv_init(&sc->sc_condvar, "arcdb");
159
160 if (arc_map_pci_resources(sc, pa) != 0) {
161 /* error message printed by arc_map_pci_resources */
162 return;
163 }
164
165 if (arc_query_firmware(sc) != 0) {
166 /* error message printed by arc_query_firmware */
167 goto unmap_pci;
168 }
169
170 if (arc_alloc_ccbs(sc) != 0) {
171 /* error message printed by arc_alloc_ccbs */
172 goto unmap_pci;
173 }
174
175 if (!pmf_device_register1(self, NULL, NULL, arc_shutdown))
176 panic("%s: couldn't establish shutdown handler\n",
177 device_xname(self));
178
179 memset(adapt, 0, sizeof(*adapt));
180 adapt->adapt_dev = self;
181 adapt->adapt_nchannels = 1;
182 adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
183 adapt->adapt_max_periph = adapt->adapt_openings;
184 adapt->adapt_minphys = arc_minphys;
185 adapt->adapt_request = arc_scsi_cmd;
186
187 memset(chan, 0, sizeof(*chan));
188 chan->chan_adapter = adapt;
189 chan->chan_bustype = &scsi_bustype;
190 chan->chan_nluns = ARC_MAX_LUN;
191 chan->chan_ntargets = ARC_MAX_TARGET;
192 chan->chan_id = ARC_MAX_TARGET;
193 chan->chan_channel = 0;
194 chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
195
196 /*
197 	 * Save the device_t returned, because we may need to attach
198 	 * devices via the management interface.
199 */
200 sc->sc_scsibus_dv = config_found(self, &sc->sc_chan, scsiprint);
201
202 /* enable interrupts */
203 arc_write(sc, ARC_REG_INTRMASK,
204 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));
205
206 #if NBIO > 0
207 /*
208 * Register the driver to bio(4) and setup the sensors.
209 */
210 if (bio_register(self, arc_bioctl) != 0)
211 panic("%s: bioctl registration failed\n", device_xname(self));
212
213 /*
214 	 * We need to talk to the firmware to get volume info, and our
215 	 * firmware interface relies on being able to sleep, so we use a
216 	 * kernel thread to do the work.
217 */
218 if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
219 arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
220 panic("%s: unable to create a kernel thread for sensors\n",
221 device_xname(self));
222 #endif
223
224 return;
225
226 unmap_pci:
227 arc_unmap_pci_resources(sc);
228 }
229
230 static int
231 arc_detach(device_t self, int flags)
232 {
233 struct arc_softc *sc = device_private(self);
234
235 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
236 aprint_error("%s: timeout waiting to stop bg rebuild\n",
237 device_xname(&sc->sc_dev));
238
239 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
240 aprint_error("%s: timeout waiting to flush cache\n",
241 device_xname(&sc->sc_dev));
242
243 return 0;
244 }
245
246 static bool
247 arc_shutdown(device_t self, int how)
248 {
249 struct arc_softc *sc = device_private(self);
250
251 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
252 aprint_error("%s: timeout waiting to stop bg rebuild\n",
253 device_xname(&sc->sc_dev));
254
255 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
256 aprint_error("%s: timeout waiting to flush cache\n",
257 device_xname(&sc->sc_dev));
258
259 return true;
260 }
261
262 static void
263 arc_minphys(struct buf *bp)
264 {
265 if (bp->b_bcount > MAXPHYS)
266 bp->b_bcount = MAXPHYS;
267 minphys(bp);
268 }
269
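/*
 * Interrupt handler. Read and ack ARC_REG_INTRSTAT, then handle the two
 * sources we care about: doorbell interrupts carry firmware message
 * traffic (the waiter in arc_msgbuf() is woken via sc_condvar while
 * sc_talking is set, otherwise the doorbell is acked and dropped), and
 * post queue interrupts signal completed I/O frames, which are popped
 * and finished via arc_scsi_cmd_done().
 */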
270 static int
271 arc_intr(void *arg)
272 {
273 struct arc_softc *sc = arg;
274 struct arc_ccb *ccb = NULL;
275 char *kva = ARC_DMA_KVA(sc->sc_requests);
276 struct arc_io_cmd *cmd;
277 uint32_t reg, intrstat;
278
279 mutex_spin_enter(&sc->sc_mutex);
280 intrstat = arc_read(sc, ARC_REG_INTRSTAT);
281 if (intrstat == 0x0) {
282 mutex_spin_exit(&sc->sc_mutex);
283 return 0;
284 }
285
286 intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
287 arc_write(sc, ARC_REG_INTRSTAT, intrstat);
288
289 if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
290 if (sc->sc_talking) {
291 arc_write(sc, ARC_REG_INTRMASK,
292 ~ARC_REG_INTRMASK_POSTQUEUE);
293 cv_broadcast(&sc->sc_condvar);
294 } else {
295 /* otherwise drop it */
296 reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
297 arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
298 if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
299 arc_write(sc, ARC_REG_INB_DOORBELL,
300 ARC_REG_INB_DOORBELL_READ_OK);
301 }
302 }
303 mutex_spin_exit(&sc->sc_mutex);
304
305 while ((reg = arc_pop(sc)) != 0xffffffff) {
306 cmd = (struct arc_io_cmd *)(kva +
307 ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
308 (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
309 ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
310
311 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
312 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
313 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
314
315 arc_scsi_cmd_done(sc, ccb, reg);
316 }
317
318
319 return 1;
320 }
321
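/*
 * scsipi request entry point. For ADAPTER_REQ_RUN_XFER we grab a free
 * ccb, build an arc_msg_scsicmd (plus SG list) in the shared request
 * DMA area and post the pre-shifted frame address to the hardware with
 * arc_push(). Polled transfers (XS_CTL_POLL) then spin in arc_complete().
 */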
322 void
323 arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
324 {
325 struct scsipi_periph *periph;
326 struct scsipi_xfer *xs;
327 struct scsipi_adapter *adapt = chan->chan_adapter;
328 struct arc_softc *sc = device_private(adapt->adapt_dev);
329 struct arc_ccb *ccb;
330 struct arc_msg_scsicmd *cmd;
331 uint32_t reg;
332 uint8_t target;
333
334 switch (req) {
335 case ADAPTER_REQ_GROW_RESOURCES:
336 /* Not supported. */
337 return;
338 case ADAPTER_REQ_SET_XFER_MODE:
339 /* Not supported. */
340 return;
341 case ADAPTER_REQ_RUN_XFER:
342 break;
343 }
344
345 mutex_spin_enter(&sc->sc_mutex);
346
347 xs = arg;
348 periph = xs->xs_periph;
349 target = periph->periph_target;
350
351 if (xs->cmdlen > ARC_MSG_CDBLEN) {
352 memset(&xs->sense, 0, sizeof(xs->sense));
353 xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
354 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
355 xs->sense.scsi_sense.asc = 0x20;
356 xs->error = XS_SENSE;
357 xs->status = SCSI_CHECK;
358 mutex_spin_exit(&sc->sc_mutex);
359 scsipi_done(xs);
360 return;
361 }
362
363 ccb = arc_get_ccb(sc);
364 if (ccb == NULL) {
365 xs->error = XS_RESOURCE_SHORTAGE;
366 mutex_spin_exit(&sc->sc_mutex);
367 scsipi_done(xs);
368 return;
369 }
370
371 ccb->ccb_xs = xs;
372
373 if (arc_load_xs(ccb) != 0) {
374 xs->error = XS_DRIVER_STUFFUP;
375 arc_put_ccb(sc, ccb);
376 mutex_spin_exit(&sc->sc_mutex);
377 scsipi_done(xs);
378 return;
379 }
380
381 cmd = &ccb->ccb_cmd->cmd;
382 reg = ccb->ccb_cmd_post;
383
384 /* bus is always 0 */
385 cmd->target = target;
386 cmd->lun = periph->periph_lun;
387 cmd->function = 1; /* XXX magic number */
388
389 cmd->cdb_len = xs->cmdlen;
390 cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
391 if (xs->xs_control & XS_CTL_DATA_OUT)
392 cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
393 if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
394 cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
395 reg |= ARC_REG_POST_QUEUE_BIGFRAME;
396 }
397
398 cmd->context = htole32(ccb->ccb_id);
399 cmd->data_len = htole32(xs->datalen);
400
401 memcpy(cmd->cdb, xs->cmd, xs->cmdlen);
402
403 /* we've built the command, let's put it on the hw */
404 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
405 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
406 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
407
408 arc_push(sc, reg);
409 if (xs->xs_control & XS_CTL_POLL) {
410 if (arc_complete(sc, ccb, xs->timeout) != 0) {
411 xs->error = XS_DRIVER_STUFFUP;
412 mutex_spin_exit(&sc->sc_mutex);
413 scsipi_done(xs);
414 return;
415 }
416 }
417
418 mutex_spin_exit(&sc->sc_mutex);
419 }
420
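/*
 * Map the xfer data and build the scatter/gather list: one 64-bit SGE
 * per DMA segment, with ARC_SGE_64BIT and the segment length in the
 * header word and the bus address split into hi/lo 32-bit words.
 */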
421 int
422 arc_load_xs(struct arc_ccb *ccb)
423 {
424 struct arc_softc *sc = ccb->ccb_sc;
425 struct scsipi_xfer *xs = ccb->ccb_xs;
426 bus_dmamap_t dmap = ccb->ccb_dmamap;
427 struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
428 uint64_t addr;
429 int i, error;
430
431 if (xs->datalen == 0)
432 return 0;
433
434 error = bus_dmamap_load(sc->sc_dmat, dmap,
435 xs->data, xs->datalen, NULL,
436 (xs->xs_control & XS_CTL_NOSLEEP) ?
437 BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
438 if (error != 0) {
439 aprint_error("%s: error %d loading dmamap\n",
440 device_xname(&sc->sc_dev), error);
441 return 1;
442 }
443
444 for (i = 0; i < dmap->dm_nsegs; i++) {
445 sge = &sgl[i];
446
447 sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
448 addr = dmap->dm_segs[i].ds_addr;
449 sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
450 sge->sg_lo_addr = htole32((uint32_t)addr);
451 }
452
453 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
454 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
455 BUS_DMASYNC_PREWRITE);
456
457 return 0;
458 }
459
460 void
461 arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
462 {
463 struct scsipi_xfer *xs = ccb->ccb_xs;
464 struct arc_msg_scsicmd *cmd;
465
466 if (xs->datalen != 0) {
467 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
468 ccb->ccb_dmamap->dm_mapsize,
469 (xs->xs_control & XS_CTL_DATA_IN) ?
470 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
471 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
472 }
473
474 /* timeout_del */
475 xs->status |= XS_STS_DONE;
476
477 if (reg & ARC_REG_REPLY_QUEUE_ERR) {
478 cmd = &ccb->ccb_cmd->cmd;
479
480 switch (cmd->status) {
481 case ARC_MSG_STATUS_SELTIMEOUT:
482 case ARC_MSG_STATUS_ABORTED:
483 case ARC_MSG_STATUS_INIT_FAIL:
484 xs->status = SCSI_OK;
485 xs->error = XS_SELTIMEOUT;
486 break;
487
488 case SCSI_CHECK:
489 memset(&xs->sense, 0, sizeof(xs->sense));
490 memcpy(&xs->sense, cmd->sense_data,
491 min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
492 xs->sense.scsi_sense.response_code =
493 SSD_RCODE_VALID | 0x70;
494 xs->status = SCSI_CHECK;
495 xs->error = XS_SENSE;
496 xs->resid = 0;
497 break;
498
499 default:
500 /* unknown device status */
501 xs->error = XS_BUSY; /* try again later? */
502 xs->status = SCSI_BUSY;
503 break;
504 }
505 } else {
506 xs->status = SCSI_OK;
507 xs->error = XS_NOERROR;
508 xs->resid = 0;
509 }
510
511 arc_put_ccb(sc, ccb);
512 scsipi_done(xs);
513 }
514
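/*
 * Polled completion path: keep popping the reply queue (sleeping is not
 * an option here), completing whatever ccbs come back, until the one we
 * are waiting for shows up or the timeout (in milliseconds) expires.
 */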
515 int
516 arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
517 {
518 struct arc_ccb *ccb = NULL;
519 char *kva = ARC_DMA_KVA(sc->sc_requests);
520 struct arc_io_cmd *cmd;
521 uint32_t reg;
522
523 do {
524 reg = arc_pop(sc);
525 if (reg == 0xffffffff) {
526 if (timeout-- == 0)
527 return 1;
528
529 delay(1000);
530 continue;
531 }
532
533 cmd = (struct arc_io_cmd *)(kva +
534 ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
535 ARC_DMA_DVA(sc->sc_requests)));
536 ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
537
538 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
539 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
540 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
541
542 arc_scsi_cmd_done(sc, ccb, reg);
543 } while (nccb != ccb);
544
545 return 0;
546 }
547
548 int
549 arc_map_pci_resources(struct arc_softc *sc, struct pci_attach_args *pa)
550 {
551 pcireg_t memtype;
552 pci_intr_handle_t ih;
553
554 sc->sc_pc = pa->pa_pc;
555 sc->sc_tag = pa->pa_tag;
556 sc->sc_dmat = pa->pa_dmat;
557
558 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
559 if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
560 &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
561 aprint_error(": unable to map system interface register\n");
562 return 1;
563 }
564
565 if (pci_intr_map(pa, &ih) != 0) {
566 aprint_error(": unable to map interrupt\n");
567 goto unmap;
568 }
569
570 sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
571 arc_intr, sc);
572 if (sc->sc_ih == NULL) {
573 aprint_error(": unable to map interrupt [2]\n");
574 goto unmap;
575 }
576
577 aprint_normal("\n");
578 aprint_normal("%s: interrupting at %s\n",
579 device_xname(&sc->sc_dev), pci_intr_string(pa->pa_pc, ih));
580
581 return 0;
582
583 unmap:
584 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
585 sc->sc_ios = 0;
586 return 1;
587 }
588
589 void
590 arc_unmap_pci_resources(struct arc_softc *sc)
591 {
592 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
593 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
594 sc->sc_ios = 0;
595 }
596
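/*
 * Firmware handshake done at attach time: wait for the FIRMWARE_OK bit,
 * issue the GET_CONFIG and START_BGRB messages, then read the
 * arc_msg_firmware_info block out of the message buffer. The signature
 * and request frame size are sanity checked and the firmware queue
 * depth is recorded in sc_req_count.
 */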
597 int
598 arc_query_firmware(struct arc_softc *sc)
599 {
600 struct arc_msg_firmware_info fwinfo;
601 char string[81]; /* sizeof(vendor)*2+1 */
602
603 if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
604 ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
605 aprint_debug("%s: timeout waiting for firmware ok\n",
606 device_xname(&sc->sc_dev));
607 return 1;
608 }
609
610 if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
611 aprint_debug("%s: timeout waiting for get config\n",
612 device_xname(&sc->sc_dev));
613 return 1;
614 }
615
616 if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
617 aprint_debug("%s: timeout waiting to start bg rebuild\n",
618 device_xname(&sc->sc_dev));
619 return 1;
620 }
621
622 arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));
623
624 DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
625 device_xname(&sc->sc_dev), htole32(fwinfo.signature));
626
627 if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
628 aprint_error("%s: invalid firmware info from iop\n",
629 device_xname(&sc->sc_dev));
630 return 1;
631 }
632
633 DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
634 device_xname(&sc->sc_dev),
635 htole32(fwinfo.request_len));
636 DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
637 device_xname(&sc->sc_dev),
638 htole32(fwinfo.queue_len));
639 DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
640 device_xname(&sc->sc_dev),
641 htole32(fwinfo.sdram_size));
642 DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
643 device_xname(&sc->sc_dev),
644 htole32(fwinfo.sata_ports));
645
646 scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
647 DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
648 device_xname(&sc->sc_dev), string);
649
650 scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));
651 aprint_normal("%s: Areca %s Host Adapter RAID controller\n",
652 device_xname(&sc->sc_dev), string);
653
654 scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
655 DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
656 device_xname(&sc->sc_dev), string);
657
658 aprint_normal("%s: %d ports, %dMB SDRAM, firmware <%s>\n",
659 device_xname(&sc->sc_dev), htole32(fwinfo.sata_ports),
660 htole32(fwinfo.sdram_size), string);
661
662 if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
663 aprint_error("%s: unexpected request frame size (%d != %d)\n",
664 device_xname(&sc->sc_dev),
665 htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
666 return 1;
667 }
668
669 sc->sc_req_count = htole32(fwinfo.queue_len);
670
671 return 0;
672 }
673
674 #if NBIO > 0
675 static int
676 arc_bioctl(struct device *self, u_long cmd, void *addr)
677 {
678 struct arc_softc *sc = device_private(self);
679 int error = 0;
680
681 switch (cmd) {
682 case BIOCINQ:
683 error = arc_bio_inq(sc, (struct bioc_inq *)addr);
684 break;
685
686 case BIOCVOL:
687 error = arc_bio_vol(sc, (struct bioc_vol *)addr);
688 break;
689
690 case BIOCDISK:
691 error = arc_bio_disk_volume(sc, (struct bioc_disk *)addr);
692 break;
693
694 case BIOCDISK_NOVOL:
695 error = arc_bio_disk_novol(sc, (struct bioc_disk *)addr);
696 break;
697
698 case BIOCALARM:
699 error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
700 break;
701
702 case BIOCSETSTATE:
703 error = arc_bio_setstate(sc, (struct bioc_setstate *)addr);
704 break;
705
706 case BIOCVOLOPS:
707 error = arc_bio_volops(sc, (struct bioc_volops *)addr);
708 break;
709
710 default:
711 error = ENOTTY;
712 break;
713 }
714
715 return error;
716 }
717
718 static int
719 arc_fw_parse_status_code(struct arc_softc *sc, uint8_t *reply)
720 {
721 switch (*reply) {
722 case ARC_FW_CMD_RAIDINVAL:
723 printf("%s: firmware error (invalid raid set)\n",
724 device_xname(&sc->sc_dev));
725 return EINVAL;
726 case ARC_FW_CMD_VOLINVAL:
727 printf("%s: firmware error (invalid volume set)\n",
728 device_xname(&sc->sc_dev));
729 return EINVAL;
730 case ARC_FW_CMD_NORAID:
731 		printf("%s: firmware error (nonexistent raid set)\n",
732 device_xname(&sc->sc_dev));
733 return ENODEV;
734 case ARC_FW_CMD_NOVOLUME:
735 		printf("%s: firmware error (nonexistent volume set)\n",
736 device_xname(&sc->sc_dev));
737 return ENODEV;
738 case ARC_FW_CMD_NOPHYSDRV:
739 		printf("%s: firmware error (nonexistent physical drive)\n",
740 device_xname(&sc->sc_dev));
741 return ENODEV;
742 case ARC_FW_CMD_PARAM_ERR:
743 printf("%s: firmware error (parameter error)\n",
744 device_xname(&sc->sc_dev));
745 return EINVAL;
746 case ARC_FW_CMD_UNSUPPORTED:
747 printf("%s: firmware error (unsupported command)\n",
748 device_xname(&sc->sc_dev));
749 return EOPNOTSUPP;
750 case ARC_FW_CMD_DISKCFG_CHGD:
751 printf("%s: firmware error (disk configuration changed)\n",
752 device_xname(&sc->sc_dev));
753 return EINVAL;
754 case ARC_FW_CMD_PASS_INVAL:
755 printf("%s: firmware error (invalid password)\n",
756 device_xname(&sc->sc_dev));
757 return EINVAL;
758 case ARC_FW_CMD_NODISKSPACE:
759 printf("%s: firmware error (no disk space available)\n",
760 device_xname(&sc->sc_dev));
761 return EOPNOTSUPP;
762 case ARC_FW_CMD_CHECKSUM_ERR:
763 printf("%s: firmware error (checksum error)\n",
764 device_xname(&sc->sc_dev));
765 return EINVAL;
766 case ARC_FW_CMD_PASS_REQD:
767 printf("%s: firmware error (password required)\n",
768 device_xname(&sc->sc_dev));
769 return EPERM;
770 case ARC_FW_CMD_OK:
771 default:
772 return 0;
773 }
774 }
775
776 static int
777 arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
778 {
779 uint8_t request[2], reply[1];
780 size_t len;
781 int error = 0;
782
783 switch (ba->ba_opcode) {
784 case BIOC_SAENABLE:
785 case BIOC_SADISABLE:
786 request[0] = ARC_FW_SET_ALARM;
787 request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
788 ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
789 len = sizeof(request);
790
791 break;
792
793 case BIOC_SASILENCE:
794 request[0] = ARC_FW_MUTE_ALARM;
795 len = 1;
796
797 break;
798
799 case BIOC_GASTATUS:
800 /* system info is too big/ugly to deal with here */
801 return arc_bio_alarm_state(sc, ba);
802
803 default:
804 return EOPNOTSUPP;
805 }
806
807 error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
808 if (error != 0)
809 return error;
810
811 return arc_fw_parse_status_code(sc, &reply[0]);
812 }
813
814 static int
815 arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
816 {
817 struct arc_fw_sysinfo *sysinfo;
818 uint8_t request;
819 int error = 0;
820
821 sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
822
823 request = ARC_FW_SYSINFO;
824 error = arc_msgbuf(sc, &request, sizeof(request),
825 sysinfo, sizeof(struct arc_fw_sysinfo));
826
827 if (error != 0)
828 goto out;
829
830 ba->ba_status = sysinfo->alarm;
831
832 out:
833 kmem_free(sysinfo, sizeof(*sysinfo));
834 return error;
835 }
836
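/*
 * BIOCVOLOPS handler. Creating a volume is a two step firmware
 * transaction: first a raid set is built from bc_devmask, then a volume
 * set is layered on top of it and the scsi bus is rescanned so the new
 * sd(4) device attaches. Removal runs in the opposite order: delete the
 * volume set, detach the sd(4) target, then delete the raid set.
 */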
837 static int
838 arc_bio_volops(struct arc_softc *sc, struct bioc_volops *bc)
839 {
840 /* to create a raid set */
841 struct req_craidset {
842 uint8_t cmdcode;
843 uint32_t devmask;
844 uint8_t raidset_name[16];
845 } __packed;
846
847 /* to create a volume set */
848 struct req_cvolset {
849 uint8_t cmdcode;
850 uint8_t raidset;
851 uint8_t volset_name[16];
852 uint64_t capacity;
853 uint8_t raidlevel;
854 uint8_t stripe;
855 uint8_t scsi_chan;
856 uint8_t scsi_target;
857 uint8_t scsi_lun;
858 uint8_t tagqueue;
859 uint8_t cache;
860 uint8_t speed;
861 uint8_t quick_init;
862 } __packed;
863
864 struct scsibus_softc *scsibus_sc = NULL;
865 struct req_craidset req_craidset;
866 struct req_cvolset req_cvolset;
867 uint8_t request[2];
868 uint8_t reply[1];
869 int error = 0;
870
871 switch (bc->bc_opcode) {
872 case BIOC_VCREATE_VOLUME:
873 {
874 /*
875 * Zero out the structs so that we use some defaults
876 * in raid and volume sets.
877 */
878 memset(&req_craidset, 0, sizeof(req_craidset));
879 memset(&req_cvolset, 0, sizeof(req_cvolset));
880
881 /*
882 		 * First we have to create the raid set, using
883 		 * the default name for all of them.
884 */
885 req_craidset.cmdcode = ARC_FW_CREATE_RAIDSET;
886 req_craidset.devmask = bc->bc_devmask;
887 error = arc_msgbuf(sc, &req_craidset, sizeof(req_craidset),
888 reply, sizeof(reply));
889 if (error != 0)
890 return error;
891
892 error = arc_fw_parse_status_code(sc, &reply[0]);
893 if (error) {
894 printf("%s: create raidset%d failed\n",
895 device_xname(&sc->sc_dev), bc->bc_volid);
896 return error;
897 }
898
899 /*
900 		 * At this point the raid set has been created, so
901 		 * it's time to create the volume set.
902 */
903 req_cvolset.cmdcode = ARC_FW_CREATE_VOLUME;
904 req_cvolset.raidset = bc->bc_volid;
905 req_cvolset.capacity = bc->bc_size * ARC_BLOCKSIZE;
906
907 /*
908 * Set the RAID level.
909 */
910 switch (bc->bc_level) {
911 case 0:
912 case 1:
913 req_cvolset.raidlevel = bc->bc_level;
914 break;
915 case 3:
916 req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_3;
917 break;
918 case 5:
919 req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_5;
920 break;
921 case 6:
922 req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_6;
923 break;
924 default:
925 return EOPNOTSUPP;
926 }
927
928 /*
929 * Set the stripe size.
930 */
931 switch (bc->bc_stripe) {
932 case 4:
933 req_cvolset.stripe = 0;
934 break;
935 case 8:
936 req_cvolset.stripe = 1;
937 break;
938 case 16:
939 req_cvolset.stripe = 2;
940 break;
941 case 32:
942 req_cvolset.stripe = 3;
943 break;
944 case 64:
945 req_cvolset.stripe = 4;
946 break;
947 case 128:
948 req_cvolset.stripe = 5;
949 break;
950 default:
951 req_cvolset.stripe = 4; /* by default 64K */
952 break;
953 }
954
955 req_cvolset.scsi_chan = bc->bc_channel;
956 req_cvolset.scsi_target = bc->bc_target;
957 req_cvolset.scsi_lun = bc->bc_lun;
958 req_cvolset.tagqueue = 1; /* always enabled */
959 req_cvolset.cache = 1; /* always enabled */
960 req_cvolset.speed = 4; /* always max speed */
961
962 error = arc_msgbuf(sc, &req_cvolset, sizeof(req_cvolset),
963 reply, sizeof(reply));
964 if (error != 0)
965 return error;
966
967 error = arc_fw_parse_status_code(sc, &reply[0]);
968 if (error) {
969 printf("%s: create volumeset%d failed\n",
970 device_xname(&sc->sc_dev), bc->bc_volid);
971 return error;
972 }
973
974 /*
975 * Do a rescan on the bus to attach the device associated
976 * with the new volume.
977 */
978 scsibus_sc = device_private(sc->sc_scsibus_dv);
979 (void)scsi_probe_bus(scsibus_sc, bc->bc_target, bc->bc_lun);
980
981 break;
982 }
983 case BIOC_VREMOVE_VOLUME:
984 {
985 /*
986 * Remove the volume set specified in bc_volid.
987 */
988 request[0] = ARC_FW_DELETE_VOLUME;
989 request[1] = bc->bc_volid;
990 error = arc_msgbuf(sc, request, sizeof(request),
991 reply, sizeof(reply));
992 if (error != 0)
993 return error;
994
995 error = arc_fw_parse_status_code(sc, &reply[0]);
996 if (error) {
997 printf("%s: delete volumeset%d failed\n",
998 device_xname(&sc->sc_dev), bc->bc_volid);
999 return error;
1000 }
1001
1002 /*
1003 		 * Detach the sd(4) device associated with the volume;
1004 		 * an error here is reported but not treated as fatal.
1005 */
1006 error = scsipi_target_detach(&sc->sc_chan, bc->bc_target,
1007 bc->bc_lun, 0);
1008 if (error)
1009 printf("%s: couldn't detach sd device for volume %d "
1010 "at %u:%u.%u (error=%d)\n",
1011 device_xname(&sc->sc_dev), bc->bc_volid,
1012 bc->bc_channel, bc->bc_target, bc->bc_lun, error);
1013
1014 /*
1015 		 * Then remove the raid set specified in bc_volid;
1016 		 * we only care about volumes.
1017 */
1018 request[0] = ARC_FW_DELETE_RAIDSET;
1019 request[1] = bc->bc_volid;
1020 error = arc_msgbuf(sc, request, sizeof(request),
1021 reply, sizeof(reply));
1022 if (error != 0)
1023 return error;
1024
1025 error = arc_fw_parse_status_code(sc, &reply[0]);
1026 if (error) {
1027 printf("%s: delete raidset%d failed\n",
1028 device_xname(&sc->sc_dev), bc->bc_volid);
1029 return error;
1030 }
1031
1032 break;
1033 }
1034 default:
1035 return EOPNOTSUPP;
1036 }
1037
1038 return error;
1039 }
1040
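/*
 * BIOCSETSTATE handler: creates/deletes hotspares and pass-through
 * disks and starts/stops volume consistency checks by issuing the
 * matching firmware commands; pass-through changes also rescan or
 * detach the corresponding scsi target.
 */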
1041 static int
1042 arc_bio_setstate(struct arc_softc *sc, struct bioc_setstate *bs)
1043 {
1044 /* for a hotspare disk */
1045 struct request_hs {
1046 uint8_t cmdcode;
1047 uint32_t devmask;
1048 } __packed;
1049
1050 /* for a pass-through disk */
1051 struct request_pt {
1052 uint8_t cmdcode;
1053 uint8_t devid;
1054 uint8_t scsi_chan;
1055 uint8_t scsi_id;
1056 uint8_t scsi_lun;
1057 uint8_t tagged_queue;
1058 uint8_t cache_mode;
1059 uint8_t max_speed;
1060 } __packed;
1061
1062 struct scsibus_softc *scsibus_sc = NULL;
1063 struct request_hs req_hs; /* to add/remove hotspare */
1064 struct request_pt req_pt; /* to add a pass-through */
1065 uint8_t req_gen[2];
1066 uint8_t reply[1];
1067 int error = 0;
1068
1069 switch (bs->bs_status) {
1070 case BIOC_SSHOTSPARE:
1071 {
1072 req_hs.cmdcode = ARC_FW_CREATE_HOTSPARE;
1073 req_hs.devmask = (1 << bs->bs_target);
1074 goto hotspare;
1075 }
1076 case BIOC_SSDELHOTSPARE:
1077 {
1078 req_hs.cmdcode = ARC_FW_DELETE_HOTSPARE;
1079 req_hs.devmask = (1 << bs->bs_target);
1080 goto hotspare;
1081 }
1082 case BIOC_SSPASSTHRU:
1083 {
1084 req_pt.cmdcode = ARC_FW_CREATE_PASSTHRU;
1085 req_pt.devid = bs->bs_other_id; /* this wants device# */
1086 req_pt.scsi_chan = bs->bs_channel;
1087 req_pt.scsi_id = bs->bs_target;
1088 req_pt.scsi_lun = bs->bs_lun;
1089 req_pt.tagged_queue = 1; /* always enabled */
1090 req_pt.cache_mode = 1; /* always enabled */
1091 req_pt.max_speed = 4; /* always max speed */
1092
1093 error = arc_msgbuf(sc, &req_pt, sizeof(req_pt),
1094 reply, sizeof(reply));
1095 if (error != 0)
1096 return error;
1097
1098 /*
1099 * Do a rescan on the bus to attach the new device
1100 * associated with the pass-through disk.
1101 */
1102 scsibus_sc = device_private(sc->sc_scsibus_dv);
1103 (void)scsi_probe_bus(scsibus_sc, bs->bs_target, bs->bs_lun);
1104
1105 goto out;
1106 }
1107 case BIOC_SSDELPASSTHRU:
1108 {
1109 req_gen[0] = ARC_FW_DELETE_PASSTHRU;
1110 req_gen[1] = bs->bs_target;
1111 error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
1112 reply, sizeof(reply));
1113 if (error != 0)
1114 return error;
1115
1116 /*
1117 * Detach the sd device associated with this pass-through disk.
1118 */
1119 error = scsipi_target_detach(&sc->sc_chan, bs->bs_target,
1120 bs->bs_lun, 0);
1121 if (error)
1122 printf("%s: couldn't detach sd device for the "
1123 "pass-through disk at %u:%u.%u (error=%d)\n",
1124 device_xname(&sc->sc_dev),
1125 bs->bs_channel, bs->bs_target, bs->bs_lun, error);
1126
1127 goto out;
1128 }
1129 case BIOC_SSCHECKSTART_VOL:
1130 {
1131 req_gen[0] = ARC_FW_START_CHECKVOL;
1132 req_gen[1] = bs->bs_volid;
1133 error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
1134 reply, sizeof(reply));
1135 if (error != 0)
1136 return error;
1137
1138 goto out;
1139 }
1140 case BIOC_SSCHECKSTOP_VOL:
1141 {
1142 uint8_t req = ARC_FW_STOP_CHECKVOL;
1143 error = arc_msgbuf(sc, &req, 1, reply, sizeof(reply));
1144 if (error != 0)
1145 return error;
1146
1147 goto out;
1148 }
1149 default:
1150 return EOPNOTSUPP;
1151 }
1152
1153 hotspare:
1154 error = arc_msgbuf(sc, &req_hs, sizeof(req_hs),
1155 reply, sizeof(reply));
1156 if (error != 0)
1157 return error;
1158
1159 out:
1160 return arc_fw_parse_status_code(sc, &reply[0]);
1161 }
1162
1163 static int
1164 arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
1165 {
1166 uint8_t request[2];
1167 struct arc_fw_sysinfo *sysinfo;
1168 struct arc_fw_raidinfo *raidinfo;
1169 int nvols = 0, i;
1170 int error = 0;
1171
1172 sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
1173 raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1174
1175 request[0] = ARC_FW_SYSINFO;
1176 error = arc_msgbuf(sc, request, 1, sysinfo,
1177 sizeof(struct arc_fw_sysinfo));
1178 if (error != 0)
1179 goto out;
1180
1181 sc->sc_maxraidset = sysinfo->max_raid_set;
1182 sc->sc_maxvolset = sysinfo->max_volume_set;
1183 sc->sc_cchans = sysinfo->ide_channels;
1184
1185 request[0] = ARC_FW_RAIDINFO;
1186 for (i = 0; i < sc->sc_maxraidset; i++) {
1187 request[1] = i;
1188 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1189 sizeof(struct arc_fw_raidinfo));
1190 if (error != 0)
1191 goto out;
1192
1193 if (raidinfo->volumes)
1194 nvols++;
1195 }
1196
1197 strlcpy(bi->bi_dev, device_xname(&sc->sc_dev), sizeof(bi->bi_dev));
1198 bi->bi_novol = nvols;
1199 bi->bi_nodisk = sc->sc_cchans;
1200
1201 out:
1202 kmem_free(raidinfo, sizeof(*raidinfo));
1203 kmem_free(sysinfo, sizeof(*sysinfo));
1204 return error;
1205 }
1206
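/*
 * The firmware keeps volumes in sparse slots; walk up to max_volume_set
 * entries, skipping slots with zero capacity, until the vol'th populated
 * volume is found.
 */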
1207 static int
1208 arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
1209 {
1210 uint8_t request[2];
1211 int error = 0;
1212 int nvols = 0, i;
1213
1214 request[0] = ARC_FW_VOLINFO;
1215 for (i = 0; i < sc->sc_maxvolset; i++) {
1216 request[1] = i;
1217 error = arc_msgbuf(sc, request, sizeof(request), volinfo,
1218 sizeof(struct arc_fw_volinfo));
1219 if (error != 0)
1220 goto out;
1221
1222 if (volinfo->capacity == 0 && volinfo->capacity2 == 0)
1223 continue;
1224
1225 if (nvols == vol)
1226 break;
1227
1228 nvols++;
1229 }
1230
1231 if (nvols != vol ||
1232 (volinfo->capacity == 0 && volinfo->capacity2 == 0)) {
1233 error = ENODEV;
1234 goto out;
1235 }
1236
1237 out:
1238 return error;
1239 }
1240
1241 static int
1242 arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
1243 {
1244 struct arc_fw_volinfo *volinfo;
1245 uint64_t blocks;
1246 uint32_t status;
1247 int error = 0;
1248
1249 volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1250
1251 error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
1252 if (error != 0)
1253 goto out;
1254
1255 bv->bv_percent = -1;
1256 bv->bv_seconds = 0;
1257
1258 status = htole32(volinfo->volume_status);
1259 if (status == 0x0) {
1260 if (htole32(volinfo->fail_mask) == 0x0)
1261 bv->bv_status = BIOC_SVONLINE;
1262 else
1263 bv->bv_status = BIOC_SVDEGRADED;
1264 } else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
1265 bv->bv_status = BIOC_SVDEGRADED;
1266 } else if (status & ARC_FW_VOL_STATUS_FAILED) {
1267 bv->bv_status = BIOC_SVOFFLINE;
1268 } else if (status & ARC_FW_VOL_STATUS_INITTING) {
1269 bv->bv_status = BIOC_SVBUILDING;
1270 bv->bv_percent = htole32(volinfo->progress);
1271 } else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
1272 bv->bv_status = BIOC_SVREBUILD;
1273 bv->bv_percent = htole32(volinfo->progress);
1274 } else if (status & ARC_FW_VOL_STATUS_MIGRATING) {
1275 bv->bv_status = BIOC_SVMIGRATING;
1276 bv->bv_percent = htole32(volinfo->progress);
1277 } else if (status & ARC_FW_VOL_STATUS_CHECKING) {
1278 bv->bv_status = BIOC_SVCHECKING;
1279 bv->bv_percent = htole32(volinfo->progress);
1280 }
1281
1282 blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
1283 blocks += (uint64_t)htole32(volinfo->capacity);
1284 bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */
1285
1286 switch (volinfo->raid_level) {
1287 case ARC_FW_VOL_RAIDLEVEL_0:
1288 bv->bv_level = 0;
1289 break;
1290 case ARC_FW_VOL_RAIDLEVEL_1:
1291 bv->bv_level = 1;
1292 break;
1293 case ARC_FW_VOL_RAIDLEVEL_3:
1294 bv->bv_level = 3;
1295 break;
1296 case ARC_FW_VOL_RAIDLEVEL_5:
1297 bv->bv_level = 5;
1298 break;
1299 case ARC_FW_VOL_RAIDLEVEL_6:
1300 bv->bv_level = 6;
1301 break;
1302 case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
1303 bv->bv_level = BIOC_SVOL_PASSTHRU;
1304 break;
1305 default:
1306 bv->bv_level = -1;
1307 break;
1308 }
1309
1310 bv->bv_nodisk = volinfo->member_disks;
1311 bv->bv_stripe_size = volinfo->stripe_size / 2;
1312 snprintf(bv->bv_dev, sizeof(bv->bv_dev), "sd%d", bv->bv_volid);
1313 scsipi_strvis(bv->bv_vendor, sizeof(bv->bv_vendor), volinfo->set_name,
1314 sizeof(volinfo->set_name));
1315
1316 out:
1317 kmem_free(volinfo, sizeof(*volinfo));
1318 return error;
1319 }
1320
1321 static int
1322 arc_bio_disk_novol(struct arc_softc *sc, struct bioc_disk *bd)
1323 {
1324 struct arc_fw_diskinfo *diskinfo;
1325 uint8_t request[2];
1326 int error = 0;
1327
1328 diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1329
1330 if (bd->bd_diskid >= sc->sc_cchans) {
1331 error = ENODEV;
1332 goto out;
1333 }
1334
1335 request[0] = ARC_FW_DISKINFO;
1336 request[1] = bd->bd_diskid;
1337 error = arc_msgbuf(sc, request, sizeof(request),
1338 diskinfo, sizeof(struct arc_fw_diskinfo));
1339 if (error != 0)
1340 goto out;
1341
1342 /* skip disks with no capacity */
1343 if (htole32(diskinfo->capacity) == 0 &&
1344 htole32(diskinfo->capacity2) == 0)
1345 goto out;
1346
1347 bd->bd_disknovol = true;
1348 arc_bio_disk_filldata(sc, bd, diskinfo, bd->bd_diskid);
1349
1350 out:
1351 kmem_free(diskinfo, sizeof(*diskinfo));
1352 return error;
1353 }
1354
1355 static void
1356 arc_bio_disk_filldata(struct arc_softc *sc, struct bioc_disk *bd,
1357 struct arc_fw_diskinfo *diskinfo, int diskid)
1358 {
1359 uint64_t blocks;
1360 char model[81];
1361 char serial[41];
1362 char rev[17];
1363
1364 switch (htole32(diskinfo->device_state)) {
1365 case ARC_FW_DISK_PASSTHRU:
1366 bd->bd_status = BIOC_SDPASSTHRU;
1367 break;
1368 case ARC_FW_DISK_RAIDMEMBER:
1369 bd->bd_status = BIOC_SDONLINE;
1370 break;
1371 case ARC_FW_DISK_HOTSPARE:
1372 bd->bd_status = BIOC_SDHOTSPARE;
1373 break;
1374 case ARC_FW_DISK_UNUSED:
1375 bd->bd_status = BIOC_SDUNUSED;
1376 break;
1377 case 0:
1378 /* disk has been disconnected */
1379 bd->bd_status = BIOC_SDOFFLINE;
1380 bd->bd_channel = 1;
1381 bd->bd_target = 0;
1382 bd->bd_lun = 0;
1383 strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
1384 break;
1385 default:
1386 printf("%s: unknown disk device_state: 0x%x\n", __func__,
1387 htole32(diskinfo->device_state));
1388 bd->bd_status = BIOC_SDINVALID;
1389 return;
1390 }
1391
1392 blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
1393 blocks += (uint64_t)htole32(diskinfo->capacity);
1394 bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */
1395
1396 scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
1397 scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
1398 scsipi_strvis(rev, 17, diskinfo->firmware_rev,
1399 sizeof(diskinfo->firmware_rev));
1400
1401 snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
1402 strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));
1403
1404 #if 0
1405 bd->bd_channel = diskinfo->scsi_attr.channel;
1406 bd->bd_target = diskinfo->scsi_attr.target;
1407 bd->bd_lun = diskinfo->scsi_attr.lun;
1408 #endif
1409
1410 /*
1411 	 * the firmware doesn't seem to fill scsi_attr in, so fake it with
1412 	 * the diskid.
1413 */
1414 bd->bd_channel = 0;
1415 bd->bd_target = diskid;
1416 bd->bd_lun = 0;
1417 }
1418
1419 static int
1420 arc_bio_disk_volume(struct arc_softc *sc, struct bioc_disk *bd)
1421 {
1422 struct arc_fw_raidinfo *raidinfo;
1423 struct arc_fw_volinfo *volinfo;
1424 struct arc_fw_diskinfo *diskinfo;
1425 uint8_t request[2];
1426 int error = 0;
1427
1428 volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1429 raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1430 diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1431
1432 error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
1433 if (error != 0)
1434 goto out;
1435
1436 request[0] = ARC_FW_RAIDINFO;
1437 request[1] = volinfo->raid_set_number;
1438
1439 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1440 sizeof(struct arc_fw_raidinfo));
1441 if (error != 0)
1442 goto out;
1443
1444 if (bd->bd_diskid >= sc->sc_cchans ||
1445 bd->bd_diskid >= raidinfo->member_devices) {
1446 error = ENODEV;
1447 goto out;
1448 }
1449
1450 if (raidinfo->device_array[bd->bd_diskid] == 0xff) {
1451 /*
1452 * The disk has been disconnected, mark it offline
1453 * and put it on another bus.
1454 */
1455 bd->bd_channel = 1;
1456 bd->bd_target = 0;
1457 bd->bd_lun = 0;
1458 bd->bd_status = BIOC_SDOFFLINE;
1459 strlcpy(bd->bd_vendor, "disk missing", sizeof(bd->bd_vendor));
1460 goto out;
1461 }
1462
1463 request[0] = ARC_FW_DISKINFO;
1464 request[1] = raidinfo->device_array[bd->bd_diskid];
1465 error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
1466 sizeof(struct arc_fw_diskinfo));
1467 if (error != 0)
1468 goto out;
1469
1470 /* now fill our bio disk with data from the firmware */
1471 arc_bio_disk_filldata(sc, bd, diskinfo,
1472 raidinfo->device_array[bd->bd_diskid]);
1473
1474 out:
1475 kmem_free(raidinfo, sizeof(*raidinfo));
1476 kmem_free(volinfo, sizeof(*volinfo));
1477 kmem_free(diskinfo, sizeof(*diskinfo));
1478 return error;
1479 }
1480 #endif /* NBIO > 0 */
1481
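/*
 * Firmware message checksum: the two bytes of the length plus every
 * payload byte, summed modulo 256.
 */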
1482 uint8_t
1483 arc_msg_cksum(void *cmd, uint16_t len)
1484 {
1485 uint8_t *buf = cmd;
1486 uint8_t cksum;
1487 int i;
1488
1489 cksum = (uint8_t)(len >> 8) + (uint8_t)len;
1490 for (i = 0; i < len; i++)
1491 cksum += buf[i];
1492
1493 return cksum;
1494 }
1495
1496
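/*
 * Exchange a message with the firmware through the IOC read/write
 * buffers. The request is wrapped in an arc_fw_bufhdr plus a trailing
 * checksum byte and pushed in chunks of at most ARC_REG_IOC_RWBUF_MAXLEN
 * bytes, each announced via the inbound doorbell. The reply is collected
 * the same way, and its header and checksum are verified before the
 * payload is copied out to the caller. The doorbell traffic is
 * serialized with arc_lock()/arc_unlock().
 */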
1497 int
1498 arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
1499 size_t rbuflen)
1500 {
1501 uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
1502 uint8_t *wbuf, *rbuf;
1503 int wlen, wdone = 0, rlen, rdone = 0;
1504 struct arc_fw_bufhdr *bufhdr;
1505 uint32_t reg, rwlen;
1506 int error = 0;
1507 #ifdef ARC_DEBUG
1508 int i;
1509 #endif
1510
1511 wbuf = rbuf = NULL;
1512
1513 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
1514 device_xname(&sc->sc_dev), wbuflen, rbuflen);
1515
1516 wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
1517 wbuf = kmem_alloc(wlen, KM_SLEEP);
1518
1519 rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
1520 rbuf = kmem_alloc(rlen, KM_SLEEP);
1521
1522 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
1523 device_xname(&sc->sc_dev), wlen, rlen);
1524
1525 bufhdr = (struct arc_fw_bufhdr *)wbuf;
1526 bufhdr->hdr = arc_fw_hdr;
1527 bufhdr->len = htole16(wbuflen);
1528 memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
1529 wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);
1530
1531 arc_lock(sc);
1532 if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0) {
1533 error = EBUSY;
1534 goto out;
1535 }
1536
1537 reg = ARC_REG_OUTB_DOORBELL_READ_OK;
1538
1539 do {
1540 if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
1541 memset(rwbuf, 0, sizeof(rwbuf));
1542 rwlen = (wlen - wdone) % sizeof(rwbuf);
1543 memcpy(rwbuf, &wbuf[wdone], rwlen);
1544
1545 #ifdef ARC_DEBUG
1546 if (arcdebug & ARC_D_DB) {
1547 printf("%s: write %d:",
1548 device_xname(&sc->sc_dev), rwlen);
1549 for (i = 0; i < rwlen; i++)
1550 printf(" 0x%02x", rwbuf[i]);
1551 printf("\n");
1552 }
1553 #endif
1554
1555 /* copy the chunk to the hw */
1556 arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
1557 arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
1558 sizeof(rwbuf));
1559
1560 /* say we have a buffer for the hw */
1561 arc_write(sc, ARC_REG_INB_DOORBELL,
1562 ARC_REG_INB_DOORBELL_WRITE_OK);
1563
1564 wdone += rwlen;
1565 }
1566
1567 while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
1568 arc_wait(sc);
1569
1570 arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
1571
1572 DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
1573 device_xname(&sc->sc_dev), reg);
1574
1575 if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
1576 rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
1577 if (rwlen > sizeof(rwbuf)) {
1578 DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
1579 device_xname(&sc->sc_dev));
1580 error = EIO;
1581 goto out;
1582 }
1583
1584 arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
1585 sizeof(rwbuf));
1586
1587 arc_write(sc, ARC_REG_INB_DOORBELL,
1588 ARC_REG_INB_DOORBELL_READ_OK);
1589
1590 #ifdef ARC_DEBUG
1591 printf("%s: len: %d+%d=%d/%d\n",
1592 device_xname(&sc->sc_dev),
1593 rwlen, rdone, rwlen + rdone, rlen);
1594 if (arcdebug & ARC_D_DB) {
1595 printf("%s: read:",
1596 device_xname(&sc->sc_dev));
1597 for (i = 0; i < rwlen; i++)
1598 printf(" 0x%02x", rwbuf[i]);
1599 printf("\n");
1600 }
1601 #endif
1602
1603 if ((rdone + rwlen) > rlen) {
1604 DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
1605 device_xname(&sc->sc_dev));
1606 error = EIO;
1607 goto out;
1608 }
1609
1610 memcpy(&rbuf[rdone], rwbuf, rwlen);
1611 rdone += rwlen;
1612 }
1613 } while (rdone != rlen);
1614
1615 bufhdr = (struct arc_fw_bufhdr *)rbuf;
1616 if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
1617 bufhdr->len != htole16(rbuflen)) {
1618 DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
1619 device_xname(&sc->sc_dev));
1620 error = EIO;
1621 goto out;
1622 }
1623
1624 memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);
1625
1626 if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
1627 DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
1628 device_xname(&sc->sc_dev));
1629 error = EIO;
1630 goto out;
1631 }
1632
1633 out:
1634 arc_unlock(sc);
1635 kmem_free(wbuf, wlen);
1636 kmem_free(rbuf, rlen);
1637
1638 return error;
1639 }
1640
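/*
 * Serialization for firmware message traffic. arc_lock() takes the
 * rwlock and the spin mutex and masks the doorbell interrupt while
 * sc_talking is set; arc_wait() briefly re-enables it and sleeps on
 * sc_condvar (up to a second) for arc_intr() to signal doorbell
 * activity; arc_unlock() restores the normal interrupt mask.
 */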
1641 void
1642 arc_lock(struct arc_softc *sc)
1643 {
1644 rw_enter(&sc->sc_rwlock, RW_WRITER);
1645 mutex_spin_enter(&sc->sc_mutex);
1646 arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1647 sc->sc_talking = 1;
1648 }
1649
1650 void
1651 arc_unlock(struct arc_softc *sc)
1652 {
1653 KASSERT(mutex_owned(&sc->sc_mutex));
1654
1655 arc_write(sc, ARC_REG_INTRMASK,
1656 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1657 sc->sc_talking = 0;
1658 mutex_spin_exit(&sc->sc_mutex);
1659 rw_exit(&sc->sc_rwlock);
1660 }
1661
1662 void
1663 arc_wait(struct arc_softc *sc)
1664 {
1665 KASSERT(mutex_owned(&sc->sc_mutex));
1666
1667 arc_write(sc, ARC_REG_INTRMASK,
1668 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1669 if (cv_timedwait(&sc->sc_condvar, &sc->sc_mutex, hz) == EWOULDBLOCK)
1670 arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1671 }
1672
1673 #if NBIO > 0
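/*
 * Sensor thread, run once at attach. One ENVSYS_DRIVE sensor is created
 * per (non pass-through) volume plus one per member disk. The volume id
 * is stashed in value_max and (diskid + 10) in value_avg so that
 * arc_refresh_sensors() can map a sensor back to the right bioc query;
 * value_avg == 0 therefore means the sensor covers a whole volume.
 */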
1674 static void
1675 arc_create_sensors(void *arg)
1676 {
1677 struct arc_softc *sc = arg;
1678 struct bioc_inq bi;
1679 struct bioc_vol bv;
1680 int i, j;
1681 size_t slen, count = 0;
1682
1683 memset(&bi, 0, sizeof(bi));
1684 if (arc_bio_inq(sc, &bi) != 0) {
1685 aprint_error("%s: unable to query firmware for sensor info\n",
1686 device_xname(&sc->sc_dev));
1687 kthread_exit(0);
1688 }
1689
1690 	/* There's no point in continuing if there are no volumes */
1691 if (!bi.bi_novol)
1692 kthread_exit(0);
1693
1694 for (i = 0; i < bi.bi_novol; i++) {
1695 memset(&bv, 0, sizeof(bv));
1696 bv.bv_volid = i;
1697 if (arc_bio_vol(sc, &bv) != 0)
1698 kthread_exit(0);
1699
1700 /* Skip passthrough volumes */
1701 if (bv.bv_level == BIOC_SVOL_PASSTHRU)
1702 continue;
1703
1704 /* new volume found */
1705 sc->sc_nsensors++;
1706 /* new disk in a volume found */
1707 sc->sc_nsensors+= bv.bv_nodisk;
1708 }
1709
1710 sc->sc_sme = sysmon_envsys_create();
1711 slen = sizeof(envsys_data_t) * sc->sc_nsensors;
1712 sc->sc_sensors = kmem_zalloc(slen, KM_SLEEP);
1713
1714 /* Attach sensors for volumes and disks */
1715 for (i = 0; i < bi.bi_novol; i++) {
1716 memset(&bv, 0, sizeof(bv));
1717 bv.bv_volid = i;
1718 if (arc_bio_vol(sc, &bv) != 0)
1719 goto bad;
1720
1721 sc->sc_sensors[count].units = ENVSYS_DRIVE;
1722 sc->sc_sensors[count].monitor = true;
1723 sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;
1724
1725 /* Skip passthrough volumes */
1726 if (bv.bv_level == BIOC_SVOL_PASSTHRU)
1727 continue;
1728
1729 snprintf(sc->sc_sensors[count].desc,
1730 sizeof(sc->sc_sensors[count].desc),
1731 "RAID %d volume%d (%s)", bv.bv_level, i, bv.bv_dev);
1732 sc->sc_sensors[count].value_max = i;
1733
1734 if (sysmon_envsys_sensor_attach(sc->sc_sme,
1735 &sc->sc_sensors[count]))
1736 goto bad;
1737
1738 count++;
1739
1740 /* Attach disk sensors for this volume */
1741 for (j = 0; j < bv.bv_nodisk; j++) {
1742 sc->sc_sensors[count].units = ENVSYS_DRIVE;
1743 sc->sc_sensors[count].monitor = true;
1744 sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;
1745
1746 snprintf(sc->sc_sensors[count].desc,
1747 sizeof(sc->sc_sensors[count].desc),
1748 "disk%d volume%d (%s)", j, i, bv.bv_dev);
1749 sc->sc_sensors[count].value_max = i;
1750 sc->sc_sensors[count].value_avg = j + 10;
1751
1752 if (sysmon_envsys_sensor_attach(sc->sc_sme,
1753 &sc->sc_sensors[count]))
1754 goto bad;
1755
1756 count++;
1757 }
1758 }
1759
1760 /*
1761 * Register our envsys driver with the framework now that the
1762 	 * sensors have all been attached.
1763 */
1764 sc->sc_sme->sme_name = device_xname(&sc->sc_dev);
1765 sc->sc_sme->sme_cookie = sc;
1766 sc->sc_sme->sme_refresh = arc_refresh_sensors;
1767
1768 if (sysmon_envsys_register(sc->sc_sme)) {
1769 aprint_debug("%s: unable to register with sysmon\n",
1770 device_xname(&sc->sc_dev));
1771 goto bad;
1772 }
1773 kthread_exit(0);
1774
1775 bad:
1776 kmem_free(sc->sc_sensors, slen);
1777 sysmon_envsys_destroy(sc->sc_sme);
1778 kthread_exit(0);
1779 }
1780
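/*
 * envsys refresh callback: value_avg selects between a member disk
 * (value_avg != 0, diskid = value_avg - 10) and a whole volume, and the
 * bioc status is translated into an ENVSYS_DRIVE_* value and sensor
 * state.
 */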
1781 static void
1782 arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
1783 {
1784 struct arc_softc *sc = sme->sme_cookie;
1785 struct bioc_vol bv;
1786 struct bioc_disk bd;
1787
1788 /* sanity check */
1789 if (edata->units != ENVSYS_DRIVE)
1790 return;
1791
1792 memset(&bv, 0, sizeof(bv));
1793 bv.bv_volid = edata->value_max;
1794
1795 if (arc_bio_vol(sc, &bv)) {
1796 edata->value_cur = ENVSYS_DRIVE_EMPTY;
1797 edata->state = ENVSYS_SINVALID;
1798 return;
1799 }
1800
1801 /* Current sensor is handling a disk volume member */
1802 if (edata->value_avg) {
1803 memset(&bd, 0, sizeof(bd));
1804 bd.bd_volid = edata->value_max;
1805 bd.bd_diskid = edata->value_avg - 10;
1806
1807 if (arc_bio_disk_volume(sc, &bd)) {
1808 edata->value_cur = ENVSYS_DRIVE_OFFLINE;
1809 edata->state = ENVSYS_SCRITICAL;
1810 return;
1811 }
1812
1813 switch (bd.bd_status) {
1814 case BIOC_SDONLINE:
1815 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1816 edata->state = ENVSYS_SVALID;
1817 break;
1818 case BIOC_SDOFFLINE:
1819 edata->value_cur = ENVSYS_DRIVE_OFFLINE;
1820 edata->state = ENVSYS_SCRITICAL;
1821 break;
1822 default:
1823 edata->value_cur = ENVSYS_DRIVE_FAIL;
1824 edata->state = ENVSYS_SCRITICAL;
1825 break;
1826 }
1827
1828 return;
1829 }
1830
1831 /* Current sensor is handling a volume */
1832 switch (bv.bv_status) {
1833 case BIOC_SVOFFLINE:
1834 edata->value_cur = ENVSYS_DRIVE_FAIL;
1835 edata->state = ENVSYS_SCRITICAL;
1836 break;
1837 case BIOC_SVDEGRADED:
1838 edata->value_cur = ENVSYS_DRIVE_PFAIL;
1839 edata->state = ENVSYS_SCRITICAL;
1840 break;
1841 case BIOC_SVBUILDING:
1842 edata->value_cur = ENVSYS_DRIVE_BUILD;
1843 edata->state = ENVSYS_SVALID;
1844 break;
1845 case BIOC_SVMIGRATING:
1846 edata->value_cur = ENVSYS_DRIVE_MIGRATING;
1847 edata->state = ENVSYS_SVALID;
1848 break;
1849 case BIOC_SVCHECKING:
1850 edata->value_cur = ENVSYS_DRIVE_CHECK;
1851 edata->state = ENVSYS_SVALID;
1852 break;
1853 case BIOC_SVREBUILD:
1854 edata->value_cur = ENVSYS_DRIVE_REBUILD;
1855 edata->state = ENVSYS_SCRITICAL;
1856 break;
1857 case BIOC_SVSCRUB:
1858 case BIOC_SVONLINE:
1859 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1860 edata->state = ENVSYS_SVALID;
1861 break;
1862 case BIOC_SVINVALID:
1863 /* FALLTHROUGH */
1864 default:
1865 edata->value_cur = ENVSYS_DRIVE_EMPTY; /* unknown state */
1866 edata->state = ENVSYS_SINVALID;
1867 break;
1868 }
1869 }
1870 #endif /* NBIO > 0 */
1871
1872 uint32_t
1873 arc_read(struct arc_softc *sc, bus_size_t r)
1874 {
1875 uint32_t v;
1876
1877 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1878 BUS_SPACE_BARRIER_READ);
1879 v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1880
1881 DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
1882 device_xname(&sc->sc_dev), r, v);
1883
1884 return v;
1885 }
1886
1887 void
1888 arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1889 {
1890 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1891 BUS_SPACE_BARRIER_READ);
1892 bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
1893 (uint32_t *)buf, len >> 2);
1894 }
1895
1896 void
1897 arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
1898 {
1899 DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
1900 device_xname(&sc->sc_dev), r, v);
1901
1902 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1903 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1904 BUS_SPACE_BARRIER_WRITE);
1905 }
1906
1907 void
1908 arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1909 {
1910 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
1911 (const uint32_t *)buf, len >> 2);
1912 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1913 BUS_SPACE_BARRIER_WRITE);
1914 }
1915
1916 int
1917 arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1918 uint32_t target)
1919 {
1920 int i;
1921
1922 DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
1923 device_xname(&sc->sc_dev), r, mask, target);
1924
1925 for (i = 0; i < 10000; i++) {
1926 if ((arc_read(sc, r) & mask) == target)
1927 return 0;
1928 delay(1000);
1929 }
1930
1931 return 1;
1932 }
1933
1934 int
1935 arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1936 uint32_t target)
1937 {
1938 int i;
1939
1940 DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
1941 device_xname(&sc->sc_dev), r, mask, target);
1942
1943 for (i = 0; i < 10000; i++) {
1944 if ((arc_read(sc, r) & mask) != target)
1945 return 0;
1946 delay(1000);
1947 }
1948
1949 return 1;
1950 }
1951
1952 int
1953 arc_msg0(struct arc_softc *sc, uint32_t m)
1954 {
1955 /* post message */
1956 arc_write(sc, ARC_REG_INB_MSG0, m);
1957 /* wait for the fw to do it */
1958 if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
1959 ARC_REG_INTRSTAT_MSG0) != 0)
1960 return 1;
1961
1962 /* ack it */
1963 arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);
1964
1965 return 0;
1966 }
1967
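/*
 * Allocate a physically contiguous, DMA-safe chunk of memory: create a
 * map, allocate and map a single segment, then load it. Each step is
 * unwound in reverse order on failure.
 */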
1968 struct arc_dmamem *
1969 arc_dmamem_alloc(struct arc_softc *sc, size_t size)
1970 {
1971 struct arc_dmamem *adm;
1972 int nsegs;
1973
1974 adm = kmem_zalloc(sizeof(*adm), KM_NOSLEEP);
1975 if (adm == NULL)
1976 return NULL;
1977
1978 adm->adm_size = size;
1979
1980 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1981 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
1982 goto admfree;
1983
1984 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
1985 1, &nsegs, BUS_DMA_NOWAIT) != 0)
1986 goto destroy;
1987
1988 if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
1989 &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
1990 goto free;
1991
1992 if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
1993 NULL, BUS_DMA_NOWAIT) != 0)
1994 goto unmap;
1995
1996 memset(adm->adm_kva, 0, size);
1997
1998 return adm;
1999
2000 unmap:
2001 bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
2002 free:
2003 bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
2004 destroy:
2005 bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
2006 admfree:
2007 kmem_free(adm, sizeof(*adm));
2008
2009 return NULL;
2010 }
2011
2012 void
2013 arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
2014 {
2015 bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
2016 bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
2017 bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
2018 bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
2019 kmem_free(adm, sizeof(*adm));
2020 }
2021
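/*
 * Allocate sc_req_count ccbs plus one shared DMA area holding an
 * ARC_MAX_IOCMDLEN frame per ccb. ccb_cmd_post is the device-visible
 * address of the frame, pre-shifted for the post queue register.
 */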
2022 int
2023 arc_alloc_ccbs(struct arc_softc *sc)
2024 {
2025 struct arc_ccb *ccb;
2026 uint8_t *cmd;
2027 int i;
2028 size_t ccbslen;
2029
2030 TAILQ_INIT(&sc->sc_ccb_free);
2031
2032 ccbslen = sizeof(struct arc_ccb) * sc->sc_req_count;
2033 sc->sc_ccbs = kmem_zalloc(ccbslen, KM_SLEEP);
2034
2035 sc->sc_requests = arc_dmamem_alloc(sc,
2036 ARC_MAX_IOCMDLEN * sc->sc_req_count);
2037 if (sc->sc_requests == NULL) {
2038 aprint_error("%s: unable to allocate ccb dmamem\n",
2039 device_xname(&sc->sc_dev));
2040 goto free_ccbs;
2041 }
2042 cmd = ARC_DMA_KVA(sc->sc_requests);
2043
2044 for (i = 0; i < sc->sc_req_count; i++) {
2045 ccb = &sc->sc_ccbs[i];
2046
2047 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
2048 MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
2049 aprint_error("%s: unable to create dmamap for ccb %d\n",
2050 device_xname(&sc->sc_dev), i);
2051 goto free_maps;
2052 }
2053
2054 ccb->ccb_sc = sc;
2055 ccb->ccb_id = i;
2056 ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;
2057
2058 ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
2059 ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
2060 ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;
2061
2062 arc_put_ccb(sc, ccb);
2063 }
2064
2065 return 0;
2066
2067 free_maps:
2068 while ((ccb = arc_get_ccb(sc)) != NULL)
2069 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2070 arc_dmamem_free(sc, sc->sc_requests);
2071
2072 free_ccbs:
2073 kmem_free(sc->sc_ccbs, ccbslen);
2074
2075 return 1;
2076 }
2077
2078 struct arc_ccb *
2079 arc_get_ccb(struct arc_softc *sc)
2080 {
2081 struct arc_ccb *ccb;
2082
2083 ccb = TAILQ_FIRST(&sc->sc_ccb_free);
2084 if (ccb != NULL)
2085 TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
2086
2087 return ccb;
2088 }
2089
2090 void
2091 arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
2092 {
2093 ccb->ccb_xs = NULL;
2094 memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
2095 TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
2096 }
2097