arcmsr.c revision 1.11 1 /* $NetBSD: arcmsr.c,v 1.11 2008/02/28 21:08:45 xtraeme Exp $ */
2 /* $OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */
3
4 /*
5 * Copyright (c) 2007, 2008 Juan Romero Pardines <xtraeme (at) netbsd.org>
6 * Copyright (c) 2006 David Gwynne <dlg (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include "bio.h"
22
23 #include <sys/cdefs.h>
24 __KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.11 2008/02/28 21:08:45 xtraeme Exp $");
25
26 #include <sys/param.h>
27 #include <sys/buf.h>
28 #include <sys/kernel.h>
29 #include <sys/malloc.h>
30 #include <sys/device.h>
31 #include <sys/kmem.h>
32 #include <sys/kthread.h>
33 #include <sys/mutex.h>
34 #include <sys/condvar.h>
35 #include <sys/rwlock.h>
36
37 #if NBIO > 0
38 #include <sys/ioctl.h>
39 #include <dev/biovar.h>
40 #endif
41
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 #include <dev/pci/pcidevs.h>
45
46 #include <dev/scsipi/scsipi_all.h>
47 #include <dev/scsipi/scsi_all.h>
48 #include <dev/scsipi/scsiconf.h>
49
50 #include <dev/sysmon/sysmonvar.h>
51
52 #include <sys/bus.h>
53
54 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
55
56 #include <dev/pci/arcmsrvar.h>
57
/* #define ARC_DEBUG */
#ifdef ARC_DEBUG
/* debug categories; or into arcdebug to enable matching DNPRINTF output */
#define ARC_D_INIT	(1<<0)
#define ARC_D_RW	(1<<1)
#define ARC_D_DB	(1<<2)

int arcdebug = 0;

#define DPRINTF(p...)		do { if (arcdebug) printf(p); } while (0)
#define DNPRINTF(n, p...)	do { if ((n) & arcdebug) printf(p); } while (0)

#else
#define DPRINTF(p...)		/* p */
#define DNPRINTF(n, p...)	/* n, p */
#endif

/*
 * the fw header must always equal this.
 */
static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };
78
/*
 * autoconf(9) glue.
 */
static int	arc_match(device_t, struct cfdata *, void *);
static void	arc_attach(device_t, device_t, void *);
static int	arc_detach(device_t, int);
static bool	arc_shutdown(device_t, int);
static int	arc_intr(void *);
static void	arc_minphys(struct buf *);

CFATTACH_DECL(arcmsr, sizeof(struct arc_softc),
	arc_match, arc_attach, arc_detach, NULL);

/*
 * bio(4) and sysmon_envsys(9) glue.
 */
#if NBIO > 0
static int	arc_bioctl(struct device *, u_long, void *);
static int	arc_bio_inq(struct arc_softc *, struct bioc_inq *);
static int	arc_bio_vol(struct arc_softc *, struct bioc_vol *);
static int	arc_bio_disk_volume(struct arc_softc *, struct bioc_disk *);
static int	arc_bio_disk_novol(struct arc_softc *, struct bioc_disk *);
static void	arc_bio_disk_filldata(struct arc_softc *, struct bioc_disk *,
				      struct arc_fw_diskinfo *, int);
static int	arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
static int	arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
static int	arc_bio_getvol(struct arc_softc *, int,
			       struct arc_fw_volinfo *);
static int	arc_bio_setstate(struct arc_softc *, struct bioc_setstate *);
static int	arc_bio_volops(struct arc_softc *, struct bioc_volops *);
static void	arc_create_sensors(void *);
static void	arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
static int	arc_fw_parse_status_code(struct arc_softc *, uint8_t *);
#endif
113
114 static int
115 arc_match(device_t parent, struct cfdata *match, void *aux)
116 {
117 struct pci_attach_args *pa = aux;
118
119 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
120 switch (PCI_PRODUCT(pa->pa_id)) {
121 case PCI_PRODUCT_ARECA_ARC1110:
122 case PCI_PRODUCT_ARECA_ARC1120:
123 case PCI_PRODUCT_ARECA_ARC1130:
124 case PCI_PRODUCT_ARECA_ARC1160:
125 case PCI_PRODUCT_ARECA_ARC1170:
126 case PCI_PRODUCT_ARECA_ARC1200:
127 case PCI_PRODUCT_ARECA_ARC1202:
128 case PCI_PRODUCT_ARECA_ARC1210:
129 case PCI_PRODUCT_ARECA_ARC1220:
130 case PCI_PRODUCT_ARECA_ARC1230:
131 case PCI_PRODUCT_ARECA_ARC1260:
132 case PCI_PRODUCT_ARECA_ARC1270:
133 case PCI_PRODUCT_ARECA_ARC1280:
134 case PCI_PRODUCT_ARECA_ARC1380:
135 case PCI_PRODUCT_ARECA_ARC1381:
136 case PCI_PRODUCT_ARECA_ARC1680:
137 case PCI_PRODUCT_ARECA_ARC1681:
138 return 1;
139 default:
140 break;
141 }
142 }
143
144 return 0;
145 }
146
/*
 * Attach: map the PCI resources, bring up the firmware interface,
 * allocate the ccb pool, and hook the adapter into scsipi, pmf and
 * (optionally) bio(4)/envsys.
 */
static void
arc_attach(device_t parent, device_t self, void *aux)
{
	struct arc_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_chan;

	/* state and locks for the firmware doorbell conversation */
	sc->sc_talking = 0;
	rw_init(&sc->sc_rwlock);
	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_condvar, "arcdb");

	if (arc_map_pci_resources(sc, pa) != 0) {
		/* error message printed by arc_map_pci_resources */
		return;
	}

	if (arc_query_firmware(sc) != 0) {
		/* error message printed by arc_query_firmware */
		goto unmap_pci;
	}

	if (arc_alloc_ccbs(sc) != 0) {
		/* error message printed by arc_alloc_ccbs */
		goto unmap_pci;
	}

	if (!pmf_device_register1(self, NULL, NULL, arc_shutdown))
		panic("%s: couldn't establish shutdown handler\n",
		    device_xname(self));

	/*
	 * Spread the request queue slots evenly over the targets so
	 * a single busy target cannot consume every opening.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = self;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_minphys = arc_minphys;
	adapt->adapt_request = arc_scsi_cmd;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_nluns = ARC_MAX_LUN;
	chan->chan_ntargets = ARC_MAX_TARGET;
	chan->chan_id = ARC_MAX_TARGET;
	chan->chan_channel = 0;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	/*
	 * Save the device_t returned, because we may later need to
	 * attach/detach devices via the management interface.
	 */
	sc->sc_scsibus_dv = config_found(self, &sc->sc_chan, scsiprint);

	/* enable interrupts */
	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));

#if NBIO > 0
	/*
	 * Register the driver to bio(4) and setup the sensors.
	 */
	if (bio_register(self, arc_bioctl) != 0)
		panic("%s: bioctl registration failed\n", device_xname(self));

	/*
	 * you need to talk to the firmware to get volume info. our firmware
	 * interface relies on being able to sleep, so we need to use a thread
	 * to do the work.
	 */
	if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
		panic("%s: unable to create a kernel thread for sensors\n",
		    device_xname(self));
#endif

	return;

unmap_pci:
	arc_unmap_pci_resources(sc);
}
229
/*
 * Detach: ask the firmware to stop background rebuilding and flush
 * its write cache.  Failures are only reported, not fatal.
 */
static int
arc_detach(device_t self, int flags)
{
	struct arc_softc *sc = device_private(self);

	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
		aprint_error("%s: timeout waiting to stop bg rebuild\n",
		    device_xname(&sc->sc_dev));

	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
		aprint_error("%s: timeout waiting to flush cache\n",
		    device_xname(&sc->sc_dev));

	return 0;
}
245
/*
 * pmf shutdown hook: same firmware quiesce sequence as detach — stop
 * background rebuild, then flush the controller cache to disk.
 */
static bool
arc_shutdown(device_t self, int how)
{
	struct arc_softc *sc = device_private(self);

	if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
		aprint_error("%s: timeout waiting to stop bg rebuild\n",
		    device_xname(&sc->sc_dev));

	if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
		aprint_error("%s: timeout waiting to flush cache\n",
		    device_xname(&sc->sc_dev));

	return true;
}
261
262 static void
263 arc_minphys(struct buf *bp)
264 {
265 if (bp->b_bcount > MAXPHYS)
266 bp->b_bcount = MAXPHYS;
267 minphys(bp);
268 }
269
/*
 * Interrupt handler: acknowledge doorbell interrupts (waking a thread
 * that is talking to the firmware, if any) and drain the outbound
 * post queue, completing each finished command.
 */
static int
arc_intr(void *arg)
{
	struct arc_softc *sc = arg;
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg, intrstat;

	mutex_spin_enter(&sc->sc_mutex);
	intrstat = arc_read(sc, ARC_REG_INTRSTAT);
	if (intrstat == 0x0) {
		/* not our interrupt */
		mutex_spin_exit(&sc->sc_mutex);
		return 0;
	}

	/* ack only the sources we handle */
	intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
	arc_write(sc, ARC_REG_INTRSTAT, intrstat);

	if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
		if (sc->sc_talking) {
			/*
			 * A thread is in a firmware conversation; mask
			 * doorbell interrupts and wake it up to read
			 * the reply.
			 */
			arc_write(sc, ARC_REG_INTRMASK,
			    ~ARC_REG_INTRMASK_POSTQUEUE);
			cv_broadcast(&sc->sc_condvar);
		} else {
			/* otherwise drop it */
			reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
			arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
			if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
				arc_write(sc, ARC_REG_INB_DOORBELL,
				    ARC_REG_INB_DOORBELL_READ_OK);
		}
	}
	mutex_spin_exit(&sc->sc_mutex);

	/*
	 * Each reply queue entry encodes the bus address of the
	 * completed command frame; translate it back to a kva to
	 * recover the ccb via the context stashed in the command.
	 */
	while ((reg = arc_pop(sc)) != 0xffffffff) {
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	}


	return 1;
}
321
/*
 * scsipi request entry point.  Builds an arc_msg_scsicmd in the ccb's
 * pre-allocated DMA request frame and posts it to the controller;
 * polled requests are completed before returning.
 */
void
arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct arc_softc *sc = device_private(adapt->adapt_dev);
	struct arc_ccb *ccb;
	struct arc_msg_scsicmd *cmd;
	uint32_t reg;
	uint8_t target;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* Not supported. */
		return;
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	mutex_spin_enter(&sc->sc_mutex);

	xs = arg;
	periph = xs->xs_periph;
	target = periph->periph_target;

	if (xs->cmdlen > ARC_MSG_CDBLEN) {
		/*
		 * CDB is too large for the firmware message; fail the
		 * request with ILLEGAL REQUEST sense data.
		 */
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
		xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.scsi_sense.asc = 0x20;
		xs->error = XS_SENSE;
		xs->status = SCSI_CHECK;
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	ccb = arc_get_ccb(sc);
	if (ccb == NULL) {
		/* no free command slots; ask scsipi to retry later */
		xs->error = XS_RESOURCE_SHORTAGE;
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	/* map the data buffer and build the scatter/gather list */
	if (arc_load_xs(ccb) != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		arc_put_ccb(sc, ccb);
		mutex_spin_exit(&sc->sc_mutex);
		scsipi_done(xs);
		return;
	}

	cmd = &ccb->ccb_cmd->cmd;
	reg = ccb->ccb_cmd_post;

	/* bus is always 0 */
	cmd->target = target;
	cmd->lun = periph->periph_lun;
	cmd->function = 1;	/* XXX magic number */

	cmd->cdb_len = xs->cmdlen;
	cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
	if (xs->xs_control & XS_CTL_DATA_OUT)
		cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
	if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
		/* long sgl requires the big frame format */
		cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
		reg |= ARC_REG_POST_QUEUE_BIGFRAME;
	}

	/* the context comes back in the reply so we can find the ccb */
	cmd->context = htole32(ccb->ccb_id);
	cmd->data_len = htole32(xs->datalen);

	memcpy(cmd->cdb, xs->cmd, xs->cmdlen);

	/* we've built the command, let's put it on the hw */
	bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	arc_push(sc, reg);
	if (xs->xs_control & XS_CTL_POLL) {
		/* spin for the completion; interrupts may be unusable */
		if (arc_complete(sc, ccb, xs->timeout) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			mutex_spin_exit(&sc->sc_mutex);
			scsipi_done(xs);
			return;
		}
	}

	mutex_spin_exit(&sc->sc_mutex);
}
420
/*
 * Map the xfer's data buffer for DMA and fill in the command's
 * scatter/gather list.  Returns 0 on success, 1 if the load failed.
 */
int
arc_load_xs(struct arc_ccb *ccb)
{
	struct arc_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
	uint64_t addr;
	int i, error;

	/* commands without a data phase need no sgl */
	if (xs->datalen == 0)
		return 0;

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    xs->data, xs->datalen, NULL,
	    (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error != 0) {
		aprint_error("%s: error %d loading dmamap\n",
		    device_xname(&sc->sc_dev), error);
		return 1;
	}

	/* every entry uses the 64bit sge format, little-endian */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sge = &sgl[i];

		sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
		addr = dmap->dm_segs[i].ds_addr;
		sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
		sge->sg_lo_addr = htole32((uint32_t)addr);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
459
/*
 * Complete a command: tear down the data dmamap and translate the
 * firmware completion status into scsipi status/sense, then hand the
 * xfer back via scsipi_done().
 */
void
arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct arc_msg_scsicmd *cmd;

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	/* timeout_del */
	xs->status |= XS_STS_DONE;

	if (reg & ARC_REG_REPLY_QUEUE_ERR) {
		cmd = &ccb->ccb_cmd->cmd;

		switch (cmd->status) {
		case ARC_MSG_STATUS_SELTIMEOUT:
		case ARC_MSG_STATUS_ABORTED:
		case ARC_MSG_STATUS_INIT_FAIL:
			/* the device is gone or never answered */
			xs->status = SCSI_OK;
			xs->error = XS_SELTIMEOUT;
			break;

		case SCSI_CHECK:
			/* propagate the sense data the firmware saved */
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, cmd->sense_data,
			    min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | 0x70;
			xs->status = SCSI_CHECK;
			xs->error = XS_SENSE;
			xs->resid = 0;
			break;

		default:
			/* unknown device status */
			xs->error = XS_BUSY;	/* try again later? */
			xs->status = SCSI_BUSY;
			break;
		}
	} else {
		xs->status = SCSI_OK;
		xs->error = XS_NOERROR;
		xs->resid = 0;
	}

	arc_put_ccb(sc, ccb);
	scsipi_done(xs);
}
514
/*
 * Polled completion: spin on the reply queue, completing every
 * finished command, until nccb itself completes.  Each empty poll
 * burns 1ms of the caller-supplied timeout (in ms).  Returns 0 on
 * success, 1 if the timeout expired first.
 */
int
arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
{
	struct arc_ccb *ccb = NULL;
	char *kva = ARC_DMA_KVA(sc->sc_requests);
	struct arc_io_cmd *cmd;
	uint32_t reg;

	do {
		reg = arc_pop(sc);
		if (reg == 0xffffffff) {
			/* nothing on the queue yet; wait a tick */
			if (timeout-- == 0)
				return 1;

			delay(1000);
			continue;
		}

		/* see arc_intr() for the address translation */
		cmd = (struct arc_io_cmd *)(kva +
		    ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
		    ARC_DMA_DVA(sc->sc_requests)));
		ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];

		bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
		    ccb->ccb_offset, ARC_MAX_IOCMDLEN,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		arc_scsi_cmd_done(sc, ccb, reg);
	} while (nccb != ccb);

	return 0;
}
547
/*
 * Map the controller's register BAR and establish the interrupt.
 * Returns 0 on success; on failure the mapping is undone here.
 */
int
arc_map_pci_resources(struct arc_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t memtype;
	pci_intr_handle_t ih;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
	if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
		aprint_error(": unable to map system interface register\n");
		return 1;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error(": unable to map interrupt\n");
		goto unmap;
	}

	/* I/O completions run at IPL_BIO in arc_intr() */
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    arc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error(": unable to map interrupt [2]\n");
		goto unmap;
	}

	aprint_normal("\n");
	aprint_normal("%s: interrupting at %s\n",
	    device_xname(&sc->sc_dev), pci_intr_string(pa->pa_pc, ih));

	return 0;

unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
	return 1;
}
588
/*
 * Undo arc_map_pci_resources(): tear down the interrupt first so the
 * handler can no longer touch the register window, then unmap it.
 */
void
arc_unmap_pci_resources(struct arc_softc *sc)
{
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}
596
/*
 * Wait for the firmware to come up, start background rebuild, and
 * fetch the firmware info block (model, version, port count, queue
 * depth).  Returns 0 on success.
 *
 * NOTE(review): the fwinfo fields are converted with htole32(); since
 * a 32-bit byte swap is its own inverse this behaves like le32toh(),
 * presumably the controller reports little-endian values — confirm.
 */
int
arc_query_firmware(struct arc_softc *sc)
{
	struct arc_msg_firmware_info fwinfo;
	char string[81]; /* sizeof(vendor)*2+1 */

	if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
	    ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
		aprint_debug("%s: timeout waiting for firmware ok\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	/* ask the firmware to publish its config in the message buffer */
	if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
		aprint_debug("%s: timeout waiting for get config\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
		aprint_debug("%s: timeout waiting to start bg rebuild\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));

	DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
	    device_xname(&sc->sc_dev), htole32(fwinfo.signature));

	if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
		aprint_error("%s: invalid firmware info from iop\n",
		    device_xname(&sc->sc_dev));
		return 1;
	}

	DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.request_len));
	DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.queue_len));
	DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.sdram_size));
	DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
	    device_xname(&sc->sc_dev),
	    htole32(fwinfo.sata_ports));

	scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
	DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
	    device_xname(&sc->sc_dev), string);

	scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));
	aprint_normal("%s: Areca %s Host Adapter RAID controller\n",
	    device_xname(&sc->sc_dev), string);

	scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
	DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
	    device_xname(&sc->sc_dev), string);

	aprint_normal("%s: %d ports, %dMB SDRAM, firmware <%s>\n",
	    device_xname(&sc->sc_dev), htole32(fwinfo.sata_ports),
	    htole32(fwinfo.sdram_size), string);

	/* save the number of max disks for future use */
	sc->sc_maxdisks = htole32(fwinfo.sata_ports);

	if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
		/* our frame layout must match the firmware's exactly */
		aprint_error("%s: unexpected request frame size (%d != %d)\n",
		    device_xname(&sc->sc_dev),
		    htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
		return 1;
	}

	sc->sc_req_count = htole32(fwinfo.queue_len);

	return 0;
}
676
677 #if NBIO > 0
678 static int
679 arc_bioctl(struct device *self, u_long cmd, void *addr)
680 {
681 struct arc_softc *sc = device_private(self);
682 int error = 0;
683
684 switch (cmd) {
685 case BIOCINQ:
686 error = arc_bio_inq(sc, (struct bioc_inq *)addr);
687 break;
688
689 case BIOCVOL:
690 error = arc_bio_vol(sc, (struct bioc_vol *)addr);
691 break;
692
693 case BIOCDISK:
694 error = arc_bio_disk_volume(sc, (struct bioc_disk *)addr);
695 break;
696
697 case BIOCDISK_NOVOL:
698 error = arc_bio_disk_novol(sc, (struct bioc_disk *)addr);
699 break;
700
701 case BIOCALARM:
702 error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
703 break;
704
705 case BIOCSETSTATE:
706 error = arc_bio_setstate(sc, (struct bioc_setstate *)addr);
707 break;
708
709 case BIOCVOLOPS:
710 error = arc_bio_volops(sc, (struct bioc_volops *)addr);
711 break;
712
713 default:
714 error = ENOTTY;
715 break;
716 }
717
718 return error;
719 }
720
721 static int
722 arc_fw_parse_status_code(struct arc_softc *sc, uint8_t *reply)
723 {
724 switch (*reply) {
725 case ARC_FW_CMD_RAIDINVAL:
726 printf("%s: firmware error (invalid raid set)\n",
727 device_xname(&sc->sc_dev));
728 return EINVAL;
729 case ARC_FW_CMD_VOLINVAL:
730 printf("%s: firmware error (invalid volume set)\n",
731 device_xname(&sc->sc_dev));
732 return EINVAL;
733 case ARC_FW_CMD_NORAID:
734 printf("%s: firmware error (unexistent raid set)\n",
735 device_xname(&sc->sc_dev));
736 return ENODEV;
737 case ARC_FW_CMD_NOVOLUME:
738 printf("%s: firmware error (unexistent volume set)\n",
739 device_xname(&sc->sc_dev));
740 return ENODEV;
741 case ARC_FW_CMD_NOPHYSDRV:
742 printf("%s: firmware error (unexistent physical drive)\n",
743 device_xname(&sc->sc_dev));
744 return ENODEV;
745 case ARC_FW_CMD_PARAM_ERR:
746 printf("%s: firmware error (parameter error)\n",
747 device_xname(&sc->sc_dev));
748 return EINVAL;
749 case ARC_FW_CMD_UNSUPPORTED:
750 printf("%s: firmware error (unsupported command)\n",
751 device_xname(&sc->sc_dev));
752 return EOPNOTSUPP;
753 case ARC_FW_CMD_DISKCFG_CHGD:
754 printf("%s: firmware error (disk configuration changed)\n",
755 device_xname(&sc->sc_dev));
756 return EINVAL;
757 case ARC_FW_CMD_PASS_INVAL:
758 printf("%s: firmware error (invalid password)\n",
759 device_xname(&sc->sc_dev));
760 return EINVAL;
761 case ARC_FW_CMD_NODISKSPACE:
762 printf("%s: firmware error (no disk space available)\n",
763 device_xname(&sc->sc_dev));
764 return EOPNOTSUPP;
765 case ARC_FW_CMD_CHECKSUM_ERR:
766 printf("%s: firmware error (checksum error)\n",
767 device_xname(&sc->sc_dev));
768 return EINVAL;
769 case ARC_FW_CMD_PASS_REQD:
770 printf("%s: firmware error (password required)\n",
771 device_xname(&sc->sc_dev));
772 return EPERM;
773 case ARC_FW_CMD_OK:
774 default:
775 return 0;
776 }
777 }
778
779 static int
780 arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
781 {
782 uint8_t request[2], reply[1];
783 size_t len;
784 int error = 0;
785
786 switch (ba->ba_opcode) {
787 case BIOC_SAENABLE:
788 case BIOC_SADISABLE:
789 request[0] = ARC_FW_SET_ALARM;
790 request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
791 ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
792 len = sizeof(request);
793
794 break;
795
796 case BIOC_SASILENCE:
797 request[0] = ARC_FW_MUTE_ALARM;
798 len = 1;
799
800 break;
801
802 case BIOC_GASTATUS:
803 /* system info is too big/ugly to deal with here */
804 return arc_bio_alarm_state(sc, ba);
805
806 default:
807 return EOPNOTSUPP;
808 }
809
810 error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
811 if (error != 0)
812 return error;
813
814 return arc_fw_parse_status_code(sc, &reply[0]);
815 }
816
817 static int
818 arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
819 {
820 struct arc_fw_sysinfo *sysinfo;
821 uint8_t request;
822 int error = 0;
823
824 sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
825
826 request = ARC_FW_SYSINFO;
827 error = arc_msgbuf(sc, &request, sizeof(request),
828 sysinfo, sizeof(struct arc_fw_sysinfo));
829
830 if (error != 0)
831 goto out;
832
833 ba->ba_status = sysinfo->alarm;
834
835 out:
836 kmem_free(sysinfo, sizeof(*sysinfo));
837 return error;
838 }
839
/*
 * BIOCVOLOPS handler: create or remove a raid set + volume set pair
 * through the firmware message interface, and keep the scsibus in
 * sync (probe on create, detach on remove).
 */
static int
arc_bio_volops(struct arc_softc *sc, struct bioc_volops *bc)
{
	/* to create a raid set */
	struct req_craidset {
		uint8_t		cmdcode;
		uint32_t	devmask;
		uint8_t		raidset_name[16];
	} __packed;

	/* to create a volume set */
	struct req_cvolset {
		uint8_t 	cmdcode;
		uint8_t 	raidset;
		uint8_t 	volset_name[16];
		uint64_t	capacity;
		uint8_t 	raidlevel;
		uint8_t 	stripe;
		uint8_t 	scsi_chan;
		uint8_t 	scsi_target;
		uint8_t 	scsi_lun;
		uint8_t 	tagqueue;
		uint8_t 	cache;
		uint8_t 	speed;
		uint8_t 	quick_init;
	} __packed;

	struct scsibus_softc *scsibus_sc = NULL;
	struct req_craidset req_craidset;
	struct req_cvolset req_cvolset;
	uint8_t request[2];
	uint8_t reply[1];
	int error = 0;

	switch (bc->bc_opcode) {
	case BIOC_VCREATE_VOLUME:
	    {
		/*
		 * Zero out the structs so that we use some defaults
		 * in raid and volume sets.
		 */
		memset(&req_craidset, 0, sizeof(req_craidset));
		memset(&req_cvolset, 0, sizeof(req_cvolset));

		/*
		 * Firstly we have to create the raid set and
		 * use the default name for all them.
		 */
		req_craidset.cmdcode = ARC_FW_CREATE_RAIDSET;
		req_craidset.devmask = bc->bc_devmask;
		error = arc_msgbuf(sc, &req_craidset, sizeof(req_craidset),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: create raidset%d failed\n",
			    device_xname(&sc->sc_dev), bc->bc_volid);
			return error;
		}

		/*
		 * At this point the raid set was created, so it's
		 * time to create the volume set.
		 */
		req_cvolset.cmdcode = ARC_FW_CREATE_VOLUME;
		req_cvolset.raidset = bc->bc_volid;
		/* bc_size is in blocks; the firmware wants bytes */
		req_cvolset.capacity = bc->bc_size * ARC_BLOCKSIZE;

		/*
		 * Set the RAID level.
		 */
		switch (bc->bc_level) {
		case 0:
		case 1:
			req_cvolset.raidlevel = bc->bc_level;
			break;
		case 3:
			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_3;
			break;
		case 5:
			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_5;
			break;
		case 6:
			req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_6;
			break;
		default:
			return EOPNOTSUPP;
		}

		/*
		 * Set the stripe size (encoded as log2(KB) - 2).
		 */
		switch (bc->bc_stripe) {
		case 4:
			req_cvolset.stripe = 0;
			break;
		case 8:
			req_cvolset.stripe = 1;
			break;
		case 16:
			req_cvolset.stripe = 2;
			break;
		case 32:
			req_cvolset.stripe = 3;
			break;
		case 64:
			req_cvolset.stripe = 4;
			break;
		case 128:
			req_cvolset.stripe = 5;
			break;
		default:
			req_cvolset.stripe = 4; /* by default 64K */
			break;
		}

		req_cvolset.scsi_chan = bc->bc_channel;
		req_cvolset.scsi_target = bc->bc_target;
		req_cvolset.scsi_lun = bc->bc_lun;
		req_cvolset.tagqueue = 1;	/* always enabled */
		req_cvolset.cache = 1;		/* always enabled */
		req_cvolset.speed = 4;		/* always max speed */

		error = arc_msgbuf(sc, &req_cvolset, sizeof(req_cvolset),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: create volumeset%d failed\n",
			    device_xname(&sc->sc_dev), bc->bc_volid);
			return error;
		}

		/*
		 * Do a rescan on the bus to attach the device associated
		 * with the new volume.
		 */
		scsibus_sc = device_private(sc->sc_scsibus_dv);
		(void)scsi_probe_bus(scsibus_sc, bc->bc_target, bc->bc_lun);

		break;
	    }
	case BIOC_VREMOVE_VOLUME:
	    {
		/*
		 * Remove the volume set specified in bc_volid.
		 */
		request[0] = ARC_FW_DELETE_VOLUME;
		request[1] = bc->bc_volid;
		error = arc_msgbuf(sc, request, sizeof(request),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: delete volumeset%d failed\n",
			    device_xname(&sc->sc_dev), bc->bc_volid);
			return error;
		}

		/*
		 * Detach the sd(4) device associated with the volume,
		 * but if there's an error don't make it a priority.
		 */
		error = scsipi_target_detach(&sc->sc_chan, bc->bc_target,
		    bc->bc_lun, 0);
		if (error)
			printf("%s: couldn't detach sd device for volume %d "
			    "at %u:%u.%u (error=%d)\n",
			    device_xname(&sc->sc_dev), bc->bc_volid,
			    bc->bc_channel, bc->bc_target, bc->bc_lun, error);

		/*
		 * and remove the raid set specified in bc_volid,
		 * we only care about volumes.
		 */
		request[0] = ARC_FW_DELETE_RAIDSET;
		request[1] = bc->bc_volid;
		error = arc_msgbuf(sc, request, sizeof(request),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		error = arc_fw_parse_status_code(sc, &reply[0]);
		if (error) {
			printf("%s: delete raidset%d failed\n",
			    device_xname(&sc->sc_dev), bc->bc_volid);
			return error;
		}

		break;
	    }
	default:
		return EOPNOTSUPP;
	}

	return error;
}
1043
/*
 * BIOCSETSTATE handler: add/remove hotspares and pass-through disks,
 * and start/stop a volume consistency check, via firmware messages.
 */
static int
arc_bio_setstate(struct arc_softc *sc, struct bioc_setstate *bs)
{
	/* for a hotspare disk */
	struct request_hs {
		uint8_t		cmdcode;
		uint32_t	devmask;
	} __packed;

	/* for a pass-through disk */
	struct request_pt {
		uint8_t		cmdcode;
		uint8_t		devid;
		uint8_t		scsi_chan;
		uint8_t		scsi_id;
		uint8_t		scsi_lun;
		uint8_t		tagged_queue;
		uint8_t		cache_mode;
		uint8_t		max_speed;
	} __packed;

	struct scsibus_softc *scsibus_sc = NULL;
	struct request_hs req_hs; /* to add/remove hotspare */
	struct request_pt req_pt; /* to add a pass-through */
	uint8_t req_gen[2];
	uint8_t reply[1];
	int error = 0;

	switch (bs->bs_status) {
	case BIOC_SSHOTSPARE:
	    {
		req_hs.cmdcode = ARC_FW_CREATE_HOTSPARE;
		/* the firmware wants a device bitmask, not an index */
		req_hs.devmask = (1 << bs->bs_target);
		goto hotspare;
	    }
	case BIOC_SSDELHOTSPARE:
	    {
		req_hs.cmdcode = ARC_FW_DELETE_HOTSPARE;
		req_hs.devmask = (1 << bs->bs_target);
		goto hotspare;
	    }
	case BIOC_SSPASSTHRU:
	    {
		req_pt.cmdcode = ARC_FW_CREATE_PASSTHRU;
		req_pt.devid = bs->bs_other_id; /* this wants device# */
		req_pt.scsi_chan = bs->bs_channel;
		req_pt.scsi_id = bs->bs_target;
		req_pt.scsi_lun = bs->bs_lun;
		req_pt.tagged_queue = 1;	/* always enabled */
		req_pt.cache_mode = 1;		/* always enabled */
		req_pt.max_speed = 4;		/* always max speed */

		error = arc_msgbuf(sc, &req_pt, sizeof(req_pt),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		/*
		 * Do a rescan on the bus to attach the new device
		 * associated with the pass-through disk.
		 */
		scsibus_sc = device_private(sc->sc_scsibus_dv);
		(void)scsi_probe_bus(scsibus_sc, bs->bs_target, bs->bs_lun);

		goto out;
	    }
	case BIOC_SSDELPASSTHRU:
	    {
		req_gen[0] = ARC_FW_DELETE_PASSTHRU;
		req_gen[1] = bs->bs_target;
		error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		/*
		 * Detach the sd device associated with this pass-through disk.
		 */
		error = scsipi_target_detach(&sc->sc_chan, bs->bs_target,
		    bs->bs_lun, 0);
		if (error)
			printf("%s: couldn't detach sd device for the "
			    "pass-through disk at %u:%u.%u (error=%d)\n",
			    device_xname(&sc->sc_dev),
			    bs->bs_channel, bs->bs_target, bs->bs_lun, error);

		goto out;
	    }
	case BIOC_SSCHECKSTART_VOL:
	    {
		req_gen[0] = ARC_FW_START_CHECKVOL;
		req_gen[1] = bs->bs_volid;
		error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
		    reply, sizeof(reply));
		if (error != 0)
			return error;

		goto out;
	    }
	case BIOC_SSCHECKSTOP_VOL:
	    {
		/* single-byte command, no volume argument needed */
		uint8_t req = ARC_FW_STOP_CHECKVOL;
		error = arc_msgbuf(sc, &req, 1, reply, sizeof(reply));
		if (error != 0)
			return error;

		goto out;
	    }
	default:
		return EOPNOTSUPP;
	}

hotspare:
	/* common tail for the hotspare create/delete requests */
	error = arc_msgbuf(sc, &req_hs, sizeof(req_hs),
	    reply, sizeof(reply));
	if (error != 0)
		return error;

out:
	return arc_fw_parse_status_code(sc, &reply[0]);
}
1165
1166 static int
1167 arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
1168 {
1169 uint8_t request[2];
1170 struct arc_fw_sysinfo *sysinfo;
1171 struct arc_fw_raidinfo *raidinfo;
1172 int maxraidset, nvols = 0, i;
1173 int error = 0;
1174
1175 sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
1176 raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1177
1178 request[0] = ARC_FW_SYSINFO;
1179 error = arc_msgbuf(sc, request, 1, sysinfo,
1180 sizeof(struct arc_fw_sysinfo));
1181 if (error != 0)
1182 goto out;
1183
1184 maxraidset = sysinfo->max_raid_set;
1185
1186 request[0] = ARC_FW_RAIDINFO;
1187 for (i = 0; i < maxraidset; i++) {
1188 request[1] = i;
1189 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1190 sizeof(struct arc_fw_raidinfo));
1191 if (error != 0)
1192 goto out;
1193
1194 if (raidinfo->volumes)
1195 nvols++;
1196 }
1197
1198 strlcpy(bi->bi_dev, device_xname(&sc->sc_dev), sizeof(bi->bi_dev));
1199 bi->bi_novol = nvols;
1200 bi->bi_nodisk = sc->sc_maxdisks;
1201
1202 out:
1203 kmem_free(raidinfo, sizeof(*raidinfo));
1204 kmem_free(sysinfo, sizeof(*sysinfo));
1205 return error;
1206 }
1207
1208 static int
1209 arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
1210 {
1211 uint8_t request[2];
1212 struct arc_fw_sysinfo *sysinfo;
1213 int error = 0;
1214 int maxvols, nvols = 0, i;
1215
1216 sysinfo = kmem_zalloc(sizeof(*sysinfo), KM_SLEEP);
1217
1218 request[0] = ARC_FW_SYSINFO;
1219 error = arc_msgbuf(sc, request, 1, sysinfo,
1220 sizeof(struct arc_fw_sysinfo));
1221 if (error != 0)
1222 goto out;
1223
1224 maxvols = sysinfo->max_volume_set;
1225
1226 request[0] = ARC_FW_VOLINFO;
1227 for (i = 0; i < maxvols; i++) {
1228 request[1] = i;
1229 error = arc_msgbuf(sc, request, sizeof(request), volinfo,
1230 sizeof(struct arc_fw_volinfo));
1231 if (error != 0)
1232 goto out;
1233
1234 if (volinfo->capacity == 0 && volinfo->capacity2 == 0)
1235 continue;
1236
1237 if (nvols == vol)
1238 break;
1239
1240 nvols++;
1241 }
1242
1243 if (nvols != vol ||
1244 (volinfo->capacity == 0 && volinfo->capacity2 == 0)) {
1245 error = ENODEV;
1246 goto out;
1247 }
1248
1249 out:
1250 kmem_free(sysinfo, sizeof(*sysinfo));
1251 return error;
1252 }
1253
1254 static int
1255 arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
1256 {
1257 struct arc_fw_volinfo *volinfo;
1258 uint64_t blocks;
1259 uint32_t status;
1260 int error = 0;
1261
1262 volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1263
1264 error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
1265 if (error != 0)
1266 goto out;
1267
1268 bv->bv_percent = -1;
1269 bv->bv_seconds = 0;
1270
1271 status = htole32(volinfo->volume_status);
1272 if (status == 0x0) {
1273 if (htole32(volinfo->fail_mask) == 0x0)
1274 bv->bv_status = BIOC_SVONLINE;
1275 else
1276 bv->bv_status = BIOC_SVDEGRADED;
1277 } else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
1278 bv->bv_status = BIOC_SVDEGRADED;
1279 } else if (status & ARC_FW_VOL_STATUS_FAILED) {
1280 bv->bv_status = BIOC_SVOFFLINE;
1281 } else if (status & ARC_FW_VOL_STATUS_INITTING) {
1282 bv->bv_status = BIOC_SVBUILDING;
1283 bv->bv_percent = htole32(volinfo->progress);
1284 } else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
1285 bv->bv_status = BIOC_SVREBUILD;
1286 bv->bv_percent = htole32(volinfo->progress);
1287 } else if (status & ARC_FW_VOL_STATUS_MIGRATING) {
1288 bv->bv_status = BIOC_SVMIGRATING;
1289 bv->bv_percent = htole32(volinfo->progress);
1290 } else if (status & ARC_FW_VOL_STATUS_CHECKING) {
1291 bv->bv_status = BIOC_SVCHECKING;
1292 bv->bv_percent = htole32(volinfo->progress);
1293 }
1294
1295 blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
1296 blocks += (uint64_t)htole32(volinfo->capacity);
1297 bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */
1298
1299 switch (volinfo->raid_level) {
1300 case ARC_FW_VOL_RAIDLEVEL_0:
1301 bv->bv_level = 0;
1302 break;
1303 case ARC_FW_VOL_RAIDLEVEL_1:
1304 bv->bv_level = 1;
1305 break;
1306 case ARC_FW_VOL_RAIDLEVEL_3:
1307 bv->bv_level = 3;
1308 break;
1309 case ARC_FW_VOL_RAIDLEVEL_5:
1310 bv->bv_level = 5;
1311 break;
1312 case ARC_FW_VOL_RAIDLEVEL_6:
1313 bv->bv_level = 6;
1314 break;
1315 case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
1316 bv->bv_level = BIOC_SVOL_PASSTHRU;
1317 break;
1318 default:
1319 bv->bv_level = -1;
1320 break;
1321 }
1322
1323 bv->bv_nodisk = volinfo->member_disks;
1324 bv->bv_stripe_size = volinfo->stripe_size / 2;
1325 snprintf(bv->bv_dev, sizeof(bv->bv_dev), "sd%d", bv->bv_volid);
1326 scsipi_strvis(bv->bv_vendor, sizeof(bv->bv_vendor), volinfo->set_name,
1327 sizeof(volinfo->set_name));
1328
1329 out:
1330 kmem_free(volinfo, sizeof(*volinfo));
1331 return error;
1332 }
1333
1334 static int
1335 arc_bio_disk_novol(struct arc_softc *sc, struct bioc_disk *bd)
1336 {
1337 struct arc_fw_diskinfo *diskinfo;
1338 uint8_t request[2];
1339 int error = 0;
1340
1341 diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1342
1343 if (bd->bd_diskid > sc->sc_maxdisks) {
1344 error = ENODEV;
1345 goto out;
1346 }
1347
1348 request[0] = ARC_FW_DISKINFO;
1349 request[1] = bd->bd_diskid;
1350 error = arc_msgbuf(sc, request, sizeof(request),
1351 diskinfo, sizeof(struct arc_fw_diskinfo));
1352 if (error != 0)
1353 return error;
1354
1355 /* skip disks with no capacity */
1356 if (htole32(diskinfo->capacity) == 0 &&
1357 htole32(diskinfo->capacity2) == 0)
1358 goto out;
1359
1360 bd->bd_disknovol = true;
1361 arc_bio_disk_filldata(sc, bd, diskinfo, bd->bd_diskid);
1362
1363 out:
1364 kmem_free(diskinfo, sizeof(*diskinfo));
1365 return error;
1366 }
1367
1368 static void
1369 arc_bio_disk_filldata(struct arc_softc *sc, struct bioc_disk *bd,
1370 struct arc_fw_diskinfo *diskinfo, int diskid)
1371 {
1372 uint64_t blocks;
1373 char model[81];
1374 char serial[41];
1375 char rev[17];
1376
1377 switch (htole32(diskinfo->device_state)) {
1378 case ARC_FW_DISK_PASSTHRU:
1379 bd->bd_status = BIOC_SDPASSTHRU;
1380 break;
1381 case ARC_FW_DISK_RAIDMEMBER:
1382 bd->bd_status = BIOC_SDONLINE;
1383 break;
1384 case ARC_FW_DISK_HOTSPARE:
1385 bd->bd_status = BIOC_SDHOTSPARE;
1386 break;
1387 case ARC_FW_DISK_UNUSED:
1388 bd->bd_status = BIOC_SDUNUSED;
1389 break;
1390 default:
1391 printf("%s: unknown disk device_state: 0x%x\n", __func__,
1392 htole32(diskinfo->device_state));
1393 bd->bd_status = BIOC_SDINVALID;
1394 return;
1395 }
1396
1397 blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
1398 blocks += (uint64_t)htole32(diskinfo->capacity);
1399 bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */
1400
1401 scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
1402 scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
1403 scsipi_strvis(rev, 17, diskinfo->firmware_rev,
1404 sizeof(diskinfo->firmware_rev));
1405
1406 snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
1407 strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));
1408
1409 #if 0
1410 bd->bd_channel = diskinfo->scsi_attr.channel;
1411 bd->bd_target = diskinfo->scsi_attr.target;
1412 bd->bd_lun = diskinfo->scsi_attr.lun;
1413 #endif
1414
1415 /*
1416 * the firwmare doesnt seem to fill scsi_attr in, so fake it with
1417 * the diskid.
1418 */
1419 bd->bd_channel = 0;
1420 bd->bd_target = diskid;
1421 bd->bd_lun = 0;
1422 }
1423
1424 static int
1425 arc_bio_disk_volume(struct arc_softc *sc, struct bioc_disk *bd)
1426 {
1427 uint8_t request[2];
1428 struct arc_fw_raidinfo *raidinfo;
1429 struct arc_fw_volinfo *volinfo;
1430 struct arc_fw_diskinfo *diskinfo;
1431 int error = 0;
1432
1433 volinfo = kmem_zalloc(sizeof(*volinfo), KM_SLEEP);
1434 raidinfo = kmem_zalloc(sizeof(*raidinfo), KM_SLEEP);
1435 diskinfo = kmem_zalloc(sizeof(*diskinfo), KM_SLEEP);
1436
1437 error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
1438 if (error != 0)
1439 goto out;
1440
1441 request[0] = ARC_FW_RAIDINFO;
1442 request[1] = volinfo->raid_set_number;
1443
1444 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1445 sizeof(struct arc_fw_raidinfo));
1446 if (error != 0)
1447 goto out;
1448
1449 if (bd->bd_diskid > raidinfo->member_devices) {
1450 error = ENODEV;
1451 goto out;
1452 }
1453
1454 request[0] = ARC_FW_DISKINFO;
1455 request[1] = raidinfo->device_array[bd->bd_diskid];
1456 error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
1457 sizeof(struct arc_fw_diskinfo));
1458 if (error != 0)
1459 goto out;
1460
1461 /* now fill our bio disk with data from the firmware */
1462 arc_bio_disk_filldata(sc, bd, diskinfo,
1463 raidinfo->device_array[bd->bd_diskid]);
1464
1465 out:
1466 kmem_free(raidinfo, sizeof(*raidinfo));
1467 kmem_free(volinfo, sizeof(*volinfo));
1468 kmem_free(diskinfo, sizeof(*diskinfo));
1469 return error;
1470 }
1471 #endif /* NBIO > 0 */
1472
/*
 * Compute the 8-bit checksum of a firmware message: the sum (mod 256)
 * of both bytes of the length followed by every payload byte.
 */
uint8_t
arc_msg_cksum(void *cmd, uint16_t len)
{
	const uint8_t *p = cmd;
	uint8_t sum;
	uint16_t off;

	/* seed with the high and low bytes of the length */
	sum = (uint8_t)(len >> 8) + (uint8_t)len;
	for (off = 0; off < len; off++)
		sum += p[off];

	return sum;
}
1486
1487
/*
 * Exchange a message with the card's firmware through the inbound and
 * outbound doorbell registers and the IOC read/write buffer windows.
 *
 * wptr/wbuflen is the command to send; rptr/rbuflen receives the reply.
 * Both directions are framed as: struct arc_fw_bufhdr, the payload,
 * then one trailing checksum byte (see arc_msg_cksum).  The frame is
 * moved in chunks of at most sizeof(rwbuf) bytes, one chunk per
 * doorbell handshake.  Returns 0 on success, EBUSY if another exchange
 * is pending, or EIO on a protocol error.  Sleeps via arc_wait(); the
 * controller lock is taken for the whole exchange.
 */
int
arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
    size_t rbuflen)
{
	uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
	uint8_t *wbuf, *rbuf;
	int wlen, wdone = 0, rlen, rdone = 0;
	struct arc_fw_bufhdr *bufhdr;
	uint32_t reg, rwlen;
	int error = 0;
#ifdef ARC_DEBUG
	int i;
#endif

	wbuf = rbuf = NULL;

	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
	    device_xname(&sc->sc_dev), wbuflen, rbuflen);

	/* framed lengths: header + payload + one trailing checksum byte */
	wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
	wbuf = kmem_alloc(wlen, KM_SLEEP);

	rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
	rbuf = kmem_alloc(rlen, KM_SLEEP);

	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
	    device_xname(&sc->sc_dev), wlen, rlen);

	/* build the outgoing frame: header, payload, checksum */
	bufhdr = (struct arc_fw_bufhdr *)wbuf;
	bufhdr->hdr = arc_fw_hdr;
	bufhdr->len = htole16(wbuflen);
	memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
	wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);

	arc_lock(sc);
	/* a non-zero outbound doorbell means an exchange is in flight */
	if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0) {
		error = EBUSY;
		goto out;
	}

	/* pretend the hw already said "ok to write" to prime the loop */
	reg = ARC_REG_OUTB_DOORBELL_READ_OK;

	do {
		if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
			memset(rwbuf, 0, sizeof(rwbuf));
			/*
			 * NOTE(review): '%' yields 0 (and would stall the
			 * exchange) when the remaining length is an exact
			 * multiple of sizeof(rwbuf); presumably the frame
			 * sizes used here never hit that case -- confirm.
			 * min(wlen - wdone, sizeof(rwbuf)) would be the
			 * obvious form.
			 */
			rwlen = (wlen - wdone) % sizeof(rwbuf);
			memcpy(rwbuf, &wbuf[wdone], rwlen);

#ifdef ARC_DEBUG
			if (arcdebug & ARC_D_DB) {
				printf("%s: write %d:",
				    device_xname(&sc->sc_dev), rwlen);
				for (i = 0; i < rwlen; i++)
					printf(" 0x%02x", rwbuf[i]);
				printf("\n");
			}
#endif

			/* copy the chunk to the hw */
			arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
			arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
			    sizeof(rwbuf));

			/* say we have a buffer for the hw */
			arc_write(sc, ARC_REG_INB_DOORBELL,
			    ARC_REG_INB_DOORBELL_WRITE_OK);

			wdone += rwlen;
		}

		/* sleep until the firmware rings the outbound doorbell */
		while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
			arc_wait(sc);

		/* ack whatever the firmware signalled */
		arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);

		DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
		    device_xname(&sc->sc_dev), reg);

		if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
			rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
			/* the chunk must fit our staging buffer */
			if (rwlen > sizeof(rwbuf)) {
				DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
				    device_xname(&sc->sc_dev));
				error = EIO;
				goto out;
			}

			arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
			    sizeof(rwbuf));

			/* tell the firmware we consumed its buffer */
			arc_write(sc, ARC_REG_INB_DOORBELL,
			    ARC_REG_INB_DOORBELL_READ_OK);

#ifdef ARC_DEBUG
			printf("%s: len: %d+%d=%d/%d\n",
			    device_xname(&sc->sc_dev),
			    rwlen, rdone, rwlen + rdone, rlen);
			if (arcdebug & ARC_D_DB) {
				printf("%s: read:",
				    device_xname(&sc->sc_dev));
				for (i = 0; i < rwlen; i++)
					printf(" 0x%02x", rwbuf[i]);
				printf("\n");
			}
#endif

			/* the firmware must not overrun our reply frame */
			if ((rdone + rwlen) > rlen) {
				DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
				    device_xname(&sc->sc_dev));
				error = EIO;
				goto out;
			}

			memcpy(&rbuf[rdone], rwbuf, rwlen);
			rdone += rwlen;
		}
	} while (rdone != rlen);

	/* validate the reply frame: header magic and declared length */
	bufhdr = (struct arc_fw_bufhdr *)rbuf;
	if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
	    bufhdr->len != htole16(rbuflen)) {
		DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
		    device_xname(&sc->sc_dev));
		error = EIO;
		goto out;
	}

	memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);

	/* verify the trailing checksum byte over the reply payload */
	if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
		DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
		    device_xname(&sc->sc_dev));
		error = EIO;
		goto out;
	}

out:
	arc_unlock(sc);
	kmem_free(wbuf, wlen);
	kmem_free(rbuf, rlen);

	return error;
}
1631
/*
 * Take exclusive ownership of the firmware message interface: grab the
 * softc rwlock, then the spin mutex, then write the interrupt mask so
 * that only the post queue source stays enabled (doorbell traffic is
 * polled via arc_wait() while sc_talking is set).  Paired with
 * arc_unlock().
 */
void
arc_lock(struct arc_softc *sc)
{
	rw_enter(&sc->sc_rwlock, RW_WRITER);
	mutex_spin_enter(&sc->sc_mutex);
	arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
	sc->sc_talking = 1;
}
1640
/*
 * Drop ownership taken by arc_lock(): re-enable the doorbell interrupt
 * alongside the post queue, clear sc_talking, and release the spin
 * mutex and the rwlock in reverse order of acquisition.
 */
void
arc_unlock(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	sc->sc_talking = 0;
	mutex_spin_exit(&sc->sc_mutex);
	rw_exit(&sc->sc_rwlock);
}
1652
/*
 * Sleep until a doorbell interrupt wakes us via the condvar, for at
 * most one second.  The doorbell source is unmasked for the duration
 * of the wait; on timeout it is masked again (same mask value as
 * arc_lock()).  Must be called with sc_mutex held.
 */
void
arc_wait(struct arc_softc *sc)
{
	KASSERT(mutex_owned(&sc->sc_mutex));

	arc_write(sc, ARC_REG_INTRMASK,
	    ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
	if (cv_timedwait(&sc->sc_condvar, &sc->sc_mutex, hz) == EWOULDBLOCK)
		arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
}
1663
1664 #if NBIO > 0
/*
 * Kthread entry point: query the firmware for volumes and member
 * disks, attach one ENVSYS_DRIVE sensor per non-passthrough volume
 * plus one per member disk, then register the envsys driver.  The
 * thread always terminates via kthread_exit(), on success or failure.
 */
static void
arc_create_sensors(void *arg)
{
	struct arc_softc *sc = arg;
	struct bioc_inq bi;
	struct bioc_vol bv;
	int i, j;
	size_t slen, count = 0;

	memset(&bi, 0, sizeof(bi));
	if (arc_bio_inq(sc, &bi) != 0) {
		aprint_error("%s: unable to query firmware for sensor info\n",
		    device_xname(&sc->sc_dev));
		kthread_exit(0);
	}

	/* There's no point to continue if there are no volumes */
	if (!bi.bi_novol)
		kthread_exit(0);

	/* First pass: count the sensors needed (volumes + their disks) */
	for (i = 0; i < bi.bi_novol; i++) {
		memset(&bv, 0, sizeof(bv));
		bv.bv_volid = i;
		if (arc_bio_vol(sc, &bv) != 0)
			kthread_exit(0);

		/* Skip passthrough volumes */
		if (bv.bv_level == BIOC_SVOL_PASSTHRU)
			continue;

		/* new volume found */
		sc->sc_nsensors++;
		/* new disk in a volume found */
		sc->sc_nsensors+= bv.bv_nodisk;
	}

	sc->sc_sme = sysmon_envsys_create();
	slen = sizeof(envsys_data_t) * sc->sc_nsensors;
	sc->sc_sensors = kmem_zalloc(slen, KM_SLEEP);

	/* Attach sensors for volumes and disks */
	for (i = 0; i < bi.bi_novol; i++) {
		memset(&bv, 0, sizeof(bv));
		bv.bv_volid = i;
		if (arc_bio_vol(sc, &bv) != 0)
			goto bad;

		/*
		 * NOTE(review): these fields are set before the passthrough
		 * check below; for a passthrough volume the slot is not
		 * consumed (count unchanged), so the next volume simply
		 * overwrites them.  Harmless, but the check could go first.
		 */
		sc->sc_sensors[count].units = ENVSYS_DRIVE;
		sc->sc_sensors[count].monitor = true;
		sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;

		/* Skip passthrough volumes */
		if (bv.bv_level == BIOC_SVOL_PASSTHRU)
			continue;

		/* value_max carries the volume id for arc_refresh_sensors */
		snprintf(sc->sc_sensors[count].desc,
		    sizeof(sc->sc_sensors[count].desc),
		    "RAID %d volume%d (%s)", bv.bv_level, i, bv.bv_dev);
		sc->sc_sensors[count].value_max = i;

		if (sysmon_envsys_sensor_attach(sc->sc_sme,
		    &sc->sc_sensors[count]))
			goto bad;

		count++;

		/* Attach disk sensors for this volume */
		for (j = 0; j < bv.bv_nodisk; j++) {
			sc->sc_sensors[count].units = ENVSYS_DRIVE;
			sc->sc_sensors[count].monitor = true;
			sc->sc_sensors[count].flags = ENVSYS_FMONSTCHANGED;

			/* non-zero value_avg (diskid + 10) marks a disk sensor */
			snprintf(sc->sc_sensors[count].desc,
			    sizeof(sc->sc_sensors[count].desc),
			    "disk%d volume%d (%s)", j, i, bv.bv_dev);
			sc->sc_sensors[count].value_max = i;
			sc->sc_sensors[count].value_avg = j + 10;

			if (sysmon_envsys_sensor_attach(sc->sc_sme,
			    &sc->sc_sensors[count]))
				goto bad;

			count++;
		}
	}

	/*
	 * Register our envsys driver with the framework now that the
	 * sensors were all attached.
	 */
	sc->sc_sme->sme_name = device_xname(&sc->sc_dev);
	sc->sc_sme->sme_cookie = sc;
	sc->sc_sme->sme_refresh = arc_refresh_sensors;

	if (sysmon_envsys_register(sc->sc_sme)) {
		aprint_debug("%s: unable to register with sysmon\n",
		    device_xname(&sc->sc_dev));
		goto bad;
	}
	kthread_exit(0);

bad:
	/* tear down everything built so far and end the thread */
	kmem_free(sc->sc_sensors, slen);
	sysmon_envsys_destroy(sc->sc_sme);
	kthread_exit(0);
}
1771
1772 static void
1773 arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
1774 {
1775 struct arc_softc *sc = sme->sme_cookie;
1776 struct bioc_vol bv;
1777 struct bioc_disk bd;
1778
1779 /* sanity check */
1780 if (edata->units != ENVSYS_DRIVE)
1781 return;
1782
1783 memset(&bv, 0, sizeof(bv));
1784 bv.bv_volid = edata->value_max;
1785
1786 if (arc_bio_vol(sc, &bv)) {
1787 edata->value_cur = ENVSYS_DRIVE_EMPTY;
1788 edata->state = ENVSYS_SINVALID;
1789 return;
1790 }
1791
1792 /* Current sensor is handling a disk volume member */
1793 if (edata->value_avg) {
1794 memset(&bd, 0, sizeof(bd));
1795 bd.bd_volid = edata->value_max;
1796 bd.bd_diskid = edata->value_avg - 10;
1797
1798 if (arc_bio_disk_volume(sc, &bd)) {
1799 edata->value_cur = ENVSYS_DRIVE_EMPTY;
1800 edata->state = ENVSYS_SINVALID;
1801 return;
1802 }
1803
1804 switch (bd.bd_status) {
1805 case BIOC_SDONLINE:
1806 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1807 edata->state = ENVSYS_SVALID;
1808 break;
1809 case BIOC_SDOFFLINE:
1810 edata->value_cur = ENVSYS_DRIVE_OFFLINE;
1811 edata->state = ENVSYS_SCRITICAL;
1812 break;
1813 default:
1814 edata->value_cur = ENVSYS_DRIVE_EMPTY;
1815 edata->state = ENVSYS_SCRITICAL;
1816 break;
1817 }
1818
1819 return;
1820 }
1821
1822 /* Current sensor is handling a volume */
1823 switch (bv.bv_status) {
1824 case BIOC_SVOFFLINE:
1825 edata->value_cur = ENVSYS_DRIVE_FAIL;
1826 edata->state = ENVSYS_SCRITICAL;
1827 break;
1828 case BIOC_SVDEGRADED:
1829 edata->value_cur = ENVSYS_DRIVE_PFAIL;
1830 edata->state = ENVSYS_SCRITICAL;
1831 break;
1832 case BIOC_SVBUILDING:
1833 edata->value_cur = ENVSYS_DRIVE_BUILD;
1834 edata->state = ENVSYS_SVALID;
1835 break;
1836 case BIOC_SVMIGRATING:
1837 edata->value_cur = ENVSYS_DRIVE_MIGRATING;
1838 edata->state = ENVSYS_SVALID;
1839 break;
1840 case BIOC_SVCHECKING:
1841 edata->value_cur = ENVSYS_DRIVE_CHECK;
1842 edata->state = ENVSYS_SVALID;
1843 break;
1844 case BIOC_SVSCRUB:
1845 case BIOC_SVONLINE:
1846 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1847 edata->state = ENVSYS_SVALID;
1848 break;
1849 case BIOC_SVINVALID:
1850 /* FALLTHROUGH */
1851 default:
1852 edata->value_cur = ENVSYS_DRIVE_EMPTY; /* unknown state */
1853 edata->state = ENVSYS_SINVALID;
1854 break;
1855 }
1856 }
1857 #endif /* NBIO > 0 */
1858
/*
 * Read a 32-bit device register at offset r.  A read barrier is issued
 * first so this access is not reordered with earlier device accesses.
 */
uint32_t
arc_read(struct arc_softc *sc, bus_size_t r)
{
	uint32_t v;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
	    device_xname(&sc->sc_dev), r, v);

	return v;
}
1873
/*
 * Copy len bytes from the device register window at r into buf using
 * 32-bit reads, preceded by a read barrier.  len >> 2 implies len is
 * expected to be a multiple of 4 (any tail bytes are not transferred).
 */
void
arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (uint32_t *)buf, len >> 2);
}
1882
/*
 * Write a 32-bit device register at offset r, followed by a write
 * barrier so the store is flushed before any subsequent device access.
 */
void
arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
	    device_xname(&sc->sc_dev), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1893
/*
 * Copy len bytes from buf into the device register window at r using
 * 32-bit writes, followed by a write barrier.  As with arc_read_region,
 * len is expected to be a multiple of 4.
 */
void
arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
	    (const uint32_t *)buf, len >> 2);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
	    BUS_SPACE_BARRIER_WRITE);
}
1902
1903 int
1904 arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1905 uint32_t target)
1906 {
1907 int i;
1908
1909 DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
1910 device_xname(&sc->sc_dev), r, mask, target);
1911
1912 for (i = 0; i < 10000; i++) {
1913 if ((arc_read(sc, r) & mask) == target)
1914 return 0;
1915 delay(1000);
1916 }
1917
1918 return 1;
1919 }
1920
1921 int
1922 arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1923 uint32_t target)
1924 {
1925 int i;
1926
1927 DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
1928 device_xname(&sc->sc_dev), r, mask, target);
1929
1930 for (i = 0; i < 10000; i++) {
1931 if ((arc_read(sc, r) & mask) != target)
1932 return 0;
1933 delay(1000);
1934 }
1935
1936 return 1;
1937 }
1938
1939 int
1940 arc_msg0(struct arc_softc *sc, uint32_t m)
1941 {
1942 /* post message */
1943 arc_write(sc, ARC_REG_INB_MSG0, m);
1944 /* wait for the fw to do it */
1945 if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
1946 ARC_REG_INTRSTAT_MSG0) != 0)
1947 return 1;
1948
1949 /* ack it */
1950 arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);
1951
1952 return 0;
1953 }
1954
/*
 * Allocate size bytes of zeroed, DMA-safe memory in a single segment,
 * mapped into kernel VA and loaded into a DMA map.  Non-sleeping
 * throughout (KM_NOSLEEP / BUS_DMA_NOWAIT).  Returns the bookkeeping
 * structure, or NULL on failure.  The unwind labels release resources
 * in exact reverse order of acquisition; freed by arc_dmamem_free().
 */
struct arc_dmamem *
arc_dmamem_alloc(struct arc_softc *sc, size_t size)
{
	struct arc_dmamem *adm;
	int nsegs;

	adm = kmem_zalloc(sizeof(*adm), KM_NOSLEEP);
	if (adm == NULL)
		return NULL;

	adm->adm_size = size;

	/* one segment only: the whole region must be contiguous */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
		goto admfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
	    &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(adm->adm_kva, 0, size);

	return adm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
admfree:
	kmem_free(adm, sizeof(*adm));

	return NULL;
}
1998
/*
 * Release everything arc_dmamem_alloc() acquired, in reverse order:
 * unload the DMA map, unmap the KVA, free the segment, destroy the
 * map, and finally free the bookkeeping structure itself.
 */
void
arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
{
	bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
	bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
	bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
	kmem_free(adm, sizeof(*adm));
}
2008
/*
 * Allocate the ccb array and one DMA-able region of command frames
 * (ARC_MAX_IOCMDLEN bytes per request), wire each ccb to its frame and
 * queue them all on the free list.  Returns 0 on success, 1 on failure
 * after tearing down everything allocated so far.
 */
int
arc_alloc_ccbs(struct arc_softc *sc)
{
	struct arc_ccb *ccb;
	uint8_t *cmd;
	int i;
	size_t ccbslen;

	TAILQ_INIT(&sc->sc_ccb_free);

	ccbslen = sizeof(struct arc_ccb) * sc->sc_req_count;
	sc->sc_ccbs = kmem_zalloc(ccbslen, KM_SLEEP);

	/* one command frame per outstanding request, in one DMA region */
	sc->sc_requests = arc_dmamem_alloc(sc,
	    ARC_MAX_IOCMDLEN * sc->sc_req_count);
	if (sc->sc_requests == NULL) {
		aprint_error("%s: unable to allocate ccb dmamem\n",
		    device_xname(&sc->sc_dev));
		goto free_ccbs;
	}
	cmd = ARC_DMA_KVA(sc->sc_requests);

	for (i = 0; i < sc->sc_req_count; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
		    MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
			aprint_error("%s: unable to create dmamap for ccb %d\n",
			    device_xname(&sc->sc_dev), i);
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		ccb->ccb_id = i;
		ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;

		/* kva pointer and bus address of this ccb's command frame */
		ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
		ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;

		arc_put_ccb(sc, ccb);
	}

	return 0;

free_maps:
	/* unwind: destroy the dmamaps created so far, then the dmamem */
	while ((ccb = arc_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	arc_dmamem_free(sc, sc->sc_requests);

free_ccbs:
	kmem_free(sc->sc_ccbs, ccbslen);

	return 1;
}
2064
2065 struct arc_ccb *
2066 arc_get_ccb(struct arc_softc *sc)
2067 {
2068 struct arc_ccb *ccb;
2069
2070 ccb = TAILQ_FIRST(&sc->sc_ccb_free);
2071 if (ccb != NULL)
2072 TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
2073
2074 return ccb;
2075 }
2076
2077 void
2078 arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
2079 {
2080 ccb->ccb_xs = NULL;
2081 memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
2082 TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
2083 }
2084