1 /* $NetBSD: arcmsr.c,v 1.9 2008/01/02 23:48:05 xtraeme Exp $ */
2 /* $OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */
3
4 /*
5 * Copyright (c) 2007 Juan Romero Pardines <xtraeme (at) netbsd.org>
6 * Copyright (c) 2006 David Gwynne <dlg (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include "bio.h"
22
23 #include <sys/cdefs.h>
24 __KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.9 2008/01/02 23:48:05 xtraeme Exp $");
25
26 #include <sys/param.h>
27 #include <sys/buf.h>
28 #include <sys/kernel.h>
29 #include <sys/malloc.h>
30 #include <sys/device.h>
31 #include <sys/kmem.h>
32 #include <sys/kthread.h>
33 #include <sys/mutex.h>
34 #include <sys/condvar.h>
35 #include <sys/rwlock.h>
36
37 #if NBIO > 0
38 #include <sys/ioctl.h>
39 #include <dev/biovar.h>
40 #endif
41
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 #include <dev/pci/pcidevs.h>
45
46 #include <dev/scsipi/scsipi_all.h>
47 #include <dev/scsipi/scsi_all.h>
48 #include <dev/scsipi/scsiconf.h>
49
50 #include <dev/sysmon/sysmonvar.h>
51
52 #include <sys/bus.h>
53
54 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
55
56 #include <dev/pci/arcmsrvar.h>
57
58 /* #define ARC_DEBUG */
59 #ifdef ARC_DEBUG
60 #define ARC_D_INIT (1<<0)
61 #define ARC_D_RW (1<<1)
62 #define ARC_D_DB (1<<2)
63
64 int arcdebug = 0;
65
66 #define DPRINTF(p...) do { if (arcdebug) printf(p); } while (0)
67 #define DNPRINTF(n, p...) do { if ((n) & arcdebug) printf(p); } while (0)
68
69 #else
70 #define DPRINTF(p...) /* p */
71 #define DNPRINTF(n, p...) /* n, p */
72 #endif
73
74 /*
75 * the fw header must always equal this.
76 */
77 static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };
78
79 /*
80 * autoconf(9) glue.
81 */
82 static int arc_match(device_t, struct cfdata *, void *);
83 static void arc_attach(device_t, device_t, void *);
84 static int arc_detach(device_t, int);
85 static void arc_shutdown(void *);
86 static int arc_intr(void *);
87 static void arc_minphys(struct buf *);
88
89 CFATTACH_DECL(arcmsr, sizeof(struct arc_softc),
90 arc_match, arc_attach, arc_detach, NULL);
91
92 /*
93 * bio(4) and sysmon_envsys(9) glue.
94 */
95 #if NBIO > 0
96 static int arc_bioctl(struct device *, u_long, void *);
97 static int arc_bio_inq(struct arc_softc *, struct bioc_inq *);
98 static int arc_bio_vol(struct arc_softc *, struct bioc_vol *);
99 static int arc_bio_disk_volume(struct arc_softc *, struct bioc_disk *);
100 static int arc_bio_disk_novol(struct arc_softc *, struct bioc_disk *);
101 static void arc_bio_disk_filldata(struct arc_softc *, struct bioc_disk *,
102 struct arc_fw_diskinfo *, int);
103 static int arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
104 static int arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
105 static int arc_bio_getvol(struct arc_softc *, int,
106 struct arc_fw_volinfo *);
107 static int arc_bio_setstate(struct arc_softc *, struct bioc_setstate *);
108 static int arc_bio_volops(struct arc_softc *, struct bioc_volops *);
109 static void arc_create_sensors(void *);
110 static void arc_refresh_sensors(struct sysmon_envsys *, envsys_data_t *);
111 static int arc_fw_parse_status_code(struct arc_softc *, uint8_t *);
112 #endif
113
114 static int
115 arc_match(device_t parent, struct cfdata *match, void *aux)
116 {
117 struct pci_attach_args *pa = aux;
118
119 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
120 switch (PCI_PRODUCT(pa->pa_id)) {
121 case PCI_PRODUCT_ARECA_ARC1110:
122 case PCI_PRODUCT_ARECA_ARC1120:
123 case PCI_PRODUCT_ARECA_ARC1130:
124 case PCI_PRODUCT_ARECA_ARC1160:
125 case PCI_PRODUCT_ARECA_ARC1170:
126 case PCI_PRODUCT_ARECA_ARC1200:
127 case PCI_PRODUCT_ARECA_ARC1202:
128 case PCI_PRODUCT_ARECA_ARC1210:
129 case PCI_PRODUCT_ARECA_ARC1220:
130 case PCI_PRODUCT_ARECA_ARC1230:
131 case PCI_PRODUCT_ARECA_ARC1260:
132 case PCI_PRODUCT_ARECA_ARC1270:
133 case PCI_PRODUCT_ARECA_ARC1280:
134 case PCI_PRODUCT_ARECA_ARC1380:
135 case PCI_PRODUCT_ARECA_ARC1381:
136 case PCI_PRODUCT_ARECA_ARC1680:
137 case PCI_PRODUCT_ARECA_ARC1681:
138 return 1;
139 default:
140 break;
141 }
142 }
143
144 return 0;
145 }
146
147 static void
148 arc_attach(device_t parent, device_t self, void *aux)
149 {
150 struct arc_softc *sc = device_private(self);
151 struct pci_attach_args *pa = aux;
152 struct scsipi_adapter *adapt = &sc->sc_adapter;
153 struct scsipi_channel *chan = &sc->sc_chan;
154
155 sc->sc_talking = 0;
156 rw_init(&sc->sc_rwlock);
157 mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);
158 cv_init(&sc->sc_condvar, "arcdb");
159
160 if (arc_map_pci_resources(sc, pa) != 0) {
161 /* error message printed by arc_map_pci_resources */
162 return;
163 }
164
165 if (arc_query_firmware(sc) != 0) {
166 /* error message printed by arc_query_firmware */
167 goto unmap_pci;
168 }
169
170 if (arc_alloc_ccbs(sc) != 0) {
171 /* error message printed by arc_alloc_ccbs */
172 goto unmap_pci;
173 }
174
175 sc->sc_shutdownhook = shutdownhook_establish(arc_shutdown, sc);
176 if (sc->sc_shutdownhook == NULL)
177 		panic("unable to establish arc shutdown hook");
178
179 memset(adapt, 0, sizeof(*adapt));
180 adapt->adapt_dev = self;
181 adapt->adapt_nchannels = 1;
182 adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
183 adapt->adapt_max_periph = adapt->adapt_openings;
184 adapt->adapt_minphys = arc_minphys;
185 adapt->adapt_request = arc_scsi_cmd;
186
187 memset(chan, 0, sizeof(*chan));
188 chan->chan_adapter = adapt;
189 chan->chan_bustype = &scsi_bustype;
190 chan->chan_nluns = ARC_MAX_LUN;
191 chan->chan_ntargets = ARC_MAX_TARGET;
192 chan->chan_id = ARC_MAX_TARGET;
193 chan->chan_channel = 0;
194 chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
195
196 /*
197 	 * Save the device_t returned, because we may need to attach
198 	 * devices later via the management interface.
199 */
200 sc->sc_scsibus_dv = config_found(self, &sc->sc_chan, scsiprint);
201
202 /* enable interrupts */
203 arc_write(sc, ARC_REG_INTRMASK,
204 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));
205
206 #if NBIO > 0
207 /*
208 * Register the driver to bio(4) and setup the sensors.
209 */
210 if (bio_register(self, arc_bioctl) != 0)
211 panic("%s: bioctl registration failed\n", device_xname(self));
212
213 /*
214 * you need to talk to the firmware to get volume info. our firmware
215 * interface relies on being able to sleep, so we need to use a thread
216 * to do the work.
217 */
218 if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
219 arc_create_sensors, sc, &sc->sc_lwp, "arcmsr_sensors") != 0)
220 panic("%s: unable to create a kernel thread for sensors\n",
221 device_xname(self));
222 #endif
223
224 return;
225
226 unmap_pci:
227 arc_unmap_pci_resources(sc);
228 }
229
230 static int
231 arc_detach(device_t self, int flags)
232 {
233 struct arc_softc *sc = device_private(self);
234
235 shutdownhook_disestablish(sc->sc_shutdownhook);
236
237 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
238 aprint_error("%s: timeout waiting to stop bg rebuild\n",
239 device_xname(&sc->sc_dev));
240
241 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
242 aprint_error("%s: timeout waiting to flush cache\n",
243 device_xname(&sc->sc_dev));
244
245 return 0;
246 }
247
248 static void
249 arc_shutdown(void *xsc)
250 {
251 struct arc_softc *sc = xsc;
252
253 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
254 aprint_error("%s: timeout waiting to stop bg rebuild\n",
255 device_xname(&sc->sc_dev));
256
257 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
258 aprint_error("%s: timeout waiting to flush cache\n",
259 device_xname(&sc->sc_dev));
260 }
261
262 static void
263 arc_minphys(struct buf *bp)
264 {
265 if (bp->b_bcount > MAXPHYS)
266 bp->b_bcount = MAXPHYS;
267 minphys(bp);
268 }
269
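/*
 * Interrupt handler.  The interrupt status is read and acknowledged
 * first; doorbell interrupts are handed to whoever is talking to the
 * firmware through arc_msgbuf() (see sc_talking and arc_wait()), or
 * simply acknowledged and dropped otherwise.  The reply post queue is
 * then drained with arc_pop(): each reply address is translated back
 * to its arc_io_cmd in the request DMA area, the ccb is looked up from
 * the command's context field, and the command is completed.
 */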
270 static int
271 arc_intr(void *arg)
272 {
273 struct arc_softc *sc = arg;
274 struct arc_ccb *ccb = NULL;
275 char *kva = ARC_DMA_KVA(sc->sc_requests);
276 struct arc_io_cmd *cmd;
277 uint32_t reg, intrstat;
278
279 mutex_spin_enter(&sc->sc_mutex);
280 intrstat = arc_read(sc, ARC_REG_INTRSTAT);
281 if (intrstat == 0x0) {
282 mutex_spin_exit(&sc->sc_mutex);
283 return 0;
284 }
285
286 intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
287 arc_write(sc, ARC_REG_INTRSTAT, intrstat);
288
289 if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
290 if (sc->sc_talking) {
291 arc_write(sc, ARC_REG_INTRMASK,
292 ~ARC_REG_INTRMASK_POSTQUEUE);
293 cv_broadcast(&sc->sc_condvar);
294 } else {
295 /* otherwise drop it */
296 reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
297 arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
298 if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
299 arc_write(sc, ARC_REG_INB_DOORBELL,
300 ARC_REG_INB_DOORBELL_READ_OK);
301 }
302 }
303 mutex_spin_exit(&sc->sc_mutex);
304
305 while ((reg = arc_pop(sc)) != 0xffffffff) {
306 cmd = (struct arc_io_cmd *)(kva +
307 ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
308 (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
309 ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
310
311 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
312 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
313 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
314
315 arc_scsi_cmd_done(sc, ccb, reg);
316 }
317
318
319 return 1;
320 }
321
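/*
 * scsipi request entry point.  Only ADAPTER_REQ_RUN_XFER is handled:
 * a ccb is taken from the free list, the data buffer is mapped by
 * arc_load_xs(), and the arc_msg_scsicmd (target, lun, CDB, SGL) is
 * built in the ccb's slot of the request DMA area.  The slot's bus
 * address (pre-shifted in ccb_cmd_post) is then written to the post
 * queue; polled commands (XS_CTL_POLL) are reaped with arc_complete().
 */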
322 void
323 arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
324 {
325 struct scsipi_periph *periph;
326 struct scsipi_xfer *xs;
327 struct scsipi_adapter *adapt = chan->chan_adapter;
328 struct arc_softc *sc = device_private(adapt->adapt_dev);
329 struct arc_ccb *ccb;
330 struct arc_msg_scsicmd *cmd;
331 uint32_t reg;
332 uint8_t target;
333
334 switch (req) {
335 case ADAPTER_REQ_GROW_RESOURCES:
336 /* Not supported. */
337 return;
338 case ADAPTER_REQ_SET_XFER_MODE:
339 /* Not supported. */
340 return;
341 case ADAPTER_REQ_RUN_XFER:
342 break;
343 }
344
345 mutex_spin_enter(&sc->sc_mutex);
346
347 xs = arg;
348 periph = xs->xs_periph;
349 target = periph->periph_target;
350
351 if (xs->cmdlen > ARC_MSG_CDBLEN) {
352 memset(&xs->sense, 0, sizeof(xs->sense));
353 xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
354 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
355 xs->sense.scsi_sense.asc = 0x20;
356 xs->error = XS_SENSE;
357 xs->status = SCSI_CHECK;
358 mutex_spin_exit(&sc->sc_mutex);
359 scsipi_done(xs);
360 return;
361 }
362
363 ccb = arc_get_ccb(sc);
364 if (ccb == NULL) {
365 xs->error = XS_RESOURCE_SHORTAGE;
366 mutex_spin_exit(&sc->sc_mutex);
367 scsipi_done(xs);
368 return;
369 }
370
371 ccb->ccb_xs = xs;
372
373 if (arc_load_xs(ccb) != 0) {
374 xs->error = XS_DRIVER_STUFFUP;
375 arc_put_ccb(sc, ccb);
376 mutex_spin_exit(&sc->sc_mutex);
377 scsipi_done(xs);
378 return;
379 }
380
381 cmd = &ccb->ccb_cmd->cmd;
382 reg = ccb->ccb_cmd_post;
383
384 /* bus is always 0 */
385 cmd->target = target;
386 cmd->lun = periph->periph_lun;
387 cmd->function = 1; /* XXX magic number */
388
389 cmd->cdb_len = xs->cmdlen;
390 cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
391 if (xs->xs_control & XS_CTL_DATA_OUT)
392 cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
393 if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
394 cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
395 reg |= ARC_REG_POST_QUEUE_BIGFRAME;
396 }
397
398 cmd->context = htole32(ccb->ccb_id);
399 cmd->data_len = htole32(xs->datalen);
400
401 memcpy(cmd->cdb, xs->cmd, xs->cmdlen);
402
403 /* we've built the command, let's put it on the hw */
404 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
405 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
406 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
407
408 arc_push(sc, reg);
409 if (xs->xs_control & XS_CTL_POLL) {
410 if (arc_complete(sc, ccb, xs->timeout) != 0) {
411 xs->error = XS_DRIVER_STUFFUP;
412 mutex_spin_exit(&sc->sc_mutex);
413 scsipi_done(xs);
414 return;
415 }
416 }
417
418 mutex_spin_exit(&sc->sc_mutex);
419 }
420
421 int
422 arc_load_xs(struct arc_ccb *ccb)
423 {
424 struct arc_softc *sc = ccb->ccb_sc;
425 struct scsipi_xfer *xs = ccb->ccb_xs;
426 bus_dmamap_t dmap = ccb->ccb_dmamap;
427 struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
428 uint64_t addr;
429 int i, error;
430
431 if (xs->datalen == 0)
432 return 0;
433
434 error = bus_dmamap_load(sc->sc_dmat, dmap,
435 xs->data, xs->datalen, NULL,
436 (xs->xs_control & XS_CTL_NOSLEEP) ?
437 BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
438 if (error != 0) {
439 aprint_error("%s: error %d loading dmamap\n",
440 device_xname(&sc->sc_dev), error);
441 return 1;
442 }
443
444 for (i = 0; i < dmap->dm_nsegs; i++) {
445 sge = &sgl[i];
446
447 sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
448 addr = dmap->dm_segs[i].ds_addr;
449 sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
450 sge->sg_lo_addr = htole32((uint32_t)addr);
451 }
452
453 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
454 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
455 BUS_DMASYNC_PREWRITE);
456
457 return 0;
458 }
459
460 void
461 arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
462 {
463 struct scsipi_xfer *xs = ccb->ccb_xs;
464 struct arc_msg_scsicmd *cmd;
465
466 if (xs->datalen != 0) {
467 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
468 ccb->ccb_dmamap->dm_mapsize,
469 (xs->xs_control & XS_CTL_DATA_IN) ?
470 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
471 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
472 }
473
474 /* timeout_del */
475 xs->status |= XS_STS_DONE;
476
477 if (reg & ARC_REG_REPLY_QUEUE_ERR) {
478 cmd = &ccb->ccb_cmd->cmd;
479
480 switch (cmd->status) {
481 case ARC_MSG_STATUS_SELTIMEOUT:
482 case ARC_MSG_STATUS_ABORTED:
483 case ARC_MSG_STATUS_INIT_FAIL:
484 xs->status = SCSI_OK;
485 xs->error = XS_SELTIMEOUT;
486 break;
487
488 case SCSI_CHECK:
489 memset(&xs->sense, 0, sizeof(xs->sense));
490 memcpy(&xs->sense, cmd->sense_data,
491 min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
492 xs->sense.scsi_sense.response_code =
493 SSD_RCODE_VALID | 0x70;
494 xs->status = SCSI_CHECK;
495 xs->error = XS_SENSE;
496 xs->resid = 0;
497 break;
498
499 default:
500 /* unknown device status */
501 xs->error = XS_BUSY; /* try again later? */
502 xs->status = SCSI_BUSY;
503 break;
504 }
505 } else {
506 xs->status = SCSI_OK;
507 xs->error = XS_NOERROR;
508 xs->resid = 0;
509 }
510
511 arc_put_ccb(sc, ccb);
512 scsipi_done(xs);
513 }
514
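/*
 * Polled completion: pop and complete replies until the ccb we are
 * waiting for shows up.  Each empty poll burns roughly 1ms (delay(1000))
 * and decrements "timeout", so the timeout argument is approximately
 * in milliseconds.
 */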
515 int
516 arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
517 {
518 struct arc_ccb *ccb = NULL;
519 char *kva = ARC_DMA_KVA(sc->sc_requests);
520 struct arc_io_cmd *cmd;
521 uint32_t reg;
522
523 do {
524 reg = arc_pop(sc);
525 if (reg == 0xffffffff) {
526 if (timeout-- == 0)
527 return 1;
528
529 delay(1000);
530 continue;
531 }
532
533 cmd = (struct arc_io_cmd *)(kva +
534 ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
535 ARC_DMA_DVA(sc->sc_requests)));
536 ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
537
538 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
539 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
540 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
541
542 arc_scsi_cmd_done(sc, ccb, reg);
543 } while (nccb != ccb);
544
545 return 0;
546 }
547
548 int
549 arc_map_pci_resources(struct arc_softc *sc, struct pci_attach_args *pa)
550 {
551 pcireg_t memtype;
552 pci_intr_handle_t ih;
553
554 sc->sc_pc = pa->pa_pc;
555 sc->sc_tag = pa->pa_tag;
556 sc->sc_dmat = pa->pa_dmat;
557
558 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
559 if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
560 &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
561 aprint_error(": unable to map system interface register\n");
562 return 1;
563 }
564
565 if (pci_intr_map(pa, &ih) != 0) {
566 aprint_error(": unable to map interrupt\n");
567 goto unmap;
568 }
569
570 sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
571 arc_intr, sc);
572 if (sc->sc_ih == NULL) {
573 aprint_error(": unable to map interrupt [2]\n");
574 goto unmap;
575 }
576 aprint_normal(": interrupting at %s\n",
577 pci_intr_string(pa->pa_pc, ih));
578
579 return 0;
580
581 unmap:
582 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
583 sc->sc_ios = 0;
584 return 1;
585 }
586
587 void
588 arc_unmap_pci_resources(struct arc_softc *sc)
589 {
590 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
591 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
592 sc->sc_ios = 0;
593 }
594
595 int
596 arc_query_firmware(struct arc_softc *sc)
597 {
598 struct arc_msg_firmware_info fwinfo;
599 char string[81]; /* sizeof(vendor)*2+1 */
600
601 if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
602 ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
603 aprint_debug("%s: timeout waiting for firmware ok\n",
604 device_xname(&sc->sc_dev));
605 return 1;
606 }
607
608 if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
609 aprint_debug("%s: timeout waiting for get config\n",
610 device_xname(&sc->sc_dev));
611 return 1;
612 }
613
614 if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
615 aprint_debug("%s: timeout waiting to start bg rebuild\n",
616 device_xname(&sc->sc_dev));
617 return 1;
618 }
619
620 arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));
621
622 DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
623 device_xname(&sc->sc_dev), htole32(fwinfo.signature));
624
625 if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
626 aprint_error("%s: invalid firmware info from iop\n",
627 device_xname(&sc->sc_dev));
628 return 1;
629 }
630
631 DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
632 device_xname(&sc->sc_dev),
633 htole32(fwinfo.request_len));
634 DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
635 device_xname(&sc->sc_dev),
636 htole32(fwinfo.queue_len));
637 DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
638 device_xname(&sc->sc_dev),
639 htole32(fwinfo.sdram_size));
640 DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
641 device_xname(&sc->sc_dev),
642 htole32(fwinfo.sata_ports));
643
644 scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
645 DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
646 device_xname(&sc->sc_dev), string);
647
648 scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));
649 aprint_normal("%s: Areca %s Host Adapter RAID controller\n",
650 device_xname(&sc->sc_dev), string);
651
652 scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
653 DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
654 device_xname(&sc->sc_dev), string);
655
656 aprint_normal("%s: %d ports, %dMB SDRAM, firmware <%s>\n",
657 device_xname(&sc->sc_dev), htole32(fwinfo.sata_ports),
658 htole32(fwinfo.sdram_size), string);
659
660 /* save the number of max disks for future use */
661 sc->sc_maxdisks = htole32(fwinfo.sata_ports);
662
663 if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
664 aprint_error("%s: unexpected request frame size (%d != %d)\n",
665 device_xname(&sc->sc_dev),
666 htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
667 return 1;
668 }
669
670 sc->sc_req_count = htole32(fwinfo.queue_len);
671
672 return 0;
673 }
674
675 #if NBIO > 0
676 static int
677 arc_bioctl(struct device *self, u_long cmd, void *addr)
678 {
679 struct arc_softc *sc = device_private(self);
680 int error = 0;
681
682 switch (cmd) {
683 case BIOCINQ:
684 error = arc_bio_inq(sc, (struct bioc_inq *)addr);
685 break;
686
687 case BIOCVOL:
688 error = arc_bio_vol(sc, (struct bioc_vol *)addr);
689 break;
690
691 case BIOCDISK:
692 error = arc_bio_disk_volume(sc, (struct bioc_disk *)addr);
693 break;
694
695 case BIOCDISK_NOVOL:
696 error = arc_bio_disk_novol(sc, (struct bioc_disk *)addr);
697 break;
698
699 case BIOCALARM:
700 error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
701 break;
702
703 case BIOCSETSTATE:
704 error = arc_bio_setstate(sc, (struct bioc_setstate *)addr);
705 break;
706
707 case BIOCVOLOPS:
708 error = arc_bio_volops(sc, (struct bioc_volops *)addr);
709 break;
710
711 default:
712 error = ENOTTY;
713 break;
714 }
715
716 return error;
717 }
718
719 static int
720 arc_fw_parse_status_code(struct arc_softc *sc, uint8_t *reply)
721 {
722 switch (*reply) {
723 case ARC_FW_CMD_RAIDINVAL:
724 printf("%s: firmware error (invalid raid set)\n",
725 device_xname(&sc->sc_dev));
726 return EINVAL;
727 case ARC_FW_CMD_VOLINVAL:
728 printf("%s: firmware error (invalid volume set)\n",
729 device_xname(&sc->sc_dev));
730 return EINVAL;
731 case ARC_FW_CMD_NORAID:
732 		printf("%s: firmware error (nonexistent raid set)\n",
733 device_xname(&sc->sc_dev));
734 return ENODEV;
735 case ARC_FW_CMD_NOVOLUME:
736 		printf("%s: firmware error (nonexistent volume set)\n",
737 device_xname(&sc->sc_dev));
738 return ENODEV;
739 case ARC_FW_CMD_NOPHYSDRV:
740 		printf("%s: firmware error (nonexistent physical drive)\n",
741 device_xname(&sc->sc_dev));
742 return ENODEV;
743 case ARC_FW_CMD_PARAM_ERR:
744 printf("%s: firmware error (parameter error)\n",
745 device_xname(&sc->sc_dev));
746 return EINVAL;
747 case ARC_FW_CMD_UNSUPPORTED:
748 printf("%s: firmware error (unsupported command)\n",
749 device_xname(&sc->sc_dev));
750 return EOPNOTSUPP;
751 case ARC_FW_CMD_DISKCFG_CHGD:
752 printf("%s: firmware error (disk configuration changed)\n",
753 device_xname(&sc->sc_dev));
754 return EINVAL;
755 case ARC_FW_CMD_PASS_INVAL:
756 printf("%s: firmware error (invalid password)\n",
757 device_xname(&sc->sc_dev));
758 return EINVAL;
759 case ARC_FW_CMD_NODISKSPACE:
760 printf("%s: firmware error (no disk space available)\n",
761 device_xname(&sc->sc_dev));
762 return EOPNOTSUPP;
763 case ARC_FW_CMD_CHECKSUM_ERR:
764 printf("%s: firmware error (checksum error)\n",
765 device_xname(&sc->sc_dev));
766 return EINVAL;
767 case ARC_FW_CMD_PASS_REQD:
768 printf("%s: firmware error (password required)\n",
769 device_xname(&sc->sc_dev));
770 return EPERM;
771 case ARC_FW_CMD_OK:
772 default:
773 return 0;
774 }
775 }
776
777 static int
778 arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
779 {
780 uint8_t request[2], reply[1];
781 size_t len;
782 int error = 0;
783
784 switch (ba->ba_opcode) {
785 case BIOC_SAENABLE:
786 case BIOC_SADISABLE:
787 request[0] = ARC_FW_SET_ALARM;
788 request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
789 ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
790 len = sizeof(request);
791
792 break;
793
794 case BIOC_SASILENCE:
795 request[0] = ARC_FW_MUTE_ALARM;
796 len = 1;
797
798 break;
799
800 case BIOC_GASTATUS:
801 /* system info is too big/ugly to deal with here */
802 return arc_bio_alarm_state(sc, ba);
803
804 default:
805 return EOPNOTSUPP;
806 }
807
808 error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
809 if (error != 0)
810 return error;
811
812 return arc_fw_parse_status_code(sc, &reply[0]);
813 }
814
815 static int
816 arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
817 {
818 struct arc_fw_sysinfo *sysinfo;
819 uint8_t request;
820 int error = 0;
821
822 sysinfo = kmem_zalloc(sizeof(struct arc_fw_sysinfo), KM_SLEEP);
823
824 request = ARC_FW_SYSINFO;
825 error = arc_msgbuf(sc, &request, sizeof(request),
826 sysinfo, sizeof(struct arc_fw_sysinfo));
827
828 if (error != 0)
829 goto out;
830
831 ba->ba_status = sysinfo->alarm;
832
833 out:
834 kmem_free(sysinfo, sizeof(*sysinfo));
835 return error;
836 }
837
838 static int
839 arc_bio_volops(struct arc_softc *sc, struct bioc_volops *bc)
840 {
841 /* to create a raid set */
842 struct req_craidset {
843 uint8_t cmdcode;
844 uint32_t devmask;
845 uint8_t raidset_name[16];
846 } __packed;
847
848 /* to create a volume set */
849 struct req_cvolset {
850 uint8_t cmdcode;
851 uint8_t raidset;
852 uint8_t volset_name[16];
853 uint64_t capacity;
854 uint8_t raidlevel;
855 uint8_t stripe;
856 uint8_t scsi_chan;
857 uint8_t scsi_target;
858 uint8_t scsi_lun;
859 uint8_t tagqueue;
860 uint8_t cache;
861 uint8_t speed;
862 uint8_t quick_init;
863 } __packed;
864
865 struct scsibus_softc *scsibus_sc = NULL;
866 struct req_craidset req_craidset;
867 struct req_cvolset req_cvolset;
868 uint8_t request[2];
869 uint8_t reply[1];
870 int error = 0;
871
872 switch (bc->bc_opcode) {
873 case BIOC_VCREATE_VOLUME:
874 {
875 /*
876 * Zero out the structs so that we use some defaults
877 * in raid and volume sets.
878 */
879 memset(&req_craidset, 0, sizeof(req_craidset));
880 memset(&req_cvolset, 0, sizeof(req_cvolset));
881
882 /*
883 		 * First we have to create the raid set, using the
884 		 * default name for all of them.
885 */
886 req_craidset.cmdcode = ARC_FW_CREATE_RAIDSET;
887 req_craidset.devmask = bc->bc_devmask;
888 error = arc_msgbuf(sc, &req_craidset, sizeof(req_craidset),
889 reply, sizeof(reply));
890 if (error != 0)
891 return error;
892
893 error = arc_fw_parse_status_code(sc, &reply[0]);
894 if (error) {
895 printf("%s: create raidset%d failed\n",
896 device_xname(&sc->sc_dev), bc->bc_volid);
897 return error;
898 }
899
900 /*
901 * At this point the raid set was created, so it's
902 * time to create the volume set.
903 */
904 req_cvolset.cmdcode = ARC_FW_CREATE_VOLUME;
905 req_cvolset.raidset = bc->bc_volid;
906 req_cvolset.capacity = bc->bc_size * ARC_BLOCKSIZE;
907
908 /*
909 * Set the RAID level.
910 */
911 switch (bc->bc_level) {
912 case 0:
913 case 1:
914 req_cvolset.raidlevel = bc->bc_level;
915 break;
916 case 3:
917 req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_3;
918 break;
919 case 5:
920 req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_5;
921 break;
922 case 6:
923 req_cvolset.raidlevel = ARC_FW_VOL_RAIDLEVEL_6;
924 break;
925 default:
926 return EOPNOTSUPP;
927 }
928
929 /*
930 * Set the stripe size.
931 */
932 switch (bc->bc_stripe) {
933 case 4:
934 req_cvolset.stripe = 0;
935 break;
936 case 8:
937 req_cvolset.stripe = 1;
938 break;
939 case 16:
940 req_cvolset.stripe = 2;
941 break;
942 case 32:
943 req_cvolset.stripe = 3;
944 break;
945 case 64:
946 req_cvolset.stripe = 4;
947 break;
948 case 128:
949 req_cvolset.stripe = 5;
950 break;
951 default:
952 req_cvolset.stripe = 4; /* by default 64K */
953 break;
954 }
955
956 req_cvolset.scsi_chan = bc->bc_channel;
957 req_cvolset.scsi_target = bc->bc_target;
958 req_cvolset.scsi_lun = bc->bc_lun;
959 req_cvolset.tagqueue = 1; /* always enabled */
960 req_cvolset.cache = 1; /* always enabled */
961 req_cvolset.speed = 4; /* always max speed */
962
963 error = arc_msgbuf(sc, &req_cvolset, sizeof(req_cvolset),
964 reply, sizeof(reply));
965 if (error != 0)
966 return error;
967
968 error = arc_fw_parse_status_code(sc, &reply[0]);
969 if (error) {
970 printf("%s: create volumeset%d failed\n",
971 device_xname(&sc->sc_dev), bc->bc_volid);
972 return error;
973 }
974
975 /*
976 * Do a rescan on the bus to attach the device associated
977 * with the new volume.
978 */
979 scsibus_sc = device_private(sc->sc_scsibus_dv);
980 (void)scsi_probe_bus(scsibus_sc, bc->bc_target, bc->bc_lun);
981
982 break;
983 }
984 case BIOC_VREMOVE_VOLUME:
985 {
986 /*
987 * Remove the volume set specified in bc_volid.
988 */
989 request[0] = ARC_FW_DELETE_VOLUME;
990 request[1] = bc->bc_volid;
991 error = arc_msgbuf(sc, request, sizeof(request),
992 reply, sizeof(reply));
993 if (error != 0)
994 return error;
995
996 error = arc_fw_parse_status_code(sc, &reply[0]);
997 if (error) {
998 printf("%s: delete volumeset%d failed\n",
999 device_xname(&sc->sc_dev), bc->bc_volid);
1000 return error;
1001 }
1002
1003 /*
1004 * Detach the sd(4) device associated with the volume,
1005 * but if there's an error don't make it a priority.
1006 */
1007 error = scsipi_target_detach(&sc->sc_chan, bc->bc_target,
1008 bc->bc_lun, 0);
1009 if (error)
1010 printf("%s: couldn't detach sd device for volume %d "
1011 "at %u:%u.%u (error=%d)\n",
1012 device_xname(&sc->sc_dev), bc->bc_volid,
1013 bc->bc_channel, bc->bc_target, bc->bc_lun, error);
1014
1015 /*
1016 		 * and remove the raid set specified in bc_volid as well;
1017 		 * we only care about volumes, so the raid set goes with it.
1018 */
1019 request[0] = ARC_FW_DELETE_RAIDSET;
1020 request[1] = bc->bc_volid;
1021 error = arc_msgbuf(sc, request, sizeof(request),
1022 reply, sizeof(reply));
1023 if (error != 0)
1024 return error;
1025
1026 error = arc_fw_parse_status_code(sc, &reply[0]);
1027 if (error) {
1028 printf("%s: delete raidset%d failed\n",
1029 device_xname(&sc->sc_dev), bc->bc_volid);
1030 return error;
1031 }
1032
1033 break;
1034 }
1035 default:
1036 return EOPNOTSUPP;
1037 }
1038
1039 return error;
1040 }
1041
1042 static int
1043 arc_bio_setstate(struct arc_softc *sc, struct bioc_setstate *bs)
1044 {
1045 /* for a hotspare disk */
1046 struct request_hs {
1047 uint8_t cmdcode;
1048 uint32_t devmask;
1049 } __packed;
1050
1051 /* for a pass-through disk */
1052 struct request_pt {
1053 uint8_t cmdcode;
1054 uint8_t devid;
1055 uint8_t scsi_chan;
1056 uint8_t scsi_id;
1057 uint8_t scsi_lun;
1058 uint8_t tagged_queue;
1059 uint8_t cache_mode;
1060 uint8_t max_speed;
1061 } __packed;
1062
1063 struct scsibus_softc *scsibus_sc = NULL;
1064 struct request_hs req_hs; /* to add/remove hotspare */
1065 struct request_pt req_pt; /* to add a pass-through */
1066 uint8_t req_gen[2];
1067 uint8_t reply[1];
1068 int error = 0;
1069
1070 switch (bs->bs_status) {
1071 case BIOC_SSHOTSPARE:
1072 {
1073 req_hs.cmdcode = ARC_FW_CREATE_HOTSPARE;
1074 req_hs.devmask = (1 << bs->bs_target);
1075 goto hotspare;
1076 }
1077 case BIOC_SSDELHOTSPARE:
1078 {
1079 req_hs.cmdcode = ARC_FW_DELETE_HOTSPARE;
1080 req_hs.devmask = (1 << bs->bs_target);
1081 goto hotspare;
1082 }
1083 case BIOC_SSPASSTHRU:
1084 {
1085 req_pt.cmdcode = ARC_FW_CREATE_PASSTHRU;
1086 req_pt.devid = bs->bs_other_id; /* this wants device# */
1087 req_pt.scsi_chan = bs->bs_channel;
1088 req_pt.scsi_id = bs->bs_target;
1089 req_pt.scsi_lun = bs->bs_lun;
1090 req_pt.tagged_queue = 1; /* always enabled */
1091 req_pt.cache_mode = 1; /* always enabled */
1092 req_pt.max_speed = 4; /* always max speed */
1093
1094 error = arc_msgbuf(sc, &req_pt, sizeof(req_pt),
1095 reply, sizeof(reply));
1096 if (error != 0)
1097 return error;
1098
1099 /*
1100 * Do a rescan on the bus to attach the new device
1101 * associated with the pass-through disk.
1102 */
1103 scsibus_sc = device_private(sc->sc_scsibus_dv);
1104 (void)scsi_probe_bus(scsibus_sc, bs->bs_target, bs->bs_lun);
1105
1106 goto out;
1107 }
1108 case BIOC_SSDELPASSTHRU:
1109 {
1110 req_gen[0] = ARC_FW_DELETE_PASSTHRU;
1111 req_gen[1] = bs->bs_target;
1112 error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
1113 reply, sizeof(reply));
1114 if (error != 0)
1115 return error;
1116
1117 /*
1118 * Detach the sd device associated with this pass-through disk.
1119 */
1120 error = scsipi_target_detach(&sc->sc_chan, bs->bs_target,
1121 bs->bs_lun, 0);
1122 if (error)
1123 printf("%s: couldn't detach sd device for the "
1124 "pass-through disk at %u:%u.%u (error=%d)\n",
1125 device_xname(&sc->sc_dev),
1126 bs->bs_channel, bs->bs_target, bs->bs_lun, error);
1127
1128 goto out;
1129 }
1130 case BIOC_SSCHECKSTART_VOL:
1131 {
1132 req_gen[0] = ARC_FW_START_CHECKVOL;
1133 req_gen[1] = bs->bs_volid;
1134 error = arc_msgbuf(sc, &req_gen, sizeof(req_gen),
1135 reply, sizeof(reply));
1136 if (error != 0)
1137 return error;
1138
1139 goto out;
1140 }
1141 case BIOC_SSCHECKSTOP_VOL:
1142 {
1143 uint8_t req = ARC_FW_STOP_CHECKVOL;
1144 error = arc_msgbuf(sc, &req, 1, reply, sizeof(reply));
1145 if (error != 0)
1146 return error;
1147
1148 goto out;
1149 }
1150 default:
1151 return EOPNOTSUPP;
1152 }
1153
1154 hotspare:
1155 error = arc_msgbuf(sc, &req_hs, sizeof(req_hs),
1156 reply, sizeof(reply));
1157 if (error != 0)
1158 return error;
1159
1160 out:
1161 return arc_fw_parse_status_code(sc, &reply[0]);
1162 }
1163
1164 static int
1165 arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
1166 {
1167 uint8_t request[2];
1168 struct arc_fw_sysinfo *sysinfo;
1169 struct arc_fw_raidinfo *raidinfo;
1170 int maxraidset, nvols = 0, i;
1171 int error = 0;
1172
1173 sysinfo = kmem_zalloc(sizeof(struct arc_fw_sysinfo), KM_SLEEP);
1174 raidinfo = kmem_zalloc(sizeof(struct arc_fw_raidinfo), KM_SLEEP);
1175
1176 request[0] = ARC_FW_SYSINFO;
1177 error = arc_msgbuf(sc, request, 1, sysinfo,
1178 sizeof(struct arc_fw_sysinfo));
1179 if (error != 0)
1180 goto out;
1181
1182 maxraidset = sysinfo->max_raid_set;
1183
1184 request[0] = ARC_FW_RAIDINFO;
1185 for (i = 0; i < maxraidset; i++) {
1186 request[1] = i;
1187 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1188 sizeof(struct arc_fw_raidinfo));
1189 if (error != 0)
1190 goto out;
1191
1192 if (raidinfo->volumes)
1193 nvols++;
1194 }
1195
1196 strlcpy(bi->bi_dev, device_xname(&sc->sc_dev), sizeof(bi->bi_dev));
1197 bi->bi_novol = nvols;
1198 bi->bi_nodisk = sc->sc_maxdisks;
1199
1200 out:
1201 kmem_free(raidinfo, sizeof(*raidinfo));
1202 kmem_free(sysinfo, sizeof(*sysinfo));
1203 return error;
1204 }
1205
1206 static int
1207 arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
1208 {
1209 uint8_t request[2];
1210 struct arc_fw_sysinfo *sysinfo;
1211 int error = 0;
1212 int maxvols, nvols = 0, i;
1213
1214 sysinfo = kmem_zalloc(sizeof(struct arc_fw_sysinfo), KM_SLEEP);
1215
1216 request[0] = ARC_FW_SYSINFO;
1217 error = arc_msgbuf(sc, request, 1, sysinfo,
1218 sizeof(struct arc_fw_sysinfo));
1219 if (error != 0)
1220 goto out;
1221
1222 maxvols = sysinfo->max_volume_set;
1223
1224 request[0] = ARC_FW_VOLINFO;
1225 for (i = 0; i < maxvols; i++) {
1226 request[1] = i;
1227 error = arc_msgbuf(sc, request, sizeof(request), volinfo,
1228 sizeof(struct arc_fw_volinfo));
1229 if (error != 0)
1230 goto out;
1231
1232 if (volinfo->capacity == 0 && volinfo->capacity2 == 0)
1233 continue;
1234
1235 if (nvols == vol)
1236 break;
1237
1238 nvols++;
1239 }
1240
1241 if (nvols != vol ||
1242 (volinfo->capacity == 0 && volinfo->capacity2 == 0)) {
1243 error = ENODEV;
1244 goto out;
1245 }
1246
1247 out:
1248 kmem_free(sysinfo, sizeof(*sysinfo));
1249 return error;
1250 }
1251
1252 static int
1253 arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
1254 {
1255 struct arc_fw_volinfo *volinfo;
1256 uint64_t blocks;
1257 uint32_t status;
1258 int error = 0;
1259
1260 volinfo = kmem_zalloc(sizeof(struct arc_fw_volinfo), KM_SLEEP);
1261
1262 error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
1263 if (error != 0)
1264 goto out;
1265
1266 bv->bv_percent = -1;
1267 bv->bv_seconds = 0;
1268
1269 status = htole32(volinfo->volume_status);
1270 if (status == 0x0) {
1271 if (htole32(volinfo->fail_mask) == 0x0)
1272 bv->bv_status = BIOC_SVONLINE;
1273 else
1274 bv->bv_status = BIOC_SVDEGRADED;
1275 } else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
1276 bv->bv_status = BIOC_SVDEGRADED;
1277 } else if (status & ARC_FW_VOL_STATUS_FAILED) {
1278 bv->bv_status = BIOC_SVOFFLINE;
1279 } else if (status & ARC_FW_VOL_STATUS_INITTING) {
1280 bv->bv_status = BIOC_SVBUILDING;
1281 bv->bv_percent = htole32(volinfo->progress);
1282 } else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
1283 bv->bv_status = BIOC_SVREBUILD;
1284 bv->bv_percent = htole32(volinfo->progress);
1285 } else if (status & ARC_FW_VOL_STATUS_MIGRATING) {
1286 bv->bv_status = BIOC_SVMIGRATING;
1287 bv->bv_percent = htole32(volinfo->progress);
1288 } else if (status & ARC_FW_VOL_STATUS_CHECKING) {
1289 bv->bv_status = BIOC_SVCHECKING;
1290 bv->bv_percent = htole32(volinfo->progress);
1291 }
1292
1293 blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
1294 blocks += (uint64_t)htole32(volinfo->capacity);
1295 bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */
1296
1297 switch (volinfo->raid_level) {
1298 case ARC_FW_VOL_RAIDLEVEL_0:
1299 bv->bv_level = 0;
1300 break;
1301 case ARC_FW_VOL_RAIDLEVEL_1:
1302 bv->bv_level = 1;
1303 break;
1304 case ARC_FW_VOL_RAIDLEVEL_3:
1305 bv->bv_level = 3;
1306 break;
1307 case ARC_FW_VOL_RAIDLEVEL_5:
1308 bv->bv_level = 5;
1309 break;
1310 case ARC_FW_VOL_RAIDLEVEL_6:
1311 bv->bv_level = 6;
1312 break;
1313 case ARC_FW_VOL_RAIDLEVEL_PASSTHRU:
1314 bv->bv_level = BIOC_SVOL_PASSTHRU;
1315 break;
1316 default:
1317 bv->bv_level = -1;
1318 break;
1319 }
1320
1321 bv->bv_nodisk = volinfo->member_disks;
1322 bv->bv_stripe_size = volinfo->stripe_size / 2;
1323 snprintf(bv->bv_dev, sizeof(bv->bv_dev), "sd%d", bv->bv_volid);
1324 scsipi_strvis(bv->bv_vendor, sizeof(bv->bv_vendor), volinfo->set_name,
1325 sizeof(volinfo->set_name));
1326
1327 out:
1328 kmem_free(volinfo, sizeof(*volinfo));
1329 return error;
1330 }
1331
1332 static int
1333 arc_bio_disk_novol(struct arc_softc *sc, struct bioc_disk *bd)
1334 {
1335 struct arc_fw_diskinfo *diskinfo;
1336 uint8_t request[2];
1337 int error = 0;
1338
1339 diskinfo = kmem_zalloc(sizeof(struct arc_fw_diskinfo), KM_SLEEP);
1340
1341 	if (bd->bd_diskid >= sc->sc_maxdisks) {
1342 error = ENODEV;
1343 goto out;
1344 }
1345
1346 request[0] = ARC_FW_DISKINFO;
1347 request[1] = bd->bd_diskid;
1348 error = arc_msgbuf(sc, request, sizeof(request),
1349 diskinfo, sizeof(struct arc_fw_diskinfo));
1350 if (error != 0)
1351 		goto out;
1352
1353 /* skip disks with no capacity */
1354 if (htole32(diskinfo->capacity) == 0 &&
1355 htole32(diskinfo->capacity2) == 0)
1356 goto out;
1357
1358 bd->bd_disknovol = true;
1359 arc_bio_disk_filldata(sc, bd, diskinfo, bd->bd_diskid);
1360
1361 out:
1362 kmem_free(diskinfo, sizeof(*diskinfo));
1363 return error;
1364 }
1365
1366 static void
1367 arc_bio_disk_filldata(struct arc_softc *sc, struct bioc_disk *bd,
1368 struct arc_fw_diskinfo *diskinfo, int diskid)
1369 {
1370 uint64_t blocks;
1371 char model[81];
1372 char serial[41];
1373 char rev[17];
1374
1375 switch (htole32(diskinfo->device_state)) {
1376 case ARC_FW_DISK_PASSTHRU:
1377 bd->bd_status = BIOC_SDPASSTHRU;
1378 break;
1379 case ARC_FW_DISK_RAIDMEMBER:
1380 bd->bd_status = BIOC_SDONLINE;
1381 break;
1382 case ARC_FW_DISK_HOTSPARE:
1383 bd->bd_status = BIOC_SDHOTSPARE;
1384 break;
1385 case ARC_FW_DISK_UNUSED:
1386 bd->bd_status = BIOC_SDUNUSED;
1387 break;
1388 default:
1389 printf("%s: unknown disk device_state: 0x%x\n", __func__,
1390 htole32(diskinfo->device_state));
1391 bd->bd_status = BIOC_SDINVALID;
1392 return;
1393 }
1394
1395 blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
1396 blocks += (uint64_t)htole32(diskinfo->capacity);
1397 bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */
1398
1399 scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
1400 scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
1401 scsipi_strvis(rev, 17, diskinfo->firmware_rev,
1402 sizeof(diskinfo->firmware_rev));
1403
1404 snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
1405 strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));
1406
1407 #if 0
1408 bd->bd_channel = diskinfo->scsi_attr.channel;
1409 bd->bd_target = diskinfo->scsi_attr.target;
1410 bd->bd_lun = diskinfo->scsi_attr.lun;
1411 #endif
1412
1413 /*
1414 	 * the firmware doesn't seem to fill scsi_attr in, so fake it with
1415 * the diskid.
1416 */
1417 bd->bd_channel = 0;
1418 bd->bd_target = diskid;
1419 bd->bd_lun = 0;
1420 }
1421
1422 static int
1423 arc_bio_disk_volume(struct arc_softc *sc, struct bioc_disk *bd)
1424 {
1425 uint8_t request[2];
1426 struct arc_fw_raidinfo *raidinfo;
1427 struct arc_fw_volinfo *volinfo;
1428 struct arc_fw_diskinfo *diskinfo;
1429 int error = 0;
1430
1431 volinfo = kmem_zalloc(sizeof(struct arc_fw_volinfo), KM_SLEEP);
1432 raidinfo = kmem_zalloc(sizeof(struct arc_fw_raidinfo), KM_SLEEP);
1433 diskinfo = kmem_zalloc(sizeof(struct arc_fw_diskinfo), KM_SLEEP);
1434
1435 error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
1436 if (error != 0)
1437 goto out;
1438
1439 request[0] = ARC_FW_RAIDINFO;
1440 request[1] = volinfo->raid_set_number;
1441
1442 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1443 sizeof(struct arc_fw_raidinfo));
1444 if (error != 0)
1445 goto out;
1446
1447 if (bd->bd_diskid > raidinfo->member_devices) {
1448 error = ENODEV;
1449 goto out;
1450 }
1451
1452 request[0] = ARC_FW_DISKINFO;
1453 request[1] = raidinfo->device_array[bd->bd_diskid];
1454 error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
1455 sizeof(struct arc_fw_diskinfo));
1456 if (error != 0)
1457 goto out;
1458
1459 /* now fill our bio disk with data from the firmware */
1460 arc_bio_disk_filldata(sc, bd, diskinfo,
1461 raidinfo->device_array[bd->bd_diskid]);
1462
1463 out:
1464 kmem_free(raidinfo, sizeof(*raidinfo));
1465 kmem_free(volinfo, sizeof(*volinfo));
1466 kmem_free(diskinfo, sizeof(*diskinfo));
1467 return error;
1468 }
1469 #endif /* NBIO > 0 */
1470
1471 uint8_t
1472 arc_msg_cksum(void *cmd, uint16_t len)
1473 {
1474 uint8_t *buf = cmd;
1475 uint8_t cksum;
1476 int i;
1477
1478 cksum = (uint8_t)(len >> 8) + (uint8_t)len;
1479 for (i = 0; i < len; i++)
1480 cksum += buf[i];
1481
1482 return cksum;
1483 }
1484
1485
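/*
 * Exchange a message with the firmware through the IOC read/write
 * buffers.  Both the request and the reply are framed as:
 *
 *	struct arc_fw_bufhdr	(the arc_fw_hdr signature plus a
 *				 little-endian payload length)
 *	payload			(wbuflen/rbuflen bytes)
 *	checksum		(one byte, from arc_msg_cksum() above:
 *				 the two length bytes plus every payload
 *				 byte, summed modulo 256)
 *
 * The frame is pushed to the controller in ARC_REG_IOC_RWBUF_MAXLEN
 * sized chunks using the inbound/outbound doorbells, and the reply is
 * collected the same way and checked against the expected header,
 * length and checksum.
 */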
1486 int
1487 arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
1488 size_t rbuflen)
1489 {
1490 uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
1491 uint8_t *wbuf, *rbuf;
1492 int wlen, wdone = 0, rlen, rdone = 0;
1493 struct arc_fw_bufhdr *bufhdr;
1494 uint32_t reg, rwlen;
1495 int error = 0;
1496 #ifdef ARC_DEBUG
1497 int i;
1498 #endif
1499
1500 wbuf = rbuf = NULL;
1501
1502 	DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %zu rbuflen: %zu\n",
1503 device_xname(&sc->sc_dev), wbuflen, rbuflen);
1504
1505 wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
1506 wbuf = kmem_alloc(wlen, KM_SLEEP);
1507
1508 rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
1509 rbuf = kmem_alloc(rlen, KM_SLEEP);
1510
1511 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
1512 device_xname(&sc->sc_dev), wlen, rlen);
1513
1514 bufhdr = (struct arc_fw_bufhdr *)wbuf;
1515 bufhdr->hdr = arc_fw_hdr;
1516 bufhdr->len = htole16(wbuflen);
1517 memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
1518 wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);
1519
1520 arc_lock(sc);
1521 if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0) {
1522 error = EBUSY;
1523 goto out;
1524 }
1525
1526 reg = ARC_REG_OUTB_DOORBELL_READ_OK;
1527
1528 do {
1529 if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
1530 memset(rwbuf, 0, sizeof(rwbuf));
1531 rwlen = (wlen - wdone) % sizeof(rwbuf);
1532 memcpy(rwbuf, &wbuf[wdone], rwlen);
1533
1534 #ifdef ARC_DEBUG
1535 if (arcdebug & ARC_D_DB) {
1536 printf("%s: write %d:",
1537 device_xname(&sc->sc_dev), rwlen);
1538 for (i = 0; i < rwlen; i++)
1539 printf(" 0x%02x", rwbuf[i]);
1540 printf("\n");
1541 }
1542 #endif
1543
1544 /* copy the chunk to the hw */
1545 arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
1546 arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
1547 sizeof(rwbuf));
1548
1549 /* say we have a buffer for the hw */
1550 arc_write(sc, ARC_REG_INB_DOORBELL,
1551 ARC_REG_INB_DOORBELL_WRITE_OK);
1552
1553 wdone += rwlen;
1554 }
1555
1556 while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
1557 arc_wait(sc);
1558
1559 arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
1560
1561 DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
1562 device_xname(&sc->sc_dev), reg);
1563
1564 if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
1565 rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
1566 if (rwlen > sizeof(rwbuf)) {
1567 DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
1568 device_xname(&sc->sc_dev));
1569 error = EIO;
1570 goto out;
1571 }
1572
1573 arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
1574 sizeof(rwbuf));
1575
1576 arc_write(sc, ARC_REG_INB_DOORBELL,
1577 ARC_REG_INB_DOORBELL_READ_OK);
1578
1579 #ifdef ARC_DEBUG
1580 printf("%s: len: %d+%d=%d/%d\n",
1581 device_xname(&sc->sc_dev),
1582 rwlen, rdone, rwlen + rdone, rlen);
1583 if (arcdebug & ARC_D_DB) {
1584 printf("%s: read:",
1585 device_xname(&sc->sc_dev));
1586 for (i = 0; i < rwlen; i++)
1587 printf(" 0x%02x", rwbuf[i]);
1588 printf("\n");
1589 }
1590 #endif
1591
1592 if ((rdone + rwlen) > rlen) {
1593 DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
1594 device_xname(&sc->sc_dev));
1595 error = EIO;
1596 goto out;
1597 }
1598
1599 memcpy(&rbuf[rdone], rwbuf, rwlen);
1600 rdone += rwlen;
1601 }
1602 } while (rdone != rlen);
1603
1604 bufhdr = (struct arc_fw_bufhdr *)rbuf;
1605 if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
1606 bufhdr->len != htole16(rbuflen)) {
1607 DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
1608 device_xname(&sc->sc_dev));
1609 error = EIO;
1610 goto out;
1611 }
1612
1613 memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);
1614
1615 if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
1616 DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
1617 device_xname(&sc->sc_dev));
1618 error = EIO;
1619 goto out;
1620 }
1621
1622 out:
1623 arc_unlock(sc);
1624 kmem_free(wbuf, wlen);
1625 kmem_free(rbuf, rlen);
1626
1627 return error;
1628 }
1629
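/*
 * Firmware conversation locking.  arc_lock() serializes arc_msgbuf()
 * callers with sc_rwlock, grabs the spin mutex, masks the doorbell
 * interrupt (leaving only the post queue enabled) and sets sc_talking.
 * arc_wait() briefly re-enables the doorbell interrupt and sleeps on
 * sc_condvar for up to a second; arc_intr() re-masks it and wakes us
 * when the doorbell rings.  arc_unlock() restores the normal interrupt
 * mask and drops the locks.
 */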
1630 void
1631 arc_lock(struct arc_softc *sc)
1632 {
1633 rw_enter(&sc->sc_rwlock, RW_WRITER);
1634 mutex_spin_enter(&sc->sc_mutex);
1635 arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1636 sc->sc_talking = 1;
1637 }
1638
1639 void
1640 arc_unlock(struct arc_softc *sc)
1641 {
1642 KASSERT(mutex_owned(&sc->sc_mutex));
1643
1644 arc_write(sc, ARC_REG_INTRMASK,
1645 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1646 sc->sc_talking = 0;
1647 mutex_spin_exit(&sc->sc_mutex);
1648 rw_exit(&sc->sc_rwlock);
1649 }
1650
1651 void
1652 arc_wait(struct arc_softc *sc)
1653 {
1654 KASSERT(mutex_owned(&sc->sc_mutex));
1655
1656 arc_write(sc, ARC_REG_INTRMASK,
1657 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1658 if (cv_timedwait(&sc->sc_condvar, &sc->sc_mutex, hz) == EWOULDBLOCK)
1659 arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1660 }
1661
1662 #if NBIO > 0
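/*
 * Sensor thread: ask the firmware how many volumes exist (arc_bio_inq)
 * and attach one ENVSYS_DRIVE sensor per volume; arc_refresh_sensors()
 * then maps each volume's bioc_vol status to an envsys drive state.
 */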
1663 static void
1664 arc_create_sensors(void *arg)
1665 {
1666 struct arc_softc *sc = arg;
1667 struct bioc_inq bi;
1668 struct bioc_vol bv;
1669 int i;
1670 size_t slen;
1671
1672 memset(&bi, 0, sizeof(bi));
1673 if (arc_bio_inq(sc, &bi) != 0) {
1674 aprint_error("%s: unable to query firmware for sensor info\n",
1675 device_xname(&sc->sc_dev));
1676 kthread_exit(0);
1677 }
1678
1679 sc->sc_nsensors = bi.bi_novol;
1680 /*
1681 	 * There's no point in continuing if there are no volumes configured.
1682 */
1683 if (!sc->sc_nsensors)
1684 kthread_exit(0);
1685
1686 sc->sc_sme = sysmon_envsys_create();
1687 slen = sizeof(envsys_data_t) * sc->sc_nsensors;
1688 sc->sc_sensors = kmem_zalloc(slen, KM_SLEEP);
1689
1690 for (i = 0; i < sc->sc_nsensors; i++) {
1691 memset(&bv, 0, sizeof(bv));
1692 bv.bv_volid = i;
1693 if (arc_bio_vol(sc, &bv) != 0)
1694 goto bad;
1695
1696 sc->sc_sensors[i].units = ENVSYS_DRIVE;
1697 sc->sc_sensors[i].monitor = true;
1698 sc->sc_sensors[i].flags = ENVSYS_FMONSTCHANGED;
1699 snprintf(sc->sc_sensors[i].desc, sizeof(sc->sc_sensors[i].desc),
1700 "RAID volume %s", bv.bv_dev);
1701 if (sysmon_envsys_sensor_attach(sc->sc_sme, &sc->sc_sensors[i]))
1702 goto bad;
1703 }
1704
1705 sc->sc_sme->sme_name = device_xname(&sc->sc_dev);
1706 sc->sc_sme->sme_cookie = sc;
1707 sc->sc_sme->sme_refresh = arc_refresh_sensors;
1708 if (sysmon_envsys_register(sc->sc_sme)) {
1709 aprint_debug("%s: unable to register with sysmon\n",
1710 device_xname(&sc->sc_dev));
1711 goto bad;
1712 }
1713 kthread_exit(0);
1714
1715 bad:
1716 kmem_free(sc->sc_sensors, slen);
1717 sysmon_envsys_destroy(sc->sc_sme);
1718 kthread_exit(0);
1719 }
1720
1721 static void
1722 arc_refresh_sensors(struct sysmon_envsys *sme, envsys_data_t *edata)
1723 {
1724 struct arc_softc *sc = sme->sme_cookie;
1725 struct bioc_vol bv;
1726
1727 memset(&bv, 0, sizeof(bv));
1728 bv.bv_volid = edata->sensor;
1729
1730 if (arc_bio_vol(sc, &bv)) {
1731 edata->value_cur = ENVSYS_DRIVE_EMPTY;
1732 edata->state = ENVSYS_SINVALID;
1733 return;
1734 }
1735
1736 switch (bv.bv_status) {
1737 case BIOC_SVOFFLINE:
1738 edata->value_cur = ENVSYS_DRIVE_FAIL;
1739 edata->state = ENVSYS_SCRITICAL;
1740 break;
1741 case BIOC_SVDEGRADED:
1742 edata->value_cur = ENVSYS_DRIVE_PFAIL;
1743 edata->state = ENVSYS_SCRITICAL;
1744 break;
1745 case BIOC_SVBUILDING:
1746 edata->value_cur = ENVSYS_DRIVE_REBUILD;
1747 edata->state = ENVSYS_SVALID;
1748 break;
1749 case BIOC_SVMIGRATING:
1750 edata->value_cur = ENVSYS_DRIVE_MIGRATING;
1751 edata->state = ENVSYS_SVALID;
1752 break;
1753 case BIOC_SVSCRUB:
1754 case BIOC_SVONLINE:
1755 edata->value_cur = ENVSYS_DRIVE_ONLINE;
1756 edata->state = ENVSYS_SVALID;
1757 break;
1758 case BIOC_SVINVALID:
1759 		/* FALLTHROUGH */
1760 default:
1761 edata->value_cur = ENVSYS_DRIVE_EMPTY; /* unknown state */
1762 edata->state = ENVSYS_SINVALID;
1763 break;
1764 }
1765 }
1766 #endif /* NBIO > 0 */
1767
1768 uint32_t
1769 arc_read(struct arc_softc *sc, bus_size_t r)
1770 {
1771 uint32_t v;
1772
1773 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1774 BUS_SPACE_BARRIER_READ);
1775 v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1776
1777 DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
1778 device_xname(&sc->sc_dev), r, v);
1779
1780 return v;
1781 }
1782
1783 void
1784 arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1785 {
1786 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1787 BUS_SPACE_BARRIER_READ);
1788 bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
1789 (uint32_t *)buf, len >> 2);
1790 }
1791
1792 void
1793 arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
1794 {
1795 DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
1796 device_xname(&sc->sc_dev), r, v);
1797
1798 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1799 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1800 BUS_SPACE_BARRIER_WRITE);
1801 }
1802
1803 void
1804 arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1805 {
1806 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
1807 (const uint32_t *)buf, len >> 2);
1808 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1809 BUS_SPACE_BARRIER_WRITE);
1810 }
1811
1812 int
1813 arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1814 uint32_t target)
1815 {
1816 int i;
1817
1818 DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
1819 device_xname(&sc->sc_dev), r, mask, target);
1820
1821 for (i = 0; i < 10000; i++) {
1822 if ((arc_read(sc, r) & mask) == target)
1823 return 0;
1824 delay(1000);
1825 }
1826
1827 return 1;
1828 }
1829
1830 int
1831 arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1832 uint32_t target)
1833 {
1834 int i;
1835
1836 DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
1837 device_xname(&sc->sc_dev), r, mask, target);
1838
1839 for (i = 0; i < 10000; i++) {
1840 if ((arc_read(sc, r) & mask) != target)
1841 return 0;
1842 delay(1000);
1843 }
1844
1845 return 1;
1846 }
1847
1848 int
1849 arc_msg0(struct arc_softc *sc, uint32_t m)
1850 {
1851 /* post message */
1852 arc_write(sc, ARC_REG_INB_MSG0, m);
1853 /* wait for the fw to do it */
1854 if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
1855 ARC_REG_INTRSTAT_MSG0) != 0)
1856 return 1;
1857
1858 /* ack it */
1859 arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);
1860
1861 return 0;
1862 }
1863
1864 struct arc_dmamem *
1865 arc_dmamem_alloc(struct arc_softc *sc, size_t size)
1866 {
1867 struct arc_dmamem *adm;
1868 int nsegs;
1869
1870 adm = kmem_zalloc(sizeof(*adm), KM_NOSLEEP);
1871 if (adm == NULL)
1872 return NULL;
1873
1874 adm->adm_size = size;
1875
1876 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1877 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
1878 goto admfree;
1879
1880 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
1881 1, &nsegs, BUS_DMA_NOWAIT) != 0)
1882 goto destroy;
1883
1884 if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
1885 &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
1886 goto free;
1887
1888 if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
1889 NULL, BUS_DMA_NOWAIT) != 0)
1890 goto unmap;
1891
1892 memset(adm->adm_kva, 0, size);
1893
1894 return adm;
1895
1896 unmap:
1897 bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
1898 free:
1899 bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
1900 destroy:
1901 bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
1902 admfree:
1903 kmem_free(adm, sizeof(*adm));
1904
1905 return NULL;
1906 }
1907
1908 void
1909 arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
1910 {
1911 bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
1912 bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
1913 bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
1914 bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
1915 kmem_free(adm, sizeof(*adm));
1916 }
1917
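/*
 * CCB pool setup.  A single DMA-safe region of ARC_MAX_IOCMDLEN *
 * sc_req_count bytes is allocated and each ccb gets one slot in it:
 * ccb_cmd points at the slot's kernel address and ccb_cmd_post holds
 * the slot's bus address shifted by ARC_REG_POST_QUEUE_ADDR_SHIFT,
 * which is the value written to (and later read back from) the
 * hardware post/reply queues.
 */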
1918 int
1919 arc_alloc_ccbs(struct arc_softc *sc)
1920 {
1921 struct arc_ccb *ccb;
1922 uint8_t *cmd;
1923 int i;
1924 size_t ccbslen;
1925
1926 TAILQ_INIT(&sc->sc_ccb_free);
1927
1928 ccbslen = sizeof(struct arc_ccb) * sc->sc_req_count;
1929 sc->sc_ccbs = kmem_zalloc(ccbslen, KM_SLEEP);
1930
1931 sc->sc_requests = arc_dmamem_alloc(sc,
1932 ARC_MAX_IOCMDLEN * sc->sc_req_count);
1933 if (sc->sc_requests == NULL) {
1934 aprint_error("%s: unable to allocate ccb dmamem\n",
1935 device_xname(&sc->sc_dev));
1936 goto free_ccbs;
1937 }
1938 cmd = ARC_DMA_KVA(sc->sc_requests);
1939
1940 for (i = 0; i < sc->sc_req_count; i++) {
1941 ccb = &sc->sc_ccbs[i];
1942
1943 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
1944 MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
1945 aprint_error("%s: unable to create dmamap for ccb %d\n",
1946 device_xname(&sc->sc_dev), i);
1947 goto free_maps;
1948 }
1949
1950 ccb->ccb_sc = sc;
1951 ccb->ccb_id = i;
1952 ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;
1953
1954 ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
1955 ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
1956 ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;
1957
1958 arc_put_ccb(sc, ccb);
1959 }
1960
1961 return 0;
1962
1963 free_maps:
1964 while ((ccb = arc_get_ccb(sc)) != NULL)
1965 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1966 arc_dmamem_free(sc, sc->sc_requests);
1967
1968 free_ccbs:
1969 kmem_free(sc->sc_ccbs, ccbslen);
1970
1971 return 1;
1972 }
1973
1974 struct arc_ccb *
1975 arc_get_ccb(struct arc_softc *sc)
1976 {
1977 struct arc_ccb *ccb;
1978
1979 ccb = TAILQ_FIRST(&sc->sc_ccb_free);
1980 if (ccb != NULL)
1981 TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
1982
1983 return ccb;
1984 }
1985
1986 void
1987 arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
1988 {
1989 ccb->ccb_xs = NULL;
1990 memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
1991 TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
1992 }
1993