1 /* $NetBSD: arcmsr.c,v 1.9.4.2 2008/01/11 17:03:16 xtraeme Exp $ */
2 /* $OpenBSD: arc.c,v 1.68 2007/10/27 03:28:27 dlg Exp $ */
3
4 /*
5 * Copyright (c) 2007 Juan Romero Pardines <xtraeme (at) netbsd.org>
6 * Copyright (c) 2006 David Gwynne <dlg (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include "bio.h"
22
23 #include <sys/cdefs.h>
24 __KERNEL_RCSID(0, "$NetBSD: arcmsr.c,v 1.9.4.2 2008/01/11 17:03:16 xtraeme Exp $");
25
26 #include <sys/param.h>
27 #include <sys/buf.h>
28 #include <sys/kernel.h>
29 #include <sys/malloc.h>
30 #include <sys/device.h>
31 #include <sys/kmem.h>
32 #include <sys/lock.h>
33
34 #if NBIO > 0
35 #include <sys/ioctl.h>
36 #include <dev/biovar.h>
37 #endif
38
39 #include <dev/pci/pcireg.h>
40 #include <dev/pci/pcivar.h>
41 #include <dev/pci/pcidevs.h>
42
43 #include <dev/scsipi/scsipi_all.h>
44 #include <dev/scsipi/scsi_all.h>
45 #include <dev/scsipi/scsiconf.h>
46
47 #include <machine/bus.h>
48
49 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
50
51 #include <dev/pci/arcmsrvar.h>
52
53 /* #define ARC_DEBUG */
54 #ifdef ARC_DEBUG
55 #define ARC_D_INIT (1<<0)
56 #define ARC_D_RW (1<<1)
57 #define ARC_D_DB (1<<2)
58
59 int arcdebug = 0;
60
61 #define DPRINTF(p...) do { if (arcdebug) printf(p); } while (0)
62 #define DNPRINTF(n, p...) do { if ((n) & arcdebug) printf(p); } while (0)
63
64 #else
65 #define DPRINTF(p...) /* p */
66 #define DNPRINTF(n, p...) /* n, p */
67 #endif
68
69 /*
70 * the fw header must always equal this.
71 */
72 static struct arc_fw_hdr arc_fw_hdr = { 0x5e, 0x01, 0x61 };
73
74 /*
75 * autoconf(9) glue.
76 */
77 static int arc_match(struct device *, struct cfdata *, void *);
78 static void arc_attach(struct device *, struct device *, void *);
79 static int arc_detach(struct device *, int);
80 static void arc_shutdown(void *);
81 static int arc_intr(void *);
82 static void arc_minphys(struct buf *);
83
84 CFATTACH_DECL(arcmsr, sizeof(struct arc_softc),
85 arc_match, arc_attach, arc_detach, NULL);
86
87 /*
88 * bio(4) glue.
89 */
90 #if NBIO > 0
91 static int arc_bioctl(struct device *, u_long, caddr_t);
92 static int arc_bio_inq(struct arc_softc *, struct bioc_inq *);
93 static int arc_bio_vol(struct arc_softc *, struct bioc_vol *);
94 static int arc_bio_disk_volume(struct arc_softc *, struct bioc_disk *);
95 static void arc_bio_disk_filldata(struct arc_softc *, struct bioc_disk *,
96 struct arc_fw_diskinfo *, int);
97 static int arc_bio_alarm(struct arc_softc *, struct bioc_alarm *);
98 static int arc_bio_alarm_state(struct arc_softc *, struct bioc_alarm *);
99 static int arc_bio_getvol(struct arc_softc *, int,
100 struct arc_fw_volinfo *);
101 static int arc_fw_parse_status_code(struct arc_softc *, uint8_t *);
102 #endif
103
104 static int
105 arc_match(struct device *parent, struct cfdata *match, void *aux)
106 {
107 struct pci_attach_args *pa = aux;
108
109 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ARECA) {
110 switch (PCI_PRODUCT(pa->pa_id)) {
111 case PCI_PRODUCT_ARECA_ARC1110:
112 case PCI_PRODUCT_ARECA_ARC1120:
113 case PCI_PRODUCT_ARECA_ARC1130:
114 case PCI_PRODUCT_ARECA_ARC1160:
115 case PCI_PRODUCT_ARECA_ARC1170:
116 case PCI_PRODUCT_ARECA_ARC1200:
117 case PCI_PRODUCT_ARECA_ARC1202:
118 case PCI_PRODUCT_ARECA_ARC1210:
119 case PCI_PRODUCT_ARECA_ARC1220:
120 case PCI_PRODUCT_ARECA_ARC1230:
121 case PCI_PRODUCT_ARECA_ARC1260:
122 case PCI_PRODUCT_ARECA_ARC1270:
123 case PCI_PRODUCT_ARECA_ARC1280:
124 case PCI_PRODUCT_ARECA_ARC1380:
125 case PCI_PRODUCT_ARECA_ARC1381:
126 case PCI_PRODUCT_ARECA_ARC1680:
127 case PCI_PRODUCT_ARECA_ARC1681:
128 return 1;
129 default:
130 break;
131 }
132 }
133
134 return 0;
135 }
136
137 static void
138 arc_attach(struct device *parent, struct device *self, void *aux)
139 {
140 struct arc_softc *sc = device_private(self);
141 struct pci_attach_args *pa = aux;
142 struct scsipi_adapter *adapt = &sc->sc_adapter;
143 struct scsipi_channel *chan = &sc->sc_chan;
144
145 sc->sc_talking = 0;
146 lockinit(&sc->sc_lock, PZERO, "arcdb", 0, 0);
147
148 if (arc_map_pci_resources(sc, pa) != 0) {
149 /* error message printed by arc_map_pci_resources */
150 return;
151 }
152
153 if (arc_query_firmware(sc) != 0) {
154 /* error message printed by arc_query_firmware */
155 goto unmap_pci;
156 }
157
158 if (arc_alloc_ccbs(sc) != 0) {
159 /* error message printed by arc_alloc_ccbs */
160 goto unmap_pci;
161 }
162
163 sc->sc_shutdownhook = shutdownhook_establish(arc_shutdown, sc);
164 if (sc->sc_shutdownhook == NULL)
165 		panic("unable to establish arc shutdown hook");
166
167 memset(adapt, 0, sizeof(*adapt));
168 adapt->adapt_dev = self;
169 adapt->adapt_nchannels = 1;
170 adapt->adapt_openings = sc->sc_req_count / ARC_MAX_TARGET;
171 adapt->adapt_max_periph = adapt->adapt_openings;
172 adapt->adapt_minphys = arc_minphys;
173 adapt->adapt_request = arc_scsi_cmd;
174
175 memset(chan, 0, sizeof(*chan));
176 chan->chan_adapter = adapt;
177 chan->chan_bustype = &scsi_bustype;
178 chan->chan_nluns = ARC_MAX_LUN;
179 chan->chan_ntargets = ARC_MAX_TARGET;
180 chan->chan_id = ARC_MAX_TARGET;
181 chan->chan_channel = 0;
182 chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
183
184 /*
185 	 * Save the struct device * returned, because we may need to attach
186 * devices via the management interface.
187 */
188 sc->sc_scsibus_dv = config_found(self, &sc->sc_chan, scsiprint);
189
190 /* enable interrupts */
191 arc_write(sc, ARC_REG_INTRMASK,
192 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRSTAT_DOORBELL));
193
194 #if NBIO > 0
195 /*
196 	 * Register the driver with bio(4) and set up the sensors.
197 */
198 if (bio_register(self, arc_bioctl) != 0)
199 panic("%s: bioctl registration failed\n", device_xname(self));
200 #endif
201
202 return;
203
204 unmap_pci:
205 arc_unmap_pci_resources(sc);
206 }
207
208 static int
209 arc_detach(struct device *self, int flags)
210 {
211 struct arc_softc *sc = device_private(self);
212
213 shutdownhook_disestablish(sc->sc_shutdownhook);
214
215 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
216 aprint_error("%s: timeout waiting to stop bg rebuild\n",
217 device_xname(&sc->sc_dev));
218
219 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
220 aprint_error("%s: timeout waiting to flush cache\n",
221 device_xname(&sc->sc_dev));
222
223 return 0;
224 }
225
226 static void
227 arc_shutdown(void *xsc)
228 {
229 struct arc_softc *sc = xsc;
230
231 if (arc_msg0(sc, ARC_REG_INB_MSG0_STOP_BGRB) != 0)
232 aprint_error("%s: timeout waiting to stop bg rebuild\n",
233 device_xname(&sc->sc_dev));
234
235 if (arc_msg0(sc, ARC_REG_INB_MSG0_FLUSH_CACHE) != 0)
236 aprint_error("%s: timeout waiting to flush cache\n",
237 device_xname(&sc->sc_dev));
238 }
239
240 static void
241 arc_minphys(struct buf *bp)
242 {
243 if (bp->b_bcount > MAXPHYS)
244 bp->b_bcount = MAXPHYS;
245 minphys(bp);
246 }
247
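/*
 * Interrupt handler.  Acknowledges doorbell interrupts (either waking a
 * thread talking to the firmware via the message buffer, or dropping
 * unsolicited doorbell traffic) and then walks the reply queue,
 * completing the CCBs the firmware has finished.
 */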
248 static int
249 arc_intr(void *arg)
250 {
251 struct arc_softc *sc = arg;
252 struct arc_ccb *ccb = NULL;
253 char *kva = ARC_DMA_KVA(sc->sc_requests);
254 struct arc_io_cmd *cmd;
255 uint32_t reg, intrstat;
256 int s;
257
258 s = splbio();
259 intrstat = arc_read(sc, ARC_REG_INTRSTAT);
260 if (intrstat == 0x0) {
261 splx(s);
262 return 0;
263 }
264
265 intrstat &= ARC_REG_INTRSTAT_POSTQUEUE | ARC_REG_INTRSTAT_DOORBELL;
266 arc_write(sc, ARC_REG_INTRSTAT, intrstat);
267
268 if (intrstat & ARC_REG_INTRSTAT_DOORBELL) {
269 if (sc->sc_talking) {
270 arc_write(sc, ARC_REG_INTRMASK,
271 ~ARC_REG_INTRMASK_POSTQUEUE);
272 wakeup(sc);
273 } else {
274 /* otherwise drop it */
275 reg = arc_read(sc, ARC_REG_OUTB_DOORBELL);
276 arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
277 if (reg & ARC_REG_OUTB_DOORBELL_WRITE_OK)
278 arc_write(sc, ARC_REG_INB_DOORBELL,
279 ARC_REG_INB_DOORBELL_READ_OK);
280 }
281 }
282 splx(s);
283
284 while ((reg = arc_pop(sc)) != 0xffffffff) {
285 cmd = (struct arc_io_cmd *)(kva +
286 ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
287 (uint32_t)ARC_DMA_DVA(sc->sc_requests)));
288 ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
289
290 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
291 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
292 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
293
294 arc_scsi_cmd_done(sc, ccb, reg);
295 }
296
297
298 return 1;
299 }
300
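/*
 * scsipi_adapter request entry point.  Builds an Areca I/O command
 * frame for ADAPTER_REQ_RUN_XFER requests, posts it to the inbound
 * queue and, for polled transfers, spins until the command completes.
 */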
301 void
302 arc_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
303 {
304 struct scsipi_periph *periph;
305 struct scsipi_xfer *xs;
306 struct scsipi_adapter *adapt = chan->chan_adapter;
307 struct arc_softc *sc = device_private(adapt->adapt_dev);
308 struct arc_ccb *ccb;
309 struct arc_msg_scsicmd *cmd;
310 uint32_t reg;
311 uint8_t target;
312 int s;
313
314 switch (req) {
315 case ADAPTER_REQ_GROW_RESOURCES:
316 /* Not supported. */
317 return;
318 case ADAPTER_REQ_SET_XFER_MODE:
319 /* Not supported. */
320 return;
321 case ADAPTER_REQ_RUN_XFER:
322 break;
323 }
324
325 s = splbio();
326
327 xs = arg;
328 periph = xs->xs_periph;
329 target = periph->periph_target;
330
331 if (xs->cmdlen > ARC_MSG_CDBLEN) {
332 memset(&xs->sense, 0, sizeof(xs->sense));
333 xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | 0x70;
334 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
335 xs->sense.scsi_sense.asc = 0x20;
336 xs->error = XS_SENSE;
337 xs->status = SCSI_CHECK;
338 splx(s);
339 scsipi_done(xs);
340 return;
341 }
342
343 ccb = arc_get_ccb(sc);
344 if (ccb == NULL) {
345 xs->error = XS_RESOURCE_SHORTAGE;
346 splx(s);
347 scsipi_done(xs);
348 return;
349 }
350
351 ccb->ccb_xs = xs;
352
353 if (arc_load_xs(ccb) != 0) {
354 xs->error = XS_DRIVER_STUFFUP;
355 arc_put_ccb(sc, ccb);
356 splx(s);
357 scsipi_done(xs);
358 return;
359 }
360
361 cmd = &ccb->ccb_cmd->cmd;
362 reg = ccb->ccb_cmd_post;
363
364 /* bus is always 0 */
365 cmd->target = target;
366 cmd->lun = periph->periph_lun;
367 cmd->function = 1; /* XXX magic number */
368
369 cmd->cdb_len = xs->cmdlen;
370 cmd->sgl_len = ccb->ccb_dmamap->dm_nsegs;
371 if (xs->xs_control & XS_CTL_DATA_OUT)
372 cmd->flags = ARC_MSG_SCSICMD_FLAG_WRITE;
373 if (ccb->ccb_dmamap->dm_nsegs > ARC_SGL_256LEN) {
374 cmd->flags |= ARC_MSG_SCSICMD_FLAG_SGL_BSIZE_512;
375 reg |= ARC_REG_POST_QUEUE_BIGFRAME;
376 }
377
378 cmd->context = htole32(ccb->ccb_id);
379 cmd->data_len = htole32(xs->datalen);
380
381 memcpy(cmd->cdb, xs->cmd, xs->cmdlen);
382
383 /* we've built the command, let's put it on the hw */
384 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
385 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
386 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
387
388 arc_push(sc, reg);
389 if (xs->xs_control & XS_CTL_POLL) {
390 if (arc_complete(sc, ccb, xs->timeout) != 0) {
391 xs->error = XS_DRIVER_STUFFUP;
392 splx(s);
393 scsipi_done(xs);
394 return;
395 }
396 }
397
398 splx(s);
399 }
400
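/*
 * Map the transfer's data buffer and fill in the 64-bit scatter/gather
 * list of the command frame.
 */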
401 int
402 arc_load_xs(struct arc_ccb *ccb)
403 {
404 struct arc_softc *sc = ccb->ccb_sc;
405 struct scsipi_xfer *xs = ccb->ccb_xs;
406 bus_dmamap_t dmap = ccb->ccb_dmamap;
407 struct arc_sge *sgl = ccb->ccb_cmd->sgl, *sge;
408 uint64_t addr;
409 int i, error;
410
411 if (xs->datalen == 0)
412 return 0;
413
414 error = bus_dmamap_load(sc->sc_dmat, dmap,
415 xs->data, xs->datalen, NULL,
416 (xs->xs_control & XS_CTL_NOSLEEP) ?
417 BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
418 if (error != 0) {
419 aprint_error("%s: error %d loading dmamap\n",
420 device_xname(&sc->sc_dev), error);
421 return 1;
422 }
423
424 for (i = 0; i < dmap->dm_nsegs; i++) {
425 sge = &sgl[i];
426
427 sge->sg_hdr = htole32(ARC_SGE_64BIT | dmap->dm_segs[i].ds_len);
428 addr = dmap->dm_segs[i].ds_addr;
429 sge->sg_hi_addr = htole32((uint32_t)(addr >> 32));
430 sge->sg_lo_addr = htole32((uint32_t)addr);
431 }
432
433 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
434 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
435 BUS_DMASYNC_PREWRITE);
436
437 return 0;
438 }
439
440 void
441 arc_scsi_cmd_done(struct arc_softc *sc, struct arc_ccb *ccb, uint32_t reg)
442 {
443 struct scsipi_xfer *xs = ccb->ccb_xs;
444 struct arc_msg_scsicmd *cmd;
445
446 if (xs->datalen != 0) {
447 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
448 ccb->ccb_dmamap->dm_mapsize,
449 (xs->xs_control & XS_CTL_DATA_IN) ?
450 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
451 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
452 }
453
454 /* timeout_del */
455 xs->status |= XS_STS_DONE;
456
457 if (reg & ARC_REG_REPLY_QUEUE_ERR) {
458 cmd = &ccb->ccb_cmd->cmd;
459
460 switch (cmd->status) {
461 case ARC_MSG_STATUS_SELTIMEOUT:
462 case ARC_MSG_STATUS_ABORTED:
463 case ARC_MSG_STATUS_INIT_FAIL:
464 xs->status = SCSI_OK;
465 xs->error = XS_SELTIMEOUT;
466 break;
467
468 case SCSI_CHECK:
469 memset(&xs->sense, 0, sizeof(xs->sense));
470 memcpy(&xs->sense, cmd->sense_data,
471 min(ARC_MSG_SENSELEN, sizeof(xs->sense)));
472 xs->sense.scsi_sense.response_code =
473 SSD_RCODE_VALID | 0x70;
474 xs->status = SCSI_CHECK;
475 xs->error = XS_SENSE;
476 xs->resid = 0;
477 break;
478
479 default:
480 /* unknown device status */
481 xs->error = XS_BUSY; /* try again later? */
482 xs->status = SCSI_BUSY;
483 break;
484 }
485 } else {
486 xs->status = SCSI_OK;
487 xs->error = XS_NOERROR;
488 xs->resid = 0;
489 }
490
491 arc_put_ccb(sc, ccb);
492 scsipi_done(xs);
493 }
494
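/*
 * Poll the reply queue until the given CCB completes, finishing any
 * other completed commands found along the way.  Returns nonzero if
 * the timeout (roughly in milliseconds) expires first.
 */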
495 int
496 arc_complete(struct arc_softc *sc, struct arc_ccb *nccb, int timeout)
497 {
498 struct arc_ccb *ccb = NULL;
499 char *kva = ARC_DMA_KVA(sc->sc_requests);
500 struct arc_io_cmd *cmd;
501 uint32_t reg;
502
503 do {
504 reg = arc_pop(sc);
505 if (reg == 0xffffffff) {
506 if (timeout-- == 0)
507 return 1;
508
509 delay(1000);
510 continue;
511 }
512
513 cmd = (struct arc_io_cmd *)(kva +
514 ((reg << ARC_REG_REPLY_QUEUE_ADDR_SHIFT) -
515 ARC_DMA_DVA(sc->sc_requests)));
516 ccb = &sc->sc_ccbs[htole32(cmd->cmd.context)];
517
518 bus_dmamap_sync(sc->sc_dmat, ARC_DMA_MAP(sc->sc_requests),
519 ccb->ccb_offset, ARC_MAX_IOCMDLEN,
520 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
521
522 arc_scsi_cmd_done(sc, ccb, reg);
523 } while (nccb != ccb);
524
525 return 0;
526 }
527
528 int
529 arc_map_pci_resources(struct arc_softc *sc, struct pci_attach_args *pa)
530 {
531 pcireg_t memtype;
532 pci_intr_handle_t ih;
533
534 sc->sc_pc = pa->pa_pc;
535 sc->sc_tag = pa->pa_tag;
536 sc->sc_dmat = pa->pa_dmat;
537
538 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, ARC_PCI_BAR);
539 if (pci_mapreg_map(pa, ARC_PCI_BAR, memtype, 0, &sc->sc_iot,
540 &sc->sc_ioh, NULL, &sc->sc_ios) != 0) {
541 aprint_error(": unable to map system interface register\n");
542 return 1;
543 }
544
545 if (pci_intr_map(pa, &ih) != 0) {
546 aprint_error(": unable to map interrupt\n");
547 goto unmap;
548 }
549
550 sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
551 arc_intr, sc);
552 if (sc->sc_ih == NULL) {
553 aprint_error(": unable to map interrupt [2]\n");
554 goto unmap;
555 }
556 aprint_normal(": interrupting at %s\n",
557 pci_intr_string(pa->pa_pc, ih));
558
559 return 0;
560
561 unmap:
562 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
563 sc->sc_ios = 0;
564 return 1;
565 }
566
567 void
568 arc_unmap_pci_resources(struct arc_softc *sc)
569 {
570 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
571 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
572 sc->sc_ios = 0;
573 }
574
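/*
 * Handshake with the IOP: wait for the firmware-ok flag, issue
 * GET_CONFIG and START_BGRB, then read and validate the firmware
 * information block from the message buffer registers.
 */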
575 int
576 arc_query_firmware(struct arc_softc *sc)
577 {
578 struct arc_msg_firmware_info fwinfo;
579 char string[81]; /* sizeof(vendor)*2+1 */
580
581 if (arc_wait_eq(sc, ARC_REG_OUTB_ADDR1, ARC_REG_OUTB_ADDR1_FIRMWARE_OK,
582 ARC_REG_OUTB_ADDR1_FIRMWARE_OK) != 0) {
583 aprint_debug("%s: timeout waiting for firmware ok\n",
584 device_xname(&sc->sc_dev));
585 return 1;
586 }
587
588 if (arc_msg0(sc, ARC_REG_INB_MSG0_GET_CONFIG) != 0) {
589 aprint_debug("%s: timeout waiting for get config\n",
590 device_xname(&sc->sc_dev));
591 return 1;
592 }
593
594 if (arc_msg0(sc, ARC_REG_INB_MSG0_START_BGRB) != 0) {
595 aprint_debug("%s: timeout waiting to start bg rebuild\n",
596 device_xname(&sc->sc_dev));
597 return 1;
598 }
599
600 arc_read_region(sc, ARC_REG_MSGBUF, &fwinfo, sizeof(fwinfo));
601
602 DNPRINTF(ARC_D_INIT, "%s: signature: 0x%08x\n",
603 device_xname(&sc->sc_dev), htole32(fwinfo.signature));
604
605 if (htole32(fwinfo.signature) != ARC_FWINFO_SIGNATURE_GET_CONFIG) {
606 aprint_error("%s: invalid firmware info from iop\n",
607 device_xname(&sc->sc_dev));
608 return 1;
609 }
610
611 DNPRINTF(ARC_D_INIT, "%s: request_len: %d\n",
612 device_xname(&sc->sc_dev),
613 htole32(fwinfo.request_len));
614 DNPRINTF(ARC_D_INIT, "%s: queue_len: %d\n",
615 device_xname(&sc->sc_dev),
616 htole32(fwinfo.queue_len));
617 DNPRINTF(ARC_D_INIT, "%s: sdram_size: %d\n",
618 device_xname(&sc->sc_dev),
619 htole32(fwinfo.sdram_size));
620 DNPRINTF(ARC_D_INIT, "%s: sata_ports: %d\n",
621 device_xname(&sc->sc_dev),
622 htole32(fwinfo.sata_ports));
623
624 scsipi_strvis(string, 81, fwinfo.vendor, sizeof(fwinfo.vendor));
625 DNPRINTF(ARC_D_INIT, "%s: vendor: \"%s\"\n",
626 device_xname(&sc->sc_dev), string);
627
628 scsipi_strvis(string, 17, fwinfo.model, sizeof(fwinfo.model));
629 aprint_normal("%s: Areca %s Host Adapter RAID controller\n",
630 device_xname(&sc->sc_dev), string);
631
632 scsipi_strvis(string, 33, fwinfo.fw_version, sizeof(fwinfo.fw_version));
633 DNPRINTF(ARC_D_INIT, "%s: version: \"%s\"\n",
634 device_xname(&sc->sc_dev), string);
635
636 aprint_normal("%s: %d ports, %dMB SDRAM, firmware <%s>\n",
637 device_xname(&sc->sc_dev), htole32(fwinfo.sata_ports),
638 htole32(fwinfo.sdram_size), string);
639
640 	/* save the maximum number of disks for future use */
641 sc->sc_maxdisks = htole32(fwinfo.sata_ports);
642
643 if (htole32(fwinfo.request_len) != ARC_MAX_IOCMDLEN) {
644 aprint_error("%s: unexpected request frame size (%d != %d)\n",
645 device_xname(&sc->sc_dev),
646 htole32(fwinfo.request_len), ARC_MAX_IOCMDLEN);
647 return 1;
648 }
649
650 sc->sc_req_count = htole32(fwinfo.queue_len);
651
652 return 0;
653 }
654
655 #if NBIO > 0
656 static int
657 arc_bioctl(struct device *self, u_long cmd, caddr_t addr)
658 {
659 struct arc_softc *sc = device_private(self);
660 int error = 0;
661
662 switch (cmd) {
663 case BIOCINQ:
664 error = arc_bio_inq(sc, (struct bioc_inq *)addr);
665 break;
666
667 case BIOCVOL:
668 error = arc_bio_vol(sc, (struct bioc_vol *)addr);
669 break;
670
671 case BIOCDISK:
672 error = arc_bio_disk_volume(sc, (struct bioc_disk *)addr);
673 break;
674
675 case BIOCALARM:
676 error = arc_bio_alarm(sc, (struct bioc_alarm *)addr);
677 break;
678
679 default:
680 error = ENOTTY;
681 break;
682 }
683
684 return error;
685 }
686
687 static int
688 arc_fw_parse_status_code(struct arc_softc *sc, uint8_t *reply)
689 {
690 switch (*reply) {
691 case ARC_FW_CMD_RAIDINVAL:
692 printf("%s: firmware error (invalid raid set)\n",
693 device_xname(&sc->sc_dev));
694 return EINVAL;
695 case ARC_FW_CMD_VOLINVAL:
696 printf("%s: firmware error (invalid volume set)\n",
697 device_xname(&sc->sc_dev));
698 return EINVAL;
699 case ARC_FW_CMD_NORAID:
700 		printf("%s: firmware error (nonexistent raid set)\n",
701 device_xname(&sc->sc_dev));
702 return ENODEV;
703 case ARC_FW_CMD_NOVOLUME:
704 		printf("%s: firmware error (nonexistent volume set)\n",
705 device_xname(&sc->sc_dev));
706 return ENODEV;
707 case ARC_FW_CMD_NOPHYSDRV:
708 		printf("%s: firmware error (nonexistent physical drive)\n",
709 device_xname(&sc->sc_dev));
710 return ENODEV;
711 case ARC_FW_CMD_PARAM_ERR:
712 printf("%s: firmware error (parameter error)\n",
713 device_xname(&sc->sc_dev));
714 return EINVAL;
715 case ARC_FW_CMD_UNSUPPORTED:
716 printf("%s: firmware error (unsupported command)\n",
717 device_xname(&sc->sc_dev));
718 return EOPNOTSUPP;
719 case ARC_FW_CMD_DISKCFG_CHGD:
720 printf("%s: firmware error (disk configuration changed)\n",
721 device_xname(&sc->sc_dev));
722 return EINVAL;
723 case ARC_FW_CMD_PASS_INVAL:
724 printf("%s: firmware error (invalid password)\n",
725 device_xname(&sc->sc_dev));
726 return EINVAL;
727 case ARC_FW_CMD_NODISKSPACE:
728 printf("%s: firmware error (no disk space available)\n",
729 device_xname(&sc->sc_dev));
730 return EOPNOTSUPP;
731 case ARC_FW_CMD_CHECKSUM_ERR:
732 printf("%s: firmware error (checksum error)\n",
733 device_xname(&sc->sc_dev));
734 return EINVAL;
735 case ARC_FW_CMD_PASS_REQD:
736 printf("%s: firmware error (password required)\n",
737 device_xname(&sc->sc_dev));
738 return EPERM;
739 case ARC_FW_CMD_OK:
740 default:
741 return 0;
742 }
743 }
744
745 static int
746 arc_bio_alarm(struct arc_softc *sc, struct bioc_alarm *ba)
747 {
748 uint8_t request[2], reply[1];
749 size_t len;
750 int error = 0;
751
752 switch (ba->ba_opcode) {
753 case BIOC_SAENABLE:
754 case BIOC_SADISABLE:
755 request[0] = ARC_FW_SET_ALARM;
756 request[1] = (ba->ba_opcode == BIOC_SAENABLE) ?
757 ARC_FW_SET_ALARM_ENABLE : ARC_FW_SET_ALARM_DISABLE;
758 len = sizeof(request);
759
760 break;
761
762 case BIOC_SASILENCE:
763 request[0] = ARC_FW_MUTE_ALARM;
764 len = 1;
765
766 break;
767
768 case BIOC_GASTATUS:
769 /* system info is too big/ugly to deal with here */
770 return arc_bio_alarm_state(sc, ba);
771
772 default:
773 return EOPNOTSUPP;
774 }
775
776 error = arc_msgbuf(sc, request, len, reply, sizeof(reply));
777 if (error != 0)
778 return error;
779
780 return arc_fw_parse_status_code(sc, &reply[0]);
781 }
782
783 static int
784 arc_bio_alarm_state(struct arc_softc *sc, struct bioc_alarm *ba)
785 {
786 struct arc_fw_sysinfo *sysinfo;
787 uint8_t request;
788 int error = 0;
789
790 sysinfo = kmem_zalloc(sizeof(struct arc_fw_sysinfo), KM_SLEEP);
791
792 request = ARC_FW_SYSINFO;
793 error = arc_msgbuf(sc, &request, sizeof(request),
794 sysinfo, sizeof(struct arc_fw_sysinfo));
795
796 if (error != 0)
797 goto out;
798
799 ba->ba_status = sysinfo->alarm;
800
801 out:
802 kmem_free(sysinfo, sizeof(*sysinfo));
803 return error;
804 }
805
806 static int
807 arc_bio_inq(struct arc_softc *sc, struct bioc_inq *bi)
808 {
809 uint8_t request[2];
810 struct arc_fw_sysinfo *sysinfo;
811 struct arc_fw_raidinfo *raidinfo;
812 int maxraidset, nvols = 0, i;
813 int error = 0;
814
815 sysinfo = kmem_zalloc(sizeof(struct arc_fw_sysinfo), KM_SLEEP);
816 raidinfo = kmem_zalloc(sizeof(struct arc_fw_raidinfo), KM_SLEEP);
817
818 request[0] = ARC_FW_SYSINFO;
819 error = arc_msgbuf(sc, request, 1, sysinfo,
820 sizeof(struct arc_fw_sysinfo));
821 if (error != 0)
822 goto out;
823
824 maxraidset = sysinfo->max_raid_set;
825
826 request[0] = ARC_FW_RAIDINFO;
827 for (i = 0; i < maxraidset; i++) {
828 request[1] = i;
829 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
830 sizeof(struct arc_fw_raidinfo));
831 if (error != 0)
832 goto out;
833
834 if (raidinfo->volumes)
835 nvols++;
836 }
837
838 strlcpy(bi->bi_dev, device_xname(&sc->sc_dev), sizeof(bi->bi_dev));
839 bi->bi_novol = nvols;
840 bi->bi_nodisk = sc->sc_maxdisks;
841
842 out:
843 kmem_free(raidinfo, sizeof(*raidinfo));
844 kmem_free(sysinfo, sizeof(*sysinfo));
845 return error;
846 }
847
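/*
 * Find the "vol"th non-empty volume set and copy its firmware volume
 * information into volinfo.  Returns ENODEV if no such volume exists.
 */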
848 static int
849 arc_bio_getvol(struct arc_softc *sc, int vol, struct arc_fw_volinfo *volinfo)
850 {
851 uint8_t request[2];
852 struct arc_fw_sysinfo *sysinfo;
853 int error = 0;
854 int maxvols, nvols = 0, i;
855
856 sysinfo = kmem_zalloc(sizeof(struct arc_fw_sysinfo), KM_SLEEP);
857
858 request[0] = ARC_FW_SYSINFO;
859 error = arc_msgbuf(sc, request, 1, sysinfo,
860 sizeof(struct arc_fw_sysinfo));
861 if (error != 0)
862 goto out;
863
864 maxvols = sysinfo->max_volume_set;
865
866 request[0] = ARC_FW_VOLINFO;
867 for (i = 0; i < maxvols; i++) {
868 request[1] = i;
869 error = arc_msgbuf(sc, request, sizeof(request), volinfo,
870 sizeof(struct arc_fw_volinfo));
871 if (error != 0)
872 goto out;
873
874 if (volinfo->capacity == 0 && volinfo->capacity2 == 0)
875 continue;
876
877 if (nvols == vol)
878 break;
879
880 nvols++;
881 }
882
883 if (nvols != vol ||
884 (volinfo->capacity == 0 && volinfo->capacity2 == 0)) {
885 error = ENODEV;
886 goto out;
887 }
888
889 out:
890 kmem_free(sysinfo, sizeof(*sysinfo));
891 return error;
892 }
893
894 static int
895 arc_bio_vol(struct arc_softc *sc, struct bioc_vol *bv)
896 {
897 struct arc_fw_volinfo *volinfo;
898 uint64_t blocks;
899 uint32_t status;
900 int error = 0;
901
902 volinfo = kmem_zalloc(sizeof(struct arc_fw_volinfo), KM_SLEEP);
903
904 error = arc_bio_getvol(sc, bv->bv_volid, volinfo);
905 if (error != 0)
906 goto out;
907
908 bv->bv_percent = -1;
909 bv->bv_seconds = 0;
910
911 status = htole32(volinfo->volume_status);
912 if (status == 0x0) {
913 if (htole32(volinfo->fail_mask) == 0x0)
914 bv->bv_status = BIOC_SVONLINE;
915 else
916 bv->bv_status = BIOC_SVDEGRADED;
917 } else if (status & ARC_FW_VOL_STATUS_NEED_REGEN) {
918 bv->bv_status = BIOC_SVDEGRADED;
919 } else if (status & ARC_FW_VOL_STATUS_FAILED) {
920 bv->bv_status = BIOC_SVOFFLINE;
921 } else if (status & ARC_FW_VOL_STATUS_INITTING) {
922 bv->bv_status = BIOC_SVBUILDING;
923 bv->bv_percent = htole32(volinfo->progress) / 10;
924 } else if (status & ARC_FW_VOL_STATUS_REBUILDING) {
925 bv->bv_status = BIOC_SVREBUILD;
926 bv->bv_percent = htole32(volinfo->progress) / 10;
927 }
928
929 blocks = (uint64_t)htole32(volinfo->capacity2) << 32;
930 blocks += (uint64_t)htole32(volinfo->capacity);
931 bv->bv_size = blocks * ARC_BLOCKSIZE; /* XXX */
932
933 switch (volinfo->raid_level) {
934 case ARC_FW_VOL_RAIDLEVEL_0:
935 bv->bv_level = 0;
936 break;
937 case ARC_FW_VOL_RAIDLEVEL_1:
938 bv->bv_level = 1;
939 break;
940 case ARC_FW_VOL_RAIDLEVEL_3:
941 bv->bv_level = 3;
942 break;
943 case ARC_FW_VOL_RAIDLEVEL_5:
944 bv->bv_level = 5;
945 break;
946 case ARC_FW_VOL_RAIDLEVEL_6:
947 bv->bv_level = 6;
948 break;
949 default:
950 bv->bv_level = -1;
951 break;
952 }
953
954 bv->bv_nodisk = volinfo->member_disks;
955 snprintf(bv->bv_dev, sizeof(bv->bv_dev), "sd%d", bv->bv_volid);
956 scsipi_strvis(bv->bv_vendor, sizeof(bv->bv_vendor), volinfo->set_name,
957 sizeof(volinfo->set_name));
958
959 out:
960 kmem_free(volinfo, sizeof(*volinfo));
961 return error;
962 }
963
964 static void
965 arc_bio_disk_filldata(struct arc_softc *sc, struct bioc_disk *bd,
966 struct arc_fw_diskinfo *diskinfo, int diskid)
967 {
968 uint64_t blocks;
969 char model[81];
970 char serial[41];
971 char rev[17];
972
973 switch (htole32(diskinfo->device_state)) {
974 case ARC_FW_DISK_RAIDMEMBER:
975 bd->bd_status = BIOC_SDONLINE;
976 break;
977 case ARC_FW_DISK_HOTSPARE:
978 bd->bd_status = BIOC_SDHOTSPARE;
979 break;
980 case ARC_FW_DISK_UNUSED:
981 bd->bd_status = BIOC_SDUNUSED;
982 break;
983 default:
984 printf("%s: unknown disk device_state: 0x%x\n", __func__,
985 htole32(diskinfo->device_state));
986 bd->bd_status = BIOC_SDINVALID;
987 return;
988 }
989
990 blocks = (uint64_t)htole32(diskinfo->capacity2) << 32;
991 blocks += (uint64_t)htole32(diskinfo->capacity);
992 bd->bd_size = blocks * ARC_BLOCKSIZE; /* XXX */
993
994 scsipi_strvis(model, 81, diskinfo->model, sizeof(diskinfo->model));
995 scsipi_strvis(serial, 41, diskinfo->serial, sizeof(diskinfo->serial));
996 scsipi_strvis(rev, 17, diskinfo->firmware_rev,
997 sizeof(diskinfo->firmware_rev));
998
999 snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s", model, rev);
1000 strlcpy(bd->bd_serial, serial, sizeof(bd->bd_serial));
1001
1002 #if 0
1003 bd->bd_channel = diskinfo->scsi_attr.channel;
1004 bd->bd_target = diskinfo->scsi_attr.target;
1005 bd->bd_lun = diskinfo->scsi_attr.lun;
1006 #endif
1007
1008 /*
1009 	 * the firmware doesn't seem to fill scsi_attr in, so fake it with
1010 * the diskid.
1011 */
1012 bd->bd_channel = 0;
1013 bd->bd_target = diskid;
1014 bd->bd_lun = 0;
1015 }
1016
1017 static int
1018 arc_bio_disk_volume(struct arc_softc *sc, struct bioc_disk *bd)
1019 {
1020 uint8_t request[2];
1021 struct arc_fw_raidinfo *raidinfo;
1022 struct arc_fw_volinfo *volinfo;
1023 struct arc_fw_diskinfo *diskinfo;
1024 int error = 0;
1025
1026 volinfo = kmem_zalloc(sizeof(struct arc_fw_volinfo), KM_SLEEP);
1027 raidinfo = kmem_zalloc(sizeof(struct arc_fw_raidinfo), KM_SLEEP);
1028 diskinfo = kmem_zalloc(sizeof(struct arc_fw_diskinfo), KM_SLEEP);
1029
1030 error = arc_bio_getvol(sc, bd->bd_volid, volinfo);
1031 if (error != 0)
1032 goto out;
1033
1034 request[0] = ARC_FW_RAIDINFO;
1035 request[1] = volinfo->raid_set_number;
1036
1037 error = arc_msgbuf(sc, request, sizeof(request), raidinfo,
1038 sizeof(struct arc_fw_raidinfo));
1039 if (error != 0)
1040 goto out;
1041
1042 	if (bd->bd_diskid >= raidinfo->member_devices) {
1043 error = ENODEV;
1044 goto out;
1045 }
1046
1047 request[0] = ARC_FW_DISKINFO;
1048 request[1] = raidinfo->device_array[bd->bd_diskid];
1049 error = arc_msgbuf(sc, request, sizeof(request), diskinfo,
1050 sizeof(struct arc_fw_diskinfo));
1051 if (error != 0)
1052 goto out;
1053
1054 /* now fill our bio disk with data from the firmware */
1055 arc_bio_disk_filldata(sc, bd, diskinfo,
1056 raidinfo->device_array[bd->bd_diskid]);
1057
1058 out:
1059 kmem_free(raidinfo, sizeof(*raidinfo));
1060 kmem_free(volinfo, sizeof(*volinfo));
1061 kmem_free(diskinfo, sizeof(*diskinfo));
1062 return error;
1063 }
1064 #endif /* NBIO > 0 */
1065
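/*
 * Checksum used by the firmware message buffer protocol: the two bytes
 * of the length plus every payload byte, summed modulo 256.
 */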
1066 uint8_t
1067 arc_msg_cksum(void *cmd, uint16_t len)
1068 {
1069 uint8_t *buf = cmd;
1070 uint8_t cksum;
1071 int i;
1072
1073 cksum = (uint8_t)(len >> 8) + (uint8_t)len;
1074 for (i = 0; i < len; i++)
1075 cksum += buf[i];
1076
1077 return cksum;
1078 }
1079
1080
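/*
 * Exchange a message with the firmware through the doorbell and IOC
 * buffer registers: the request is wrapped in an arc_fw_bufhdr plus a
 * trailing checksum, written out in rwbuf-sized chunks, and the reply
 * is collected the same way and verified before being copied to rptr.
 */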
1081 int
1082 arc_msgbuf(struct arc_softc *sc, void *wptr, size_t wbuflen, void *rptr,
1083 size_t rbuflen)
1084 {
1085 uint8_t rwbuf[ARC_REG_IOC_RWBUF_MAXLEN];
1086 uint8_t *wbuf, *rbuf;
1087 int wlen, wdone = 0, rlen, rdone = 0;
1088 struct arc_fw_bufhdr *bufhdr;
1089 uint32_t reg, rwlen;
1090 int error = 0;
1091 #ifdef ARC_DEBUG
1092 int i;
1093 #endif
1094
1095 wbuf = rbuf = NULL;
1096
1097 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wbuflen: %d rbuflen: %d\n",
1098 device_xname(&sc->sc_dev), wbuflen, rbuflen);
1099
1100 wlen = sizeof(struct arc_fw_bufhdr) + wbuflen + 1; /* 1 for cksum */
1101 wbuf = kmem_alloc(wlen, KM_SLEEP);
1102
1103 rlen = sizeof(struct arc_fw_bufhdr) + rbuflen + 1; /* 1 for cksum */
1104 rbuf = kmem_alloc(rlen, KM_SLEEP);
1105
1106 DNPRINTF(ARC_D_DB, "%s: arc_msgbuf wlen: %d rlen: %d\n",
1107 device_xname(&sc->sc_dev), wlen, rlen);
1108
1109 bufhdr = (struct arc_fw_bufhdr *)wbuf;
1110 bufhdr->hdr = arc_fw_hdr;
1111 bufhdr->len = htole16(wbuflen);
1112 memcpy(wbuf + sizeof(struct arc_fw_bufhdr), wptr, wbuflen);
1113 wbuf[wlen - 1] = arc_msg_cksum(wptr, wbuflen);
1114
1115 arc_lock(sc);
1116 if (arc_read(sc, ARC_REG_OUTB_DOORBELL) != 0) {
1117 error = EBUSY;
1118 goto out;
1119 }
1120
1121 reg = ARC_REG_OUTB_DOORBELL_READ_OK;
1122
1123 do {
1124 if ((reg & ARC_REG_OUTB_DOORBELL_READ_OK) && wdone < wlen) {
1125 memset(rwbuf, 0, sizeof(rwbuf));
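			/*
			 * XXX the modulo assumes the remaining data fits in a
			 * single rwbuf-sized chunk; an exact multiple of
			 * sizeof(rwbuf) would yield a zero-length write.
			 */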
1126 rwlen = (wlen - wdone) % sizeof(rwbuf);
1127 memcpy(rwbuf, &wbuf[wdone], rwlen);
1128
1129 #ifdef ARC_DEBUG
1130 if (arcdebug & ARC_D_DB) {
1131 printf("%s: write %d:",
1132 device_xname(&sc->sc_dev), rwlen);
1133 for (i = 0; i < rwlen; i++)
1134 printf(" 0x%02x", rwbuf[i]);
1135 printf("\n");
1136 }
1137 #endif
1138
1139 /* copy the chunk to the hw */
1140 arc_write(sc, ARC_REG_IOC_WBUF_LEN, rwlen);
1141 arc_write_region(sc, ARC_REG_IOC_WBUF, rwbuf,
1142 sizeof(rwbuf));
1143
1144 /* say we have a buffer for the hw */
1145 arc_write(sc, ARC_REG_INB_DOORBELL,
1146 ARC_REG_INB_DOORBELL_WRITE_OK);
1147
1148 wdone += rwlen;
1149 }
1150
1151 while ((reg = arc_read(sc, ARC_REG_OUTB_DOORBELL)) == 0)
1152 arc_wait(sc);
1153
1154 arc_write(sc, ARC_REG_OUTB_DOORBELL, reg);
1155
1156 DNPRINTF(ARC_D_DB, "%s: reg: 0x%08x\n",
1157 device_xname(&sc->sc_dev), reg);
1158
1159 if ((reg & ARC_REG_OUTB_DOORBELL_WRITE_OK) && rdone < rlen) {
1160 rwlen = arc_read(sc, ARC_REG_IOC_RBUF_LEN);
1161 if (rwlen > sizeof(rwbuf)) {
1162 DNPRINTF(ARC_D_DB, "%s: rwlen too big\n",
1163 device_xname(&sc->sc_dev));
1164 error = EIO;
1165 goto out;
1166 }
1167
1168 arc_read_region(sc, ARC_REG_IOC_RBUF, rwbuf,
1169 sizeof(rwbuf));
1170
1171 arc_write(sc, ARC_REG_INB_DOORBELL,
1172 ARC_REG_INB_DOORBELL_READ_OK);
1173
1174 #ifdef ARC_DEBUG
1175 printf("%s: len: %d+%d=%d/%d\n",
1176 device_xname(&sc->sc_dev),
1177 rwlen, rdone, rwlen + rdone, rlen);
1178 if (arcdebug & ARC_D_DB) {
1179 printf("%s: read:",
1180 device_xname(&sc->sc_dev));
1181 for (i = 0; i < rwlen; i++)
1182 printf(" 0x%02x", rwbuf[i]);
1183 printf("\n");
1184 }
1185 #endif
1186
1187 if ((rdone + rwlen) > rlen) {
1188 DNPRINTF(ARC_D_DB, "%s: rwbuf too big\n",
1189 device_xname(&sc->sc_dev));
1190 error = EIO;
1191 goto out;
1192 }
1193
1194 memcpy(&rbuf[rdone], rwbuf, rwlen);
1195 rdone += rwlen;
1196 }
1197 } while (rdone != rlen);
1198
1199 bufhdr = (struct arc_fw_bufhdr *)rbuf;
1200 if (memcmp(&bufhdr->hdr, &arc_fw_hdr, sizeof(bufhdr->hdr)) != 0 ||
1201 bufhdr->len != htole16(rbuflen)) {
1202 DNPRINTF(ARC_D_DB, "%s: rbuf hdr is wrong\n",
1203 device_xname(&sc->sc_dev));
1204 error = EIO;
1205 goto out;
1206 }
1207
1208 memcpy(rptr, rbuf + sizeof(struct arc_fw_bufhdr), rbuflen);
1209
1210 if (rbuf[rlen - 1] != arc_msg_cksum(rptr, rbuflen)) {
1211 DNPRINTF(ARC_D_DB, "%s: invalid cksum\n",
1212 device_xname(&sc->sc_dev));
1213 error = EIO;
1214 goto out;
1215 }
1216
1217 out:
1218 arc_unlock(sc);
1219 kmem_free(wbuf, wlen);
1220 kmem_free(rbuf, rlen);
1221
1222 return error;
1223 }
1224
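/*
 * Serialise access to the firmware message buffer.  arc_lock() masks
 * doorbell interrupts and marks the softc as "talking"; arc_wait()
 * unmasks them while sleeping for a doorbell; arc_unlock() restores
 * normal interrupt delivery.
 */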
1225 void
1226 arc_lock(struct arc_softc *sc)
1227 {
1228 int s;
1229
1230 lockmgr(&sc->sc_lock, LK_EXCLUSIVE, 0);
1231 s = splbio();
1232 arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1233 sc->sc_talking = 1;
1234 splx(s);
1235 }
1236
1237 void
1238 arc_unlock(struct arc_softc *sc)
1239 {
1240 int s;
1241
1242 s = splbio();
1243 arc_write(sc, ARC_REG_INTRMASK,
1244 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1245 sc->sc_talking = 0;
1246 splx(s);
1247 lockmgr(&sc->sc_lock, LK_RELEASE, 0);
1248 }
1249
1250 void
1251 arc_wait(struct arc_softc *sc)
1252 {
1253 int s;
1254
1255 s = splbio();
1256 arc_write(sc, ARC_REG_INTRMASK,
1257 ~(ARC_REG_INTRMASK_POSTQUEUE|ARC_REG_INTRMASK_DOORBELL));
1258 if (tsleep(sc, PWAIT, "arcdb", hz) == EWOULDBLOCK)
1259 arc_write(sc, ARC_REG_INTRMASK, ~ARC_REG_INTRMASK_POSTQUEUE);
1260 splx(s);
1261 }
1262
1263 uint32_t
1264 arc_read(struct arc_softc *sc, bus_size_t r)
1265 {
1266 uint32_t v;
1267
1268 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1269 BUS_SPACE_BARRIER_READ);
1270 v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1271
1272 DNPRINTF(ARC_D_RW, "%s: arc_read 0x%lx 0x%08x\n",
1273 device_xname(&sc->sc_dev), r, v);
1274
1275 return v;
1276 }
1277
1278 void
1279 arc_read_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1280 {
1281 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1282 BUS_SPACE_BARRIER_READ);
1283 bus_space_read_region_4(sc->sc_iot, sc->sc_ioh, r,
1284 (uint32_t *)buf, len >> 2);
1285 }
1286
1287 void
1288 arc_write(struct arc_softc *sc, bus_size_t r, uint32_t v)
1289 {
1290 DNPRINTF(ARC_D_RW, "%s: arc_write 0x%lx 0x%08x\n",
1291 device_xname(&sc->sc_dev), r, v);
1292
1293 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1294 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1295 BUS_SPACE_BARRIER_WRITE);
1296 }
1297
1298 void
1299 arc_write_region(struct arc_softc *sc, bus_size_t r, void *buf, size_t len)
1300 {
1301 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, r,
1302 (const uint32_t *)buf, len >> 2);
1303 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, len,
1304 BUS_SPACE_BARRIER_WRITE);
1305 }
1306
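/*
 * Busy-wait (up to roughly 10 seconds) for the masked register value to
 * become equal to the target.  arc_wait_ne() below waits for it to
 * become different.  Both return nonzero on timeout.
 */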
1307 int
1308 arc_wait_eq(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1309 uint32_t target)
1310 {
1311 int i;
1312
1313 DNPRINTF(ARC_D_RW, "%s: arc_wait_eq 0x%lx 0x%08x 0x%08x\n",
1314 device_xname(&sc->sc_dev), r, mask, target);
1315
1316 for (i = 0; i < 10000; i++) {
1317 if ((arc_read(sc, r) & mask) == target)
1318 return 0;
1319 delay(1000);
1320 }
1321
1322 return 1;
1323 }
1324
1325 int
1326 arc_wait_ne(struct arc_softc *sc, bus_size_t r, uint32_t mask,
1327 uint32_t target)
1328 {
1329 int i;
1330
1331 DNPRINTF(ARC_D_RW, "%s: arc_wait_ne 0x%lx 0x%08x 0x%08x\n",
1332 device_xname(&sc->sc_dev), r, mask, target);
1333
1334 for (i = 0; i < 10000; i++) {
1335 if ((arc_read(sc, r) & mask) != target)
1336 return 0;
1337 delay(1000);
1338 }
1339
1340 return 1;
1341 }
1342
1343 int
1344 arc_msg0(struct arc_softc *sc, uint32_t m)
1345 {
1346 /* post message */
1347 arc_write(sc, ARC_REG_INB_MSG0, m);
1348 /* wait for the fw to do it */
1349 if (arc_wait_eq(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0,
1350 ARC_REG_INTRSTAT_MSG0) != 0)
1351 return 1;
1352
1353 /* ack it */
1354 arc_write(sc, ARC_REG_INTRSTAT, ARC_REG_INTRSTAT_MSG0);
1355
1356 return 0;
1357 }
1358
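/*
 * Allocate a physically contiguous chunk of DMA memory, mapped into
 * kernel virtual address space, loaded into its dmamap and zeroed,
 * ready to be shared with the controller.
 */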
1359 struct arc_dmamem *
1360 arc_dmamem_alloc(struct arc_softc *sc, size_t size)
1361 {
1362 struct arc_dmamem *adm;
1363 int nsegs;
1364
1365 adm = kmem_zalloc(sizeof(*adm), KM_NOSLEEP);
1366 if (adm == NULL)
1367 return NULL;
1368
1369 adm->adm_size = size;
1370
1371 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1372 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
1373 goto admfree;
1374
1375 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &adm->adm_seg,
1376 1, &nsegs, BUS_DMA_NOWAIT) != 0)
1377 goto destroy;
1378
1379 if (bus_dmamem_map(sc->sc_dmat, &adm->adm_seg, nsegs, size,
1380 &adm->adm_kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
1381 goto free;
1382
1383 if (bus_dmamap_load(sc->sc_dmat, adm->adm_map, adm->adm_kva, size,
1384 NULL, BUS_DMA_NOWAIT) != 0)
1385 goto unmap;
1386
1387 memset(adm->adm_kva, 0, size);
1388
1389 return adm;
1390
1391 unmap:
1392 bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, size);
1393 free:
1394 bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
1395 destroy:
1396 bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
1397 admfree:
1398 kmem_free(adm, sizeof(*adm));
1399
1400 return NULL;
1401 }
1402
1403 void
1404 arc_dmamem_free(struct arc_softc *sc, struct arc_dmamem *adm)
1405 {
1406 bus_dmamap_unload(sc->sc_dmat, adm->adm_map);
1407 bus_dmamem_unmap(sc->sc_dmat, adm->adm_kva, adm->adm_size);
1408 bus_dmamem_free(sc->sc_dmat, &adm->adm_seg, 1);
1409 bus_dmamap_destroy(sc->sc_dmat, adm->adm_map);
1410 kmem_free(adm, sizeof(*adm));
1411 }
1412
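/*
 * Allocate the CCB array and the DMA-able region holding one request
 * frame per CCB, create a data dmamap for each CCB and put them all on
 * the free list.
 */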
1413 int
1414 arc_alloc_ccbs(struct arc_softc *sc)
1415 {
1416 struct arc_ccb *ccb;
1417 uint8_t *cmd;
1418 int i;
1419 size_t ccbslen;
1420
1421 TAILQ_INIT(&sc->sc_ccb_free);
1422
1423 ccbslen = sizeof(struct arc_ccb) * sc->sc_req_count;
1424 sc->sc_ccbs = kmem_zalloc(ccbslen, KM_SLEEP);
1425
1426 sc->sc_requests = arc_dmamem_alloc(sc,
1427 ARC_MAX_IOCMDLEN * sc->sc_req_count);
1428 if (sc->sc_requests == NULL) {
1429 aprint_error("%s: unable to allocate ccb dmamem\n",
1430 device_xname(&sc->sc_dev));
1431 goto free_ccbs;
1432 }
1433 cmd = ARC_DMA_KVA(sc->sc_requests);
1434
1435 for (i = 0; i < sc->sc_req_count; i++) {
1436 ccb = &sc->sc_ccbs[i];
1437
1438 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, ARC_SGL_MAXLEN,
1439 MAXPHYS, 0, 0, &ccb->ccb_dmamap) != 0) {
1440 aprint_error("%s: unable to create dmamap for ccb %d\n",
1441 device_xname(&sc->sc_dev), i);
1442 goto free_maps;
1443 }
1444
1445 ccb->ccb_sc = sc;
1446 ccb->ccb_id = i;
1447 ccb->ccb_offset = ARC_MAX_IOCMDLEN * i;
1448
1449 ccb->ccb_cmd = (struct arc_io_cmd *)&cmd[ccb->ccb_offset];
1450 ccb->ccb_cmd_post = (ARC_DMA_DVA(sc->sc_requests) +
1451 ccb->ccb_offset) >> ARC_REG_POST_QUEUE_ADDR_SHIFT;
1452
1453 arc_put_ccb(sc, ccb);
1454 }
1455
1456 return 0;
1457
1458 free_maps:
1459 while ((ccb = arc_get_ccb(sc)) != NULL)
1460 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1461 arc_dmamem_free(sc, sc->sc_requests);
1462
1463 free_ccbs:
1464 kmem_free(sc->sc_ccbs, ccbslen);
1465
1466 return 1;
1467 }
1468
1469 struct arc_ccb *
1470 arc_get_ccb(struct arc_softc *sc)
1471 {
1472 struct arc_ccb *ccb;
1473
1474 ccb = TAILQ_FIRST(&sc->sc_ccb_free);
1475 if (ccb != NULL)
1476 TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);
1477
1478 return ccb;
1479 }
1480
1481 void
1482 arc_put_ccb(struct arc_softc *sc, struct arc_ccb *ccb)
1483 {
1484 ccb->ccb_xs = NULL;
1485 memset(ccb->ccb_cmd, 0, ARC_MAX_IOCMDLEN);
1486 TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
1487 }
1488