/*	$NetBSD: fwdev.c,v 1.28.2.1 2014/08/10 06:54:52 tls Exp $	*/
/*-
 * Copyright (c) 2003 Hidetoshi Shimokawa
 * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the acknowledgement as bellow:
 *
 *    This product includes software developed by K. Kobayashi and H. Shimokawa
 *
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/firewire/fwdev.c,v 1.52 2007/06/06 14:31:36 simokawa Exp $
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fwdev.c,v 1.28.2.1 2014/08/10 06:54:52 tls Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/select.h>

#include <dev/ieee1394/firewire.h>
#include <dev/ieee1394/firewirereg.h>
#include <dev/ieee1394/fwdma.h>
#include <dev/ieee1394/fwmem.h>
#include <dev/ieee1394/iec68113.h>

#include "ioconf.h"

#define FWNODE_INVAL 0xffff

dev_type_open(fw_open);
dev_type_close(fw_close);
dev_type_read(fw_read);
dev_type_write(fw_write);
dev_type_ioctl(fw_ioctl);
dev_type_poll(fw_poll);
dev_type_mmap(fw_mmap);
dev_type_strategy(fw_strategy);

const struct bdevsw fw_bdevsw = {
	.d_open = fw_open,
	.d_close = fw_close,
	.d_strategy = fw_strategy,
	.d_ioctl = fw_ioctl,
	.d_dump = nodump,
	.d_psize = nosize,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

const struct cdevsw fw_cdevsw = {
	.d_open = fw_open,
	.d_close = fw_close,
	.d_read = fw_read,
	.d_write = fw_write,
	.d_ioctl = fw_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = fw_poll,
	.d_mmap = fw_mmap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

struct fw_drv1 {
	struct firewire_comm *fc;
	struct fw_xferq *ir;
	struct fw_xferq *it;
	struct fw_isobufreq bufreq;
	STAILQ_HEAD(, fw_bind) binds;
	STAILQ_HEAD(, fw_xfer) rq;
};

static int fwdev_allocbuf(struct firewire_comm *, struct fw_xferq *,
			  struct fw_bufspec *);
static int fwdev_freebuf(struct fw_xferq *);
static int fw_read_async(struct fw_drv1 *, struct uio *, int);
static int fw_write_async(struct fw_drv1 *, struct uio *, int);
static void fw_hand(struct fw_xfer *);

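/*
 * Open the firewire device node.  fwmem devices are handed off to
 * fwmem_open(); otherwise allocate the per-open fw_drv1 state and
 * initialise its bind list and receive queue.  Only one opener is
 * allowed at a time.
 */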
int
fw_open(dev_t dev, int flags, int fmt, struct lwp *td)
{
	struct firewire_softc *sc;
	struct fw_drv1 *d;
	int err = 0;

	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (DEV_FWMEM(dev))
		return fwmem_open(dev, flags, fmt, td);

	mutex_enter(&sc->fc->fc_mtx);
	if (sc->si_drv1 != NULL) {
		mutex_exit(&sc->fc->fc_mtx);
		return EBUSY;
	}
	/* set dummy value for allocation */
	sc->si_drv1 = (void *)-1;
	mutex_exit(&sc->fc->fc_mtx);

	sc->si_drv1 = malloc(sizeof(struct fw_drv1), M_FW, M_WAITOK | M_ZERO);
	if (sc->si_drv1 == NULL)
		return ENOMEM;

	d = (struct fw_drv1 *)sc->si_drv1;
	d->fc = sc->fc;
	STAILQ_INIT(&d->binds);
	STAILQ_INIT(&d->rq);

	return err;
}

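/*
 * Close the device node: tear down any address-range bindings, stop and
 * free the isochronous receive/transmit queues, and release the
 * per-open state.
 */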
int
fw_close(dev_t dev, int flags, int fmt, struct lwp *td)
{
	struct firewire_softc *sc;
	struct firewire_comm *fc;
	struct fw_drv1 *d;
	struct fw_xfer *xfer;
	struct fw_bind *fwb;
	int err = 0;

	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (DEV_FWMEM(dev))
		return fwmem_close(dev, flags, fmt, td);

	d = (struct fw_drv1 *)sc->si_drv1;
	fc = d->fc;

	/* remove binding */
	for (fwb = STAILQ_FIRST(&d->binds); fwb != NULL;
	    fwb = STAILQ_FIRST(&d->binds)) {
		fw_bindremove(fc, fwb);
		STAILQ_REMOVE_HEAD(&d->binds, chlist);
		fw_xferlist_remove(&fwb->xferlist);
		free(fwb, M_FW);
	}
	if (d->ir != NULL) {
		struct fw_xferq *ir = d->ir;

		if ((ir->flag & FWXFERQ_OPEN) == 0)
			return EINVAL;
		if (ir->flag & FWXFERQ_RUNNING) {
			ir->flag &= ~FWXFERQ_RUNNING;
			fc->irx_disable(fc, ir->dmach);
		}
		/* free extbuf */
		fwdev_freebuf(ir);
		/* drain receiving buffer */
		for (xfer = STAILQ_FIRST(&ir->q); xfer != NULL;
		    xfer = STAILQ_FIRST(&ir->q)) {
			ir->queued--;
			STAILQ_REMOVE_HEAD(&ir->q, link);

			xfer->resp = 0;
			fw_xfer_done(xfer);
		}
		ir->flag &=
		    ~(FWXFERQ_OPEN | FWXFERQ_MODEMASK | FWXFERQ_CHTAGMASK);
		d->ir = NULL;
	}
	if (d->it != NULL) {
		struct fw_xferq *it = d->it;

		if ((it->flag & FWXFERQ_OPEN) == 0)
			return EINVAL;
		if (it->flag & FWXFERQ_RUNNING) {
			it->flag &= ~FWXFERQ_RUNNING;
			fc->itx_disable(fc, it->dmach);
		}
		/* free extbuf */
		fwdev_freebuf(it);
		it->flag &=
		    ~(FWXFERQ_OPEN | FWXFERQ_MODEMASK | FWXFERQ_CHTAGMASK);
		d->it = NULL;
	}
	free(sc->si_drv1, M_FW);
	sc->si_drv1 = NULL;

	return err;
}

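/*
 * Read from the device.  With an isochronous receive queue attached,
 * copy out received stream packets chunk by chunk, sleeping briefly
 * when no data is available; otherwise fall back to an asynchronous
 * read via fw_read_async().
 */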
int
fw_read(dev_t dev, struct uio *uio, int ioflag)
{
	struct firewire_softc *sc;
	struct firewire_comm *fc;
	struct fw_drv1 *d;
	struct fw_xferq *ir;
	struct fw_pkt *fp;
	int err = 0, slept = 0;

	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (DEV_FWMEM(dev))
		return physio(fw_strategy, NULL, dev, ioflag, minphys, uio);

	d = (struct fw_drv1 *)sc->si_drv1;
	fc = d->fc;
	ir = d->ir;

	if (ir == NULL)
		return fw_read_async(d, uio, ioflag);

	if (ir->buf == NULL)
		return EIO;

	mutex_enter(&fc->fc_mtx);
readloop:
	if (ir->stproc == NULL) {
		/* iso bulkxfer */
		ir->stproc = STAILQ_FIRST(&ir->stvalid);
		if (ir->stproc != NULL) {
			STAILQ_REMOVE_HEAD(&ir->stvalid, link);
			ir->queued = 0;
		}
	}
	if (ir->stproc == NULL) {
		/* no data available */
		if (slept == 0) {
			slept = 1;
			ir->flag |= FWXFERQ_WAKEUP;
			mutex_exit(&fc->fc_mtx);
			err = tsleep(ir, FWPRI, "fw_read", hz);
			mutex_enter(&fc->fc_mtx);
			ir->flag &= ~FWXFERQ_WAKEUP;
			if (err == 0)
				goto readloop;
		} else if (slept == 1)
			err = EIO;
		mutex_exit(&fc->fc_mtx);
		return err;
	} else if (ir->stproc != NULL) {
		/* iso bulkxfer */
		mutex_exit(&fc->fc_mtx);
		fp = (struct fw_pkt *)fwdma_v_addr(ir->buf,
		    ir->stproc->poffset + ir->queued);
		if (fc->irx_post != NULL)
			fc->irx_post(fc, fp->mode.ld);
		if (fp->mode.stream.len == 0)
			return EIO;
		err = uiomove((void *)fp,
		    fp->mode.stream.len + sizeof(uint32_t), uio);
		ir->queued++;
		if (ir->queued >= ir->bnpacket) {
			STAILQ_INSERT_TAIL(&ir->stfree, ir->stproc, link);
			fc->irx_enable(fc, ir->dmach);
			ir->stproc = NULL;
		}
		if (uio->uio_resid >= ir->psize) {
			slept = -1;
			mutex_enter(&fc->fc_mtx);
			goto readloop;
		}
	} else
		mutex_exit(&fc->fc_mtx);
	return err;
}

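/*
 * Write to the device.  With an isochronous transmit queue attached,
 * copy the user's packets into the DMA buffer one at a time and kick
 * the transmitter whenever a chunk fills up; otherwise fall back to an
 * asynchronous write via fw_write_async().
 */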
int
fw_write(dev_t dev, struct uio *uio, int ioflag)
{
	struct firewire_softc *sc;
	struct firewire_comm *fc;
	struct fw_drv1 *d;
	struct fw_pkt *fp;
	struct fw_xferq *it;
	int slept = 0, err = 0;

	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (DEV_FWMEM(dev))
		return physio(fw_strategy, NULL, dev, ioflag, minphys, uio);

	d = (struct fw_drv1 *)sc->si_drv1;
	fc = d->fc;
	it = d->it;

	if (it == NULL)
		return fw_write_async(d, uio, ioflag);

	if (it->buf == NULL)
		return EIO;

	mutex_enter(&fc->fc_mtx);
isoloop:
	if (it->stproc == NULL) {
		it->stproc = STAILQ_FIRST(&it->stfree);
		if (it->stproc != NULL) {
			STAILQ_REMOVE_HEAD(&it->stfree, link);
			it->queued = 0;
		} else if (slept == 0) {
			slept = 1;
#if 0	/* XXX to avoid lock recursion */
			err = fc->itx_enable(fc, it->dmach);
			if (err)
				goto out;
#endif
			mutex_exit(&fc->fc_mtx);
			err = tsleep(it, FWPRI, "fw_write", hz);
			mutex_enter(&fc->fc_mtx);
			if (err)
				goto out;
			goto isoloop;
		} else {
			err = EIO;
			goto out;
		}
	}
	mutex_exit(&fc->fc_mtx);
	fp = (struct fw_pkt *)fwdma_v_addr(it->buf,
	    it->stproc->poffset + it->queued);
	err = uiomove((void *)fp, sizeof(struct fw_isohdr), uio);
	if (err != 0)
		return err;
	err =
	    uiomove((void *)fp->mode.stream.payload, fp->mode.stream.len, uio);
	it->queued++;
	if (it->queued >= it->bnpacket) {
		STAILQ_INSERT_TAIL(&it->stvalid, it->stproc, link);
		it->stproc = NULL;
		err = fc->itx_enable(fc, it->dmach);
	}
	if (uio->uio_resid >= sizeof(struct fw_isohdr)) {
		slept = 0;
		mutex_enter(&fc->fc_mtx);
		goto isoloop;
	}
	return err;

out:
	mutex_exit(&fc->fc_mtx);
	return err;
}

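/*
 * Control operations: isochronous stream setup and query, buffer
 * configuration, asynchronous transactions, address-range binding,
 * bus reset, device list, topology map and configuration ROM access.
 * Unknown commands are passed down to the bus driver's ioctl routine.
 */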
int
fw_ioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *td)
{
	struct firewire_softc *sc;
	struct firewire_comm *fc;
	struct fw_drv1 *d;
	struct fw_device *fwdev;
	struct fw_bind *fwb;
	struct fw_xferq *ir, *it;
	struct fw_xfer *xfer;
	struct fw_pkt *fp;
	struct fw_devinfo *devinfo;
	struct fw_devlstreq *fwdevlst = (struct fw_devlstreq *)data;
	struct fw_asyreq *asyreq = (struct fw_asyreq *)data;
	struct fw_isochreq *ichreq = (struct fw_isochreq *)data;
	struct fw_isobufreq *ibufreq = (struct fw_isobufreq *)data;
	struct fw_asybindreq *bindreq = (struct fw_asybindreq *)data;
	struct fw_crom_buf *crom_buf = (struct fw_crom_buf *)data;
	int i, len, err = 0;
	void *ptr;

	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (DEV_FWMEM(dev))
		return fwmem_ioctl(dev, cmd, data, flag, td);

	if (!data)
		return EINVAL;

	d = (struct fw_drv1 *)sc->si_drv1;
	fc = d->fc;
	ir = d->ir;
	it = d->it;

	switch (cmd) {
	case FW_STSTREAM:
		if (it == NULL) {
			i = fw_open_isodma(fc, /* tx */1);
			if (i < 0) {
				err = EBUSY;
				break;
			}
			it = fc->it[i];
			err = fwdev_allocbuf(fc, it, &d->bufreq.tx);
			if (err) {
				it->flag &= ~FWXFERQ_OPEN;
				break;
			}
		}
		it->flag &= ~0xff;
		it->flag |= (0x3f & ichreq->ch);
		it->flag |= ((0x3 & ichreq->tag) << 6);
		d->it = it;
		break;

	case FW_GTSTREAM:
		if (it != NULL) {
			ichreq->ch = it->flag & 0x3f;
			/* the tag is kept in bits 6-7 of the flag word */
			ichreq->tag = (it->flag >> 6) & 0x3;
		} else
			err = EINVAL;
		break;

	case FW_SRSTREAM:
		if (ir == NULL) {
			i = fw_open_isodma(fc, /* tx */0);
			if (i < 0) {
				err = EBUSY;
				break;
			}
			ir = fc->ir[i];
			err = fwdev_allocbuf(fc, ir, &d->bufreq.rx);
			if (err) {
				ir->flag &= ~FWXFERQ_OPEN;
				break;
			}
		}
		ir->flag &= ~0xff;
		ir->flag |= (0x3f & ichreq->ch);
		ir->flag |= ((0x3 & ichreq->tag) << 6);
		d->ir = ir;
		err = fc->irx_enable(fc, ir->dmach);
		break;

	case FW_GRSTREAM:
		if (d->ir != NULL) {
			ichreq->ch = ir->flag & 0x3f;
			/* the tag is kept in bits 6-7 of the flag word */
			ichreq->tag = (ir->flag >> 6) & 0x3;
		} else
			err = EINVAL;
		break;

	case FW_SSTBUF:
		memcpy(&d->bufreq, ibufreq, sizeof(d->bufreq));
		break;

	case FW_GSTBUF:
		memset(&ibufreq->rx, 0, sizeof(ibufreq->rx));
		if (ir != NULL) {
			ibufreq->rx.nchunk = ir->bnchunk;
			ibufreq->rx.npacket = ir->bnpacket;
			ibufreq->rx.psize = ir->psize;
		}
		memset(&ibufreq->tx, 0, sizeof(ibufreq->tx));
		if (it != NULL) {
			ibufreq->tx.nchunk = it->bnchunk;
			ibufreq->tx.npacket = it->bnpacket;
			ibufreq->tx.psize = it->psize;
		}
		break;

	case FW_ASYREQ:
	{
		const struct tcode_info *tinfo;
		int pay_len = 0;

		fp = &asyreq->pkt;
		tinfo = &fc->tcode[fp->mode.hdr.tcode];

		if ((tinfo->flag & FWTI_BLOCK_ASY) != 0)
			pay_len = MAX(0, asyreq->req.len - tinfo->hdr_len);

		xfer = fw_xfer_alloc_buf(M_FW, pay_len, PAGE_SIZE/*XXX*/);
		if (xfer == NULL)
			return ENOMEM;

		switch (asyreq->req.type) {
		case FWASREQNODE:
			break;

		case FWASREQEUI:
			fwdev = fw_noderesolve_eui64(fc, &asyreq->req.dst.eui);
			if (fwdev == NULL) {
				aprint_error_dev(fc->bdev,
				    "cannot find node\n");
				err = EINVAL;
				goto out;
			}
			fp->mode.hdr.dst = FWLOCALBUS | fwdev->dst;
			break;

		case FWASRESTL:
			/* XXX what's this? */
			break;

		case FWASREQSTREAM:
			/* nothing to do */
			break;
		}

		memcpy(&xfer->send.hdr, fp, tinfo->hdr_len);
		if (pay_len > 0)
			memcpy(xfer->send.payload, (char *)fp + tinfo->hdr_len,
			    pay_len);
		xfer->send.spd = asyreq->req.sped;
		xfer->hand = fw_xferwake;

		if ((err = fw_asyreq(fc, -1, xfer)) != 0)
			goto out;
		if ((err = fw_xferwait(xfer)) != 0)
			goto out;
		if (xfer->resp != 0) {
			err = EIO;
			goto out;
		}
		if ((tinfo->flag & FWTI_TLABEL) == 0)
			goto out;

		/* copy response */
		tinfo = &fc->tcode[xfer->recv.hdr.mode.hdr.tcode];
		if (xfer->recv.hdr.mode.hdr.tcode == FWTCODE_RRESB ||
		    xfer->recv.hdr.mode.hdr.tcode == FWTCODE_LRES) {
			pay_len = xfer->recv.pay_len;
			if (asyreq->req.len >=
			    xfer->recv.pay_len + tinfo->hdr_len)
				asyreq->req.len =
				    xfer->recv.pay_len + tinfo->hdr_len;
			else {
				err = EINVAL;
				pay_len = 0;
			}
		} else
			pay_len = 0;
		memcpy(fp, &xfer->recv.hdr, tinfo->hdr_len);
		memcpy((char *)fp + tinfo->hdr_len, xfer->recv.payload,
		    pay_len);
out:
		fw_xfer_free_buf(xfer);
		break;
	}

	case FW_IBUSRST:
		fc->ibr(fc);
		break;

	case FW_CBINDADDR:
		fwb = fw_bindlookup(fc, bindreq->start.hi, bindreq->start.lo);
		if (fwb == NULL) {
			err = EINVAL;
			break;
		}
		fw_bindremove(fc, fwb);
		STAILQ_REMOVE(&d->binds, fwb, fw_bind, chlist);
		fw_xferlist_remove(&fwb->xferlist);
		free(fwb, M_FW);
		break;

	case FW_SBINDADDR:
		if (bindreq->len <= 0) {
			err = EINVAL;
			break;
		}
		if (bindreq->start.hi > 0xffff) {
			err = EINVAL;
			break;
		}
		fwb = (struct fw_bind *)malloc(sizeof(struct fw_bind),
		    M_FW, M_WAITOK);
		if (fwb == NULL) {
			err = ENOMEM;
			break;
		}
		fwb->start = ((u_int64_t)bindreq->start.hi << 32) |
		    bindreq->start.lo;
		fwb->end = fwb->start + bindreq->len;
		fwb->sc = (void *)d;
		STAILQ_INIT(&fwb->xferlist);
		err = fw_bindadd(fc, fwb);
		if (err == 0) {
			fw_xferlist_add(&fwb->xferlist, M_FW,
			    /* XXX */
			    PAGE_SIZE, PAGE_SIZE, 5, fc, (void *)fwb, fw_hand);
			STAILQ_INSERT_TAIL(&d->binds, fwb, chlist);
		}
		break;

	case FW_GDEVLST:
		i = len = 1;
		/* myself */
		devinfo = fwdevlst->dev;
		devinfo->dst = fc->nodeid;
		devinfo->status = 0;	/* XXX */
		devinfo->eui.hi = fc->eui.hi;
		devinfo->eui.lo = fc->eui.lo;
		STAILQ_FOREACH(fwdev, &fc->devices, link) {
			if (len < FW_MAX_DEVLST) {
				devinfo = &fwdevlst->dev[len++];
				devinfo->dst = fwdev->dst;
				devinfo->status =
				    (fwdev->status == FWDEVINVAL) ? 0 : 1;
				devinfo->eui.hi = fwdev->eui.hi;
				devinfo->eui.lo = fwdev->eui.lo;
			}
			i++;
		}
		fwdevlst->n = i;
		fwdevlst->info_len = len;
		break;

	case FW_GTPMAP:
		memcpy(data, fc->topology_map,
		    (fc->topology_map->crc_len + 1) * 4);
		break;

	case FW_GCROM:
		STAILQ_FOREACH(fwdev, &fc->devices, link)
			if (FW_EUI64_EQUAL(fwdev->eui, crom_buf->eui))
				break;
		if (fwdev == NULL) {
			if (!FW_EUI64_EQUAL(fc->eui, crom_buf->eui)) {
				err = FWNODE_INVAL;
				break;
			}
			/* myself */
			ptr = malloc(CROMSIZE, M_FW, M_WAITOK);
			len = CROMSIZE;
			for (i = 0; i < CROMSIZE/4; i++)
				((uint32_t *)ptr)[i] = ntohl(fc->config_rom[i]);
		} else {
			/* found */
			ptr = (void *)fwdev->csrrom;
			if (fwdev->rommax < CSRROMOFF)
				len = 0;
			else
				len = fwdev->rommax - CSRROMOFF + 4;
		}
		if (crom_buf->len < len)
			len = crom_buf->len;
		else
			crom_buf->len = len;
		err = copyout(ptr, crom_buf->ptr, len);
		if (fwdev == NULL)
			/* myself */
			free(ptr, M_FW);
		break;

	default:
		fc->ioctl(dev, cmd, data, flag, td);
		break;
	}
	return err;
}

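/*
 * Poll for readability on the isochronous receive queue; writes are
 * always reported as ready (see the XXX below).
 */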
int
fw_poll(dev_t dev, int events, struct lwp *td)
{
	struct firewire_softc *sc;
	struct fw_xferq *ir;
	int revents, tmp;

	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	ir = ((struct fw_drv1 *)sc->si_drv1)->ir;
	revents = 0;
	tmp = POLLIN | POLLRDNORM;
	if (events & tmp) {
		if (STAILQ_FIRST(&ir->q) != NULL)
			revents |= tmp;
		else
			selrecord(td, &ir->rsel);
	}
	tmp = POLLOUT | POLLWRNORM;
	if (events & tmp)
		/* XXX should be fixed */
		revents |= tmp;

	return revents;
}

paddr_t
fw_mmap(dev_t dev, off_t offset, int nproto)
{
	struct firewire_softc *sc;

	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	return EINVAL;
}

void
fw_strategy(struct bio *bp)
{
	struct firewire_softc *sc;
	dev_t dev = bp->bio_dev;

	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
	if (sc == NULL)
		return;

	if (DEV_FWMEM(dev)) {
		fwmem_strategy(bp);
		return;
	}

	bp->bio_error = EOPNOTSUPP;
	bp->bio_resid = bp->bio_bcount;
	biodone(bp);
}

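/*
 * Allocate the external (multi-segment DMA) buffer and the bulkxfer
 * descriptors for an isochronous transfer queue, according to the
 * user-supplied chunk/packet/packet-size specification.
 */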
static int
fwdev_allocbuf(struct firewire_comm *fc, struct fw_xferq *q,
	       struct fw_bufspec *b)
{
	int i;

	if (q->flag & (FWXFERQ_RUNNING | FWXFERQ_EXTBUF))
		return EBUSY;

	q->bulkxfer =
	    (struct fw_bulkxfer *)malloc(sizeof(struct fw_bulkxfer) * b->nchunk,
	    M_FW, M_WAITOK);
	if (q->bulkxfer == NULL)
		return ENOMEM;

	b->psize = roundup2(b->psize, sizeof(uint32_t));
	q->buf = fwdma_malloc_multiseg(fc, sizeof(uint32_t), b->psize,
	    b->nchunk * b->npacket, BUS_DMA_WAITOK);

	if (q->buf == NULL) {
		free(q->bulkxfer, M_FW);
		q->bulkxfer = NULL;
		return ENOMEM;
	}
	q->bnchunk = b->nchunk;
	q->bnpacket = b->npacket;
	q->psize = (b->psize + 3) & ~3;
	q->queued = 0;

	STAILQ_INIT(&q->stvalid);
	STAILQ_INIT(&q->stfree);
	STAILQ_INIT(&q->stdma);
	q->stproc = NULL;

	for (i = 0; i < q->bnchunk; i++) {
		q->bulkxfer[i].poffset = i * q->bnpacket;
		q->bulkxfer[i].mbuf = NULL;
		STAILQ_INSERT_TAIL(&q->stfree, &q->bulkxfer[i], link);
	}

	q->flag &= ~FWXFERQ_MODEMASK;
	q->flag |= FWXFERQ_STREAM;
	q->flag |= FWXFERQ_EXTBUF;

	return 0;
}

static int
fwdev_freebuf(struct fw_xferq *q)
{

	if (q->flag & FWXFERQ_EXTBUF) {
		if (q->buf != NULL)
			fwdma_free_multiseg(q->buf);
		q->buf = NULL;
		free(q->bulkxfer, M_FW);
		q->bulkxfer = NULL;
		q->flag &= ~FWXFERQ_EXTBUF;
		q->psize = 0;
		q->maxq = FWMAXQUEUE;
	}
	return 0;
}

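/*
 * Asynchronous read: wait for a received request to show up on the
 * per-open receive queue, copy its header and payload out to the user
 * and recycle the xfer onto the xferlist of the bind it came from.
 */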
static int
fw_read_async(struct fw_drv1 *d, struct uio *uio, int ioflag)
{
	struct fw_xfer *xfer;
	struct fw_bind *fwb;
	struct fw_pkt *fp;
	const struct tcode_info *tinfo;
	int err = 0;

	mutex_enter(&d->fc->fc_mtx);

	for (;;) {
		xfer = STAILQ_FIRST(&d->rq);
		if (xfer == NULL && err == 0) {
			mutex_exit(&d->fc->fc_mtx);
			err = tsleep(&d->rq, FWPRI, "fwra", 0);
			if (err != 0)
				return err;
			mutex_enter(&d->fc->fc_mtx);
			continue;
		}
		break;
	}

	STAILQ_REMOVE_HEAD(&d->rq, link);
	mutex_exit(&d->fc->fc_mtx);
	fp = &xfer->recv.hdr;
#if 0 /* for GASP ?? */
	if (fc->irx_post != NULL)
		fc->irx_post(fc, fp->mode.ld);
#endif
	tinfo = &xfer->fc->tcode[fp->mode.hdr.tcode];
	err = uiomove((void *)fp, tinfo->hdr_len, uio);
	if (err)
		goto out;
	err = uiomove((void *)xfer->recv.payload, xfer->recv.pay_len, uio);

out:
	/* recycle this xfer */
	fwb = (struct fw_bind *)xfer->sc;
	fw_xfer_unload(xfer);
	xfer->recv.pay_len = PAGE_SIZE;
	mutex_enter(&d->fc->fc_mtx);
	STAILQ_INSERT_TAIL(&fwb->xferlist, xfer, link);
	mutex_exit(&d->fc->fc_mtx);
	return err;
}

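/*
 * Asynchronous write: build a transaction from the user-supplied packet
 * header and payload, submit it and wait for completion.  If a response
 * packet was received, queue it for a later read.
 */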
static int
fw_write_async(struct fw_drv1 *d, struct uio *uio, int ioflag)
{
	struct fw_xfer *xfer;
	struct fw_pkt pkt;
	const struct tcode_info *tinfo;
	int err;

	memset(&pkt, 0, sizeof(struct fw_pkt));
	if ((err = uiomove((void *)&pkt, sizeof(uint32_t), uio)))
		return err;
	tinfo = &d->fc->tcode[pkt.mode.hdr.tcode];
	if ((err = uiomove((char *)&pkt + sizeof(uint32_t),
	    tinfo->hdr_len - sizeof(uint32_t), uio)))
		return err;

	if ((xfer = fw_xfer_alloc_buf(M_FW, uio->uio_resid,
	    PAGE_SIZE/*XXX*/)) == NULL)
		return ENOMEM;

	memcpy(&xfer->send.hdr, &pkt, sizeof(struct fw_pkt));
	xfer->send.pay_len = uio->uio_resid;
	if (uio->uio_resid > 0) {
		if ((err =
		    uiomove((void *)xfer->send.payload, uio->uio_resid, uio)))
			goto out;
	}

	xfer->fc = d->fc;
	xfer->sc = NULL;
	xfer->hand = fw_xferwake;
	xfer->send.spd = 2 /* XXX */;

	if ((err = fw_asyreq(xfer->fc, -1, xfer)))
		goto out;

	if ((err = fw_xferwait(xfer)))
		goto out;

	if (xfer->resp != 0) {
		err = xfer->resp;
		goto out;
	}

	if (xfer->flag == FWXF_RCVD) {
		mutex_enter(&xfer->fc->fc_mtx);
		STAILQ_INSERT_TAIL(&d->rq, xfer, link);
		mutex_exit(&xfer->fc->fc_mtx);
		return 0;
	}

out:
	fw_xfer_free(xfer);
	return err;
}

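/*
 * Receive handler for bound address ranges: queue the incoming xfer on
 * the owner's receive queue and wake up any reader sleeping in
 * fw_read_async().
 */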
static void
fw_hand(struct fw_xfer *xfer)
{
	struct fw_bind *fwb;
	struct fw_drv1 *d;

	fwb = (struct fw_bind *)xfer->sc;
	d = (struct fw_drv1 *)fwb->sc;
	mutex_enter(&xfer->fc->fc_mtx);
	STAILQ_INSERT_TAIL(&d->rq, xfer, link);
	mutex_exit(&xfer->fc->fc_mtx);
	wakeup(&d->rq);
}