/*	$NetBSD: pxa2x0_i2s.c,v 1.11 2012/01/15 10:59:50 nonaka Exp $	*/
/*	$OpenBSD: pxa2x0_i2s.c,v 1.7 2006/04/04 11:45:40 pascoe Exp $	*/

/*
 * Copyright (c) 2005 Christopher Pascoe <pascoe@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pxa2x0_i2s.c,v 1.11 2012/01/15 10:59:50 nonaka Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/bus.h>

#include <arm/xscale/pxa2x0reg.h>
#include <arm/xscale/pxa2x0var.h>
#include <arm/xscale/pxa2x0_gpio.h>
#include <arm/xscale/pxa2x0_i2s.h>
#include <arm/xscale/pxa2x0_dmac.h>

/*
 * Per-buffer DMA bookkeeping.  One of these is created by
 * pxa2x0_i2s_allocm() for each audio buffer and linked onto the
 * softc's sc_dmas list; pxa2x0_i2s_freem() tears it down again.
 */
struct pxa2x0_i2s_dma {
	struct pxa2x0_i2s_dma *next;	/* next allocation on sc_dmas list */
	void *addr;			/* kernel VA of the buffer */
	size_t size;			/* total buffer size in bytes */
	bus_dmamap_t map;		/* map the buffer is loaded into */
#define I2S_N_SEGS 1
	bus_dma_segment_t segs[I2S_N_SEGS];	/* backing segment(s) */
	int nsegs;			/* valid entries in segs[] */
	struct dmac_xfer *dx;		/* DMA controller transfer handle */
};
45
46 static void pxa2x0_i2s_dmac_ointr(struct dmac_xfer *, int);
47 static void pxa2x0_i2s_dmac_iintr(struct dmac_xfer *, int);
48
/*
 * Reset the I2S unit and (re)program it: SACR0_BCKD plus TX/RX FIFO
 * thresholds of 7, with the serial audio clock divider taken from
 * sc->sc_sadiv.  The register write order below matters: the unit is
 * configured first and only enabled (SACR0_ENB) by the final write.
 */
void
pxa2x0_i2s_init(struct pxa2x0_i2s_softc *sc)
{

	/* Put the unit into reset and give it time to settle. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SACR0, SACR0_RST);
	delay(100);
	/* Configure while still disabled. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SACR0,
	    SACR0_BCKD | SACR0_SET_TFTH(7) | SACR0_SET_RFTH(7));
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SACR1, 0);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SADR, 0);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SADIV, sc->sc_sadiv);
	/* Same configuration again, now with the enable bit set. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SACR0,
	    SACR0_BCKD | SACR0_SET_TFTH(7) | SACR0_SET_RFTH(7) | SACR0_ENB);
}
63
64 int
65 pxa2x0_i2s_attach_sub(struct pxa2x0_i2s_softc *sc)
66 {
67 int rv;
68
69 KASSERT(sc->sc_intr_lock != NULL);
70
71 rv = bus_space_map(sc->sc_iot, PXA2X0_I2S_BASE, PXA2X0_I2S_SIZE, 0,
72 &sc->sc_ioh);
73 if (rv) {
74 sc->sc_size = 0;
75 return 1;
76 }
77
78 sc->sc_dr.ds_addr = PXA2X0_I2S_BASE + I2S_SADR;
79 sc->sc_dr.ds_len = 4;
80
81 sc->sc_sadiv = SADIV_3_058MHz;
82
83 bus_space_barrier(sc->sc_iot, sc->sc_ioh, 0, sc->sc_size,
84 BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);
85
86 pxa2x0_i2s_init(sc);
87
88 return 0;
89 }
90
91 void
92 pxa2x0_i2s_open(struct pxa2x0_i2s_softc *sc)
93 {
94
95 if (sc->sc_open++ == 0) {
96 pxa2x0_clkman_config(CKEN_I2S, 1);
97 }
98 }
99
100 void
101 pxa2x0_i2s_close(struct pxa2x0_i2s_softc *sc)
102 {
103
104 if (--sc->sc_open == 0) {
105 pxa2x0_clkman_config(CKEN_I2S, 0);
106 }
107 }
108
109 int
110 pxa2x0_i2s_detach_sub(struct pxa2x0_i2s_softc *sc)
111 {
112
113 if (sc->sc_size > 0) {
114 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_size);
115 sc->sc_size = 0;
116 }
117 pxa2x0_clkman_config(CKEN_I2S, 0);
118
119 return 0;
120 }
121
122 void
123 pxa2x0_i2s_write(struct pxa2x0_i2s_softc *sc, uint32_t data)
124 {
125
126 if (sc->sc_open == 0)
127 return;
128
129 /* Clear intr and underrun bit if set. */
130 if (bus_space_read_4(sc->sc_iot, sc->sc_ioh, I2S_SASR0) & SASR0_TUR)
131 bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SAICR, SAICR_TUR);
132
133 /* Wait for transmit fifo to have space. */
134 while ((bus_space_read_4(sc->sc_iot, sc->sc_ioh, I2S_SASR0) & SASR0_TNF)
135 == 0)
136 continue; /* nothing */
137
138 /* Queue data */
139 bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SADR, data);
140 }
141
142 void
143 pxa2x0_i2s_setspeed(struct pxa2x0_i2s_softc *sc, u_int *argp)
144 {
145 /*
146 * The available speeds are in the following table.
147 * Keep the speeds in increasing order.
148 */
149 static const struct speed_struct {
150 int speed;
151 int div;
152 } speed_table[] = {
153 {8000, SADIV_513_25kHz},
154 {11025, SADIV_702_75kHz},
155 {16000, SADIV_1_026MHz},
156 {22050, SADIV_1_405MHz},
157 {44100, SADIV_2_836MHz},
158 {48000, SADIV_3_058MHz},
159 };
160 const int n = (int)__arraycount(speed_table);
161 u_int arg = (u_int)*argp;
162 int selected = -1;
163 int i;
164
165 if (arg < speed_table[0].speed)
166 selected = 0;
167 if (arg > speed_table[n - 1].speed)
168 selected = n - 1;
169
170 for (i = 1; selected == -1 && i < n; i++) {
171 if (speed_table[i].speed == arg)
172 selected = i;
173 else if (speed_table[i].speed > arg) {
174 int diff1, diff2;
175
176 diff1 = arg - speed_table[i - 1].speed;
177 diff2 = speed_table[i].speed - arg;
178 if (diff1 < diff2)
179 selected = i - 1;
180 else
181 selected = i;
182 }
183 }
184
185 if (selected == -1)
186 selected = 0;
187
188 *argp = speed_table[selected].speed;
189
190 sc->sc_sadiv = speed_table[selected].div;
191 bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SADIV, sc->sc_sadiv);
192 }
193
/*
 * Allocate a 'size'-byte DMA-capable audio buffer plus all the
 * bookkeeping needed to stream it (DMA controller transfer handle,
 * bus_dma segment, map, and load), and link it onto the softc's
 * sc_dmas list.  Returns the buffer's kernel virtual address, or
 * NULL on failure.  May sleep (KM_SLEEP / BUS_DMA_WAITOK).
 */
void *
pxa2x0_i2s_allocm(void *hdl, int direction, size_t size)
{
	struct pxa2x0_i2s_softc *sc = hdl;
	struct pxa2x0_i2s_dma *p;
	struct dmac_xfer *dx;
	int error;

	p = kmem_alloc(sizeof(*p), KM_SLEEP);
	if (p == NULL)
		return NULL;

	/* Transfer handle used later by start_output()/start_input(). */
	dx = pxa2x0_dmac_allocate_xfer();
	if (dx == NULL) {
		goto fail_alloc;
	}
	p->dx = dx;

	p->size = size;
	/* Page-aligned memory in a single segment (I2S_N_SEGS == 1). */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, NBPG, 0, p->segs,
	    I2S_N_SEGS, &p->nsegs, BUS_DMA_WAITOK)) != 0) {
		goto fail_xfer;
	}

	/* Map into kernel VA; COHERENT requests a cache-coherent mapping. */
	if ((error = bus_dmamem_map(sc->sc_dmat, p->segs, p->nsegs, size,
	    &p->addr, BUS_DMA_WAITOK | BUS_DMA_COHERENT)) != 0) {
		goto fail_map;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK, &p->map)) != 0) {
		goto fail_create;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, p->map, p->addr, size, NULL,
	    BUS_DMA_WAITOK)) != 0) {
		goto fail_load;
	}

	/* Transfer parameters common to playback and record. */
	dx->dx_cookie = sc;
	dx->dx_priority = DMAC_PRIORITY_NORMAL;
	dx->dx_dev_width = DMAC_DEV_WIDTH_4;
	dx->dx_burst_size = DMAC_BURST_SIZE_32;

	p->next = sc->sc_dmas;
	sc->sc_dmas = p;

	return p->addr;

	/* Error unwind: release resources in reverse acquisition order. */
fail_load:
	bus_dmamap_destroy(sc->sc_dmat, p->map);
fail_create:
	bus_dmamem_unmap(sc->sc_dmat, p->addr, size);
fail_map:
	bus_dmamem_free(sc->sc_dmat, p->segs, p->nsegs);
fail_xfer:
	pxa2x0_dmac_free_xfer(dx);
fail_alloc:
	kmem_free(p, sizeof(*p));
	return NULL;
}
255
/*
 * Free a buffer obtained from pxa2x0_i2s_allocm(), unlinking it from
 * sc_dmas and releasing its DMA resources in reverse order of
 * allocation.  Panics if 'ptr' was not allocated by this driver.
 */
void
pxa2x0_i2s_freem(void *hdl, void *ptr, size_t size)
{
	struct pxa2x0_i2s_softc *sc = hdl;
	struct pxa2x0_i2s_dma **pp, *p;

	/* Walk the list keeping a handle on the link for unchaining. */
	for (pp = &sc->sc_dmas; (p = *pp) != NULL; pp = &p->next) {
		if (p->addr == ptr) {
			/* Make sure no transfer still references the dx. */
			pxa2x0_dmac_abort_xfer(p->dx);
			pxa2x0_dmac_free_xfer(p->dx);
			/*
			 * start_output()/start_input() overwrite
			 * segs[0].ds_len with the block size; restore the
			 * full size before handing segs back to bus_dma.
			 */
			p->segs[0].ds_len = p->size; /* XXX */
			bus_dmamap_unload(sc->sc_dmat, p->map);
			bus_dmamap_destroy(sc->sc_dmat, p->map);
			bus_dmamem_unmap(sc->sc_dmat, p->addr, p->size);
			bus_dmamem_free(sc->sc_dmat, p->segs, p->nsegs);

			*pp = p->next;
			kmem_free(p, sizeof(*p));
			return;
		}
	}
	panic("pxa2x0_i2s_freem: trying to free unallocated memory");
}
279
280 paddr_t
281 pxa2x0_i2s_mappage(void *hdl, void *mem, off_t off, int prot)
282 {
283 struct pxa2x0_i2s_softc *sc = hdl;
284 struct pxa2x0_i2s_dma *p;
285
286 if (off < 0)
287 return -1;
288
289 for (p = sc->sc_dmas; p && p->addr != mem; p = p->next)
290 continue;
291 if (p == NULL)
292 return -1;
293
294 if (off > p->size)
295 return -1;
296
297 return bus_dmamem_mmap(sc->sc_dmat, p->segs, p->nsegs, off, prot,
298 BUS_DMA_WAITOK);
299 }
300
301 int
302 pxa2x0_i2s_round_blocksize(void *hdl, int bs, int mode,
303 const struct audio_params *param)
304 {
305
306 /* Enforce individual DMA block size limit */
307 if (bs > DCMD_LENGTH_MASK)
308 return (DCMD_LENGTH_MASK & ~0x07);
309
310 return (bs + 0x07) & ~0x07; /* XXX: 64-bit multiples */
311 }
312
/*
 * Round an audio buffer size.  No constraint applies here, so the
 * requested size is accepted verbatim.
 */
size_t
pxa2x0_i2s_round_buffersize(void *hdl, int direction, size_t bufsize)
{

	return bufsize;
}
319
320 int
321 pxa2x0_i2s_halt_output(void *hdl)
322 {
323 struct pxa2x0_i2s_softc *sc = hdl;
324
325 if (sc->sc_txdma) {
326 pxa2x0_dmac_abort_xfer(sc->sc_txdma->dx);
327 sc->sc_txdma = NULL;
328 }
329
330 return 0;
331 }
332
333 int
334 pxa2x0_i2s_halt_input(void *hdl)
335 {
336 struct pxa2x0_i2s_softc *sc = hdl;
337
338 if (sc->sc_rxdma) {
339 pxa2x0_dmac_abort_xfer(sc->sc_rxdma->dx);
340 sc->sc_rxdma = NULL;
341 }
342
343 return 0;
344 }
345
/*
 * Start a playback DMA transfer of 'bsize' bytes beginning at 'block',
 * which must lie entirely within a buffer from pxa2x0_i2s_allocm().
 * tx_func(tx_arg) is invoked under sc_intr_lock when the transfer
 * completes.  Returns 0 on success, EBUSY if a transmit is already in
 * progress, ENXIO if 'block' is not inside any allocated buffer, or
 * the error from pxa2x0_dmac_start_xfer().
 */
int
pxa2x0_i2s_start_output(void *hdl, void *block, int bsize,
    void (*tx_func)(void *), void *tx_arg)
{
	struct pxa2x0_i2s_softc *sc = hdl;
	struct pxa2x0_i2s_dma *p;
	struct dmac_xfer *dx;

	if (sc->sc_txdma)
		return EBUSY;

	/* Find mapping which contains block completely */
	for (p = sc->sc_dmas;
	    p != NULL &&
	    (((char*)block < (char *)p->addr) ||
	    ((char *)block + bsize > (char *)p->addr + p->size));
	    p = p->next) {
		continue;	/* Nothing */
	}
	if (p == NULL) {
		aprint_error("pxa2x0_i2s_start_output: "
		    "request with bad start address: %p, size: %d\n",
		    block, bsize);
		return ENXIO;
	}
	sc->sc_txdma = p;

	/* Describe just the requested window within the buffer. */
	p->segs[0].ds_addr = p->map->dm_segs[0].ds_addr +
	    ((char *)block - (char *)p->addr);
	p->segs[0].ds_len = bsize;

	/* Memory -> SADR FIFO; flow controlled on the destination side. */
	dx = p->dx;
	dx->dx_done = pxa2x0_i2s_dmac_ointr;
	dx->dx_peripheral = DMAC_PERIPH_I2STX;
	dx->dx_flow = DMAC_FLOW_CTRL_DEST;
	dx->dx_loop_notify = DMAC_DONT_LOOP;
	dx->dx_desc[DMAC_DESC_SRC].xd_addr_hold = false;
	dx->dx_desc[DMAC_DESC_SRC].xd_nsegs = p->nsegs;
	dx->dx_desc[DMAC_DESC_SRC].xd_dma_segs = p->segs;
	/* Destination held constant: sc_dr points at the FIFO register. */
	dx->dx_desc[DMAC_DESC_DST].xd_addr_hold = true;
	dx->dx_desc[DMAC_DESC_DST].xd_nsegs = 1;
	dx->dx_desc[DMAC_DESC_DST].xd_dma_segs = &sc->sc_dr;

	sc->sc_txfunc = tx_func;
	sc->sc_txarg = tx_arg;

	/* Start DMA */
	return pxa2x0_dmac_start_xfer(dx);
}
395
/*
 * Start a record DMA transfer of 'bsize' bytes into 'block', which
 * must lie entirely within a buffer from pxa2x0_i2s_allocm().
 * rx_func(rx_arg) is invoked under sc_intr_lock when the transfer
 * completes.  Returns 0 on success, EBUSY if a receive is already in
 * progress, ENXIO if 'block' is not inside any allocated buffer, or
 * the error from pxa2x0_dmac_start_xfer().
 */
int
pxa2x0_i2s_start_input(void *hdl, void *block, int bsize,
    void (*rx_func)(void *), void *rx_arg)
{
	struct pxa2x0_i2s_softc *sc = hdl;
	struct pxa2x0_i2s_dma *p;
	struct dmac_xfer *dx;

	if (sc->sc_rxdma)
		return EBUSY;

	/* Find mapping which contains block completely */
	for (p = sc->sc_dmas;
	    p != NULL &&
	    (((char*)block < (char *)p->addr) ||
	    ((char *)block + bsize > (char *)p->addr + p->size));
	    p = p->next) {
		continue;	/* Nothing */
	}
	if (p == NULL) {
		aprint_error("pxa2x0_i2s_start_input: "
		    "request with bad start address: %p, size: %d\n",
		    block, bsize);
		return ENXIO;
	}
	sc->sc_rxdma = p;

	/* Describe just the requested window within the buffer. */
	p->segs[0].ds_addr = p->map->dm_segs[0].ds_addr +
	    ((char *)block - (char *)p->addr);
	p->segs[0].ds_len = bsize;

	/* SADR FIFO -> memory; flow controlled on the source side. */
	dx = p->dx;
	dx->dx_done = pxa2x0_i2s_dmac_iintr;
	dx->dx_peripheral = DMAC_PERIPH_I2SRX;
	dx->dx_flow = DMAC_FLOW_CTRL_SRC;
	dx->dx_loop_notify = DMAC_DONT_LOOP;
	/* Source held constant: sc_dr points at the FIFO register. */
	dx->dx_desc[DMAC_DESC_SRC].xd_addr_hold = true;
	dx->dx_desc[DMAC_DESC_SRC].xd_nsegs = 1;
	dx->dx_desc[DMAC_DESC_SRC].xd_dma_segs = &sc->sc_dr;
	dx->dx_desc[DMAC_DESC_DST].xd_addr_hold = false;
	dx->dx_desc[DMAC_DESC_DST].xd_nsegs = p->nsegs;
	dx->dx_desc[DMAC_DESC_DST].xd_dma_segs = p->segs;

	sc->sc_rxfunc = rx_func;
	sc->sc_rxarg = rx_arg;

	/* Start DMA */
	return pxa2x0_dmac_start_xfer(dx);
}
445
446 static void
447 pxa2x0_i2s_dmac_ointr(struct dmac_xfer *dx, int status)
448 {
449 struct pxa2x0_i2s_softc *sc = dx->dx_cookie;
450
451 if (sc->sc_txdma == NULL) {
452 panic("pxa2x_i2s_dmac_ointr: bad TX DMA descriptor!");
453 }
454 if (sc->sc_txdma->dx != dx) {
455 panic("pxa2x_i2s_dmac_ointr: xfer mismatch!");
456 }
457 sc->sc_txdma = NULL;
458
459 if (status) {
460 aprint_error("pxa2x0_i2s_dmac_ointr: "
461 "non-zero completion status %d\n", status);
462 }
463
464 mutex_spin_enter(sc->sc_intr_lock);
465 (sc->sc_txfunc)(sc->sc_txarg);
466 mutex_spin_exit(sc->sc_intr_lock);
467 }
468
469 static void
470 pxa2x0_i2s_dmac_iintr(struct dmac_xfer *dx, int status)
471 {
472 struct pxa2x0_i2s_softc *sc = dx->dx_cookie;
473
474 if (sc->sc_rxdma == NULL) {
475 panic("pxa2x_i2s_dmac_iintr: bad RX DMA descriptor!");
476 }
477 if (sc->sc_rxdma->dx != dx) {
478 panic("pxa2x_i2s_dmac_iintr: xfer mismatch!");
479 }
480 sc->sc_rxdma = NULL;
481
482 if (status) {
483 aprint_error("pxa2x0_i2s_dmac_iintr: "
484 "non-zero completion status %d\n", status);
485 }
486
487
488 mutex_spin_enter(sc->sc_intr_lock);
489 (sc->sc_rxfunc)(sc->sc_rxarg);
490 mutex_spin_exit(sc->sc_intr_lock);
491 }
492