/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Fleischer <paul (at) xpg.dk>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/queue.h>

#include <sys/mutex.h>
#include <sys/condvar.h>

#include <machine/cpu.h>
#include <sys/bus.h>

#include <arch/arm/s3c2xx0/s3c2440_dma.h>

#include <arm/s3c2xx0/s3c2440var.h>
#include <arch/arm/s3c2xx0/s3c2440reg.h>

#include <uvm/uvm_extern.h>
#include <machine/pmap.h>

//#define S3C2440_DMA_DEBUG
#ifdef S3C2440_DMA_DEBUG
#define DPRINTF(s) do {printf s; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(s) do {} while (/*CONSTCOND*/0)
#endif

#define DMAC_N_CHANNELS 4

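/*
 * Cursor into a caller-supplied bus_dma segment list: the segment currently
 * being transferred and the number of segments still outstanding.
 */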
struct dmac_desc_segs {
	bus_dma_segment_t	*ds_curseg;
	uint8_t			ds_nsegs;
};

SIMPLEQ_HEAD(dmac_xfer_state_head, dmac_xfer_state);

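/*
 * Driver-private view of a transfer: the public dmac_xfer handed to the
 * client plus the scheduling state (queue linkage, assigned channel,
 * per-descriptor segment cursors and the precomputed DMACON option bits).
 */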
struct dmac_xfer_state {
	struct dmac_xfer		dxs_xfer;
	SIMPLEQ_ENTRY(dmac_xfer_state)	dxs_link;
	uint8_t				dxs_channel;
#define DMAC_NO_CHANNEL (~0)
	uint8_t				dxs_width;
	bool				dxs_complete;
	struct dmac_desc_segs		dxs_segs[2];
	uint32_t			dxs_options;
};

struct s3c2440_dmac_peripheral {
	uint8_t	dp_id;
	uint8_t	dp_channel_order[DMAC_N_CHANNELS+1];
#define PERPH_LAST DMAC_N_CHANNELS+1
	uint8_t	dp_channel_source[DMAC_N_CHANNELS];
#define PERPH_NA 7
};

struct s3c2440_dmac_channel {
	struct dmac_xfer_state		*dc_active; /* Active transfer, NULL if none */

	/* Request queue. Can easily be extended to support multiple
	   priorities */
	struct dmac_xfer_state_head	dc_queue;
};

struct s3c2440_dmac_softc {
	bus_space_tag_t			sc_iot;
	bus_space_handle_t		sc_dmach;
	bus_dma_tag_t			sc_dmat;
	struct kmutex			sc_mutex;
	struct s3c2440_dmac_channel	sc_channels[DMAC_N_CHANNELS];
	struct kmutex			sc_intr_mutex;
	struct kcondvar			sc_intr_cv;
};

static struct s3c2440_dmac_softc _s3c2440_dmac_sc;
static struct s3c2440_dmac_softc *s3c2440_dmac_sc = &_s3c2440_dmac_sc;

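/*
 * Peripheral request map.  For each DMA request source the table lists, in
 * priority order, the channels that can serve it (terminated by PERPH_LAST)
 * and, per channel, the hardware source-select value to program into DCON
 * (PERPH_NA where the peripheral cannot be routed to that channel).
 */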
/* TODO: Consider making the order configurable. */
static struct s3c2440_dmac_peripheral s3c2440_peripherals[] = {
{DMAC_PERIPH_NONE, {0,1,2,3,PERPH_LAST}, {0, 0, 0, 0}},
{DMAC_PERIPH_XDREQ0, {0,PERPH_LAST}, {0, PERPH_NA, PERPH_NA, PERPH_NA}},
{DMAC_PERIPH_XDREQ1, {1,PERPH_LAST}, {PERPH_NA, 0, PERPH_NA, PERPH_NA}},
{DMAC_PERIPH_UART0, {0,PERPH_LAST}, {1, PERPH_NA, PERPH_NA, PERPH_NA}},
{DMAC_PERIPH_UART1, {1,PERPH_LAST}, {PERPH_NA, 1, PERPH_NA, PERPH_NA}},
{DMAC_PERIPH_UART2, {3,PERPH_LAST}, {PERPH_NA, PERPH_NA, PERPH_NA, 0}},
{DMAC_PERIPH_I2SSDO, {0, 2, PERPH_LAST}, {5, PERPH_NA, 0, PERPH_NA}},
{DMAC_PERIPH_I2SSDI, {1, 2, PERPH_LAST}, {PERPH_NA, 2, 1, PERPH_NA}},
{DMAC_PERIPH_SDI, {3, 2, 1, PERPH_LAST}, {2, 6, 2, 1}},
{DMAC_PERIPH_SPI0, {1, PERPH_LAST}, {PERPH_NA, 3, PERPH_NA, PERPH_NA}},
{DMAC_PERIPH_SPI1, {3, PERPH_LAST}, {PERPH_NA, PERPH_NA, PERPH_NA, 2}},
{DMAC_PERIPH_PCMIN, {0, 2, PERPH_LAST}, {6, PERPH_NA, 5, PERPH_NA}},
{DMAC_PERIPH_PCMOUT, {1, 3, PERPH_LAST}, {PERPH_NA, 5, PERPH_NA, 6}},
{DMAC_PERIPH_MICIN, {2, 3, PERPH_LAST}, {PERPH_NA, PERPH_NA, 6, 5}},
{DMAC_PERIPH_MICOUT, {2, 3, PERPH_LAST}, {PERPH_NA, PERPH_NA, 6, 5}},
{DMAC_PERIPH_TIMER, {0, 2, 3, PERPH_LAST}, {3, PERPH_NA, 3, 3}},
{DMAC_PERIPH_USBEP1, {0, PERPH_LAST}, {4, PERPH_NA, PERPH_NA, PERPH_NA}},
{DMAC_PERIPH_USBEP2, {1, PERPH_LAST}, {PERPH_NA, 4, PERPH_NA, PERPH_NA}},
{DMAC_PERIPH_USBEP3, {2, PERPH_LAST}, {PERPH_NA, PERPH_NA, 4, PERPH_NA}},
{DMAC_PERIPH_USBEP4, {3, PERPH_LAST}, {PERPH_NA, PERPH_NA, PERPH_NA, 4}}
};

static void dmac_start(uint8_t channel_no, struct dmac_xfer_state*);
static void dmac_transfer_segment(uint8_t channel_no, struct dmac_xfer_state*);
static void dmac_channel_done(uint8_t channel_no);

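/*
 * One-time controller setup: cache the bus space tag, register handle and
 * DMA tag from s3c2xx0_softc, reset the per-channel state and request
 * queues, initialize the locks and hook the four DMA interrupt lines.
 * The interrupt argument is the channel number plus one; s3c2440_dma_intr
 * subtracts one again to recover the channel index.
 */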
void
s3c2440_dma_init(void)
{
	struct s3c2440_dmac_softc *sc = s3c2440_dmac_sc;
	int i;

	sc->sc_iot = s3c2xx0_softc->sc_iot;
	sc->sc_dmach = s3c2xx0_softc->sc_dmach;
	sc->sc_dmat = s3c2xx0_softc->sc_dmat;
	for(i = 0; i<DMAC_N_CHANNELS; i++) {
		sc->sc_channels[i].dc_active = NULL;
		SIMPLEQ_INIT(&sc->sc_channels[i].dc_queue);
	}

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_BIO);

	mutex_init(&sc->sc_intr_mutex, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_intr_cv, "s3c2440_dmaintr");

	/* Set up interrupt handlers for the DMA controller */
	s3c24x0_intr_establish(S3C24X0_INT_DMA0, IPL_BIO,
			       IST_EDGE_RISING, s3c2440_dma_intr, (void*)1);
	s3c24x0_intr_establish(S3C24X0_INT_DMA1, IPL_BIO,
			       IST_EDGE_RISING, s3c2440_dma_intr, (void*)2);
	s3c24x0_intr_establish(S3C24X0_INT_DMA2, IPL_BIO,
			       IST_EDGE_RISING, s3c2440_dma_intr, (void*)3);
	s3c24x0_intr_establish(S3C24X0_INT_DMA3, IPL_BIO,
			       IST_EDGE_RISING, s3c2440_dma_intr, (void*)4);
}

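/*
 * Per-channel DMA completion interrupt.  arg is the channel number plus one
 * (as registered in s3c2440_dma_init).  Once the channel has left the BUSY
 * state the segment cursors of the incrementing descriptor(s) are advanced;
 * either the next segment is programmed, or the transfer is marked complete,
 * the channel is handed its next queued transfer and the done callback is
 * invoked.  Finally, any thread sleeping in s3c2440_dmac_wait_xfer is woken.
 */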
int
s3c2440_dma_intr(void *arg)
{
	/*struct s3c2xx0_softc *sc = s3c2xx0_softc;*/
	struct s3c2440_dmac_softc *sc;
	uint32_t status;
	int channel;
	struct s3c2440_dmac_channel *dc;

	sc = s3c2440_dmac_sc;

	channel = (int)arg - 1;
	dc = &sc->sc_channels[channel];

	DPRINTF(("s3c2440_dma_intr\n"));
	DPRINTF(("Channel %d\n", channel));

	status = bus_space_read_4(sc->sc_iot, sc->sc_dmach, DMA_STAT(channel));
	DPRINTF(("Channel %d status: %d\n", channel, status));

	if ( !(status & DMASTAT_BUSY) ) {
		struct dmac_xfer_state *dxs;
		struct dmac_xfer *dx;

		dxs = dc->dc_active;
		KASSERT(dxs != NULL);

		dx = &dxs->dxs_xfer;

		if (dx->dx_desc[DMAC_DESC_SRC].xd_increment) {
			dxs->dxs_segs[DMAC_DESC_SRC].ds_nsegs--;
			if (dxs->dxs_segs[DMAC_DESC_SRC].ds_nsegs == 0) {
				dxs->dxs_complete = TRUE;
			} else {
				dxs->dxs_segs[DMAC_DESC_SRC].ds_curseg++;
			}
		}
		if (dx->dx_desc[DMAC_DESC_DST].xd_increment) {
			dxs->dxs_segs[DMAC_DESC_DST].ds_nsegs--;
			if (dxs->dxs_segs[DMAC_DESC_DST].ds_nsegs == 0) {
				dxs->dxs_complete = TRUE;
			} else {
				dxs->dxs_segs[DMAC_DESC_DST].ds_curseg++;
			}
		}

		if (dxs->dxs_complete) {
			dxs->dxs_channel = DMAC_NO_CHANNEL;

			/* Lock the DMA mutex before tampering with
			   the channel.
			*/
			mutex_enter(&sc->sc_mutex);
			dmac_channel_done(channel);
			mutex_exit(&sc->sc_mutex);

			DPRINTF(("dx_done: %p\n", (void*)dx->dx_done));
			if (dx->dx_done != NULL) {
				(dx->dx_done)(dx, dx->dx_cookie);
			}
		} else {
			dmac_transfer_segment(channel, dxs);
		}
	}
#if 0
	if ( !(status & DMASTAT_BUSY) ) {
		s3c2440_dma_xfer_t xfer;

		xfer = dma_channel_xfer[channel];
		dma_channel_xfer[channel] = NULL;

		DPRINTF((" Channel %d completed transfer\n", channel));

		if (xfer->dx_remaining > 0 &&
		    xfer->dx_aborted == FALSE) {

			DPRINTF(("Preparing next transfer\n"));

			s3c2440_dma_xfer_start(xfer);
		} else {
			if (!xfer->dx_aborted && xfer->dx_callback != NULL)
				(xfer->dx_callback)(xfer->dx_callback_arg);

			xfer->dx_complete = TRUE;
		}

	}
#endif

#ifdef S3C2440_DMA_DEBUG
	status = bus_space_read_4(sc->sc_iot, sc->sc_dmach, DMA_CSRC(channel));
	printf("Current source for channel %d: 0x%x\n", channel, status);

	status = bus_space_read_4(sc->sc_iot, sc->sc_dmach, DMA_CDST(channel));
	printf("Current dest   for channel %d: 0x%x\n", channel, status);
#endif

	/* TODO: Remove this as it activates any thread waiting for a transfer
	   to complete */
	mutex_enter(&sc->sc_intr_mutex);
	DPRINTF(("cv_broadcast\n"));
	cv_broadcast(&sc->sc_intr_cv);
	DPRINTF(("cv_broadcast done\n"));
	mutex_exit(&sc->sc_intr_mutex);

	return 1;
}

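/*
 * Allocate a transfer handle and preset the optional fields to sane
 * defaults (no done callback, automatic sync bus selection, demand mode,
 * no channel assigned).  The caller fills in the descriptors before
 * handing the handle to s3c2440_dmac_start_xfer().
 */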
dmac_xfer_t
s3c2440_dmac_allocate_xfer(void) {
	struct dmac_xfer_state *dxs;

	dxs = kmem_alloc(sizeof(struct dmac_xfer_state), KM_SLEEP);

	dxs->dxs_xfer.dx_done = NULL;
	dxs->dxs_xfer.dx_sync_bus = DMAC_SYNC_BUS_AUTO;
	dxs->dxs_xfer.dx_xfer_mode = DMAC_XFER_MODE_DEMAND;
	dxs->dxs_channel = DMAC_NO_CHANNEL;

	return ((dmac_xfer_t)dxs);
}

void
s3c2440_dmac_free_xfer(dmac_xfer_t dx) {
	kmem_free(dx, sizeof(struct dmac_xfer_state));
}

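/*
 * Typical client usage (an illustrative sketch only, not compiled here;
 * "mydev_dma_done", "sc", "map" and "fifo_seg" are hypothetical names, and
 * the descriptor fields are defined in s3c2440_dma.h):
 *
 *	dmac_xfer_t dx = s3c2440_dmac_allocate_xfer();
 *
 *	dx->dx_done = mydev_dma_done;		// completion callback
 *	dx->dx_cookie = sc;			// argument for the callback
 *	dx->dx_peripheral = DMAC_PERIPH_SDI;	// hardware request source
 *	dx->dx_xfer_mode = DMAC_XFER_MODE_HANDSHAKE;
 *	dx->dx_xfer_width = DMAC_XFER_WIDTH_32BIT;
 *
 *	// Source: memory described by a loaded bus_dma map, walking its
 *	// segment list; set xd_bus_type to the header's system-bus value.
 *	dx->dx_desc[DMAC_DESC_SRC].xd_increment = true;
 *	dx->dx_desc[DMAC_DESC_SRC].xd_dma_segs = map->dm_segs;
 *	dx->dx_desc[DMAC_DESC_SRC].xd_nsegs = map->dm_nsegs;
 *
 *	// Destination: a fixed peripheral FIFO register, described by one
 *	// hand-built bus_dma_segment_t whose ds_addr is the FIFO address.
 *	dx->dx_desc[DMAC_DESC_DST].xd_increment = false;
 *	dx->dx_desc[DMAC_DESC_DST].xd_bus_type = DMAC_BUS_TYPE_PERIPHERAL;
 *	dx->dx_desc[DMAC_DESC_DST].xd_dma_segs = &fifo_seg;
 *	dx->dx_desc[DMAC_DESC_DST].xd_nsegs = 1;
 *
 *	error = s3c2440_dmac_start_xfer(dx);
 *	// ...then either wait for dx_done to fire, or block with a timeout:
 *	error = s3c2440_dmac_wait_xfer(dx, hz);
 *	s3c2440_dmac_free_xfer(dx);
 */

/*
 * Start (or queue) a transfer.  The channels able to serve the requested
 * peripheral are scanned in priority order; the first idle one is started
 * right away via dmac_start(), otherwise the transfer is queued on the
 * highest-priority channel and picked up later by dmac_channel_done().
 */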
int
s3c2440_dmac_start_xfer(dmac_xfer_t dx) {
	struct s3c2440_dmac_softc *sc = s3c2440_dmac_sc;
	struct dmac_xfer_state *dxs = (struct dmac_xfer_state*)dx;
	struct s3c2440_dmac_peripheral *perph;
	int i;
	bool transfer_started = FALSE;

	if (dxs->dxs_xfer.dx_peripheral != DMAC_PERIPH_NONE &&
	    dxs->dxs_xfer.dx_peripheral >= DMAC_N_PERIPH)
		return EINVAL;

	dxs->dxs_complete = FALSE;

	perph = &s3c2440_peripherals[dxs->dxs_xfer.dx_peripheral];
#ifdef DIAGNOSTIC
	DPRINTF(("dp_id: %d, dx_peripheral: %d\n", perph->dp_id, dxs->dxs_xfer.dx_peripheral));
	KASSERT(perph->dp_id == dxs->dxs_xfer.dx_peripheral);
#endif

	mutex_enter(&sc->sc_mutex);
	/* Get the list of possible channels for this peripheral.
	   If none of the channels is currently idle, queue the
	   transfer on the one with the highest priority
	   (first in the order list).
	 */
	for(i=0;
	    perph->dp_channel_order[i] != PERPH_LAST;
	    i++) {
		uint8_t channel_no = perph->dp_channel_order[i];

#ifdef DIAGNOSTIC
		/* Check that there is a mapping for the given channel.
		   If this fails, there is something wrong in
		   s3c2440_peripherals.
		 */
		KASSERT(perph->dp_channel_source[channel_no] != PERPH_NA);
#endif

		if (sc->sc_channels[channel_no].dc_active == NULL) {
			/* Transfer can start right away */
			dmac_start(channel_no, dxs);
			transfer_started = TRUE;
			break;
		}
	}

	if (transfer_started == FALSE) {
		uint8_t channel_no = perph->dp_channel_order[0];
		/* Enqueue the transfer, as none of the DMA channels were
		   available.
		   The highest-priority channel is used.
		*/
		dxs->dxs_channel = channel_no;
		SIMPLEQ_INSERT_TAIL(&sc->sc_channels[channel_no].dc_queue, dxs, dxs_link);
		DPRINTF(("Enqueued transfer on channel %d\n", channel_no));
	}

	mutex_exit(&sc->sc_mutex);

	return 0;
}

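/*
 * Claim an idle channel for a transfer: record it as active, reset the
 * per-descriptor segment cursors and precompute the DMACON option bits
 * (service and transfer mode, transfer width, request source and sync bus)
 * that every segment of this transfer will use.  Must be called with
 * sc_mutex held; the actual register programming happens per segment in
 * dmac_transfer_segment().
 */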
static void
dmac_start(uint8_t channel_no, struct dmac_xfer_state *dxs) {
	struct s3c2440_dmac_softc	*sc = s3c2440_dmac_sc;
	struct s3c2440_dmac_channel	*dc = &sc->sc_channels[channel_no];
	uint32_t			options;
#ifdef DIAGNOSTIC
	uint32_t			reg;
#endif
	dmac_sync_bus_t			sync_bus;
	struct dmac_xfer		*dx = &dxs->dxs_xfer;

	/* Must be called with sc_mutex locked */

	DPRINTF(("Starting DMA transfer (%p) on channel %d\n", dxs, channel_no));

	KASSERT(dc->dc_active == NULL);

#ifdef DIAGNOSTIC
	reg = bus_space_read_4(sc->sc_iot, sc->sc_dmach, DMA_STAT(channel_no));
	if (reg & DMASTAT_BUSY)
		panic("DMA channel is busy, cannot start new transfer!");

#endif

	dc->dc_active = dxs;
	dxs->dxs_channel = channel_no;
	dxs->dxs_segs[DMAC_DESC_SRC].ds_curseg = dx->dx_desc[DMAC_DESC_SRC].xd_dma_segs;
	dxs->dxs_segs[DMAC_DESC_SRC].ds_nsegs = dx->dx_desc[DMAC_DESC_SRC].xd_nsegs;
	dxs->dxs_segs[DMAC_DESC_DST].ds_curseg = dx->dx_desc[DMAC_DESC_DST].xd_dma_segs;
	dxs->dxs_segs[DMAC_DESC_DST].ds_nsegs = dx->dx_desc[DMAC_DESC_DST].xd_nsegs;

	options = DMACON_INT_INT |
		DMACON_RELOAD_NO_AUTO;

	if (dxs->dxs_xfer.dx_peripheral == DMAC_PERIPH_NONE) {
		options |= DMACON_SERVMODE_WHOLE;
	} else {
		options |= DMACON_SERVMODE_SINGLE;
	}

	switch (dxs->dxs_xfer.dx_xfer_mode) {
	case DMAC_XFER_MODE_DEMAND:
		options |= DMACON_DEMAND;
		break;
	case DMAC_XFER_MODE_HANDSHAKE:
		options |= DMACON_HANDSHAKE;
		break;
	default:
		panic("Unknown dx_xfer_mode");
	}

	sync_bus = dxs->dxs_xfer.dx_sync_bus;

	switch (dxs->dxs_xfer.dx_xfer_width) {
	case DMAC_XFER_WIDTH_8BIT:
		DPRINTF(("8-Bit (BYTE) transfer width\n"));
		options |= DMACON_DSZ_B;
		dxs->dxs_width = 1;
		break;
	case DMAC_XFER_WIDTH_16BIT:
		DPRINTF(("16-Bit (HALF-WORD) transfer width\n"));
		options |= DMACON_DSZ_HW;
		dxs->dxs_width = 2;
		break;
	case DMAC_XFER_WIDTH_32BIT:
		DPRINTF(("32-Bit (WORD) transfer width\n"));
		options |= DMACON_DSZ_W;
		dxs->dxs_width = 4;
		break;
	default:
		panic("Unknown transfer width");
	}

	if (dxs->dxs_xfer.dx_peripheral == DMAC_PERIPH_NONE) {
		options |= DMACON_SW_REQ;
		if (sync_bus == DMAC_SYNC_BUS_AUTO)
			sync_bus = DMAC_SYNC_BUS_SYSTEM;
	} else {
		uint8_t source = s3c2440_peripherals[dxs->dxs_xfer.dx_peripheral].dp_channel_source[channel_no];
		DPRINTF(("Hw request source: %d, channel: %d\n", source, channel_no));
		options |= DMACON_HW_REQ | DMACON_HW_SRCSEL(source);
		if (sync_bus == DMAC_SYNC_BUS_AUTO)
			sync_bus = DMAC_SYNC_BUS_PERIPHERAL;
	}

	if (sync_bus == DMAC_SYNC_BUS_SYSTEM) {
		DPRINTF(("Syncing with system bus\n"));
		options |= DMACON_SYNC_AHB;
	} else if (sync_bus == DMAC_SYNC_BUS_PERIPHERAL) {
		DPRINTF(("Syncing with peripheral bus\n"));
		options |= DMACON_SYNC_APB;
	} else {
		panic("No sync bus given");
	}

	dxs->dxs_options = options;

	/* We have now configured the options that will hold for all segment transfers.
	   Next, we prepare and start the transfer for the first segment */
	dmac_transfer_segment(channel_no, dxs);

}

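/*
 * Program the controller for the current segment of a transfer: source and
 * destination addresses, bus locations and increment modes, then the
 * transfer count in DMACON (in units of dxs_width, rounded up so a partial
 * final unit is still transferred) and finally the mask/trigger register,
 * adding a software trigger for memory-to-memory transfers.
 */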
static void
dmac_transfer_segment(uint8_t channel_no, struct dmac_xfer_state *dxs)
{
	struct s3c2440_dmac_softc	*sc = s3c2440_dmac_sc;
	/*	struct s3c2440_dmac_channel	*dc = &sc->sc_channels[channel_no];*/
	uint32_t			reg, transfer_size;
	struct dmac_xfer		*dx = &dxs->dxs_xfer;

	DPRINTF(("dmac_transfer_segment\n"));

	/* Prepare the source */
	bus_space_write_4(sc->sc_iot, sc->sc_dmach,
			  DMA_DISRC(channel_no),
			  dxs->dxs_segs[DMAC_DESC_SRC].ds_curseg->ds_addr);

	DPRINTF(("Source address: 0x%x\n", (unsigned)dxs->dxs_segs[DMAC_DESC_SRC].ds_curseg->ds_addr));
	DPRINTF(("Dest.  address: 0x%x\n", (unsigned)dxs->dxs_segs[DMAC_DESC_DST].ds_curseg->ds_addr));
	reg = 0;
	if (dx->dx_desc[DMAC_DESC_SRC].xd_bus_type == DMAC_BUS_TYPE_PERIPHERAL) {
		reg |= DISRCC_LOC_APB;
	} else {
		reg |= DISRCC_LOC_AHB;
	}
	if (dx->dx_desc[DMAC_DESC_SRC].xd_increment) {
		reg |= DISRCC_INC_INC;
	} else {
		reg |= DISRCC_INC_FIXED;
	}
	bus_space_write_4(sc->sc_iot, sc->sc_dmach, DMA_DISRCC(channel_no), reg);

	/* Prepare the destination */
	bus_space_write_4(sc->sc_iot, sc->sc_dmach,
			  DMA_DIDST(channel_no),
			  dxs->dxs_segs[DMAC_DESC_DST].ds_curseg->ds_addr);
	reg = 0;
	if (dx->dx_desc[DMAC_DESC_DST].xd_bus_type == DMAC_BUS_TYPE_PERIPHERAL) {
		reg |= DIDSTC_LOC_APB;
	} else {
		reg |= DIDSTC_LOC_AHB;
	}
	if (dx->dx_desc[DMAC_DESC_DST].xd_increment) {
		reg |= DIDSTC_INC_INC;
	} else {
		reg |= DIDSTC_INC_FIXED;
	}
	bus_space_write_4(sc->sc_iot, sc->sc_dmach, DMA_DIDSTC(channel_no), reg);

	/* Let the incrementing party decide how much data to transfer.
	   If both are incrementing, use the smaller of the two segment sizes.
	 */
	if (dx->dx_desc[DMAC_DESC_SRC].xd_increment) {
		if (!dx->dx_desc[DMAC_DESC_DST].xd_increment) {
			transfer_size = dxs->dxs_segs[DMAC_DESC_SRC].ds_curseg->ds_len;
		} else {
			transfer_size = uimin(dxs->dxs_segs[DMAC_DESC_DST].ds_curseg->ds_len,
					    dxs->dxs_segs[DMAC_DESC_SRC].ds_curseg->ds_len);
		}
	} else {
		if (dx->dx_desc[DMAC_DESC_DST].xd_increment) {
			transfer_size = dxs->dxs_segs[DMAC_DESC_DST].ds_curseg->ds_len;
		} else {
			panic("S3C2440 DMA code does not support both source and destination being non-incrementing");
		}
	}

	/* Set the options prepared by dmac_start and add the transfer count.
	   If transfer_size is not a whole multiple of dxs_width, add one
	   extra transfer of dxs_width so that all bytes are transferred.
	 */
	bus_space_write_4(sc->sc_iot, sc->sc_dmach, DMA_CON(channel_no),
			  dxs->dxs_options |
			  DMACON_TC(((transfer_size/dxs->dxs_width)+
				     uimin((transfer_size % dxs->dxs_width), 1))));

	DPRINTF(("Transfer size: %d (%d)\n", transfer_size, transfer_size/dxs->dxs_width));

	/* Start the transfer */
	reg = DMAMASKTRIG_ON;
	if (dxs->dxs_xfer.dx_peripheral == DMAC_PERIPH_NONE) {
		reg |= DMAMASKTRIG_SW_TRIG;
	}
	bus_space_write_4(sc->sc_iot, sc->sc_dmach, DMA_MASKTRIG(channel_no),
			  reg);

#if defined(S3C2440_DMA_DEBUG)
	reg = bus_space_read_4(sc->sc_iot, sc->sc_dmach, DMA_DISRC(channel_no));
	printf("DMA_DISRC: 0x%X\n", reg);

	reg = bus_space_read_4(sc->sc_iot, sc->sc_dmach, DMA_DISRCC(channel_no));
	printf("DMA_DISRCC: 0x%X\n", reg);

	reg = bus_space_read_4(sc->sc_iot, sc->sc_dmach, DMA_DIDST(channel_no));
	printf("DMA_DIDST: 0x%X\n", reg);

	reg = bus_space_read_4(sc->sc_iot, sc->sc_dmach, DMA_DIDSTC(channel_no));
	printf("DMA_DIDSTC: 0x%X\n", reg);

	reg = bus_space_read_4(sc->sc_iot, sc->sc_dmach, DMA_CON(channel_no));
	printf("DMA_CON: 0x%X\n", reg);

	reg = bus_space_read_4(sc->sc_iot, sc->sc_dmach, DMA_MASKTRIG(channel_no));
	printf("DMA_MASKTRIG: 0x%X\n", reg);

	reg = bus_space_read_4(sc->sc_iot, sc->sc_dmach, DMA_STAT(channel_no));
	printf("DMA_STAT: 0x%X\n", reg);
#endif
}

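/*
 * Called with sc_mutex held once a channel has finished (or aborted) its
 * active transfer: clear dc_active and, if another transfer is queued on
 * this channel, dequeue and start it.
 */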
static void
dmac_channel_done(uint8_t channel_no)
{
	struct s3c2440_dmac_softc	*sc;
	struct s3c2440_dmac_channel	*dc;

	sc = s3c2440_dmac_sc;

	/* sc->sc_mutex must be held when calling this function */

	dc = &sc->sc_channels[channel_no];

	dc->dc_active = NULL;
	/* We deal with the queue before calling the
	   done callback, as it might start a new DMA
	   transfer.
	*/
	if ( SIMPLEQ_EMPTY(&dc->dc_queue) ) {
		DPRINTF(("DMA Queue empty for channel %d\n", channel_no));
	} else {
		/* There is a transfer in the queue. Start it. */
		struct dmac_xfer_state *dxs;
		DPRINTF(("Took a transfer from the queue\n"));
		dxs = SIMPLEQ_FIRST(&dc->dc_queue);
		SIMPLEQ_REMOVE_HEAD(&dc->dc_queue, dxs_link);

		dmac_start(channel_no, dxs);
	}
}

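/*
 * Block until the given transfer completes.  timeout is in ticks and is
 * passed straight to cv_timedwait() (0 means wait without a time limit);
 * ETIMEDOUT is returned if the timeout expires before the completion
 * interrupt marks the transfer done.
 */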
int
s3c2440_dmac_wait_xfer(dmac_xfer_t dx, int timeout) {
	uint32_t		  complete;
	int			  err = 0;
	struct s3c2440_dmac_softc *sc = s3c2440_dmac_sc;
	struct dmac_xfer_state	  *dxs = (struct dmac_xfer_state*)dx;

	mutex_enter(&sc->sc_intr_mutex);
	complete = dxs->dxs_complete;
	while(complete == 0) {
		int status;
		DPRINTF(("s3c2440_dma_xfer_wait: Complete: %x\n", complete));

		if ( (status = cv_timedwait(&sc->sc_intr_cv,
					    &sc->sc_intr_mutex, timeout)) ==
		    EWOULDBLOCK ) {
			DPRINTF(("s3c2440_dma_xfer_wait: Timed out\n"));
			complete = 1;
			err = ETIMEDOUT;
			break;
		}

		complete = dxs->dxs_complete;
	}

	mutex_exit(&sc->sc_intr_mutex);

#if 0
	if (err == 0 && dxs->dxs_aborted == 1) {
		/* Transfer was aborted */
		err = EIO;
	}
#endif

	return err;
}

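/*
 * Abort a transfer.  If it is the active transfer on its channel the
 * channel is stopped; when the hardware has already cleared the ON bit the
 * completion interrupt will not fire, so the channel is cleaned up here,
 * otherwise we wait for the pending completion.  A transfer that is still
 * queued is simply removed from its channel's queue.
 */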
void
s3c2440_dmac_abort_xfer(dmac_xfer_t dx) {
	struct s3c2440_dmac_softc	*sc = s3c2440_dmac_sc;
	struct dmac_xfer_state		*dxs = (struct dmac_xfer_state*)dx;
	struct s3c2440_dmac_channel	*dc;
	bool				wait = FALSE;

	KASSERT(dxs->dxs_channel != (uint8_t)DMAC_NO_CHANNEL);

	dc = &sc->sc_channels[dxs->dxs_channel];

	mutex_enter(&sc->sc_mutex);

	if (dc->dc_active == dxs) {
		uint32_t reg;

		bus_space_write_4(sc->sc_iot, sc->sc_dmach,
				  DMA_MASKTRIG(dxs->dxs_channel),
				  DMAMASKTRIG_STOP);
		reg = bus_space_read_4(sc->sc_iot, sc->sc_dmach,
				       DMA_MASKTRIG(dxs->dxs_channel));
		DPRINTF(("s3c2440_dma: channel %d mask trigger %x\n", dxs->dxs_channel, reg));

		if ( !(reg & DMAMASKTRIG_ON) ) {
			DPRINTF(("No wait for abort\n"));

			/* The transfer was aborted and the interrupt
			   was thus not triggered.  We need to clean up
			   the channel here. */
			dmac_channel_done(dxs->dxs_channel);
		} else {
			wait = TRUE;
		}
	} else {
		/* Transfer is not active, simply remove it from the queue */
		DPRINTF(("Removed transfer from queue\n"));
		SIMPLEQ_REMOVE(&dc->dc_queue, dxs, dmac_xfer_state, dxs_link);
	}

	mutex_exit(&sc->sc_mutex);

	if (wait == TRUE) {
		DPRINTF(("Abort: Wait for transfer to complete\n"));
		s3c2440_dmac_wait_xfer(dx, 0);
	}
}