/*	$NetBSD: gtidmac.c,v 1.21 2024/06/12 20:03:56 andvar Exp $	*/
/*
 * Copyright (c) 2008, 2012, 2016 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gtidmac.c,v 1.21 2024/06/12 20:03:56 andvar Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/endian.h>
#include <sys/kmem.h>

#include <uvm/uvm_param.h>	/* For PAGE_SIZE */

#include <dev/dmover/dmovervar.h>

#include <dev/marvell/gtidmacreg.h>
#include <dev/marvell/gtidmacvar.h>
#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>

#include <prop/proplib.h>

#include "locators.h"

#ifdef GTIDMAC_DEBUG
#define DPRINTF(x)	if (gtidmac_debug) printf x
int gtidmac_debug = 0;
#else
#define DPRINTF(x)
#endif

#define GTIDMAC_NDESC		64
#define GTIDMAC_MAXCHAN		8
#define MVXORE_NDESC		128
#define MVXORE_MAXCHAN		2

#define GTIDMAC_NSEGS	((GTIDMAC_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
#define MVXORE_NSEGS	((MVXORE_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)


struct gtidmac_softc;

struct gtidmac_function {
	int (*chan_alloc)(void *, bus_dmamap_t **, bus_dmamap_t **, void *);
	void (*chan_free)(void *, int);
	int (*dma_setup)(void *, int, int, bus_dmamap_t *, bus_dmamap_t *,
	    bus_size_t);
	void (*dma_start)(void *, int,
	    void (*dma_done_cb)(void *, int, bus_dmamap_t *,
	    bus_dmamap_t *, int));
	uint32_t (*dma_finish)(void *, int, int);
};

struct gtidmac_dma_desc {
	int dd_index;
	union {
		struct gtidmac_desc *idmac_vaddr;
		struct mvxore_desc *xore_vaddr;
	} dd_vaddr;
#define dd_idmac_vaddr	dd_vaddr.idmac_vaddr
#define dd_xore_vaddr	dd_vaddr.xore_vaddr
	paddr_t dd_paddr;
	SLIST_ENTRY(gtidmac_dma_desc) dd_next;
};

struct gtidmac_softc {
	device_t sc_dev;

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;

	bus_dma_tag_t sc_dmat;
	struct gtidmac_dma_desc *sc_dd_buffer;
	bus_dma_segment_t sc_pattern_segment;
	struct {
		u_char pbuf[16];	/* 16 bytes/pattern */
	} *sc_pbuf;			/* x256 pattern */

	int sc_gtidmac_nchan;
	struct gtidmac_desc *sc_dbuf;
	bus_dmamap_t sc_dmap;
	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist;
	struct {
		bus_dmamap_t chan_in;		/* In dmamap */
		bus_dmamap_t chan_out;		/* Out dmamap */
		uint64_t chan_totalcnt;		/* total transferred bytes */
		int chan_ddidx;
		void *chan_running;		/* opaque object data */
		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
		    bus_dmamap_t *, int);
	} sc_cdesc[GTIDMAC_MAXCHAN];
	struct gtidmac_intr_arg {
		struct gtidmac_softc *ia_sc;
		uint32_t ia_cause;
		uint32_t ia_mask;
		uint32_t ia_eaddr;
		uint32_t ia_eselect;
	} sc_intrarg[GTIDMAC_NINTRRUPT];

	int sc_mvxore_nchan;
	struct mvxore_desc *sc_dbuf_xore;
	bus_dmamap_t sc_dmap_xore;
	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist_xore;
	struct {
		bus_dmamap_t chan_in[MVXORE_NSRC];	/* In dmamap */
		bus_dmamap_t chan_out;			/* Out dmamap */
		uint64_t chan_totalcnt;			/* total transferred */
		int chan_ddidx;
		void *chan_running;			/* opaque object data */
		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
		    bus_dmamap_t *, int);
	} sc_cdesc_xore[MVXORE_MAXCHAN];

	struct dmover_backend sc_dmb;
	struct dmover_backend sc_dmb_xore;
	int sc_dmb_busy;
};
struct gtidmac_softc *gtidmac_softc = NULL;

static int gtidmac_match(device_t, struct cfdata *, void *);
static void gtidmac_attach(device_t, device_t, void *);

static int gtidmac_intr(void *);
static int mvxore_port0_intr(void *);
static int mvxore_port1_intr(void *);
static int mvxore_intr(struct gtidmac_softc *, int);

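/*
 * dmover(9) backend glue: gtidmac_process() is the entry point handed to
 * the dmover framework, gtidmac_dmover_run() drains the pending request
 * queue onto free channels, and gtidmac_dmover_done() is the per-channel
 * completion callback.
 */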
static void gtidmac_process(struct dmover_backend *);
static void gtidmac_dmover_run(struct dmover_backend *);
static void gtidmac_dmover_done(void *, int, bus_dmamap_t *, bus_dmamap_t *,
    int);
static __inline int gtidmac_dmmap_load(struct gtidmac_softc *, bus_dmamap_t,
    dmover_buffer_type, dmover_buffer *, int);
static __inline void gtidmac_dmmap_unload(struct gtidmac_softc *, bus_dmamap_t, int);

static uint32_t gtidmac_finish(void *, int, int);
static uint32_t mvxore_finish(void *, int, int);

static void gtidmac_wininit(struct gtidmac_softc *, enum marvell_tags *);
static void mvxore_wininit(struct gtidmac_softc *, enum marvell_tags *);

static int gtidmac_buffer_setup(struct gtidmac_softc *);
static int mvxore_buffer_setup(struct gtidmac_softc *);

#ifdef GTIDMAC_DEBUG
static void gtidmac_dump_idmacreg(struct gtidmac_softc *, int);
static void gtidmac_dump_idmacdesc(struct gtidmac_softc *,
    struct gtidmac_dma_desc *, uint32_t, int);
static void gtidmac_dump_xorereg(struct gtidmac_softc *, int);
static void gtidmac_dump_xoredesc(struct gtidmac_softc *,
    struct gtidmac_dma_desc *, uint32_t, int);
#endif


static struct gtidmac_function gtidmac_functions = {
	.chan_alloc = gtidmac_chan_alloc,
	.chan_free = gtidmac_chan_free,
	.dma_setup = gtidmac_setup,
	.dma_start = gtidmac_start,
	.dma_finish = gtidmac_finish,
};

static struct gtidmac_function mvxore_functions = {
	.chan_alloc = mvxore_chan_alloc,
	.chan_free = mvxore_chan_free,
	.dma_setup = mvxore_setup,
	.dma_start = mvxore_start,
	.dma_finish = mvxore_finish,
};

static const struct dmover_algdesc gtidmac_algdescs[] = {
	{
		.dad_name = DMOVER_FUNC_ZERO,
		.dad_data = &gtidmac_functions,
		.dad_ninputs = 0
	},
	{
		.dad_name = DMOVER_FUNC_FILL8,
		.dad_data = &gtidmac_functions,
		.dad_ninputs = 0
	},
	{
		.dad_name = DMOVER_FUNC_COPY,
		.dad_data = &gtidmac_functions,
		.dad_ninputs = 1
	},
};

static const struct dmover_algdesc mvxore_algdescs[] = {
#if 0
	/*
	 * These operations have a lot of restrictions.  It is necessary to
	 * use the IDMAC for them instead.
	 */
	{
		.dad_name = DMOVER_FUNC_ZERO,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 0
	},
	{
		.dad_name = DMOVER_FUNC_FILL8,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 0
	},
#endif
	{
		.dad_name = DMOVER_FUNC_COPY,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 1
	},
	{
		.dad_name = DMOVER_FUNC_ISCSI_CRC32C,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 1
	},
	{
		.dad_name = DMOVER_FUNC_XOR2,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 2
	},
	{
		.dad_name = DMOVER_FUNC_XOR3,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 3
	},
	{
		.dad_name = DMOVER_FUNC_XOR4,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 4
	},
	{
		.dad_name = DMOVER_FUNC_XOR5,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 5
	},
	{
		.dad_name = DMOVER_FUNC_XOR6,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 6
	},
	{
		.dad_name = DMOVER_FUNC_XOR7,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 7
	},
	{
		.dad_name = DMOVER_FUNC_XOR8,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 8
	},
};

static int orion_88f5182_xore_irqs[] = { 30, 31 };
static int kirkwood_xore_irqs[] = { 5, 6, 7, 8 };
static int dove_xore_irqs[] = { 39, 40, 42, 43 };
static int armadaxp_xore_irqs0[] = { 51, 52 };
static int armadaxp_xore_irqs1[] = { 94, 95 };

static struct {
	int model;
	int idmac_nchan;
	int idmac_irq;
	int xore_nchan;
	int *xore_irqs;
} channels[] = {
	/*
	 * Marvell System Controllers:
	 * need irqs in attach_args.
	 */
	{ MARVELL_DISCOVERY,		8, -1, 0, NULL },
	{ MARVELL_DISCOVERY_II,		8, -1, 0, NULL },
	{ MARVELL_DISCOVERY_III,	8, -1, 0, NULL },
#if 0
	{ MARVELL_DISCOVERY_LT,		4, -1, 2, NULL },
	{ MARVELL_DISCOVERY_V,		4, -1, 2, NULL },
	{ MARVELL_DISCOVERY_VI,		4, -1, 2, NULL },	????
#endif

	/*
	 * Marvell System on Chips:
	 * No need for irqs in attach_args.  We always connect to the
	 * interrupt pin statically.
	 */
	{ MARVELL_ORION_1_88F1181,	4, 24, 0, NULL },
	{ MARVELL_ORION_2_88F1281,	4, 24, 0, NULL },
	{ MARVELL_ORION_1_88F5082,	4, 24, 0, NULL },
	{ MARVELL_ORION_1_88F5180N,	4, 24, 0, NULL },
	{ MARVELL_ORION_1_88F5181,	4, 24, 0, NULL },
	{ MARVELL_ORION_1_88F5182,	4, 24, 2, orion_88f5182_xore_irqs },
	{ MARVELL_ORION_2_88F5281,	4, 24, 0, NULL },
	{ MARVELL_ORION_1_88W8660,	4, 24, 0, NULL },
	{ MARVELL_KIRKWOOD_88F6180,	0, -1, 4, kirkwood_xore_irqs },
	{ MARVELL_KIRKWOOD_88F6192,	0, -1, 4, kirkwood_xore_irqs },
	{ MARVELL_KIRKWOOD_88F6281,	0, -1, 4, kirkwood_xore_irqs },
	{ MARVELL_KIRKWOOD_88F6282,	0, -1, 4, kirkwood_xore_irqs },
	{ MARVELL_DOVE_88AP510,		0, -1, 4, dove_xore_irqs },
	{ MARVELL_ARMADAXP_MV78130,	4, 33, 2, armadaxp_xore_irqs0 },
	{ MARVELL_ARMADAXP_MV78130,	0, -1, 2, armadaxp_xore_irqs1 },
	{ MARVELL_ARMADAXP_MV78160,	4, 33, 2, armadaxp_xore_irqs0 },
	{ MARVELL_ARMADAXP_MV78160,	0, -1, 2, armadaxp_xore_irqs1 },
	{ MARVELL_ARMADAXP_MV78230,	4, 33, 2, armadaxp_xore_irqs0 },
	{ MARVELL_ARMADAXP_MV78230,	0, -1, 2, armadaxp_xore_irqs1 },
	{ MARVELL_ARMADAXP_MV78260,	4, 33, 2, armadaxp_xore_irqs0 },
	{ MARVELL_ARMADAXP_MV78260,	0, -1, 2, armadaxp_xore_irqs1 },
	{ MARVELL_ARMADAXP_MV78460,	4, 33, 2, armadaxp_xore_irqs0 },
	{ MARVELL_ARMADAXP_MV78460,	0, -1, 2, armadaxp_xore_irqs1 },
};

struct gtidmac_winacctbl *gtidmac_winacctbl;
struct gtidmac_winacctbl *mvxore_winacctbl;

CFATTACH_DECL_NEW(gtidmac_gt, sizeof(struct gtidmac_softc),
    gtidmac_match, gtidmac_attach, NULL, NULL);
CFATTACH_DECL_NEW(gtidmac_mbus, sizeof(struct gtidmac_softc),
    gtidmac_match, gtidmac_attach, NULL, NULL);


/* ARGSUSED */
static int
gtidmac_match(device_t parent, struct cfdata *match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	int unit, i;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;
	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
		return 0;
	unit = 0;
	for (i = 0; i < __arraycount(channels); i++)
		if (mva->mva_model == channels[i].model) {
			if (mva->mva_unit == unit) {
				mva->mva_size = GTIDMAC_SIZE;
				return 1;
			}
			unit++;
		}
	return 0;
}

/* ARGSUSED */
static void
gtidmac_attach(device_t parent, device_t self, void *aux)
{
	struct gtidmac_softc *sc = device_private(self);
	struct marvell_attach_args *mva = aux;
	prop_dictionary_t dict = device_properties(self);
	uint32_t idmac_irq, xore_irq, *xore_irqs, dmb_speed;
	int unit, idmac_nchan, xore_nchan, nsegs, i, j, n;

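	/*
	 * Find the channels[] entry for this model/unit pair.  A model may
	 * appear more than once (e.g. the Armada XP SoCs list an IDMAC
	 * entry and a second XORE-only entry), so mva_unit selects among
	 * the duplicates, mirroring the loop in gtidmac_match().
	 */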
	unit = 0;
	for (i = 0; i < __arraycount(channels); i++)
		if (mva->mva_model == channels[i].model) {
			if (mva->mva_unit == unit)
				break;
			unit++;
		}
	idmac_nchan = channels[i].idmac_nchan;
	idmac_irq = channels[i].idmac_irq;
	if (idmac_nchan != 0) {
		if (idmac_irq == -1)
			idmac_irq = mva->mva_irq;
		if (idmac_irq == -1)
			/* Discovery */
			if (!prop_dictionary_get_uint32(dict,
			    "idmac-irq", &idmac_irq)) {
				aprint_error(": no idmac-irq property\n");
				return;
			}
	}
	xore_nchan = channels[i].xore_nchan;
	xore_irqs = channels[i].xore_irqs;
	xore_irq = MVA_IRQ_DEFAULT;
	if (xore_nchan != 0) {
		if (xore_irqs == NULL)
			xore_irq = mva->mva_irq;
		if (xore_irqs == NULL && xore_irq == MVA_IRQ_DEFAULT)
			/* Discovery LT/V/VI */
			if (!prop_dictionary_get_uint32(dict,
			    "xore-irq", &xore_irq)) {
				aprint_error(": no xore-irq property\n");
				return;
			}
	}

	aprint_naive("\n");
	aprint_normal(": Marvell IDMA Controller%s\n",
	    xore_nchan ? "/XOR Engine" : "");
	if (idmac_nchan > 0)
		aprint_normal_dev(self,
		    "IDMA Controller %d channels, intr %d...%d\n",
		    idmac_nchan, idmac_irq, idmac_irq + GTIDMAC_NINTRRUPT - 1);
	if (xore_nchan > 0) {
		aprint_normal_dev(self, "XOR Engine %d channels", xore_nchan);
		if (xore_irqs == NULL)
			aprint_normal(", intr %d...%d\n",
			    xore_irq, xore_irq + xore_nchan - 1);
		else {
			aprint_normal(", intr %d", xore_irqs[0]);
			for (i = 1; i < xore_nchan; i++)
				aprint_normal(", %d", xore_irqs[i]);
			aprint_normal("\n");
		}
	}

	sc->sc_dev = self;
	sc->sc_iot = mva->mva_iot;

	/* Map I/O registers */
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
	    mva->mva_size, &sc->sc_ioh)) {
		aprint_error_dev(self, "can't map registers\n");
		return;
	}

	/*
	 * Initialise DMA descriptors and associated metadata
	 */
	sc->sc_dmat = mva->mva_dmat;
	n = idmac_nchan * GTIDMAC_NDESC + xore_nchan * MVXORE_NDESC;
	sc->sc_dd_buffer =
	    kmem_alloc(sizeof(struct gtidmac_dma_desc) * n, KM_SLEEP);

	/* pattern buffer */
	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &sc->sc_pattern_segment, 1, &nsegs, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self,
		    "bus_dmamem_alloc failed: pattern buffer\n");
		goto fail2;
	}
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_pattern_segment, 1, PAGE_SIZE,
	    (void **)&sc->sc_pbuf, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self,
		    "bus_dmamem_map failed: pattern buffer\n");
		goto fail3;
	}
	for (i = 0; i < 0x100; i++)
		for (j = 0; j < sizeof(sc->sc_pbuf[i].pbuf); j++)
			sc->sc_pbuf[i].pbuf[j] = i;

	if (!prop_dictionary_get_uint32(dict, "dmb_speed", &dmb_speed)) {
		aprint_error_dev(self, "no dmb_speed property\n");
		dmb_speed = 10;	/* Faster than swdmover, perhaps. */
	}

	/* IDMAC DMA descriptor buffer */
	sc->sc_gtidmac_nchan = idmac_nchan;
	if (sc->sc_gtidmac_nchan > 0) {
		if (gtidmac_buffer_setup(sc) != 0)
			goto fail4;

		if (mva->mva_model != MARVELL_DISCOVERY)
			gtidmac_wininit(sc, mva->mva_tags);

		/* Setup interrupt */
		for (i = 0; i < GTIDMAC_NINTRRUPT; i++) {
			j = i * idmac_nchan / GTIDMAC_NINTRRUPT;

			sc->sc_intrarg[i].ia_sc = sc;
			sc->sc_intrarg[i].ia_cause = GTIDMAC_ICR(j);
			sc->sc_intrarg[i].ia_eaddr = GTIDMAC_EAR(j);
			sc->sc_intrarg[i].ia_eselect = GTIDMAC_ESR(j);
			marvell_intr_establish(idmac_irq + i, IPL_BIO,
			    gtidmac_intr, &sc->sc_intrarg[i]);
		}

		/* Register us with dmover. */
		sc->sc_dmb.dmb_name = device_xname(self);
		sc->sc_dmb.dmb_speed = dmb_speed;
		sc->sc_dmb.dmb_cookie = sc;
		sc->sc_dmb.dmb_algdescs = gtidmac_algdescs;
		sc->sc_dmb.dmb_nalgdescs = __arraycount(gtidmac_algdescs);
		sc->sc_dmb.dmb_process = gtidmac_process;
		dmover_backend_register(&sc->sc_dmb);
		sc->sc_dmb_busy = 0;
	}

	/* XORE DMA descriptor buffer */
	sc->sc_mvxore_nchan = xore_nchan;
	if (sc->sc_mvxore_nchan > 0) {
		if (mvxore_buffer_setup(sc) != 0)
			goto fail5;

		/* Setup interrupt */
		for (i = 0; i < sc->sc_mvxore_nchan; i++)
			marvell_intr_establish(
			    xore_irqs != NULL ? xore_irqs[i] : xore_irq + i,
			    IPL_BIO,
			    (i & 0x2) ? mvxore_port1_intr : mvxore_port0_intr,
			    sc);

		mvxore_wininit(sc, mva->mva_tags);

		/* Register us with dmover. */
		sc->sc_dmb_xore.dmb_name = device_xname(sc->sc_dev);
		sc->sc_dmb_xore.dmb_speed = dmb_speed;
		sc->sc_dmb_xore.dmb_cookie = sc;
		sc->sc_dmb_xore.dmb_algdescs = mvxore_algdescs;
		sc->sc_dmb_xore.dmb_nalgdescs = __arraycount(mvxore_algdescs);
		sc->sc_dmb_xore.dmb_process = gtidmac_process;
		dmover_backend_register(&sc->sc_dmb_xore);
	}

	gtidmac_softc = sc;

	return;

fail5:
	for (i = sc->sc_gtidmac_nchan - 1; i >= 0; i--) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC);
	bus_dmamem_free(sc->sc_dmat,
	    sc->sc_dmap->dm_segs, sc->sc_dmap->dm_nsegs);
fail4:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_pbuf, PAGE_SIZE);
fail3:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_pattern_segment, 1);
fail2:
	kmem_free(sc->sc_dd_buffer, sizeof(struct gtidmac_dma_desc) * n);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, mva->mva_size);
	return;
}


static int
gtidmac_intr(void *arg)
{
	struct gtidmac_intr_arg *ia = arg;
	struct gtidmac_softc *sc = ia->ia_sc;
	uint32_t cause;
	int handled = 0, chan, error;

	cause = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause);
	DPRINTF(("IDMAC intr: cause=0x%x\n", cause));
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause, ~cause);

	chan = 0;
	while (cause) {
		error = 0;
		if (cause & GTIDMAC_I_ADDRMISS) {
			aprint_error_dev(sc->sc_dev, "Address Miss");
			error = EINVAL;
		}
		if (cause & GTIDMAC_I_ACCPROT) {
			aprint_error_dev(sc->sc_dev,
			    "Access Protect Violation");
			error = EACCES;
		}
		if (cause & GTIDMAC_I_WRPROT) {
			aprint_error_dev(sc->sc_dev, "Write Protect");
			error = EACCES;
		}
		if (cause & GTIDMAC_I_OWN) {
			aprint_error_dev(sc->sc_dev, "Ownership Violation");
			error = EINVAL;
		}

#define GTIDMAC_I_ERROR			\
	(GTIDMAC_I_ADDRMISS |		\
	 GTIDMAC_I_ACCPROT |		\
	 GTIDMAC_I_WRPROT |		\
	 GTIDMAC_I_OWN)

		if (cause & GTIDMAC_I_ERROR) {
			uint32_t sel;
			int select;

			sel = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    ia->ia_eselect) & GTIDMAC_ESR_SEL;
			select = sel - chan * GTIDMAC_I_BITS;
			if (select >= 0 && select < GTIDMAC_I_BITS) {
				uint32_t ear;

				ear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				    ia->ia_eaddr);
				aprint_error(": Error Address 0x%x\n", ear);
			} else
				aprint_error(": lost Error Address\n");
		}

		if (cause & (GTIDMAC_I_COMP | GTIDMAC_I_ERROR)) {
			sc->sc_cdesc[chan].chan_dma_done(
			    sc->sc_cdesc[chan].chan_running, chan,
			    &sc->sc_cdesc[chan].chan_in,
			    &sc->sc_cdesc[chan].chan_out, error);
			handled++;
		}

		cause >>= GTIDMAC_I_BITS;
	}
	DPRINTF(("IDMAC intr: %shandled\n", handled ? "" : "not "));

	return handled;
}

static int
mvxore_port0_intr(void *arg)
{
	struct gtidmac_softc *sc = arg;

	return mvxore_intr(sc, 0);
}

static int
mvxore_port1_intr(void *arg)
{
	struct gtidmac_softc *sc = arg;

	return mvxore_intr(sc, 1);
}

static int
mvxore_intr(struct gtidmac_softc *sc, int port)
{
	uint32_t cause;
	int handled = 0, chan, error;

	cause =
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEICR(sc, port));
	DPRINTF(("XORE port %d intr: cause=0x%x\n", port, cause));
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    MVXORE_XEICR(sc, port), ~cause);

	chan = 0;
	while (cause) {
		error = 0;
		if (cause & MVXORE_I_ADDRDECODE) {
			aprint_error_dev(sc->sc_dev, "Failed address decoding");
			error = EINVAL;
		}
		if (cause & MVXORE_I_ACCPROT) {
			aprint_error_dev(sc->sc_dev,
			    "Access Protect Violation");
			error = EACCES;
		}
		if (cause & MVXORE_I_WRPROT) {
			aprint_error_dev(sc->sc_dev, "Write Protect");
			error = EACCES;
		}
		if (cause & MVXORE_I_OWN) {
			aprint_error_dev(sc->sc_dev, "Ownership Violation");
			error = EINVAL;
		}
		if (cause & MVXORE_I_INTPARITY) {
			aprint_error_dev(sc->sc_dev, "Parity Error");
			error = EIO;
		}
		if (cause & MVXORE_I_XBAR) {
			aprint_error_dev(sc->sc_dev, "Crossbar Parity Error");
			error = EINVAL;
		}

#define MVXORE_I_ERROR			\
	(MVXORE_I_ADDRDECODE |		\
	 MVXORE_I_ACCPROT |		\
	 MVXORE_I_WRPROT |		\
	 MVXORE_I_OWN |			\
	 MVXORE_I_INTPARITY |		\
	 MVXORE_I_XBAR)

		if (cause & MVXORE_I_ERROR) {
			uint32_t type;
			int event;

			type = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    MVXORE_XEECR(sc, port));
			type &= MVXORE_XEECR_ERRORTYPE_MASK;
			event = type - chan * MVXORE_I_BITS;
			if (event >= 0 && event < MVXORE_I_BITS) {
				uint32_t xeear;

				xeear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				    MVXORE_XEEAR(sc, port));
				aprint_error(": Error Address 0x%x\n", xeear);
			} else
				aprint_error(": lost Error Address\n");
		}

		if (cause & (MVXORE_I_EOC | MVXORE_I_ERROR)) {
			sc->sc_cdesc_xore[chan].chan_dma_done(
			    sc->sc_cdesc_xore[chan].chan_running, chan,
			    sc->sc_cdesc_xore[chan].chan_in,
			    &sc->sc_cdesc_xore[chan].chan_out, error);
			handled++;
		}

		cause >>= MVXORE_I_BITS;
	}
	DPRINTF(("XORE port %d intr: %shandled\n",
	    port, handled ? "" : "not "));

	return handled;
}


/*
 * dmover(9) backend function.
 */
static void
gtidmac_process(struct dmover_backend *dmb)
{
	struct gtidmac_softc *sc = dmb->dmb_cookie;
	int s;

	/* If the backend is currently idle, go process the queue. */
	s = splbio();
	if (!sc->sc_dmb_busy)
		gtidmac_dmover_run(dmb);
	splx(s);
}

static void
gtidmac_dmover_run(struct dmover_backend *dmb)
{
	struct gtidmac_softc *sc = dmb->dmb_cookie;
	struct dmover_request *dreq;
	const struct dmover_algdesc *algdesc;
	struct gtidmac_function *df;
	bus_dmamap_t *dmamap_in, *dmamap_out;
	int chan, ninputs, error, i;

	sc->sc_dmb_busy = 1;

	for (;;) {
		dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
		if (dreq == NULL)
			break;
		algdesc = dreq->dreq_assignment->das_algdesc;
		df = algdesc->dad_data;
		chan = (*df->chan_alloc)(sc, &dmamap_in, &dmamap_out, dreq);
		if (chan == -1)
			return;

		dmover_backend_remque(dmb, dreq);
		dreq->dreq_flags |= DMOVER_REQ_RUNNING;

		/* XXXUNLOCK */

		error = 0;

		/* Load in/out buffers of dmover to bus_dmamap. */
		ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
		if (ninputs == 0) {
			int pno = 0;

			if (algdesc->dad_name == DMOVER_FUNC_FILL8)
				pno = dreq->dreq_immediate[0];

			i = 0;
			error = bus_dmamap_load(sc->sc_dmat, *dmamap_in,
			    &sc->sc_pbuf[pno], sizeof(sc->sc_pbuf[pno]), NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE);
			if (error == 0) {
				bus_dmamap_sync(sc->sc_dmat, *dmamap_in, 0,
				    sizeof(sc->sc_pbuf[pno]),
				    BUS_DMASYNC_PREWRITE);

				/*
				 * gtidmac_dmmap_unload() will be called on
				 * this map if an error occurs later.
				 */
				i = 1;
			}
		} else
			for (i = 0; i < ninputs; i++) {
				error = gtidmac_dmmap_load(sc,
				    *(dmamap_in + i), dreq->dreq_inbuf_type,
				    &dreq->dreq_inbuf[i], 0/*write*/);
				if (error != 0)
					break;
			}
		if (algdesc->dad_name != DMOVER_FUNC_ISCSI_CRC32C) {
			if (error == 0)
				error = gtidmac_dmmap_load(sc, *dmamap_out,
				    dreq->dreq_outbuf_type, &dreq->dreq_outbuf,
				    1/*read*/);

			if (error == 0) {
				/*
				 * In a dmover request, the size of outbuf is
				 * always taken to be the DMA transfer size.
				 */
				error = (*df->dma_setup)(sc, chan, ninputs,
				    dmamap_in, dmamap_out,
				    (*dmamap_out)->dm_mapsize);
				if (error != 0)
					gtidmac_dmmap_unload(sc, *dmamap_out,
					    1);
			}
		} else
			if (error == 0)
				error = (*df->dma_setup)(sc, chan, ninputs,
				    dmamap_in, dmamap_out,
				    (*dmamap_in)->dm_mapsize);

		/* XXXLOCK */

		if (error != 0) {
			for (; i-- > 0;)
				gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
			(*df->chan_free)(sc, chan);

			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			dreq->dreq_error = error;
			/* XXXUNLOCK */
			dmover_done(dreq);
			/* XXXLOCK */
			continue;
		}

		(*df->dma_start)(sc, chan, gtidmac_dmover_done);
		break;
	}

	/* All done */
	sc->sc_dmb_busy = 0;
}

static void
gtidmac_dmover_done(void *object, int chan, bus_dmamap_t *dmamap_in,
    bus_dmamap_t *dmamap_out, int error)
{
	struct gtidmac_softc *sc;
	struct dmover_request *dreq = object;
	struct dmover_backend *dmb;
	struct gtidmac_function *df;
	uint32_t result;
	int ninputs, i;

	KASSERT(dreq != NULL);

	dmb = dreq->dreq_assignment->das_backend;
	df = dreq->dreq_assignment->das_algdesc->dad_data;
	ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
	sc = dmb->dmb_cookie;

	result = (*df->dma_finish)(sc, chan, error);
	for (i = 0; i < ninputs; i++)
		gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
	if (dreq->dreq_assignment->das_algdesc->dad_name ==
	    DMOVER_FUNC_ISCSI_CRC32C)
		memcpy(dreq->dreq_immediate, &result, sizeof(result));
	else
		gtidmac_dmmap_unload(sc, *dmamap_out, 1);

	(*df->chan_free)(sc, chan);

	if (error) {
		dreq->dreq_error = error;
		dreq->dreq_flags |= DMOVER_REQ_ERROR;
	}

	dmover_done(dreq);

	/*
	 * See if we can start some more dmover(9) requests.
	 *
	 * Note: We're already at splbio() here.
	 */
	if (!sc->sc_dmb_busy)
		gtidmac_dmover_run(dmb);
}

static __inline int
gtidmac_dmmap_load(struct gtidmac_softc *sc, bus_dmamap_t dmamap,
    dmover_buffer_type dmbuf_type, dmover_buffer *dmbuf,
    int read)
{
	int error, flags;

	flags = BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    (read ? BUS_DMA_READ : BUS_DMA_WRITE);

	switch (dmbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dmbuf->dmbuf_linear.l_addr, dmbuf->dmbuf_linear.l_len,
		    NULL, flags);
		break;

	case DMOVER_BUF_UIO:
		if ((read && dmbuf->dmbuf_uio->uio_rw != UIO_READ) ||
		    (!read && dmbuf->dmbuf_uio->uio_rw == UIO_READ))
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    dmbuf->dmbuf_uio, flags);
		break;

	default:
		error = EINVAL;
	}

	if (error == 0)
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    read ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	return error;
}

static __inline void
gtidmac_dmmap_unload(struct gtidmac_softc *sc, bus_dmamap_t dmamap, int read)
{

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    read ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->sc_dmat, dmamap);
}


/*
 * IDMAC functions
 */
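/*
 * The channel interface below (chan_alloc/setup/start/finish/chan_free) is
 * what the dmover(9) glue above drives.  The block that follows is an
 * illustrative sketch only (hence "#if 0"): one plausible way to copy a
 * buffer through a single IDMAC channel, modeled on gtidmac_dmover_run()
 * and gtidmac_dmover_done().  The names "example_copy" and
 * "example_copy_done" are hypothetical, and error unwinding of the dmamaps
 * is omitted for brevity.
 */
#if 0
static void
example_copy_done(void *object, int chan, bus_dmamap_t *in, bus_dmamap_t *out,
    int error)
{
	struct gtidmac_softc *sc = object;

	/* Collect completion status and tear the transfer down. */
	gtidmac_finish(sc, chan, error);
	bus_dmamap_sync(sc->sc_dmat, *in, 0, (*in)->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, *out, 0, (*out)->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, *in);
	bus_dmamap_unload(sc->sc_dmat, *out);
	gtidmac_chan_free(sc, chan);
}

static int
example_copy(struct gtidmac_softc *sc, void *src, void *dst, bus_size_t len)
{
	bus_dmamap_t *in, *out;
	int chan, error;

	/* Grab an idle channel; the softc is handed back to the callback. */
	chan = gtidmac_chan_alloc(sc, &in, &out, sc);
	if (chan == -1)
		return EBUSY;

	/* Load and sync the source (device reads) and destination maps. */
	error = bus_dmamap_load(sc->sc_dmat, *in, src, len, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (error == 0)
		error = bus_dmamap_load(sc->sc_dmat, *out, dst, len, NULL,
		    BUS_DMA_NOWAIT | BUS_DMA_READ);
	if (error == 0) {
		bus_dmamap_sync(sc->sc_dmat, *in, 0, len,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, *out, 0, len,
		    BUS_DMASYNC_PREREAD);
		/* Build the descriptor chain (one input) and kick it off. */
		error = gtidmac_setup(sc, chan, 1, in, out, len);
	}
	if (error == 0)
		gtidmac_start(sc, chan, example_copy_done);
	else
		gtidmac_chan_free(sc, chan);
	return error;
}
#endif
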
int
gtidmac_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
    bus_dmamap_t **dmamap_out, void *object)
{
	struct gtidmac_softc *sc = tag;
	int chan;

	/* maybe need lock */

	for (chan = 0; chan < sc->sc_gtidmac_nchan; chan++)
		if (sc->sc_cdesc[chan].chan_running == NULL)
			break;
	if (chan >= sc->sc_gtidmac_nchan)
		return -1;


	sc->sc_cdesc[chan].chan_running = object;

	/* unlock */

	*dmamap_in = &sc->sc_cdesc[chan].chan_in;
	*dmamap_out = &sc->sc_cdesc[chan].chan_out;

	return chan;
}

void
gtidmac_chan_free(void *tag, int chan)
{
	struct gtidmac_softc *sc = tag;

	/* maybe need lock */

	sc->sc_cdesc[chan].chan_running = NULL;

	/* unlock */
}

/* ARGSUSED */
int
gtidmac_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
    bus_dmamap_t *dmamap_out, bus_size_t size)
{
	struct gtidmac_softc *sc = tag;
	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
	struct gtidmac_desc *desc;
	uint32_t ccl, bcnt, ires, ores;
	int n = 0, iidx, oidx;

	KASSERT(ninputs == 0 || ninputs == 1);

	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
#ifdef DIAGNOSTIC
	if (ccl & GTIDMAC_CCLR_CHANACT)
		panic("gtidmac_setup: chan%d already active", chan);
#endif

	/* We always use chain mode, with a max of (16M - 1) bytes/desc */
	ccl = (GTIDMAC_CCLR_DESCMODE_16M |
#ifdef GTIDMAC_DEBUG
	    GTIDMAC_CCLR_CDEN |
#endif
	    GTIDMAC_CCLR_TRANSFERMODE_B	/* Transfer Mode: Block */	|
	    GTIDMAC_CCLR_INTMODE_NULL	/* Intr Mode: Next Desc NULL */	|
	    GTIDMAC_CCLR_CHAINMODE_C	/* Chain Mode: Chained */);
	if (size != (*dmamap_in)->dm_mapsize) {
		ccl |= GTIDMAC_CCLR_SRCHOLD;
		if ((*dmamap_in)->dm_mapsize == 8)
			ccl |= GTIDMAC_CCLR_SBL_8B;
		else if ((*dmamap_in)->dm_mapsize == 16)
			ccl |= GTIDMAC_CCLR_SBL_16B;
		else if ((*dmamap_in)->dm_mapsize == 32)
			ccl |= GTIDMAC_CCLR_SBL_32B;
		else if ((*dmamap_in)->dm_mapsize == 64)
			ccl |= GTIDMAC_CCLR_SBL_64B;
		else if ((*dmamap_in)->dm_mapsize == 128)
			ccl |= GTIDMAC_CCLR_SBL_128B;
		else
			panic("gtidmac_setup: chan%d source:"
			    " unsupported hold size", chan);
	} else
		ccl |= GTIDMAC_CCLR_SBL_128B;
	if (size != (*dmamap_out)->dm_mapsize) {
		ccl |= GTIDMAC_CCLR_DESTHOLD;
		if ((*dmamap_out)->dm_mapsize == 8)
			ccl |= GTIDMAC_CCLR_DBL_8B;
		else if ((*dmamap_out)->dm_mapsize == 16)
			ccl |= GTIDMAC_CCLR_DBL_16B;
		else if ((*dmamap_out)->dm_mapsize == 32)
			ccl |= GTIDMAC_CCLR_DBL_32B;
		else if ((*dmamap_out)->dm_mapsize == 64)
			ccl |= GTIDMAC_CCLR_DBL_64B;
		else if ((*dmamap_out)->dm_mapsize == 128)
			ccl |= GTIDMAC_CCLR_DBL_128B;
		else
			panic("gtidmac_setup: chan%d destination:"
			    " unsupported hold size", chan);
	} else
		ccl |= GTIDMAC_CCLR_DBL_128B;

	fstdd = SLIST_FIRST(&sc->sc_dlist);
	if (fstdd == NULL) {
		aprint_error_dev(sc->sc_dev, "no descriptor\n");
		return ENOMEM;
	}
	SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
	sc->sc_cdesc[chan].chan_ddidx = fstdd->dd_index;

	dd = fstdd;
	ires = ores = 0;
	iidx = oidx = 0;
	while (1 /*CONSTCOND*/) {
		if (ccl & GTIDMAC_CCLR_SRCHOLD) {
			if (ccl & GTIDMAC_CCLR_DESTHOLD)
				bcnt = size;	/* src/dst hold */
			else
				bcnt = (*dmamap_out)->dm_segs[oidx].ds_len;
		} else if (ccl & GTIDMAC_CCLR_DESTHOLD)
			bcnt = (*dmamap_in)->dm_segs[iidx].ds_len;
		else
			bcnt = uimin((*dmamap_in)->dm_segs[iidx].ds_len - ires,
			    (*dmamap_out)->dm_segs[oidx].ds_len - ores);

		desc = dd->dd_idmac_vaddr;
		desc->bc.mode16m.bcnt =
		    bcnt | GTIDMAC_CIDMABCR_BCLEFT | GTIDMAC_CIDMABCR_OWN;
		desc->srcaddr = (*dmamap_in)->dm_segs[iidx].ds_addr + ires;
		desc->dstaddr = (*dmamap_out)->dm_segs[oidx].ds_addr + ores;

		n += bcnt;
		if (n >= size)
			break;
		if (!(ccl & GTIDMAC_CCLR_SRCHOLD)) {
			ires += bcnt;
			if (ires >= (*dmamap_in)->dm_segs[iidx].ds_len) {
				ires = 0;
				iidx++;
				KASSERT(iidx < (*dmamap_in)->dm_nsegs);
			}
		}
		if (!(ccl & GTIDMAC_CCLR_DESTHOLD)) {
			ores += bcnt;
			if (ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
				ores = 0;
				oidx++;
				KASSERT(oidx < (*dmamap_out)->dm_nsegs);
			}
		}

		nxtdd = SLIST_FIRST(&sc->sc_dlist);
		if (nxtdd == NULL) {
			aprint_error_dev(sc->sc_dev, "no descriptor\n");
			return ENOMEM;
		}
		SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);

		desc->nextdp = (uint32_t)nxtdd->dd_paddr;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
		    dd->dd_index * sizeof(*desc), sizeof(*desc),
#ifdef GTIDMAC_DEBUG
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#else
		    BUS_DMASYNC_PREWRITE);
#endif

		SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
		dd = nxtdd;
	}
	desc->nextdp = (uint32_t)NULL;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, dd->dd_index * sizeof(*desc),
#ifdef GTIDMAC_DEBUG
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#else
	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
#endif

	/* Set paddr of descriptor to Channel Next Descriptor Pointer */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan),
	    fstdd->dd_paddr);

#if BYTE_ORDER == LITTLE_ENDIAN
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_LE);
#else
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_BE);
#endif
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan), ccl);

#ifdef GTIDMAC_DEBUG
	gtidmac_dump_idmacdesc(sc, fstdd, ccl, 0/*pre*/);
#endif

	sc->sc_cdesc[chan].chan_totalcnt += size;

	return 0;
}

void
gtidmac_start(void *tag, int chan,
    void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
    int))
{
	struct gtidmac_softc *sc = tag;
	uint32_t ccl;

	DPRINTF(("%s:%d: starting\n", device_xname(sc->sc_dev), chan));

#ifdef GTIDMAC_DEBUG
	gtidmac_dump_idmacreg(sc, chan);
#endif

	sc->sc_cdesc[chan].chan_dma_done = dma_done_cb;

	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
	/* Start and 'Fetch Next Descriptor' */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan),
	    ccl | GTIDMAC_CCLR_CHANEN | GTIDMAC_CCLR_FETCHND);
}

static uint32_t
gtidmac_finish(void *tag, int chan, int error)
{
	struct gtidmac_softc *sc = tag;
	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
	struct gtidmac_desc *desc;

	fstdd = &sc->sc_dd_buffer[sc->sc_cdesc[chan].chan_ddidx];

#ifdef GTIDMAC_DEBUG
	if (error || gtidmac_debug > 1) {
		uint32_t ccl;

		gtidmac_dump_idmacreg(sc, chan);
		ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    GTIDMAC_CCLR(chan));
		gtidmac_dump_idmacdesc(sc, fstdd, ccl, 1/*post*/);
	}
#endif

	dd = fstdd;
	do {
		desc = dd->dd_idmac_vaddr;
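
		/*
		 * Sync each completed descriptor back from the chain and
		 * return it to the free list; the walk stops at the
		 * descriptor whose next pointer was left NULL by
		 * gtidmac_setup().
		 */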
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
		    dd->dd_index * sizeof(*desc), sizeof(*desc),
#ifdef GTIDMAC_DEBUG
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#else
		    BUS_DMASYNC_POSTWRITE);
#endif

		nxtdd = SLIST_NEXT(dd, dd_next);
		SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
		dd = nxtdd;
	} while (desc->nextdp);

	return 0;
}

/*
 * XORE functions
 */
int
mvxore_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
    bus_dmamap_t **dmamap_out, void *object)
{
	struct gtidmac_softc *sc = tag;
	int chan;

	/* maybe need lock */

	for (chan = 0; chan < sc->sc_mvxore_nchan; chan++)
		if (sc->sc_cdesc_xore[chan].chan_running == NULL)
			break;
	if (chan >= sc->sc_mvxore_nchan)
		return -1;


	sc->sc_cdesc_xore[chan].chan_running = object;

	/* unlock */

	*dmamap_in = sc->sc_cdesc_xore[chan].chan_in;
	*dmamap_out = &sc->sc_cdesc_xore[chan].chan_out;

	return chan;
}

void
mvxore_chan_free(void *tag, int chan)
{
	struct gtidmac_softc *sc = tag;

	/* maybe need lock */

	sc->sc_cdesc_xore[chan].chan_running = NULL;

	/* unlock */
}

/* ARGSUSED */
int
mvxore_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
    bus_dmamap_t *dmamap_out, bus_size_t size)
{
	struct gtidmac_softc *sc = tag;
	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
	struct mvxore_desc *desc;
	uint32_t xexc, bcnt, cmd, lastcmd;
	int n = 0, i;
	uint32_t ires[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, ores = 0;
	int iidx[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, oidx = 0;

#ifdef DIAGNOSTIC
	uint32_t xexact =
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));

	if ((xexact & MVXORE_XEXACTR_XESTATUS_MASK) ==
	    MVXORE_XEXACTR_XESTATUS_ACT)
		panic("mvxore_setup: chan%d already active."
		    " mvxore does not support hot insertion", chan);
#endif

	xexc =
	    (MVXORE_XEXCR_REGACCPROTECT |
	     MVXORE_XEXCR_DBL_128B |
	     MVXORE_XEXCR_SBL_128B);
	cmd = lastcmd = 0;
	if (ninputs > 1) {
		xexc |= MVXORE_XEXCR_OM_XOR;
		lastcmd = cmd = (1 << ninputs) - 1;
	} else if (ninputs == 1) {
		if ((*dmamap_out)->dm_nsegs == 0) {
			xexc |= MVXORE_XEXCR_OM_CRC32;
			lastcmd = MVXORE_DESC_CMD_CRCLAST;
		} else
			xexc |= MVXORE_XEXCR_OM_DMA;
	} else if (ninputs == 0) {
		if ((*dmamap_out)->dm_nsegs != 1) {
			aprint_error_dev(sc->sc_dev,
			    "XORE does not support %d DMA segments\n",
			    (*dmamap_out)->dm_nsegs);
			return EINVAL;
		}

		if ((*dmamap_in)->dm_mapsize == 0) {
			xexc |= MVXORE_XEXCR_OM_ECC;

			/* XXXXX: Maybe need to set Timer Mode registers? */

#if 0
		} else if ((*dmamap_in)->dm_mapsize == 8 ||
		    (*dmamap_in)->dm_mapsize == 16) {	/* in case dmover */
			uint64_t pattern;

			/* XXXX: Get pattern data */

			KASSERT((*dmamap_in)->dm_mapsize == 8 ||
			    (void *)((uint32_t)(*dmamap_in)->_dm_origbuf &
			    ~PAGE_MASK) == sc->sc_pbuf);
			pattern = *(uint64_t *)(*dmamap_in)->_dm_origbuf;

			/* XXXXX: XORE has an IVR.  We should get this first. */
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRL,
			    pattern);
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRH,
			    pattern >> 32);

			xexc |= MVXORE_XEXCR_OM_MEMINIT;
#endif
		} else {
			aprint_error_dev(sc->sc_dev,
			    "XORE does not support DMA mapsize %zd\n",
			    (*dmamap_in)->dm_mapsize);
			return EINVAL;
		}
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    MVXORE_XEXDPR(sc, chan), (*dmamap_out)->dm_segs[0].ds_addr);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    MVXORE_XEXBSR(sc, chan), (*dmamap_out)->dm_mapsize);

		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    MVXORE_XEXCR(sc, chan), xexc);
		sc->sc_cdesc_xore[chan].chan_totalcnt += size;

		return 0;
	}

	/* Make descriptor for DMA/CRC32/XOR */

	fstdd = SLIST_FIRST(&sc->sc_dlist_xore);
	if (fstdd == NULL) {
		aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
		return ENOMEM;
	}
	SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
	sc->sc_cdesc_xore[chan].chan_ddidx =
	    fstdd->dd_index + GTIDMAC_NDESC * sc->sc_gtidmac_nchan;

	dd = fstdd;
	while (1 /*CONSTCOND*/) {
		desc = dd->dd_xore_vaddr;
		desc->stat = MVXORE_DESC_STAT_OWN;
		desc->cmd = cmd;
		if ((*dmamap_out)->dm_nsegs != 0) {
			desc->dstaddr =
			    (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
			bcnt = (*dmamap_out)->dm_segs[oidx].ds_len - ores;
		} else {
			desc->dstaddr = 0;
			bcnt = MVXORE_MAXXFER;	/* XXXXX */
		}
		for (i = 0; i < ninputs; i++) {
			desc->srcaddr[i] =
			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_addr + ires[i];
			bcnt = uimin(bcnt,
			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len - ires[i]);
		}
		desc->bcnt = bcnt;

		n += bcnt;
		if (n >= size)
			break;
		ores += bcnt;
		if ((*dmamap_out)->dm_nsegs != 0 &&
		    ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
			ores = 0;
			oidx++;
			KASSERT(oidx < (*dmamap_out)->dm_nsegs);
		}
		for (i = 0; i < ninputs; i++) {
			ires[i] += bcnt;
			if (ires[i] >=
			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len) {
				ires[i] = 0;
				iidx[i]++;
				KASSERT(iidx[i] < (*dmamap_in[i]).dm_nsegs);
			}
		}

		nxtdd = SLIST_FIRST(&sc->sc_dlist_xore);
		if (nxtdd == NULL) {
			aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
			return ENOMEM;
		}
		SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
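
		/*
		 * Link this descriptor to the next one and flush it to
		 * memory before the engine may fetch it.
		 */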
desc->nextda = (uint32_t)nxtdd->dd_paddr; 1393 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore, 1394 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc), 1395 1.1 kiyohara BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1396 1.1 kiyohara 1397 1.1 kiyohara SLIST_INSERT_AFTER(dd, nxtdd, dd_next); 1398 1.1 kiyohara dd = nxtdd; 1399 1.1 kiyohara } 1400 1.1 kiyohara desc->cmd = lastcmd; 1401 1.1 kiyohara desc->nextda = (uint32_t)NULL; 1402 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore, 1403 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc), 1404 1.1 kiyohara BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1405 1.1 kiyohara 1406 1.1 kiyohara /* Set paddr of descriptor to Channel Next Descriptor Pointer */ 1407 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXNDPR(sc, chan), 1408 1.1 kiyohara fstdd->dd_paddr); 1409 1.1 kiyohara 1410 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan), xexc); 1411 1.1 kiyohara 1412 1.1 kiyohara #ifdef GTIDMAC_DEBUG 1413 1.3 kiyohara gtidmac_dump_xoredesc(sc, fstdd, xexc, 0/*pre*/); 1414 1.1 kiyohara #endif 1415 1.1 kiyohara 1416 1.1 kiyohara sc->sc_cdesc_xore[chan].chan_totalcnt += size; 1417 1.1 kiyohara 1418 1.1 kiyohara return 0; 1419 1.1 kiyohara } 1420 1.1 kiyohara 1421 1.1 kiyohara void 1422 1.1 kiyohara mvxore_start(void *tag, int chan, 1423 1.1 kiyohara void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *, 1424 1.1 kiyohara int)) 1425 1.1 kiyohara { 1426 1.1 kiyohara struct gtidmac_softc *sc = tag; 1427 1.1 kiyohara uint32_t xexact; 1428 1.1 kiyohara 1429 1.1 kiyohara DPRINTF(("%s:%d: xore starting\n", device_xname(sc->sc_dev), chan)); 1430 1.1 kiyohara 1431 1.1 kiyohara #ifdef GTIDMAC_DEBUG 1432 1.1 kiyohara gtidmac_dump_xorereg(sc, chan); 1433 1.1 kiyohara #endif 1434 1.1 kiyohara 1435 1.1 kiyohara sc->sc_cdesc_xore[chan].chan_dma_done = dma_done_cb; 1436 1.1 kiyohara 1437 1.8 kiyohara xexact = 1438 1.8 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan)); 1439 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan), 1440 1.1 kiyohara xexact | MVXORE_XEXACTR_XESTART); 1441 1.1 kiyohara } 1442 1.1 kiyohara 1443 1.1 kiyohara static uint32_t 1444 1.1 kiyohara mvxore_finish(void *tag, int chan, int error) 1445 1.1 kiyohara { 1446 1.1 kiyohara struct gtidmac_softc *sc = tag; 1447 1.1 kiyohara struct gtidmac_dma_desc *dd, *fstdd, *nxtdd; 1448 1.1 kiyohara struct mvxore_desc *desc; 1449 1.1 kiyohara uint32_t xexc; 1450 1.1 kiyohara 1451 1.1 kiyohara #ifdef GTIDMAC_DEBUG 1452 1.1 kiyohara if (error || gtidmac_debug > 1) 1453 1.1 kiyohara gtidmac_dump_xorereg(sc, chan); 1454 1.1 kiyohara #endif 1455 1.1 kiyohara 1456 1.8 kiyohara xexc = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan)); 1457 1.1 kiyohara if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_ECC || 1458 1.1 kiyohara (xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_MEMINIT) 1459 1.1 kiyohara return 0; 1460 1.1 kiyohara 1461 1.1 kiyohara fstdd = &sc->sc_dd_buffer[sc->sc_cdesc_xore[chan].chan_ddidx]; 1462 1.1 kiyohara 1463 1.1 kiyohara #ifdef GTIDMAC_DEBUG 1464 1.1 kiyohara if (error || gtidmac_debug > 1) 1465 1.1 kiyohara gtidmac_dump_xoredesc(sc, fstdd, xexc, 1/*post*/); 1466 1.1 kiyohara #endif 1467 1.1 kiyohara 1468 1.1 kiyohara dd = fstdd; 1469 1.1 kiyohara do { 1470 1.1 kiyohara desc = dd->dd_xore_vaddr; 1471 1.1 kiyohara 1472 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore, 1473 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc), 1474 
1.1 kiyohara BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1475 1.1 kiyohara 1476 1.1 kiyohara nxtdd = SLIST_NEXT(dd, dd_next); 1477 1.1 kiyohara SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next); 1478 1.1 kiyohara dd = nxtdd; 1479 1.1 kiyohara } while (desc->nextda); 1480 1.1 kiyohara 1481 1.1 kiyohara if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_CRC32) 1482 1.1 kiyohara return desc->result; 1483 1.1 kiyohara return 0; 1484 1.1 kiyohara } 1485 1.1 kiyohara 1486 1.1 kiyohara static void 1487 1.11 kiyohara gtidmac_wininit(struct gtidmac_softc *sc, enum marvell_tags *tags) 1488 1.1 kiyohara { 1489 1.1 kiyohara device_t pdev = device_parent(sc->sc_dev); 1490 1.1 kiyohara uint64_t base; 1491 1.11 kiyohara uint32_t size, cxap, en, winacc; 1492 1.11 kiyohara int window, target, attr, rv, i, j; 1493 1.1 kiyohara 1494 1.1 kiyohara en = 0xff; 1495 1.1 kiyohara cxap = 0; 1496 1.1 kiyohara for (window = 0, i = 0; 1497 1.11 kiyohara tags[i] != MARVELL_TAG_UNDEFINED && window < GTIDMAC_NWINDOW; i++) { 1498 1.11 kiyohara rv = marvell_winparams_by_tag(pdev, tags[i], 1499 1.1 kiyohara &target, &attr, &base, &size); 1500 1.1 kiyohara if (rv != 0 || size == 0) 1501 1.1 kiyohara continue; 1502 1.1 kiyohara 1503 1.1 kiyohara if (base > 0xffffffffULL) { 1504 1.1 kiyohara if (window >= GTIDMAC_NREMAP) { 1505 1.1 kiyohara aprint_error_dev(sc->sc_dev, 1506 1.1 kiyohara "can't remap window %d\n", window); 1507 1.1 kiyohara continue; 1508 1.1 kiyohara } 1509 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, 1510 1.1 kiyohara GTIDMAC_HARXR(window), (base >> 32) & 0xffffffff); 1511 1.1 kiyohara } 1512 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BARX(window), 1513 1.1 kiyohara GTIDMAC_BARX_TARGET(target) | 1514 1.1 kiyohara GTIDMAC_BARX_ATTR(attr) | 1515 1.1 kiyohara GTIDMAC_BARX_BASE(base)); 1516 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_SRX(window), 1517 1.1 kiyohara GTIDMAC_SRX_SIZE(size)); 1518 1.1 kiyohara en &= ~GTIDMAC_BAER_EN(window); 1519 1.11 kiyohara 1520 1.11 kiyohara winacc = GTIDMAC_CXAPR_WINACC_FA; 1521 1.11 kiyohara if (gtidmac_winacctbl != NULL) 1522 1.11 kiyohara for (j = 0; 1523 1.11 kiyohara gtidmac_winacctbl[j].tag != MARVELL_TAG_UNDEFINED; 1524 1.11 kiyohara j++) { 1525 1.11 kiyohara if (gtidmac_winacctbl[j].tag != tags[i]) 1526 1.11 kiyohara continue; 1527 1.11 kiyohara 1528 1.11 kiyohara switch (gtidmac_winacctbl[j].winacc) { 1529 1.11 kiyohara case GTIDMAC_WINACC_NOACCESSALLOWED: 1530 1.11 kiyohara winacc = GTIDMAC_CXAPR_WINACC_NOAA; 1531 1.11 kiyohara break; 1532 1.11 kiyohara case GTIDMAC_WINACC_READONLY: 1533 1.11 kiyohara winacc = GTIDMAC_CXAPR_WINACC_RO; 1534 1.11 kiyohara break; 1535 1.11 kiyohara case GTIDMAC_WINACC_FULLACCESS: 1536 1.11 kiyohara default: /* XXXX: default is full access */ 1537 1.11 kiyohara break; 1538 1.11 kiyohara } 1539 1.11 kiyohara break; 1540 1.11 kiyohara } 1541 1.11 kiyohara cxap |= GTIDMAC_CXAPR_WINACC(window, winacc); 1542 1.11 kiyohara 1543 1.1 kiyohara window++; 1544 1.1 kiyohara } 1545 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BAER, en); 1546 1.1 kiyohara 1547 1.1 kiyohara for (i = 0; i < GTIDMAC_NACCPROT; i++) 1548 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CXAPR(i), 1549 1.1 kiyohara cxap); 1550 1.1 kiyohara } 1551 1.1 kiyohara 1552 1.1 kiyohara static void 1553 1.11 kiyohara mvxore_wininit(struct gtidmac_softc *sc, enum marvell_tags *tags) 1554 1.1 kiyohara { 1555 1.1 kiyohara device_t pdev = device_parent(sc->sc_dev); 1556 1.1 kiyohara uint64_t base; 1557 
1.11 kiyohara uint32_t target, attr, size, xexwc, winacc;
1558 1.11 kiyohara int window, rv, i, j, p;
1559 1.1 kiyohara
1560 1.1 kiyohara xexwc = 0;
1561 1.1 kiyohara for (window = 0, i = 0;
1562 1.11 kiyohara tags[i] != MARVELL_TAG_UNDEFINED && window < MVXORE_NWINDOW; i++) {
1563 1.11 kiyohara rv = marvell_winparams_by_tag(pdev, tags[i],
1564 1.1 kiyohara &target, &attr, &base, &size);
1565 1.1 kiyohara if (rv != 0 || size == 0)
1566 1.1 kiyohara continue;
1567 1.1 kiyohara
1568 1.1 kiyohara if (base > 0xffffffffULL) {
1569 1.1 kiyohara if (window >= MVXORE_NREMAP) {
1570 1.1 kiyohara aprint_error_dev(sc->sc_dev,
1571 1.1 kiyohara "can't remap window %d\n", window);
1572 1.1 kiyohara continue;
1573 1.1 kiyohara }
1574 1.8 kiyohara for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++)
1575 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1576 1.8 kiyohara MVXORE_XEHARRX(sc, p, window),
1577 1.8 kiyohara (base >> 32) & 0xffffffff);
1578 1.8 kiyohara }
1579 1.8 kiyohara
1580 1.8 kiyohara for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++) {
1581 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1582 1.8 kiyohara MVXORE_XEBARX(sc, p, window),
1583 1.8 kiyohara MVXORE_XEBARX_TARGET(target) |
1584 1.8 kiyohara MVXORE_XEBARX_ATTR(attr) |
1585 1.8 kiyohara MVXORE_XEBARX_BASE(base));
1586 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1587 1.8 kiyohara MVXORE_XESMRX(sc, p, window),
1588 1.8 kiyohara MVXORE_XESMRX_SIZE(size));
1589 1.1 kiyohara }
1590 1.11 kiyohara
1591 1.11 kiyohara winacc = MVXORE_XEXWCR_WINACC_FA;
1592 1.11 kiyohara if (mvxore_winacctbl != NULL)
1593 1.11 kiyohara for (j = 0;
1594 1.11 kiyohara mvxore_winacctbl[j].tag != MARVELL_TAG_UNDEFINED;
1595 1.11 kiyohara j++) {
1596 1.11 kiyohara if (mvxore_winacctbl[j].tag != tags[i])
1597 1.11 kiyohara continue;
1598 1.11 kiyohara
1599 1.11 kiyohara switch (mvxore_winacctbl[j].winacc) {
1600 1.11 kiyohara case GTIDMAC_WINACC_NOACCESSALLOWED:
1601 1.11 kiyohara winacc = MVXORE_XEXWCR_WINACC_NOAA;
1602 1.11 kiyohara break;
1603 1.11 kiyohara case GTIDMAC_WINACC_READONLY:
1604 1.11 kiyohara winacc = MVXORE_XEXWCR_WINACC_RO;
1605 1.11 kiyohara break;
1606 1.11 kiyohara case GTIDMAC_WINACC_FULLACCESS:
1607 1.11 kiyohara default: /* XXXX: default is full access */
1608 1.11 kiyohara break;
1609 1.11 kiyohara }
1610 1.11 kiyohara break;
1611 1.11 kiyohara }
1612 1.1 kiyohara xexwc |= (MVXORE_XEXWCR_WINEN(window) |
1613 1.11 kiyohara MVXORE_XEXWCR_WINACC(window, winacc));
1614 1.1 kiyohara window++;
1615 1.1 kiyohara }
1616 1.1 kiyohara
1617 1.8 kiyohara for (i = 0; i < sc->sc_mvxore_nchan; i++) {
1618 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXWCR(sc, i),
1619 1.8 kiyohara xexwc);
1620 1.8 kiyohara
1621 1.8 kiyohara /* XXXXX: reset...
*/ 1622 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXAOCR(sc, 0), 1623 1.8 kiyohara 0); 1624 1.8 kiyohara } 1625 1.1 kiyohara } 1626 1.1 kiyohara 1627 1.8 kiyohara static int 1628 1.8 kiyohara gtidmac_buffer_setup(struct gtidmac_softc *sc) 1629 1.8 kiyohara { 1630 1.8 kiyohara bus_dma_segment_t segs; 1631 1.8 kiyohara struct gtidmac_dma_desc *dd; 1632 1.8 kiyohara uint32_t mask; 1633 1.8 kiyohara int nchan, nsegs, i; 1634 1.8 kiyohara 1635 1.8 kiyohara nchan = sc->sc_gtidmac_nchan; 1636 1.8 kiyohara 1637 1.8 kiyohara if (bus_dmamem_alloc(sc->sc_dmat, 1638 1.8 kiyohara sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 1639 1.8 kiyohara PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) { 1640 1.8 kiyohara aprint_error_dev(sc->sc_dev, 1641 1.8 kiyohara "bus_dmamem_alloc failed: descriptor buffer\n"); 1642 1.8 kiyohara goto fail0; 1643 1.8 kiyohara } 1644 1.8 kiyohara if (bus_dmamem_map(sc->sc_dmat, &segs, 1, 1645 1.8 kiyohara sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 1646 1.8 kiyohara (void **)&sc->sc_dbuf, BUS_DMA_NOWAIT)) { 1647 1.8 kiyohara aprint_error_dev(sc->sc_dev, 1648 1.8 kiyohara "bus_dmamem_map failed: descriptor buffer\n"); 1649 1.8 kiyohara goto fail1; 1650 1.8 kiyohara } 1651 1.8 kiyohara if (bus_dmamap_create(sc->sc_dmat, 1652 1.8 kiyohara sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 1, 1653 1.8 kiyohara sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 0, 1654 1.8 kiyohara BUS_DMA_NOWAIT, &sc->sc_dmap)) { 1655 1.8 kiyohara aprint_error_dev(sc->sc_dev, 1656 1.8 kiyohara "bus_dmamap_create failed: descriptor buffer\n"); 1657 1.8 kiyohara goto fail2; 1658 1.8 kiyohara } 1659 1.8 kiyohara if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, sc->sc_dbuf, 1660 1.8 kiyohara sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 1661 1.8 kiyohara NULL, BUS_DMA_NOWAIT)) { 1662 1.8 kiyohara aprint_error_dev(sc->sc_dev, 1663 1.8 kiyohara "bus_dmamap_load failed: descriptor buffer\n"); 1664 1.8 kiyohara goto fail3; 1665 1.8 kiyohara } 1666 1.8 kiyohara SLIST_INIT(&sc->sc_dlist); 1667 1.8 kiyohara for (i = 0; i < GTIDMAC_NDESC * nchan; i++) { 1668 1.8 kiyohara dd = &sc->sc_dd_buffer[i]; 1669 1.8 kiyohara dd->dd_index = i; 1670 1.8 kiyohara dd->dd_idmac_vaddr = &sc->sc_dbuf[i]; 1671 1.8 kiyohara dd->dd_paddr = sc->sc_dmap->dm_segs[0].ds_addr + 1672 1.8 kiyohara (sizeof(struct gtidmac_desc) * i); 1673 1.8 kiyohara SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next); 1674 1.8 kiyohara } 1675 1.8 kiyohara 1676 1.8 kiyohara /* Initialize IDMAC DMA channels */ 1677 1.8 kiyohara mask = 0; 1678 1.8 kiyohara for (i = 0; i < nchan; i++) { 1679 1.8 kiyohara if (i > 0 && ((i * GTIDMAC_I_BITS) & 31 /*bit*/) == 0) { 1680 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, 1681 1.8 kiyohara GTIDMAC_IMR(i - 1), mask); 1682 1.8 kiyohara mask = 0; 1683 1.8 kiyohara } 1684 1.8 kiyohara 1685 1.8 kiyohara if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER, 1686 1.8 kiyohara GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT, 1687 1.8 kiyohara &sc->sc_cdesc[i].chan_in)) { 1688 1.8 kiyohara aprint_error_dev(sc->sc_dev, 1689 1.8 kiyohara "bus_dmamap_create failed: chan%d in\n", i); 1690 1.8 kiyohara goto fail4; 1691 1.8 kiyohara } 1692 1.8 kiyohara if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER, 1693 1.8 kiyohara GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT, 1694 1.8 kiyohara &sc->sc_cdesc[i].chan_out)) { 1695 1.8 kiyohara aprint_error_dev(sc->sc_dev, 1696 1.8 kiyohara "bus_dmamap_create failed: chan%d out\n", i); 1697 1.8 kiyohara bus_dmamap_destroy(sc->sc_dmat, 1698 1.8 
kiyohara sc->sc_cdesc[i].chan_in); 1699 1.8 kiyohara goto fail4; 1700 1.8 kiyohara } 1701 1.8 kiyohara sc->sc_cdesc[i].chan_totalcnt = 0; 1702 1.8 kiyohara sc->sc_cdesc[i].chan_running = NULL; 1703 1.8 kiyohara 1704 1.8 kiyohara /* Ignore bits overflow. The mask is 32bit. */ 1705 1.8 kiyohara mask |= GTIDMAC_I(i, 1706 1.8 kiyohara GTIDMAC_I_COMP | 1707 1.8 kiyohara GTIDMAC_I_ADDRMISS | 1708 1.8 kiyohara GTIDMAC_I_ACCPROT | 1709 1.8 kiyohara GTIDMAC_I_WRPROT | 1710 1.8 kiyohara GTIDMAC_I_OWN); 1711 1.8 kiyohara 1712 1.8 kiyohara /* 8bits/channel * 4channels => 32bit */ 1713 1.8 kiyohara if ((i & 0x3) == 0x3) { 1714 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, 1715 1.8 kiyohara GTIDMAC_IMR(i), mask); 1716 1.8 kiyohara mask = 0; 1717 1.8 kiyohara } 1718 1.8 kiyohara } 1719 1.8 kiyohara 1720 1.8 kiyohara return 0; 1721 1.8 kiyohara 1722 1.8 kiyohara fail4: 1723 1.8 kiyohara for (; i-- > 0;) { 1724 1.8 kiyohara bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in); 1725 1.8 kiyohara bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out); 1726 1.8 kiyohara } 1727 1.8 kiyohara bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap); 1728 1.8 kiyohara fail3: 1729 1.8 kiyohara bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap); 1730 1.8 kiyohara fail2: 1731 1.8 kiyohara bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf, 1732 1.8 kiyohara sizeof(struct gtidmac_desc) * GTIDMAC_NDESC); 1733 1.8 kiyohara fail1: 1734 1.8 kiyohara bus_dmamem_free(sc->sc_dmat, &segs, 1); 1735 1.8 kiyohara fail0: 1736 1.8 kiyohara return -1; 1737 1.8 kiyohara } 1738 1.8 kiyohara 1739 1.8 kiyohara static int 1740 1.8 kiyohara mvxore_buffer_setup(struct gtidmac_softc *sc) 1741 1.8 kiyohara { 1742 1.8 kiyohara bus_dma_segment_t segs; 1743 1.8 kiyohara struct gtidmac_dma_desc *dd; 1744 1.8 kiyohara uint32_t mask; 1745 1.8 kiyohara int nchan, nsegs, i, j; 1746 1.8 kiyohara 1747 1.8 kiyohara nchan = sc->sc_mvxore_nchan; 1748 1.8 kiyohara 1749 1.8 kiyohara if (bus_dmamem_alloc(sc->sc_dmat, 1750 1.8 kiyohara sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 1751 1.8 kiyohara PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) { 1752 1.8 kiyohara aprint_error_dev(sc->sc_dev, 1753 1.8 kiyohara "bus_dmamem_alloc failed: xore descriptor buffer\n"); 1754 1.8 kiyohara goto fail0; 1755 1.8 kiyohara } 1756 1.8 kiyohara if (bus_dmamem_map(sc->sc_dmat, &segs, 1, 1757 1.8 kiyohara sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 1758 1.8 kiyohara (void **)&sc->sc_dbuf_xore, BUS_DMA_NOWAIT)) { 1759 1.8 kiyohara aprint_error_dev(sc->sc_dev, 1760 1.8 kiyohara "bus_dmamem_map failed: xore descriptor buffer\n"); 1761 1.8 kiyohara goto fail1; 1762 1.8 kiyohara } 1763 1.8 kiyohara if (bus_dmamap_create(sc->sc_dmat, 1764 1.8 kiyohara sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 1, 1765 1.8 kiyohara sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 0, 1766 1.8 kiyohara BUS_DMA_NOWAIT, &sc->sc_dmap_xore)) { 1767 1.8 kiyohara aprint_error_dev(sc->sc_dev, 1768 1.8 kiyohara "bus_dmamap_create failed: xore descriptor buffer\n"); 1769 1.8 kiyohara goto fail2; 1770 1.8 kiyohara } 1771 1.8 kiyohara if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap_xore, sc->sc_dbuf_xore, 1772 1.8 kiyohara sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 1773 1.8 kiyohara NULL, BUS_DMA_NOWAIT)) { 1774 1.8 kiyohara aprint_error_dev(sc->sc_dev, 1775 1.8 kiyohara "bus_dmamap_load failed: xore descriptor buffer\n"); 1776 1.8 kiyohara goto fail3; 1777 1.8 kiyohara } 1778 1.8 kiyohara SLIST_INIT(&sc->sc_dlist_xore); 1779 1.8 kiyohara for (i = 0; i < MVXORE_NDESC * nchan; i++) { 1780 1.8 
kiyohara dd = 1781 1.8 kiyohara &sc->sc_dd_buffer[i + GTIDMAC_NDESC * sc->sc_gtidmac_nchan]; 1782 1.8 kiyohara dd->dd_index = i; 1783 1.8 kiyohara dd->dd_xore_vaddr = &sc->sc_dbuf_xore[i]; 1784 1.8 kiyohara dd->dd_paddr = sc->sc_dmap_xore->dm_segs[0].ds_addr + 1785 1.8 kiyohara (sizeof(struct mvxore_desc) * i); 1786 1.8 kiyohara SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next); 1787 1.8 kiyohara } 1788 1.8 kiyohara 1789 1.8 kiyohara /* Initialize XORE DMA channels */ 1790 1.8 kiyohara mask = 0; 1791 1.8 kiyohara for (i = 0; i < nchan; i++) { 1792 1.8 kiyohara for (j = 0; j < MVXORE_NSRC; j++) { 1793 1.8 kiyohara if (bus_dmamap_create(sc->sc_dmat, 1794 1.8 kiyohara MVXORE_MAXXFER, MVXORE_NSEGS, 1795 1.8 kiyohara MVXORE_MAXXFER, 0, BUS_DMA_NOWAIT, 1796 1.8 kiyohara &sc->sc_cdesc_xore[i].chan_in[j])) { 1797 1.8 kiyohara aprint_error_dev(sc->sc_dev, 1798 1.8 kiyohara "bus_dmamap_create failed:" 1799 1.8 kiyohara " xore chan%d in[%d]\n", i, j); 1800 1.8 kiyohara goto fail4; 1801 1.8 kiyohara } 1802 1.8 kiyohara } 1803 1.8 kiyohara if (bus_dmamap_create(sc->sc_dmat, MVXORE_MAXXFER, 1804 1.8 kiyohara MVXORE_NSEGS, MVXORE_MAXXFER, 0, 1805 1.8 kiyohara BUS_DMA_NOWAIT, &sc->sc_cdesc_xore[i].chan_out)) { 1806 1.8 kiyohara aprint_error_dev(sc->sc_dev, 1807 1.8 kiyohara "bus_dmamap_create failed: chan%d out\n", i); 1808 1.8 kiyohara goto fail5; 1809 1.8 kiyohara } 1810 1.8 kiyohara sc->sc_cdesc_xore[i].chan_totalcnt = 0; 1811 1.8 kiyohara sc->sc_cdesc_xore[i].chan_running = NULL; 1812 1.8 kiyohara 1813 1.8 kiyohara mask |= MVXORE_I(i, 1814 1.8 kiyohara MVXORE_I_EOC | 1815 1.8 kiyohara MVXORE_I_ADDRDECODE | 1816 1.8 kiyohara MVXORE_I_ACCPROT | 1817 1.8 kiyohara MVXORE_I_WRPROT | 1818 1.8 kiyohara MVXORE_I_OWN | 1819 1.8 kiyohara MVXORE_I_INTPARITY | 1820 1.8 kiyohara MVXORE_I_XBAR); 1821 1.8 kiyohara 1822 1.8 kiyohara /* 16bits/channel * 2channels => 32bit */ 1823 1.8 kiyohara if (i & 0x1) { 1824 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, 1825 1.8 kiyohara MVXORE_XEIMR(sc, i >> 1), mask); 1826 1.8 kiyohara mask = 0; 1827 1.8 kiyohara } 1828 1.8 kiyohara } 1829 1.8 kiyohara 1830 1.8 kiyohara return 0; 1831 1.8 kiyohara 1832 1.8 kiyohara for (; i-- > 0;) { 1833 1.8 kiyohara bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc_xore[i].chan_out); 1834 1.8 kiyohara 1835 1.8 kiyohara fail5: 1836 1.8 kiyohara j = MVXORE_NSRC; 1837 1.8 kiyohara fail4: 1838 1.8 kiyohara for (; j-- > 0;) 1839 1.8 kiyohara bus_dmamap_destroy(sc->sc_dmat, 1840 1.8 kiyohara sc->sc_cdesc_xore[i].chan_in[j]); 1841 1.8 kiyohara } 1842 1.8 kiyohara bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap_xore); 1843 1.8 kiyohara fail3: 1844 1.8 kiyohara bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap_xore); 1845 1.8 kiyohara fail2: 1846 1.8 kiyohara bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf_xore, 1847 1.8 kiyohara sizeof(struct mvxore_desc) * MVXORE_NDESC); 1848 1.8 kiyohara fail1: 1849 1.8 kiyohara bus_dmamem_free(sc->sc_dmat, &segs, 1); 1850 1.8 kiyohara fail0: 1851 1.8 kiyohara return -1; 1852 1.8 kiyohara } 1853 1.1 kiyohara 1854 1.1 kiyohara #ifdef GTIDMAC_DEBUG 1855 1.1 kiyohara static void 1856 1.1 kiyohara gtidmac_dump_idmacreg(struct gtidmac_softc *sc, int chan) 1857 1.1 kiyohara { 1858 1.1 kiyohara uint32_t val; 1859 1.1 kiyohara char buf[256]; 1860 1.1 kiyohara 1861 1.1 kiyohara printf("IDMAC Registers\n"); 1862 1.1 kiyohara 1863 1.1 kiyohara val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMABCR(chan)); 1864 1.3 kiyohara snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036BCLeft\0", val); 1865 1.3 kiyohara printf(" Byte Count : 
%s\n", buf); 1866 1.1 kiyohara printf(" ByteCnt : 0x%06x\n", 1867 1.1 kiyohara val & GTIDMAC_CIDMABCR_BYTECNT_MASK); 1868 1.1 kiyohara printf(" Source Address : 0x%08x\n", 1869 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMASAR(chan))); 1870 1.1 kiyohara printf(" Destination Address : 0x%08x\n", 1871 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMADAR(chan))); 1872 1.1 kiyohara printf(" Next Descriptor Pointer : 0x%08x\n", 1873 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan))); 1874 1.1 kiyohara printf(" Current Descriptor Pointer : 0x%08x\n", 1875 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCDPR(chan))); 1876 1.1 kiyohara 1877 1.1 kiyohara val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan)); 1878 1.3 kiyohara snprintb(buf, sizeof(buf), 1879 1.1 kiyohara "\177\020b\024Abr\0b\021CDEn\0b\016ChanAct\0b\015FetchND\0" 1880 1.1 kiyohara "b\014ChanEn\0b\012IntMode\0b\005DestHold\0b\003SrcHold\0", 1881 1.3 kiyohara val); 1882 1.3 kiyohara printf(" Channel Control (Low) : %s\n", buf); 1883 1.1 kiyohara printf(" SrcBurstLimit : %s Bytes\n", 1884 1.1 kiyohara (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_128B ? "128" : 1885 1.1 kiyohara (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_64B ? "64" : 1886 1.1 kiyohara (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_32B ? "32" : 1887 1.1 kiyohara (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_16B ? "16" : 1888 1.1 kiyohara (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_8B ? "8" : 1889 1.19 msaitoh "unknown"); 1890 1.1 kiyohara printf(" DstBurstLimit : %s Bytes\n", 1891 1.1 kiyohara (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_128B ? "128" : 1892 1.1 kiyohara (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_64B ? "64" : 1893 1.1 kiyohara (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_32B ? "32" : 1894 1.1 kiyohara (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_16B ? "16" : 1895 1.1 kiyohara (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_8B ? "8" : 1896 1.19 msaitoh "unknown"); 1897 1.1 kiyohara printf(" ChainMode : %sChained\n", 1898 1.1 kiyohara val & GTIDMAC_CCLR_CHAINMODE_NC ? "Non-" : ""); 1899 1.1 kiyohara printf(" TransferMode : %s\n", 1900 1.1 kiyohara val & GTIDMAC_CCLR_TRANSFERMODE_B ? "Block" : "Demand"); 1901 1.1 kiyohara printf(" DescMode : %s\n", 1902 1.1 kiyohara val & GTIDMAC_CCLR_DESCMODE_16M ? 
"16M" : "64k"); 1903 1.1 kiyohara val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan)); 1904 1.3 kiyohara snprintb(buf, sizeof(buf), 1905 1.3 kiyohara "\177\020b\001DescByteSwap\0b\000Endianness\0", val); 1906 1.3 kiyohara printf(" Channel Control (High) : %s\n", buf); 1907 1.1 kiyohara } 1908 1.1 kiyohara 1909 1.1 kiyohara static void 1910 1.1 kiyohara gtidmac_dump_idmacdesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd, 1911 1.1 kiyohara uint32_t mode, int post) 1912 1.1 kiyohara { 1913 1.1 kiyohara struct gtidmac_desc *desc; 1914 1.1 kiyohara int i; 1915 1.1 kiyohara char buf[256]; 1916 1.1 kiyohara 1917 1.1 kiyohara printf("IDMAC Descriptor\n"); 1918 1.1 kiyohara 1919 1.1 kiyohara i = 0; 1920 1.1 kiyohara while (1 /*CONSTCOND*/) { 1921 1.1 kiyohara if (post) 1922 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 1923 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc), 1924 1.1 kiyohara BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1925 1.1 kiyohara 1926 1.1 kiyohara desc = dd->dd_idmac_vaddr; 1927 1.1 kiyohara 1928 1.1 kiyohara printf("%d (0x%lx)\n", i, dd->dd_paddr); 1929 1.1 kiyohara if (mode & GTIDMAC_CCLR_DESCMODE_16M) { 1930 1.3 kiyohara snprintb(buf, sizeof(buf), 1931 1.1 kiyohara "\177\020b\037Own\0b\036BCLeft\0", 1932 1.3 kiyohara desc->bc.mode16m.bcnt); 1933 1.3 kiyohara printf(" Byte Count : %s\n", buf); 1934 1.1 kiyohara printf(" ByteCount : 0x%06x\n", 1935 1.1 kiyohara desc->bc.mode16m.bcnt & 1936 1.1 kiyohara GTIDMAC_CIDMABCR_BYTECNT_MASK); 1937 1.1 kiyohara } else { 1938 1.1 kiyohara printf(" Byte Count : 0x%04x\n", 1939 1.1 kiyohara desc->bc.mode64k.bcnt); 1940 1.1 kiyohara printf(" Remind Byte Count : 0x%04x\n", 1941 1.1 kiyohara desc->bc.mode64k.rbc); 1942 1.1 kiyohara } 1943 1.1 kiyohara printf(" Source Address : 0x%08x\n", desc->srcaddr); 1944 1.1 kiyohara printf(" Destination Address : 0x%08x\n", desc->dstaddr); 1945 1.1 kiyohara printf(" Next Descriptor Pointer : 0x%08x\n", desc->nextdp); 1946 1.1 kiyohara 1947 1.1 kiyohara if (desc->nextdp == (uint32_t)NULL) 1948 1.1 kiyohara break; 1949 1.1 kiyohara 1950 1.1 kiyohara if (!post) 1951 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 1952 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc), 1953 1.1 kiyohara BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1954 1.1 kiyohara 1955 1.1 kiyohara i++; 1956 1.1 kiyohara dd = SLIST_NEXT(dd, dd_next); 1957 1.1 kiyohara } 1958 1.1 kiyohara if (!post) 1959 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 1960 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc), 1961 1.1 kiyohara BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1962 1.1 kiyohara } 1963 1.1 kiyohara 1964 1.1 kiyohara static void 1965 1.1 kiyohara gtidmac_dump_xorereg(struct gtidmac_softc *sc, int chan) 1966 1.1 kiyohara { 1967 1.1 kiyohara uint32_t val, opmode; 1968 1.1 kiyohara char buf[64]; 1969 1.1 kiyohara 1970 1.1 kiyohara printf("XORE Registers\n"); 1971 1.1 kiyohara 1972 1.8 kiyohara val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan)); 1973 1.3 kiyohara snprintb(buf, sizeof(buf), 1974 1.3 kiyohara "\177\020" 1975 1.1 kiyohara "b\017RegAccProtect\0b\016DesSwp\0b\015DwrReqSwp\0b\014DrdResSwp\0", 1976 1.3 kiyohara val); 1977 1.14 msaitoh printf(" Configuration : %s\n", buf); 1978 1.1 kiyohara opmode = val & MVXORE_XEXCR_OM_MASK; 1979 1.1 kiyohara printf(" OperationMode : %s operation\n", 1980 1.1 kiyohara opmode == MVXORE_XEXCR_OM_XOR ? "XOR calculate" : 1981 1.1 kiyohara opmode == MVXORE_XEXCR_OM_CRC32 ? 
"CRC-32 calculate" : 1982 1.1 kiyohara opmode == MVXORE_XEXCR_OM_DMA ? "DMA" : 1983 1.1 kiyohara opmode == MVXORE_XEXCR_OM_ECC ? "ECC cleanup" : 1984 1.1 kiyohara opmode == MVXORE_XEXCR_OM_MEMINIT ? "Memory Initialization" : 1985 1.1 kiyohara "unknown"); 1986 1.1 kiyohara printf(" SrcBurstLimit : %s Bytes\n", 1987 1.1 kiyohara (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" : 1988 1.1 kiyohara (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" : 1989 1.1 kiyohara (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" : 1990 1.19 msaitoh "unknown"); 1991 1.1 kiyohara printf(" DstBurstLimit : %s Bytes\n", 1992 1.1 kiyohara (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" : 1993 1.1 kiyohara (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" : 1994 1.1 kiyohara (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" : 1995 1.19 msaitoh "unknown"); 1996 1.8 kiyohara val = 1997 1.8 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan)); 1998 1.1 kiyohara printf(" Activation : 0x%08x\n", val); 1999 1.1 kiyohara val &= MVXORE_XEXACTR_XESTATUS_MASK; 2000 1.1 kiyohara printf(" XEstatus : %s\n", 2001 1.1 kiyohara val == MVXORE_XEXACTR_XESTATUS_NA ? "Channel not active" : 2002 1.1 kiyohara val == MVXORE_XEXACTR_XESTATUS_ACT ? "Channel active" : 2003 1.1 kiyohara val == MVXORE_XEXACTR_XESTATUS_P ? "Channel paused" : "???"); 2004 1.1 kiyohara 2005 1.1 kiyohara if (opmode == MVXORE_XEXCR_OM_XOR || 2006 1.1 kiyohara opmode == MVXORE_XEXCR_OM_CRC32 || 2007 1.1 kiyohara opmode == MVXORE_XEXCR_OM_DMA) { 2008 1.1 kiyohara printf(" NextDescPtr : 0x%08x\n", 2009 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, 2010 1.8 kiyohara MVXORE_XEXNDPR(sc, chan))); 2011 1.1 kiyohara printf(" CurrentDescPtr : 0x%08x\n", 2012 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, 2013 1.21 andvar MVXORE_XEXCDPR(sc, chan))); 2014 1.1 kiyohara } 2015 1.1 kiyohara printf(" ByteCnt : 0x%08x\n", 2016 1.21 andvar bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXBCR(sc, chan))); 2017 1.1 kiyohara 2018 1.1 kiyohara if (opmode == MVXORE_XEXCR_OM_ECC || 2019 1.1 kiyohara opmode == MVXORE_XEXCR_OM_MEMINIT) { 2020 1.1 kiyohara printf(" DstPtr : 0x%08x\n", 2021 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, 2022 1.8 kiyohara MVXORE_XEXDPR(sc, chan))); 2023 1.1 kiyohara printf(" BlockSize : 0x%08x\n", 2024 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, 2025 1.8 kiyohara MVXORE_XEXBSR(sc, chan))); 2026 1.1 kiyohara 2027 1.1 kiyohara if (opmode == MVXORE_XEXCR_OM_ECC) { 2028 1.1 kiyohara val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, 2029 1.1 kiyohara MVXORE_XETMCR); 2030 1.1 kiyohara if (val & MVXORE_XETMCR_TIMEREN) { 2031 1.1 kiyohara val >>= MVXORE_XETMCR_SECTIONSIZECTRL_SHIFT; 2032 1.1 kiyohara val &= MVXORE_XETMCR_SECTIONSIZECTRL_MASK; 2033 1.1 kiyohara printf(" SectionSizeCtrl : 0x%08x\n", 2 ^ val); 2034 1.1 kiyohara printf(" TimerInitVal : 0x%08x\n", 2035 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, 2036 1.1 kiyohara MVXORE_XETMIVR)); 2037 1.1 kiyohara printf(" TimerCrntVal : 0x%08x\n", 2038 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, 2039 1.1 kiyohara MVXORE_XETMCVR)); 2040 1.1 kiyohara } 2041 1.1 kiyohara } else /* MVXORE_XEXCR_OM_MEMINIT */ 2042 1.1 kiyohara printf(" InitVal : 0x%08x%08x\n", 2043 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, 2044 1.1 kiyohara MVXORE_XEIVRH), 2045 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, 2046 1.1 kiyohara MVXORE_XEIVRL)); 2047 1.1 kiyohara } 2048 1.1 
kiyohara } 2049 1.1 kiyohara 2050 1.1 kiyohara static void 2051 1.1 kiyohara gtidmac_dump_xoredesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd, 2052 1.1 kiyohara uint32_t mode, int post) 2053 1.1 kiyohara { 2054 1.3 kiyohara struct mvxore_desc *desc; 2055 1.1 kiyohara int i, j; 2056 1.1 kiyohara char buf[256]; 2057 1.1 kiyohara 2058 1.1 kiyohara printf("XORE Descriptor\n"); 2059 1.1 kiyohara 2060 1.1 kiyohara mode &= MVXORE_XEXCR_OM_MASK; 2061 1.1 kiyohara 2062 1.1 kiyohara i = 0; 2063 1.1 kiyohara while (1 /*CONSTCOND*/) { 2064 1.1 kiyohara if (post) 2065 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore, 2066 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc), 2067 1.1 kiyohara BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2068 1.1 kiyohara 2069 1.1 kiyohara desc = dd->dd_xore_vaddr; 2070 1.1 kiyohara 2071 1.1 kiyohara printf("%d (0x%lx)\n", i, dd->dd_paddr); 2072 1.1 kiyohara 2073 1.3 kiyohara snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036Success\0", 2074 1.3 kiyohara desc->stat); 2075 1.14 msaitoh printf(" Status : %s\n", buf); 2076 1.1 kiyohara if (desc->cmd & MVXORE_DESC_CMD_CRCLAST && post) 2077 1.1 kiyohara printf(" CRC-32 Result : 0x%08x\n", 2078 1.1 kiyohara desc->result); 2079 1.3 kiyohara snprintb(buf, sizeof(buf), 2080 1.1 kiyohara "\177\020b\037EODIntEn\0b\036CRCLast\0" 2081 1.1 kiyohara "b\007Src7Cmd\0b\006Src6Cmd\0b\005Src5Cmd\0b\004Src4Cmd\0" 2082 1.1 kiyohara "b\003Src3Cmd\0b\002Src2Cmd\0b\001Src1Cmd\0b\000Src0Cmd\0", 2083 1.3 kiyohara desc->cmd); 2084 1.14 msaitoh printf(" Command : %s\n", buf); 2085 1.1 kiyohara printf(" Next Descriptor Address : 0x%08x\n", desc->nextda); 2086 1.1 kiyohara printf(" Byte Count : 0x%06x\n", desc->bcnt); 2087 1.1 kiyohara printf(" Destination Address : 0x%08x\n", desc->dstaddr); 2088 1.1 kiyohara if (mode == MVXORE_XEXCR_OM_XOR) { 2089 1.1 kiyohara for (j = 0; j < MVXORE_NSRC; j++) 2090 1.1 kiyohara if (desc->cmd & MVXORE_DESC_CMD_SRCCMD(j)) 2091 1.1 kiyohara printf(" Source Address#%d :" 2092 1.1 kiyohara " 0x%08x\n", j, desc->srcaddr[j]); 2093 1.1 kiyohara } else 2094 1.1 kiyohara printf(" Source Address : 0x%08x\n", 2095 1.1 kiyohara desc->srcaddr[0]); 2096 1.1 kiyohara 2097 1.1 kiyohara if (desc->nextda == (uint32_t)NULL) 2098 1.1 kiyohara break; 2099 1.1 kiyohara 2100 1.1 kiyohara if (!post) 2101 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore, 2102 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc), 2103 1.1 kiyohara BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2104 1.1 kiyohara 2105 1.1 kiyohara i++; 2106 1.1 kiyohara dd = SLIST_NEXT(dd, dd_next); 2107 1.1 kiyohara } 2108 1.1 kiyohara if (!post) 2109 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore, 2110 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc), 2111 1.1 kiyohara BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2112 1.1 kiyohara } 2113 1.1 kiyohara #endif 2114