/* $NetBSD: mvxpbm.c,v 1.2 2018/05/07 09:41:10 maxv Exp $ */
/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
27 1.1 hsuenaga #include <sys/cdefs.h>
28 1.2 maxv __KERNEL_RCSID(0, "$NetBSD: mvxpbm.c,v 1.2 2018/05/07 09:41:10 maxv Exp $");
29 1.1 hsuenaga
30 1.1 hsuenaga #include "opt_multiprocessor.h"
31 1.1 hsuenaga
32 1.1 hsuenaga #include <sys/param.h>
33 1.1 hsuenaga #include <sys/systm.h>
34 1.1 hsuenaga #include <sys/device.h>
35 1.1 hsuenaga #include <sys/mbuf.h>
36 1.1 hsuenaga
37 1.1 hsuenaga #include <dev/marvell/marvellreg.h>
38 1.1 hsuenaga #include <dev/marvell/marvellvar.h>
39 1.1 hsuenaga
40 1.1 hsuenaga #include "mvxpbmvar.h"
41 1.1 hsuenaga
/*
 * Debug instrumentation.  With DEBUG defined, internal functions lose
 * their static linkage (STATIC expands to nothing, so they are visible
 * to the debugger) and the DPRINT* macros print when the global
 * mvxpbm_debug level is at or above the requested level.  Without
 * DEBUG, STATIC is plain static and the macros compile away entirely.
 */
#ifdef DEBUG
#define STATIC /* nothing */
#define DPRINTF(fmt, ...) \
	do { \
		if (mvxpbm_debug >= 1) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTFN(level , fmt, ...) \
	do { \
		if (mvxpbm_debug >= (level)) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTDEV(dev, level, fmt, ...) \
	do { \
		if (mvxpbm_debug >= (level)) { \
			device_printf((dev), \
			    "%s: "fmt , __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTSC(sc, level, fmt, ...) \
	do { \
		device_t dev = (sc)->sc_dev; \
		if (mvxpbm_debug >= (level)) { \
			device_printf(dev, \
			    "%s: " fmt, __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#else
#define STATIC static
#define DPRINTF(fmt, ...)
#define DPRINTFN(level, fmt, ...)
#define DPRINTDEV(dev, level, fmt, ...)
#define DPRINTSC(sc, level, fmt, ...)
#endif
80 1.1 hsuenaga
/* autoconf(9) */
STATIC int mvxpbm_match(device_t, cfdata_t, void *);
STATIC void mvxpbm_attach(device_t, device_t, void *);
STATIC int mvxpbm_evcnt_attach(struct mvxpbm_softc *);
CFATTACH_DECL_NEW(mvxpbm_mbus, sizeof(struct mvxpbm_softc),
    mvxpbm_match, mvxpbm_attach, NULL, NULL);

/* DMA buffers */
STATIC int mvxpbm_alloc_buffer(struct mvxpbm_softc *);

/* mbuf subroutines */
STATIC void mvxpbm_free_mbuf(struct mbuf *, void *, size_t, void *);

/* singleton device instance */
static struct mvxpbm_softc sc_emul;	/* backing storage for emulation-only context */
static struct mvxpbm_softc *sc0;	/* the single active instance; NULL until set up */

/* debug level */
#ifdef DEBUG
static int mvxpbm_debug = 0;
#endif
102 1.1 hsuenaga
103 1.1 hsuenaga /*
104 1.1 hsuenaga * autoconf(9)
105 1.1 hsuenaga */
106 1.1 hsuenaga STATIC int
107 1.1 hsuenaga mvxpbm_match(device_t parent, cfdata_t match, void *aux)
108 1.1 hsuenaga {
109 1.1 hsuenaga struct marvell_attach_args *mva = aux;
110 1.1 hsuenaga
111 1.1 hsuenaga if (strcmp(mva->mva_name, match->cf_name) != 0)
112 1.1 hsuenaga return 0;
113 1.1 hsuenaga if (mva->mva_unit > MVXPBM_UNIT_MAX)
114 1.1 hsuenaga return 0;
115 1.1 hsuenaga if (sc0 != NULL)
116 1.1 hsuenaga return 0;
117 1.1 hsuenaga if (mva->mva_offset != MVA_OFFSET_DEFAULT) {
118 1.1 hsuenaga /* Hardware BM is not supported yet. */
119 1.1 hsuenaga return 0;
120 1.1 hsuenaga }
121 1.1 hsuenaga
122 1.1 hsuenaga return 1;
123 1.1 hsuenaga }
124 1.1 hsuenaga
125 1.1 hsuenaga STATIC void
126 1.1 hsuenaga mvxpbm_attach(device_t parnet, device_t self, void *aux)
127 1.1 hsuenaga {
128 1.1 hsuenaga struct marvell_attach_args *mva = aux;
129 1.1 hsuenaga struct mvxpbm_softc *sc = device_private(self);
130 1.1 hsuenaga
131 1.1 hsuenaga aprint_naive("\n");
132 1.1 hsuenaga aprint_normal(": Marvell ARMADA Buffer Manager\n");
133 1.1 hsuenaga memset(sc, 0, sizeof(*sc));
134 1.1 hsuenaga sc->sc_dev = self;
135 1.1 hsuenaga sc->sc_iot = mva->mva_iot;
136 1.1 hsuenaga sc->sc_dmat = mva->mva_dmat;
137 1.1 hsuenaga
138 1.1 hsuenaga if (mva->mva_offset == MVA_OFFSET_DEFAULT) {
139 1.1 hsuenaga aprint_normal_dev(sc->sc_dev, "Software emulation.\n");
140 1.1 hsuenaga sc->sc_emul = 1;
141 1.1 hsuenaga }
142 1.1 hsuenaga
143 1.1 hsuenaga mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
144 1.1 hsuenaga LIST_INIT(&sc->sc_free);
145 1.1 hsuenaga LIST_INIT(&sc->sc_inuse);
146 1.1 hsuenaga
147 1.1 hsuenaga /* DMA buffers */
148 1.1 hsuenaga if (mvxpbm_alloc_buffer(sc) != 0)
149 1.1 hsuenaga return;
150 1.1 hsuenaga
151 1.1 hsuenaga /* event counters */
152 1.1 hsuenaga mvxpbm_evcnt_attach(sc);
153 1.1 hsuenaga
154 1.1 hsuenaga sc0 = sc;
155 1.1 hsuenaga return;
156 1.1 hsuenaga
157 1.1 hsuenaga }
158 1.1 hsuenaga
STATIC int
mvxpbm_evcnt_attach(struct mvxpbm_softc *sc)
{
	/* Placeholder: no event counters are attached yet; always succeeds. */
	return 0;
}
164 1.1 hsuenaga
/*
 * DMA buffers
 */
/*
 * Allocate one large DMA-able buffer and slice it into fixed-size
 * chunks (header + packet data), placing every chunk on the free list.
 * Returns 0 on success or ENOBUFS, releasing any partial busdma state.
 */
STATIC int
mvxpbm_alloc_buffer(struct mvxpbm_softc *sc)
{
	bus_dma_segment_t segs;
	char *kva, *ptr, *ptr_next, *ptr_data;
	char *bm_buf_end;
	uint32_t align, pad;
	int nsegs;
	int error;

	/*
	 * set default buffer sizes. this will be changed to satisfy
	 * alignment restrictions.
	 */
	sc->sc_chunk_count = 0;
	sc->sc_chunk_size = MVXPBM_PACKET_SIZE;
	sc->sc_chunk_header_size = sizeof(struct mvxpbm_chunk);
	sc->sc_chunk_packet_offset = 64;

	/*
	 * adjust bm_chunk_size, bm_chunk_header_size, bm_slotsize
	 * to satisfy alignment restrictions.
	 *
	 *    <---------------- bm_slotsize [oct.] ------------------>
	 *                               <--- bm_chunk_size[oct.] ---->
	 *    <--- header_size[oct] ---> <-- MBXPE_BM_SIZE[oct.] --->
	 *   +-----------------+--------+---------+-----------------+--+
	 *   | bm_chunk hdr    |pad     |pkt_off  |   packet data   |  |
	 *   +-----------------+--------+---------+-----------------+--+
	 *   ^                          ^         ^                    ^
	 *   |                          |         |                    |
	 *   ptr                 ptr_data  DMA here         ptr_next
	 *
	 * Restrictions:
	 *   - total buffer size must be multiple of MVXPBM_BUF_ALIGN
	 *   - ptr must be aligned to MVXPBM_CHUNK_ALIGN
	 *   - ptr_data must be aligned to MVXPEBM_DATA_ALIGN
	 *   - bm_chunk_size must be multiple of 8[bytes].
	 */
	/* start calculation from 0x0000.0000 */
	ptr = (char *)0;

	/* align start of packet data */
	ptr_data = ptr + sc->sc_chunk_header_size;
	align = (unsigned long)ptr_data & MVXPBM_DATA_MASK;
	if (align != 0) {
		pad = MVXPBM_DATA_ALIGN - align;
		sc->sc_chunk_header_size += pad;
		DPRINTSC(sc, 1, "added padding to BM header, %u bytes\n", pad);
	}

	/* align size of packet data */
	ptr_data = ptr + sc->sc_chunk_header_size;
	ptr_next = ptr_data + MVXPBM_PACKET_SIZE;
	align = (unsigned long)ptr_next & MVXPBM_CHUNK_MASK;
	if (align != 0) {
		pad = MVXPBM_CHUNK_ALIGN - align;
		ptr_next += pad;
		DPRINTSC(sc, 1, "added padding to BM pktbuf, %u bytes\n", pad);
	}
	sc->sc_slotsize = ptr_next - ptr;
	sc->sc_chunk_size = ptr_next - ptr_data;
	KASSERT((sc->sc_chunk_size % MVXPBM_DATA_UNIT) == 0);

	/* align total buffer size to Mbus window boundary */
	sc->sc_buf_size = sc->sc_slotsize * MVXPBM_NUM_SLOTS;
	align = (unsigned long)sc->sc_buf_size & MVXPBM_BUF_MASK;
	if (align != 0) {
		pad = MVXPBM_BUF_ALIGN - align;
		sc->sc_buf_size += pad;
		DPRINTSC(sc, 1,
		    "expand buffer to fit page boundary, %u bytes\n", pad);
	}

	/*
	 * get the aligned buffer from busdma(9) framework
	 */
	if (bus_dmamem_alloc(sc->sc_dmat, sc->sc_buf_size, MVXPBM_BUF_ALIGN, 0,
	    &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't alloc BM buffers\n");
		return ENOBUFS;
	}
	if (bus_dmamem_map(sc->sc_dmat, &segs, nsegs, sc->sc_buf_size,
	    (void **)&kva, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev,
		    "can't map dma buffers (%zu bytes)\n", sc->sc_buf_size);
		error = ENOBUFS;
		goto fail1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sc->sc_buf_size, 1, sc->sc_buf_size,
	    0, BUS_DMA_NOWAIT, &sc->sc_buf_map)) {
		aprint_error_dev(sc->sc_dev, "can't create dma map\n");
		error = ENOBUFS;
		goto fail2;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_buf_map,
	    kva, sc->sc_buf_size, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't load dma map\n");
		error = ENOBUFS;
		goto fail3;
	}
	sc->sc_buf = (void *)kva;
	sc->sc_buf_pa = segs.ds_addr;
	bm_buf_end = (void *)(kva + sc->sc_buf_size);
	DPRINTSC(sc, 1, "memory pool at %p\n", sc->sc_buf);

	/* slice the buffer */
	mvxpbm_lock(sc);
	for (ptr = sc->sc_buf; ptr + sc->sc_slotsize <= bm_buf_end;
	    ptr += sc->sc_slotsize) {
		struct mvxpbm_chunk *chunk;

		/* initialize chunk */
		ptr_data = ptr + sc->sc_chunk_header_size;
		chunk = (struct mvxpbm_chunk *)ptr;
		chunk->m = NULL;
		chunk->sc = sc;
		chunk->off = (ptr - sc->sc_buf);
		chunk->pa = (paddr_t)(sc->sc_buf_pa + chunk->off);
		chunk->buf_off = (ptr_data - sc->sc_buf);
		chunk->buf_pa = (paddr_t)(sc->sc_buf_pa + chunk->buf_off);
		chunk->buf_va = (vaddr_t)(sc->sc_buf + chunk->buf_off);
		chunk->buf_size = sc->sc_chunk_size;

		/* add to free list (for software management) */
		LIST_INSERT_HEAD(&sc->sc_free, chunk, link);
		mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
		sc->sc_chunk_count++;

		DPRINTSC(sc, 9, "new chunk %p\n", (void *)chunk->buf_va);
	}
	mvxpbm_unlock(sc);
	return 0;

fail3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_buf_map);
fail2:
	bus_dmamem_unmap(sc->sc_dmat, kva, sc->sc_buf_size);
fail1:
	bus_dmamem_free(sc->sc_dmat, &segs, nsegs);

	return error;
}
311 1.1 hsuenaga
312 1.1 hsuenaga /*
313 1.1 hsuenaga * mbuf subroutines
314 1.1 hsuenaga */
315 1.1 hsuenaga STATIC void
316 1.1 hsuenaga mvxpbm_free_mbuf(struct mbuf *m, void *buf, size_t size, void *arg)
317 1.1 hsuenaga {
318 1.1 hsuenaga struct mvxpbm_chunk *chunk = (struct mvxpbm_chunk *)arg;
319 1.1 hsuenaga int s;
320 1.1 hsuenaga
321 1.1 hsuenaga KASSERT(m != NULL);
322 1.1 hsuenaga KASSERT(arg != NULL);
323 1.1 hsuenaga
324 1.1 hsuenaga DPRINTFN(3, "free packet %p\n", m);
325 1.2 maxv
326 1.1 hsuenaga chunk->m = NULL;
327 1.1 hsuenaga s = splvm();
328 1.1 hsuenaga pool_cache_put(mb_cache, m);
329 1.1 hsuenaga splx(s);
330 1.1 hsuenaga return mvxpbm_free_chunk(chunk);
331 1.1 hsuenaga }
332 1.1 hsuenaga
333 1.1 hsuenaga /*
334 1.1 hsuenaga * Exported APIs
335 1.1 hsuenaga */
336 1.1 hsuenaga /* get mvxpbm device context */
337 1.1 hsuenaga struct mvxpbm_softc *
338 1.1 hsuenaga mvxpbm_device(struct marvell_attach_args *mva)
339 1.1 hsuenaga {
340 1.1 hsuenaga struct mvxpbm_softc *sc;
341 1.1 hsuenaga
342 1.1 hsuenaga if (sc0 != NULL)
343 1.1 hsuenaga return sc0;
344 1.1 hsuenaga if (mva == NULL)
345 1.1 hsuenaga return NULL;
346 1.1 hsuenaga
347 1.1 hsuenaga /* allocate software emulation context */
348 1.1 hsuenaga sc = &sc_emul;
349 1.1 hsuenaga memset(sc, 0, sizeof(*sc));
350 1.1 hsuenaga sc->sc_emul = 1;
351 1.1 hsuenaga sc->sc_iot = mva->mva_iot;
352 1.1 hsuenaga sc->sc_dmat = mva->mva_dmat;
353 1.1 hsuenaga
354 1.1 hsuenaga mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
355 1.1 hsuenaga LIST_INIT(&sc->sc_free);
356 1.1 hsuenaga LIST_INIT(&sc->sc_inuse);
357 1.1 hsuenaga
358 1.1 hsuenaga if (mvxpbm_alloc_buffer(sc) != 0)
359 1.1 hsuenaga return NULL;
360 1.1 hsuenaga mvxpbm_evcnt_attach(sc);
361 1.1 hsuenaga sc0 = sc;
362 1.1 hsuenaga return sc0;
363 1.1 hsuenaga }
364 1.1 hsuenaga
365 1.1 hsuenaga /* allocate new memory chunk */
366 1.1 hsuenaga struct mvxpbm_chunk *
367 1.1 hsuenaga mvxpbm_alloc(struct mvxpbm_softc *sc)
368 1.1 hsuenaga {
369 1.1 hsuenaga struct mvxpbm_chunk *chunk;
370 1.1 hsuenaga
371 1.1 hsuenaga mvxpbm_lock(sc);
372 1.1 hsuenaga
373 1.1 hsuenaga chunk = LIST_FIRST(&sc->sc_free);
374 1.1 hsuenaga if (chunk == NULL) {
375 1.1 hsuenaga mvxpbm_unlock(sc);
376 1.1 hsuenaga return NULL;
377 1.1 hsuenaga }
378 1.1 hsuenaga
379 1.1 hsuenaga LIST_REMOVE(chunk, link);
380 1.1 hsuenaga LIST_INSERT_HEAD(&sc->sc_inuse, chunk, link);
381 1.1 hsuenaga
382 1.1 hsuenaga mvxpbm_unlock(sc);
383 1.1 hsuenaga return chunk;
384 1.1 hsuenaga }
385 1.1 hsuenaga
386 1.1 hsuenaga /* free memory chunk */
387 1.1 hsuenaga void
388 1.1 hsuenaga mvxpbm_free_chunk(struct mvxpbm_chunk *chunk)
389 1.1 hsuenaga {
390 1.1 hsuenaga struct mvxpbm_softc *sc = chunk->sc;
391 1.1 hsuenaga
392 1.1 hsuenaga KASSERT(chunk->m == NULL);
393 1.1 hsuenaga DPRINTFN(3, "bm chunk free\n");
394 1.1 hsuenaga
395 1.1 hsuenaga mvxpbm_lock(sc);
396 1.1 hsuenaga
397 1.1 hsuenaga LIST_REMOVE(chunk, link);
398 1.1 hsuenaga LIST_INSERT_HEAD(&sc->sc_free, chunk, link);
399 1.1 hsuenaga
400 1.1 hsuenaga mvxpbm_unlock(sc);
401 1.1 hsuenaga }
402 1.1 hsuenaga
/* prepare mbuf header after Rx */
/*
 * Attach a fresh mbuf header to a chunk's packet buffer as external
 * storage, then trim the configured packet offset off the front so the
 * mbuf's data pointer lands where the hardware DMAs the packet.
 * Returns 0 on success or ENOBUFS if no mbuf header is available.
 */
int
mvxpbm_init_mbuf_hdr(struct mvxpbm_chunk *chunk)
{
	struct mvxpbm_softc *sc = chunk->sc;

	KASSERT(chunk->m == NULL);

	/* add new mbuf header */
	MGETHDR(chunk->m, M_DONTWAIT, MT_DATA);
	if (chunk->m == NULL) {
		aprint_error_dev(sc->sc_dev, "cannot get mbuf\n");
		return ENOBUFS;
	}
	/* point the mbuf at the chunk's buffer; mvxpbm_free_mbuf reclaims it */
	MEXTADD(chunk->m, chunk->buf_va, chunk->buf_size, 0,
	    mvxpbm_free_mbuf, chunk);
	chunk->m->m_flags |= M_EXT_RW;
	chunk->m->m_len = chunk->m->m_pkthdr.len = chunk->buf_size;
	if (sc->sc_chunk_packet_offset)
		m_adj(chunk->m, sc->sc_chunk_packet_offset);

	return 0;
}
426 1.1 hsuenaga
/* sync DMA segments */
/*
 * bus_dmamap_sync(9) the chunk's packet-data region.  size == 0
 * (BM_SYNC_ALL) means the whole buffer; "ops" are BUS_DMASYNC_* flags.
 */
void
mvxpbm_dmamap_sync(struct mvxpbm_chunk *chunk, size_t size, int ops)
{
	struct mvxpbm_softc *sc = chunk->sc;

	KASSERT(size <= chunk->buf_size);
	if (size == 0)
		size = chunk->buf_size;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_buf_map, chunk->buf_off, size, ops);
}
439 1.1 hsuenaga
/* lock */
/* Acquire the softc mutex protecting the free/in-use chunk lists. */
void
mvxpbm_lock(struct mvxpbm_softc *sc)
{
	mutex_enter(&sc->sc_mtx);
}

/* Release the softc mutex taken by mvxpbm_lock(). */
void
mvxpbm_unlock(struct mvxpbm_softc *sc)
{
	mutex_exit(&sc->sc_mtx);
}
452 1.1 hsuenaga
453 1.1 hsuenaga /* get params */
454 1.1 hsuenaga const char *
455 1.1 hsuenaga mvxpbm_xname(struct mvxpbm_softc *sc)
456 1.1 hsuenaga {
457 1.1 hsuenaga if (sc->sc_emul) {
458 1.1 hsuenaga return "software_bm";
459 1.1 hsuenaga }
460 1.1 hsuenaga return device_xname(sc->sc_dev);
461 1.1 hsuenaga }
462 1.1 hsuenaga
/* Size in bytes of the packet-data area of each chunk. */
size_t
mvxpbm_chunk_size(struct mvxpbm_softc *sc)
{
	return sc->sc_chunk_size;
}

/* Number of chunks carved out of the buffer pool. */
uint32_t
mvxpbm_chunk_count(struct mvxpbm_softc *sc)
{
	return sc->sc_chunk_count;
}

/* Offset of the packet payload within the chunk data area. */
off_t
mvxpbm_packet_offset(struct mvxpbm_softc *sc)
{
	return sc->sc_chunk_packet_offset;
}

/* Physical (DMA) base address of the buffer pool. */
paddr_t
mvxpbm_buf_pbase(struct mvxpbm_softc *sc)
{
	return sc->sc_buf_pa;
}

/* Total size in bytes of the buffer pool. */
size_t
mvxpbm_buf_size(struct mvxpbm_softc *sc)
{
	return sc->sc_buf_size;
}
492