/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product pages can be found here:
 *
 * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.9 2014/10/13 08:24:52 martin Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);

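/*
 * The RX and TX descriptor rings live back to back in a single DMA
 * allocation: AWGE_RX_RING_COUNT RX descriptors first, then the TX
 * descriptors (see dwc_gmac_alloc_dma_rings below).  The *_NEXT macros
 * rely on the ring counts being powers of two.
 */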
#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))



#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_ETE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE|GMAC_DMA_INT_TUE)

#define AWIN_DEF_MAC_INTRMASK \
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)

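/*
 * The default DMA interrupt mask above enables the normal and abnormal
 * summary bits plus TX/RX completion, fatal bus error and TX underflow;
 * GMAC_DMA_INT_ERRORS collects the abnormal conditions that
 * dwc_gmac_intr() treats as errors.
 */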

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
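	/*
	 * Presumably only the low three bits of the MDIO clock selection
	 * fit into GMAC_MII_CLKMASK (see the MII access routines below),
	 * so truncate the caller-provided divider to the field width.
	 */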
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Enable interrupts
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}



static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
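
	/*
	 * Trigger a software reset; the device clears GMAC_BUSMODE_RESET
	 * again once the reset is complete, so poll for that (for up to
	 * 3000 * 10us = 30ms).
	 */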
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

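/*
 * MII bus access: encode the PHY address, register number and clock
 * selection together with the busy flag into the MIIADDR register, then
 * poll until the device clears GMAC_MII_BUSY again; sc_mdio_lock
 * serializes accesses.  A read that times out returns 0.
 */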
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK));
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

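	/* Flush the initialized ring to memory and point the DMA engine at it. */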
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK));
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;


	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

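/*
 * A note on the configuration bits used below: in the Synopsys GMAC
 * register layout AWIN_GMAC_MAC_CONF_MIISEL selects the 10/100 (MII)
 * port and AWIN_GMAC_MAC_CONF_FES100 then picks 100Mbit; both stay
 * clear for 1000Mbit (GMII) operation.
 */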
static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST | AWIN_GMAC_MAC_CONF_TXENABLE
	    | AWIN_GMAC_MAC_CONF_RXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL | AWIN_GMAC_MAC_CONF_FES100;
		break;
	case IFM_1000_T:
		break;
	}
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX)
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE,
		    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE,
		    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_TXSTART);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}
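
	/*
	 * Build one descriptor per DMA segment.  Ownership of the first
	 * descriptor is withheld until the whole chain has been set up,
	 * so the device never sees a partially constructed packet.
	 */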
	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	// struct dwc_gmac_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			/* setmulti */;
		break;
	}

	splx(s);

	return error;
}

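/*
 * Reclaim completed TX descriptors: walk the ring from t_next, stop at
 * the first descriptor still owned by the device, and free the mbufs
 * and unload the DMA maps of everything transmitted so far.
 */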
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t flags;
	int i;

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
	    i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		desc = &sc->sc_txq.t_desc[i];
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		flags = le32toh(desc->ddesc_status);
		if (flags & DDESC_STATUS_OWNEDBYDEV)
			break;
		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;
		sc->sc_ec.ec_if.if_opackets++;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
	}
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev, "rx intr\n");
	/* XXX */
#endif
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;
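
	/*
	 * Two interrupt sources share this handler: the MAC core (MII and
	 * link events, checked first) and the DMA engine (TX/RX completion
	 * and error bits in the DMA status register, which is acknowledged
	 * below by writing the handled bits back).
	 */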
	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
		        i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NIE\0"
	    "b\x0f""AIE\0"
	    "b\x0e""ERE\0"
	    "b\x0d""FBE\0"
	    "b\x0a""ETE\0"
	    "b\x09""RWE\0"
	    "b\x08""RSE\0"
	    "b\x07""RUE\0"
	    "b\x06""RIE\0"
	    "b\x05""UNE\0"
	    "b\x04""OVE\0"
	    "b\x03""TJE\0"
	    "b\x02""TUE\0"
	    "b\x01""TSE\0"
	    "b\x00""TIE\0"
	    "\0", dma_status);
	printf("%s: INTR status: %08x, DMA status: %s\n",
	    device_xname(sc->sc_dev),
	    status, buf);

	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);

	panic(msg);
}
#endif
