/*	$NetBSD: isadma_machdep.c,v 1.4 2003/03/23 14:12:26 chris Exp $	*/

#define ISA_DMA_STATS

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isadma_machdep.c,v 1.4 2003/03/23 14:12:26 chris Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ARM32_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

/*
 * ISA has a 24-bit address limitation, so at most it has a 16M
 * DMA range.  However, some platforms have a more limited range,
 * e.g. the Shark NC.  On these systems, we are provided with
 * a set of DMA ranges.  The pmap module is aware of these ranges
 * and places DMA-safe memory for them onto an alternate free list
 * so that they are protected from being used to service page faults,
 * etc. (unless we've run out of memory elsewhere).
 */
#define	ISA_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)
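
/*
 * Illustrative sketch, not part of this file: a map load must decide
 * whether each physical segment is visible to ISA.  Assuming the arm32
 * "struct arm32_dma_range" layout (dr_sysbase/dr_len), a check along
 * these lines is what makes a plain load fail and fall back to the
 * bounce path below; the helper name is hypothetical.
 */
#if 0	/* example only */
static int
example_addr_is_dmaable(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	int i;

	/* With no range list, fall back to the 16M ISA limit. */
	if (ranges == NULL)
		return (curaddr < ISA_DMA_BOUNCE_THRESHOLD);

	/* Otherwise the address must fall inside some provided range. */
	for (i = 0; i < nranges; i++) {
		if (curaddr >= ranges[i].dr_sysbase &&
		    curaddr < ranges[i].dr_sysbase + ranges[i].dr_len)
			return (1);
	}
	return (0);
}
#endif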

struct arm32_dma_range *footbridge_isa_dma_ranges;
int footbridge_isa_dma_nranges;

int	_isa_bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));
void	_isa_bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
int	_isa_bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	_isa_bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	_isa_bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	_isa_bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	_isa_bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	_isa_bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int));

int	_isa_bus_dmamem_alloc __P((bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int));

int	_isa_dma_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int));
void	_isa_dma_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t));

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct arm32_bus_dma_tag isa_bus_dma_tag = {
	0,				/* _ranges */
	0,				/* _nranges */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,		/* pre */
	_isa_bus_dmamap_sync,		/* post */
	_isa_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};

/*
 * Initialize ISA DMA.
 */
void
isa_dma_init()
{

	isa_bus_dma_tag._ranges = footbridge_isa_dma_ranges;
	isa_bus_dma_tag._nranges = footbridge_isa_dma_nranges;
}
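
/*
 * Illustrative sketch, not part of this file: platform attachment code
 * is expected to describe its ISA-visible memory and then call
 * isa_dma_init() so the tag above picks the ranges up.  The function,
 * its parameters, and the single-range layout are hypothetical; real
 * values come from the port's memory map.
 */
#if 0	/* example only */
static struct arm32_dma_range example_ranges[1];

static void
example_platform_setup(paddr_t isa_window_base, psize_t isa_window_size)
{
	example_ranges[0].dr_sysbase = isa_window_base;
	example_ranges[0].dr_busbase = 0;	/* as seen by ISA devices */
	example_ranges[0].dr_len = isa_window_size;

	footbridge_isa_dma_ranges = example_ranges;
	footbridge_isa_dma_nranges = 1;
	isa_dma_init();
}
#endif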

/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct arm32_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct arm32_isa_dma_cookie);

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / NBPG) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 *
	 * Well, not really... see note above regarding DMA ranges.
	 * Because of the range issue on this platform, we just
	 * always "might bounce".
	 */
	cookieflags = ID_MIGHT_NEED_BOUNCE;
	cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct arm32_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
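
/*
 * Illustrative sketch, not part of this file: how a driver would obtain
 * a map through the tag above, per bus_dma(9).  The size, segment, and
 * flag choices are hypothetical; BUS_DMA_ALLOCNOW reserves the bounce
 * pages at create time so a later load cannot fail for want of low
 * memory.
 */
#if 0	/* example only */
static int
example_create_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{
	/* One 4K transfer, one segment, no boundary constraint. */
	return (bus_dmamap_create(dmat, 4096, 1, 4096, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, mapp));
}
#endif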

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
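
/*
 * Illustrative sketch, not part of this file: the caller-side lifecycle
 * this load path serves, per bus_dma(9).  The sync calls are what
 * trigger the bounce copies in _isa_bus_dmamap_sync() below; the
 * function name and the device start/wait step are hypothetical
 * placeholders.
 */
#if 0	/* example only */
static int
example_write_to_device(bus_dma_tag_t dmat, bus_dmamap_t map,
    void *buf, bus_size_t len)
{
	int error;

	error = bus_dmamap_load(dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	/* Copies buf into the bounce buffer if the map is bouncing. */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREWRITE);

	/* ... program the device with map->dm_segs[] and wait ... */

	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dmat, map);
	return (0);
}
#endif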

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}
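
/*
 * Illustrative sketch, not part of this file: the read-direction sync
 * pairing that drives the POSTREAD copy-out above.  The function name
 * is hypothetical and the device control step is a placeholder.
 */
#if 0	/* example only */
static void
example_read_syncs(bus_dma_tag_t dmat, bus_dmamap_t map, bus_size_t len)
{
	/* Before starting a device->memory transfer: */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREREAD);

	/* ... run the transfer and wait for completion ... */

	/* Copy-out from the bounce pages to the caller happens here: */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
}
#endif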

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

	if (t->_ranges == NULL)
		return (ENOMEM);

	/* _bus_dmamem_alloc() does the range checks for us. */
	return (_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs,
	    rsegs, flags));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
_isa_dma_alloc_bouncebuf(t, map, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_size_t size;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    NBPG, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}