/*	$NetBSD: lfs_bio.c,v 1.50 2002/12/22 17:31:52 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.50 2002/12/22 17:31:52 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <sys/malloc.h>
#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

/* Macros to clear/set/test flags. */
# define SET(t, f)	(t) |= (f)
# define CLR(t, f)	(t) &= ~(f)
# define ISSET(t, f)	((t) & (f))
/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 */
int	locked_queue_count = 0;		/* XXX Count of locked-down buffers. */
long	locked_queue_bytes = 0L;	/* XXX Total size of locked buffers. */
int	lfs_writing = 0;		/* Set if already kicked off a writer
					   because of buffer space */
extern int lfs_dostats;
/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Called with vp locked.  (Note however that if fsb < 0, vp is ignored.)
 *
 * XXX YAMT - it isn't safe to unlock vp here
 * because the node might be modified while we sleep.
 * (eg. cached states like i_offset might be stale,
 *  the vnode might be truncated, etc..)
 * maybe we should have a way to restart the vnode op.  (EVOPRESTART?)
 *
 * XXX YAMT - we unlock the vnode so that the cleaner can lock it.
 * but it isn't enough.  eg. for VOP_REMOVE, we should unlock the vnode
 * that is going to be removed as well.
 */
int
lfs_reserve(struct lfs *fs, struct vnode *vp, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;

	slept = 0;
	while (fsb > 0 && !lfs_fits(fs, fsb + fs->lfs_ravail) &&
	       vp != fs->lfs_unlockvp) {
#if 0
		/*
		 * XXX ideally, we should unlock vnodes here
		 * because we might sleep for a very long time.
		 */
		VOP_UNLOCK(vp, 0);
#else
		/*
		 * XXX since we sleep waiting for the cleaner while holding
		 * the vnode lock, a deadlock will occur if the cleaner
		 * needs to acquire the vnode lock.
		 * (eg. lfs_markv -> lfs_fastvget -> getnewvnode -> vclean)
		 */
#endif

		if (!slept) {
#ifdef DEBUG
			printf("lfs_reserve: waiting for %ld (bfree = %d,"
			       " est_bfree = %d)\n",
			       fsb + fs->lfs_ravail, fs->lfs_bfree,
			       LFS_EST_BFREE(fs));
#endif
		}
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		wakeup(&lfs_allclean_wakeup);
		wakeup(&fs->lfs_nextseg);

		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
			       0);
#if 0
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
#endif
		if (error)
			return error;
	}
#ifdef DEBUG
	if (slept)
		printf("lfs_reserve: woke up\n");
#endif
	fs->lfs_ravail += fsb;
	return 0;
}
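
/*
 * Hypothetical usage sketch (not a call site from this file): a caller
 * that must not run out of segment space mid-operation while holding
 * vp's lock could bracket the operation with a reserve/release pair,
 * where fsb is an assumed, caller-computed worst case:
 *
 *	if ((error = lfs_reserve(fs, vp, fsb)) != 0)
 *		return (error);
 *	... operation that dirties at most fsb blocks ...
 *	lfs_reserve(fs, vp, -fsb);
 *
 * The negative call releases the reservation without blocking (vp is
 * ignored when fsb < 0, as noted above); both calls simply adjust
 * fs->lfs_ravail.
 */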

/*
 *
 * XXX we don't let meta-data writes run out of space because they can
 * come from the segment writer.  We need to make sure that there is
 * enough space reserved so that there's room to write meta-data
 * blocks.
 *
 * Also, we don't let blocks that have come to us from the cleaner
 * run out of space.
 */
#define CANT_WAIT(BP,F) (IS_IFILE((BP)) || (BP)->b_lblkno < 0 || ((F) & BW_CLEAN))

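/*
 * Illustration (informal, not authoritative): CANT_WAIT(bp, f) is true
 * for Ifile blocks, for blocks with a negative logical block number
 * (i.e. indirect blocks), and for blocks handed to us by the cleaner
 * (BW_CLEAN set in f).  Such buffers bypass the lfs_availwait() space
 * check in lfs_bwrite_ext() below.
 */
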
int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

#ifdef DIAGNOSTIC
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
		panic("bawrite LFS buffer");
	}
#endif /* DIAGNOSTIC */
	return lfs_bwrite_ext(bp, 0);
}

/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int needed;

	needed = fsb + btofsb(fs, fs->lfs_sumsize) +
		 ((howmany(fs->lfs_uinodes + 1, INOPB(fs)) + fs->lfs_segtabsz +
		   1) << (fs->lfs_blktodb - fs->lfs_fsbtodb));

	if (needed >= fs->lfs_avail) {
#ifdef DEBUG
		printf("lfs_fits: no fit: fsb = %d, uinodes = %d, "
		       "needed = %d, avail = %d\n",
		       fsb, fs->lfs_uinodes, needed, fs->lfs_avail);
#endif
		return 0;
	}
	return 1;
}
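
/*
 * Worked example with hypothetical parameters, for illustration only:
 * suppose the summary block occupies one fsb (btofsb(fs, lfs_sumsize)
 * == 1), lfs_uinodes == 10 with INOPB(fs) == 64 (so one inode block
 * suffices), lfs_segtabsz == 3, and the fragment size equals the block
 * size (a shift of 0).  Then
 *
 *	needed = fsb + 1 + ((1 + 3 + 1) << 0) = fsb + 6
 *
 * and the write fits only if fsb + 6 < lfs_avail.
 */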

int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile
		 * so it CANT_WAIT.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

		printf("lfs_availwait: out of available space, "
		       "waiting on cleaner\n");

		wakeup(&lfs_allclean_wakeup);
		wakeup(&fs->lfs_nextseg);
#ifdef DIAGNOSTIC
		if (fs->lfs_seglock && fs->lfs_lockpid == curproc->p_pid)
			panic("lfs_availwait: deadlock");
#endif
		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}
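
/*
 * The canonical caller is lfs_bwrite_ext() below: for a buffer that is
 * allowed to wait, it computes the buffer's size in fsb and blocks here
 * until the cleaner has made room, roughly
 *
 *	fsb = fragstofsb(fs, numfrags(fs, bp->b_bcount));
 *	if (!CANT_WAIT(bp, flags))
 *		error = lfs_availwait(fs, fsb);
 *
 * so the space check is skipped exactly for the buffers that CANT_WAIT
 * describes.
 */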

int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	int fsb, error, s;

	KASSERT(bp->b_flags & B_BUSY);
	KASSERT(flags & BW_CLEAN || !(bp->b_flags & B_CALL));

	/*
	 * Don't write *any* blocks if we're mounted read-only.
	 * In particular the cleaner can't write blocks either.
	 */
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly) {
		bp->b_flags &= ~(B_DELWRI | B_READ | B_ERROR);
		LFS_UNLOCK_BUF(bp);
		if (bp->b_flags & B_CALL)
			bp->b_flags &= ~B_BUSY;
		else
			brelse(bp);
		return EROFS;
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if (!(bp->b_flags & B_LOCKED)) {
		fs = VFSTOUFS(bp->b_vp->v_mount)->um_lfs;
		fsb = fragstofsb(fs, numfrags(fs, bp->b_bcount));
		if (!CANT_WAIT(bp, flags)) {
			if ((error = lfs_availwait(fs, fsb)) != 0) {
				brelse(bp);
				return error;
			}
		}

		ip = VTOI(bp->b_vp);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
			if (bp->b_lblkno >= 0)
				LFS_SET_UINO(ip, IN_UPDATE);
		}
		fs->lfs_avail -= fsb;
		bp->b_flags |= B_DELWRI;

		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}

	if (bp->b_flags & B_CALL)
		bp->b_flags &= ~B_BUSY;
	else
		brelse(bp);

	return (0);
}
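
/*
 * Usage sketch (illustrative; the actual call sites live elsewhere):
 * ordinary writers arrive here through lfs_bwrite() above, i.e.
 * VOP_BWRITE, with flags == 0, while code writing on behalf of the
 * cleaner would pass BW_CLEAN so the inode is tagged IN_CLEANING
 * instead of IN_MODIFIED:
 *
 *	error = lfs_bwrite_ext(bp, BW_CLEAN);
 */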

void
lfs_flush_fs(struct lfs *fs, int flags)
{
	if (fs->lfs_ronly == 0 && fs->lfs_dirops == 0)
	{
		/* disallow dirops during flush */
		fs->lfs_writer++;

		/*
		 * We set the queue to 0 here because we
		 * are about to write all the dirty
		 * buffers we have.  If more come in
		 * while we're writing the segment, they
		 * may not get written, so we want the
		 * count to reflect these new writes
		 * after the segwrite completes.
		 */
		if (lfs_dostats)
			++lfs_stats.flush_invoked;
		lfs_segwrite(fs->lfs_ivnode->v_mount, flags);

		/* XXX KS - allow dirops again */
		if (--fs->lfs_writer == 0)
			wakeup(&fs->lfs_dirops);
	}
}

/*
 * XXX
 * This routine flushes buffers out of the B_LOCKED queue when LFS has too
 * many locked down.  Eventually the pageout daemon will simply call LFS
 * when pages need to be reclaimed.  Note, we have one static count of locked
 * buffers, so we can't have more than a single file system.  To make this
 * work for multiple file systems, put the count into the mount structure.
 */
void
lfs_flush(struct lfs *fs, int flags)
{
	struct mount *mp, *nmp;

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	if (lfs_writing && flags == 0) {	/* XXX flags */
#ifdef DEBUG_LFS
		printf("lfs_flush: not flushing because another flush is active\n");
#endif
		return;
	}
	lfs_writing = 1;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS, MFSNAMELEN) == 0)
			lfs_flush_fs(((struct ufsmount *)mp->mnt_data)->ufsmount_u.lfs, flags);
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);

	LFS_DEBUG_COUNTLOCKED("flush");

	lfs_writing = 0;
}

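/*
 * Estimates of what the dirty inodes will add to the locked-buffer
 * totals when they are written: INOCOUNT is the number of inode blocks
 * needed to hold lfs_uinodes dirty inodes, and INOBYTES is their total
 * size in bytes.
 */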
#define INOCOUNT(fs) howmany((fs)->lfs_uinodes, INOPB(fs))
#define INOBYTES(fs) ((fs)->lfs_uinodes * DINODE_SIZE)

int
lfs_check(struct vnode *vp, ufs_daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern int lfs_dirvcount;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_dirvcount > LFS_MAXDIROP || fs->lfs_diropwait > 0))
	{
		++fs->lfs_diropwait;
		tsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0);
		--fs->lfs_diropwait;
	}

	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
	    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
	    lfs_dirvcount > LFS_MAXDIROP || fs->lfs_diropwait > 0)
	{
		++fs->lfs_writer;
		lfs_flush(fs, flags);
		if (--fs->lfs_writer == 0)
			wakeup(&fs->lfs_dirops);
	}

	while (locked_queue_count + INOCOUNT(fs) > LFS_WAIT_BUFS
	       || locked_queue_bytes + INOBYTES(fs) > LFS_WAIT_BYTES)
	{
		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
#ifdef DEBUG_LFS
		printf("lfs_check: waiting: count=%d, bytes=%ld\n",
		       locked_queue_count, locked_queue_bytes);
#endif
		error = tsleep(&locked_queue_count, PCATCH | PUSER,
			       "buffers", hz * LFS_BUFWAIT);
		if (error != EWOULDBLOCK)
			break;
		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		{
			++fs->lfs_writer;
			lfs_flush(fs, flags | SEGM_CKP);
			if (--fs->lfs_writer == 0)
				wakeup(&fs->lfs_dirops);
		}
	}
	return (error);
}
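
/*
 * Hypothetical caller sketch (the real call sites are in the block
 * allocation/write path, not in this file): before dirtying a new
 * block of vp, a caller would throttle itself with
 *
 *	if ((error = lfs_check(vp, lbn, 0)) != 0)
 *		return (error);
 *
 * which may kick off a flush, or sleep until the segment writer and
 * cleaner have brought the locked-buffer counts back down.
 */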

/*
 * Allocate a new buffer header.
 */
#ifdef MALLOCLOG
# define DOMALLOC(S, T, F) _malloc((S), (T), (F), file, line)
struct buf *
lfs_newbuf_malloclog(struct lfs *fs, struct vnode *vp, ufs_daddr_t daddr, size_t size, char *file, int line)
#else
# define DOMALLOC(S, T, F) malloc((S), (T), (F))
struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, ufs_daddr_t daddr, size_t size)
#endif
{
	struct buf *bp;
	size_t nbytes;
	int s;

	nbytes = roundup(size, fsbtob(fs, 1));

	bp = DOMALLOC(sizeof(struct buf), M_SEGMENT, M_WAITOK);
	bzero(bp, sizeof(struct buf));
	if (nbytes) {
		bp->b_data = DOMALLOC(nbytes, M_SEGMENT, M_WAITOK);
		bzero(bp->b_data, nbytes);
	}
#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vp is NULL in lfs_newbuf");
	if (bp == NULL)
		panic("bp is NULL after malloc in lfs_newbuf");
#endif
	s = splbio();
	bgetvp(vp, bp);
	splx(s);

	bp->b_saveaddr = (caddr_t)fs;
	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_iodone = lfs_callback;
	bp->b_flags |= B_BUSY | B_CALL | B_NOCACHE;

	return (bp);
}

#ifdef MALLOCLOG
# define DOFREE(A, T) _free((A), (T), file, line)
void
lfs_freebuf_malloclog(struct buf *bp, char *file, int line)
#else
# define DOFREE(A, T) free((A), (T))
void
lfs_freebuf(struct buf *bp)
#endif
{
	int s;

	s = splbio();
	if (bp->b_vp)
		brelvp(bp);
	splx(s);
	if (!(bp->b_flags & B_INVAL)) { /* B_INVAL indicates a "fake" buffer */
		DOFREE(bp->b_data, M_SEGMENT);
		bp->b_data = NULL;
	}
	DOFREE(bp, M_SEGMENT);
}
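
/*
 * Hypothetical pairing sketch: a caller that needs a throwaway,
 * malloc-backed buffer for segment I/O might do
 *
 *	bp = lfs_newbuf(fs, vp, daddr, size);
 *	memcpy(bp->b_data, src, size);
 *	... hand bp to the device (the write path, not shown here) ...
 *
 * Since lfs_newbuf() sets b_iodone to lfs_callback, such a buffer is
 * presumably released via lfs_freebuf() when the I/O completes; a
 * caller that never issues the I/O must call lfs_freebuf(bp) itself.
 */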

/*
 * Definitions for the buffer free lists.
 */
#define BQUEUES		4		/* number of free buffer queues */

#define BQ_LOCKED	0		/* super-blocks &c */
#define BQ_LRU		1		/* lru, useful buffers */
#define BQ_AGE		2		/* rubbish */
#define BQ_EMPTY	3		/* buffer headers with no memory */

extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];

/*
 * Return a count of buffers on the "locked" queue.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, char *msg)
{
	struct buf *bp;
	int n = 0;
	long int size = 0L;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	     bp = bp->b_freelist.tqe_next) {
		if (bp->b_flags & B_CALL) /* Malloced buffer */
			continue;
		n++;
		size += bp->b_bufsize;
#ifdef DEBUG_LOCKED_LIST
		if (n > nbuf)
			panic("lfs_countlocked: this can't happen: more"
			      " buffers locked than exist");
#endif
	}
#ifdef DEBUG_LOCKED_LIST
	/* Theoretically this function never really does anything */
	if (n != *count)
		printf("lfs_countlocked: %s: adjusted buf count from %d to %d\n",
		       msg, *count, n);
	if (size != *bytes)
		printf("lfs_countlocked: %s: adjusted byte count from %ld to %ld\n",
		       msg, *bytes, size);
#endif
	*count = n;
	*bytes = size;
	return;
}