/*	$NetBSD: spec_vnops.c,v 1.24 1995/07/02 07:20:50 mycroft Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.8 (Berkeley) 11/21/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <miscfs/specfs/specdev.h>

/* symbolic sleep message strings for devices */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";

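/*
 * Vector of specfs vnode operations: each generic vnode operation is
 * mapped to its spec_* implementation, with vn_default_error as the
 * fallback for any operation not listed here.
 */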
int (**spec_vnodeop_p)();
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, spec_close },		/* close */
	{ &vop_access_desc, spec_access },		/* access */
	{ &vop_getattr_desc, spec_getattr },		/* getattr */
	{ &vop_setattr_desc, spec_setattr },		/* setattr */
	{ &vop_read_desc, spec_read },			/* read */
	{ &vop_write_desc, spec_write },		/* write */
	{ &vop_lease_desc, spec_lease_check },		/* lease */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_select_desc, spec_select },		/* select */
	{ &vop_mmap_desc, spec_mmap },			/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },			/* seek */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, spec_inactive },		/* inactive */
	{ &vop_reclaim_desc, spec_reclaim },		/* reclaim */
	{ &vop_lock_desc, spec_lock },			/* lock */
	{ &vop_unlock_desc, spec_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, spec_print },		/* print */
	{ &vop_islocked_desc, spec_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_blkatoff_desc, spec_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, spec_valloc },		/* valloc */
	{ &vop_vfree_desc, spec_vfree },		/* vfree */
	{ &vop_truncate_desc, spec_truncate },		/* truncate */
	{ &vop_update_desc, spec_update },		/* update */
	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };

/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * Open a special file.
 */
/* ARGSUSED */
spec_open(ap)
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *bvp, *vp = ap->a_vp;
	dev_t bdev, dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && cdevsw[maj].d_type == D_DISK)
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != NODEV &&
				    vfinddev(bdev, VBLK, &bvp) &&
				    bvp->v_usecount > 0 &&
				    (error = vfs_mountedon(bvp)))
					return (error);
				if (iskmemdev(dev))
					return (EPERM);
			}
		}
		if (cdevsw[maj].d_type == D_TTY)
			vp->v_flag |= VISTTY;
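		/*
		 * Drop the vnode lock across the driver's open routine,
		 * which may sleep (a tty open, for instance, may wait for
		 * carrier), then reacquire it before returning.
		 */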
		VOP_UNLOCK(vp);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, ap->a_p);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if (error = vfs_mountedon(vp))
			return (error);
		return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, ap->a_p));
	}
	return (0);
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
spec_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, nextbn;
	long bsize, bscale, ssize;
	struct partinfo dpart;
	int n, on, majordev, (*ioctl)();
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		ssize = DEV_BSIZE;
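		/*
		 * Ask the driver (via DIOCGPART) about the partition so the
		 * transfer size can match the filesystem block size
		 * (p_frag * p_fsize) and the disk's true sector size,
		 * rather than the compiled-in defaults.
		 */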
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
			if (dpart.disklab->d_secsize != 0)
				ssize = dpart.disklab->d_secsize;
		}
		bscale = bsize / ssize;
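		/*
		 * Read a block at a time: bn is the first sector of the
		 * block containing the current offset, on is the offset
		 * within that block.  If this block immediately follows
		 * the one read last time (v_lastr), ask breadn() for one
		 * block of read-ahead.
		 */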
		do {
			bn = (uio->uio_offset / ssize) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			if (vp->v_lastr + bscale == bn) {
				nextbn = bn + bscale;
				error = breadn(vp, bn, (int)bsize, &nextbn,
					(int *)&bsize, 1, NOCRED, &bp);
			} else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Vnode op for write
 */
/* ARGSUSED */
spec_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	long bsize, bscale, ssize;
	struct partinfo dpart;
	int n, on, majordev, (*ioctl)();
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		ssize = DEV_BSIZE;
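		/*
		 * As in spec_read(), size transfers to the partition's
		 * filesystem block size and true sector size when the
		 * driver can report them.
		 */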
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
			if (dpart.disklab->d_secsize != 0)
				ssize = dpart.disklab->d_secsize;
		}
		bscale = bsize / ssize;
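		/*
		 * A write that covers a whole block can use getblk() and
		 * skip reading the old contents; a partial write must
		 * first bread() the block it modifies.  Completed full
		 * blocks are pushed out asynchronously with bawrite(),
		 * partial ones are left as delayed writes.
		 */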
		do {
			bn = (uio->uio_offset / ssize) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			if (n == bsize)
				bp = getblk(vp, bn, bsize, 0, 0);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			if (n + on == bsize) {
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
spec_ioctl(ap)
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		u_long a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	dev_t dev = ap->a_vp->v_rdev;
	int maj = major(dev);

	switch (ap->a_vp->v_type) {

	case VCHR:
		return ((*cdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	case VBLK:
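		/*
		 * Command 0 with B_TAPE as the argument is a special
		 * query: answer 0 if the underlying block device is a
		 * tape and 1 otherwise, without calling the driver.
		 */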
		if (ap->a_command == 0 && (long)ap->a_data == B_TAPE)
			if (bdevsw[maj].d_type == D_TAPE)
				return (0);
			else
				return (1);
		return ((*bdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/* ARGSUSED */
spec_select(ap)
	struct vop_select_args /* {
		struct vnode *a_vp;
		int a_which;
		int a_fflags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register dev_t dev;

	switch (ap->a_vp->v_type) {

	default:
		return (1);		/* XXX */

	case VCHR:
		dev = ap->a_vp->v_rdev;
		return (*cdevsw[major(dev)].d_select)(dev, ap->a_which, ap->a_p);
	}
}

/*
 * Synch buffers associated with a block device
 */
/* ARGSUSED */
int
spec_fsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct buf *bp;
	struct buf *nbp;
	int s;

	if (vp->v_type == VCHR)
		return (0);
	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop:
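	/*
	 * The dirty-buffer list is walked at splbio so buffer interrupts
	 * cannot change it mid-scan.  Once a buffer is claimed and handed
	 * to bawrite() that protection is dropped, so the scan restarts
	 * from the head of the list each time around.
	 */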
	s = splbio();
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("spec_fsync: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		bawrite(bp);
		goto loop;
	}
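	/*
	 * For a waited-for (MNT_WAIT) fsync, block until every write
	 * started above has drained (v_numoutput reaches zero) before
	 * returning.
	 */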
	if (ap->a_waitfor == MNT_WAIT) {
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			sleep((caddr_t)&vp->v_numoutput, PRIBIO + 1);
		}
#ifdef DIAGNOSTIC
		if (vp->v_dirtyblkhd.lh_first) {
			vprint("spec_fsync: dirty", vp);
			goto loop;
		}
#endif
	}
	splx(s);
	return (0);
}

/*
 * Just call the device strategy routine
 */
spec_strategy(ap)
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{

	(*bdevsw[major(ap->a_bp->b_dev)].d_strategy)(ap->a_bp);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
spec_bmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
	} */ *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn;
	return (0);
}

/*
 * At the moment we do not do any locking.
 */
/* ARGSUSED */
spec_lock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (0);
}

/* ARGSUSED */
spec_unlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
spec_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode, error;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (vcount(vp) == 2 && ap->a_p &&
		    vp == ap->a_p->p_session->s_ttyvp) {
			vrele(vp);
			ap->a_p->p_session->s_ttyvp = NULL;
		}
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		if (error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0))
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, ap->a_fflag, mode, ap->a_p));
}

/*
 * Print out the contents of a special device vnode.
 */
spec_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
	    minor(ap->a_vp->v_rdev));
}

/*
 * Return POSIX pathconf information applicable to special devices.
 */
spec_pathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		register_t *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Special device advisory byte-level locks.
 */
/* ARGSUSED */
spec_advlock(ap)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
{

	return (EOPNOTSUPP);
}

/*
 * Special device failed operation
 */
spec_ebadf()
{

	return (EBADF);
}

/*
 * Special device bad operation
 */
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}