/*	$NetBSD: lfs_vnops.c,v 1.265 2014/05/17 07:09:09 dholland Exp $	*/
2
3 /*-
4 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Konrad E. Schroder <perseant (at) hhhh.org>.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31 /*
32 * Copyright (c) 1986, 1989, 1991, 1993, 1995
33 * The Regents of the University of California. All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 3. Neither the name of the University nor the names of its contributors
44 * may be used to endorse or promote products derived from this software
45 * without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 *
59 * @(#)lfs_vnops.c 8.13 (Berkeley) 6/10/95
60 */
61
62 /* from NetBSD: ufs_vnops.c,v 1.213 2013/06/08 05:47:02 kardel Exp */
63 /*-
64 * Copyright (c) 2008 The NetBSD Foundation, Inc.
65 * All rights reserved.
66 *
67 * This code is derived from software contributed to The NetBSD Foundation
68 * by Wasabi Systems, Inc.
69 *
70 * Redistribution and use in source and binary forms, with or without
71 * modification, are permitted provided that the following conditions
72 * are met:
73 * 1. Redistributions of source code must retain the above copyright
74 * notice, this list of conditions and the following disclaimer.
75 * 2. Redistributions in binary form must reproduce the above copyright
76 * notice, this list of conditions and the following disclaimer in the
77 * documentation and/or other materials provided with the distribution.
78 *
79 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
80 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
81 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
82 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
83 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
84 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
85 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
86 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
87 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
88 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
89 * POSSIBILITY OF SUCH DAMAGE.
90 */
91 /*
92 * Copyright (c) 1982, 1986, 1989, 1993, 1995
93 * The Regents of the University of California. All rights reserved.
94 * (c) UNIX System Laboratories, Inc.
95 * All or some portions of this file are derived from material licensed
96 * to the University of California by American Telephone and Telegraph
97 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
98 * the permission of UNIX System Laboratories, Inc.
99 *
100 * Redistribution and use in source and binary forms, with or without
101 * modification, are permitted provided that the following conditions
102 * are met:
103 * 1. Redistributions of source code must retain the above copyright
104 * notice, this list of conditions and the following disclaimer.
105 * 2. Redistributions in binary form must reproduce the above copyright
106 * notice, this list of conditions and the following disclaimer in the
107 * documentation and/or other materials provided with the distribution.
108 * 3. Neither the name of the University nor the names of its contributors
109 * may be used to endorse or promote products derived from this software
110 * without specific prior written permission.
111 *
112 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
113 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
114 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
115 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
116 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
117 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
118 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
119 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
120 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
121 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
122 * SUCH DAMAGE.
123 *
124 * @(#)ufs_vnops.c 8.28 (Berkeley) 7/31/95
125 */
126
127 #include <sys/cdefs.h>
128 __KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.265 2014/05/17 07:09:09 dholland Exp $");
129
130 #ifdef _KERNEL_OPT
131 #include "opt_compat_netbsd.h"
132 #include "opt_uvm_page_trkown.h"
133 #endif
134
135 #include <sys/param.h>
136 #include <sys/systm.h>
137 #include <sys/namei.h>
138 #include <sys/resourcevar.h>
139 #include <sys/kernel.h>
140 #include <sys/file.h>
141 #include <sys/stat.h>
142 #include <sys/buf.h>
143 #include <sys/proc.h>
144 #include <sys/mount.h>
145 #include <sys/vnode.h>
146 #include <sys/pool.h>
147 #include <sys/signalvar.h>
148 #include <sys/kauth.h>
149 #include <sys/syslog.h>
150 #include <sys/fstrans.h>
151
152 #include <miscfs/fifofs/fifo.h>
153 #include <miscfs/genfs/genfs.h>
154 #include <miscfs/specfs/specdev.h>
155
156 #include <ufs/lfs/ulfs_inode.h>
157 #include <ufs/lfs/ulfsmount.h>
158 #include <ufs/lfs/ulfs_bswap.h>
159 #include <ufs/lfs/ulfs_extern.h>
160
161 #include <uvm/uvm.h>
162 #include <uvm/uvm_pmap.h>
163 #include <uvm/uvm_stat.h>
164 #include <uvm/uvm_pager.h>
165
166 #include <ufs/lfs/lfs.h>
167 #include <ufs/lfs/lfs_kernel.h>
168 #include <ufs/lfs/lfs_extern.h>
169
170 extern pid_t lfs_writer_daemon;
171 int lfs_ignore_lazy_sync = 1;
172
173 static int lfs_openextattr(void *v);
174 static int lfs_closeextattr(void *v);
175 static int lfs_getextattr(void *v);
176 static int lfs_setextattr(void *v);
177 static int lfs_listextattr(void *v);
178 static int lfs_deleteextattr(void *v);
179
180 /*
181 * A virgin directory (no blushing please).
182 */
/*
 * Template for the "." and ".." entries written into every new
 * directory.  Each entry is (inode number, record length, type,
 * name length, name); the inode numbers are 0 here and are filled
 * in when the directory is actually created.  The ".." record is
 * sized to consume the remainder of the directory block.
 */
static const struct lfs_dirtemplate mastertemplate = {
	0, 12, LFS_DT_DIR, 1, ".",		/* "." -- 12-byte record */
	0, LFS_DIRBLKSIZ - 12, LFS_DT_DIR, 2, ".."	/* ".." -- rest of block */
};
187
188 /* Global vfs data structures for lfs. */
/*
 * Vnode operations vector for regular LFS files and directories.
 * Operations that must interact with the log/segment machinery get
 * LFS-specific implementations (lfs_*); the rest fall through to the
 * generic ulfs_* or genfs handlers.
 */
int (**lfs_vnodeop_p)(void *);
const struct vnodeopv_entry_desc lfs_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, ulfs_lookup },		/* lookup */
	{ &vop_create_desc, lfs_create },		/* create */
	{ &vop_whiteout_desc, ulfs_whiteout },		/* whiteout */
	{ &vop_mknod_desc, lfs_mknod },			/* mknod */
	{ &vop_open_desc, ulfs_open },			/* open */
	{ &vop_close_desc, lfs_close },			/* close */
	{ &vop_access_desc, ulfs_access },		/* access */
	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
	{ &vop_read_desc, lfs_read },			/* read */
	{ &vop_write_desc, lfs_write },			/* write */
	{ &vop_ioctl_desc, ulfs_ioctl },		/* ioctl */
	{ &vop_fcntl_desc, lfs_fcntl },			/* fcntl */
	{ &vop_poll_desc, ulfs_poll },			/* poll */
	{ &vop_kqfilter_desc, genfs_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, ulfs_revoke },		/* revoke */
	{ &vop_mmap_desc, lfs_mmap },			/* mmap */
	{ &vop_fsync_desc, lfs_fsync },			/* fsync */
	{ &vop_seek_desc, ulfs_seek },			/* seek */
	{ &vop_remove_desc, lfs_remove },		/* remove */
	{ &vop_link_desc, lfs_link },			/* link */
	{ &vop_rename_desc, lfs_rename },		/* rename */
	{ &vop_mkdir_desc, lfs_mkdir },			/* mkdir */
	{ &vop_rmdir_desc, lfs_rmdir },			/* rmdir */
	{ &vop_symlink_desc, lfs_symlink },		/* symlink */
	{ &vop_readdir_desc, ulfs_readdir },		/* readdir */
	{ &vop_readlink_desc, ulfs_readlink },		/* readlink */
	{ &vop_abortop_desc, ulfs_abortop },		/* abortop */
	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, ulfs_lock },			/* lock */
	{ &vop_unlock_desc, ulfs_unlock },		/* unlock */
	{ &vop_bmap_desc, ulfs_bmap },			/* bmap */
	{ &vop_strategy_desc, lfs_strategy },		/* strategy */
	{ &vop_print_desc, ulfs_print },		/* print */
	{ &vop_islocked_desc, ulfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, ulfs_pathconf },		/* pathconf */
	{ &vop_advlock_desc, ulfs_advlock },		/* advlock */
	{ &vop_bwrite_desc, lfs_bwrite },		/* bwrite */
	{ &vop_getpages_desc, lfs_getpages },		/* getpages */
	{ &vop_putpages_desc, lfs_putpages },		/* putpages */
	{ &vop_openextattr_desc, lfs_openextattr },	/* openextattr */
	{ &vop_closeextattr_desc, lfs_closeextattr },	/* closeextattr */
	{ &vop_getextattr_desc, lfs_getextattr },	/* getextattr */
	{ &vop_setextattr_desc, lfs_setextattr },	/* setextattr */
	{ &vop_listextattr_desc, lfs_listextattr },	/* listextattr */
	{ &vop_deleteextattr_desc, lfs_deleteextattr },	/* deleteextattr */
	{ NULL, NULL }
};
const struct vnodeopv_desc lfs_vnodeop_opv_desc =
	{ &lfs_vnodeop_p, lfs_vnodeop_entries };
243
/*
 * Vnode operations vector for special (device) files on LFS.
 * Device I/O goes through the spec_* handlers; metadata operations
 * (getattr/setattr/inactive/reclaim) still go through LFS so that
 * inode changes are logged.
 */
int (**lfs_specop_p)(void *);
const struct vnodeopv_entry_desc lfs_specop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, lfsspec_close },		/* close */
	{ &vop_access_desc, ulfs_access },		/* access */
	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
	{ &vop_read_desc, ulfsspec_read },		/* read */
	{ &vop_write_desc, ulfsspec_write },		/* write */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_fcntl_desc, ulfs_fcntl },		/* fcntl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_mmap_desc, spec_mmap },			/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },			/* seek */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, ulfs_lock },			/* lock */
	{ &vop_unlock_desc, ulfs_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, ulfs_print },		/* print */
	{ &vop_islocked_desc, ulfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_bwrite_desc, vn_bwrite },		/* bwrite */
	{ &vop_getpages_desc, spec_getpages },		/* getpages */
	{ &vop_putpages_desc, spec_putpages },		/* putpages */
	{ &vop_openextattr_desc, lfs_openextattr },	/* openextattr */
	{ &vop_closeextattr_desc, lfs_closeextattr },	/* closeextattr */
	{ &vop_getextattr_desc, lfs_getextattr },	/* getextattr */
	{ &vop_setextattr_desc, lfs_setextattr },	/* setextattr */
	{ &vop_listextattr_desc, lfs_listextattr },	/* listextattr */
	{ &vop_deleteextattr_desc, lfs_deleteextattr },	/* deleteextattr */
	{ NULL, NULL }
};
const struct vnodeopv_desc lfs_specop_opv_desc =
	{ &lfs_specop_p, lfs_specop_entries };
297
/*
 * Vnode operations vector for FIFOs on LFS.  Most operations bypass
 * to the fifofs implementation; inode metadata operations still go
 * through LFS.  NOTE(review): unlike the other two tables there is no
 * getpages entry here (only putpages) -- presumably intentional since
 * fifos have no pager-backed data; confirm against fifofs usage.
 */
int (**lfs_fifoop_p)(void *);
const struct vnodeopv_entry_desc lfs_fifoop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, vn_fifo_bypass },		/* lookup */
	{ &vop_create_desc, vn_fifo_bypass },		/* create */
	{ &vop_mknod_desc, vn_fifo_bypass },		/* mknod */
	{ &vop_open_desc, vn_fifo_bypass },		/* open */
	{ &vop_close_desc, lfsfifo_close },		/* close */
	{ &vop_access_desc, ulfs_access },		/* access */
	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
	{ &vop_read_desc, ulfsfifo_read },		/* read */
	{ &vop_write_desc, ulfsfifo_write },		/* write */
	{ &vop_ioctl_desc, vn_fifo_bypass },		/* ioctl */
	{ &vop_fcntl_desc, ulfs_fcntl },		/* fcntl */
	{ &vop_poll_desc, vn_fifo_bypass },		/* poll */
	{ &vop_kqfilter_desc, vn_fifo_bypass },		/* kqfilter */
	{ &vop_revoke_desc, vn_fifo_bypass },		/* revoke */
	{ &vop_mmap_desc, vn_fifo_bypass },		/* mmap */
	{ &vop_fsync_desc, vn_fifo_bypass },		/* fsync */
	{ &vop_seek_desc, vn_fifo_bypass },		/* seek */
	{ &vop_remove_desc, vn_fifo_bypass },		/* remove */
	{ &vop_link_desc, vn_fifo_bypass },		/* link */
	{ &vop_rename_desc, vn_fifo_bypass },		/* rename */
	{ &vop_mkdir_desc, vn_fifo_bypass },		/* mkdir */
	{ &vop_rmdir_desc, vn_fifo_bypass },		/* rmdir */
	{ &vop_symlink_desc, vn_fifo_bypass },		/* symlink */
	{ &vop_readdir_desc, vn_fifo_bypass },		/* readdir */
	{ &vop_readlink_desc, vn_fifo_bypass },		/* readlink */
	{ &vop_abortop_desc, vn_fifo_bypass },		/* abortop */
	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, ulfs_lock },			/* lock */
	{ &vop_unlock_desc, ulfs_unlock },		/* unlock */
	{ &vop_bmap_desc, vn_fifo_bypass },		/* bmap */
	{ &vop_strategy_desc, vn_fifo_bypass },		/* strategy */
	{ &vop_print_desc, ulfs_print },		/* print */
	{ &vop_islocked_desc, ulfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, vn_fifo_bypass },		/* pathconf */
	{ &vop_advlock_desc, vn_fifo_bypass },		/* advlock */
	{ &vop_bwrite_desc, lfs_bwrite },		/* bwrite */
	{ &vop_putpages_desc, vn_fifo_bypass },		/* putpages */
	{ &vop_openextattr_desc, lfs_openextattr },	/* openextattr */
	{ &vop_closeextattr_desc, lfs_closeextattr },	/* closeextattr */
	{ &vop_getextattr_desc, lfs_getextattr },	/* getextattr */
	{ &vop_setextattr_desc, lfs_setextattr },	/* setextattr */
	{ &vop_listextattr_desc, lfs_listextattr },	/* listextattr */
	{ &vop_deleteextattr_desc, lfs_deleteextattr },	/* deleteextattr */
	{ NULL, NULL }
};
const struct vnodeopv_desc lfs_fifoop_opv_desc =
	{ &lfs_fifoop_p, lfs_fifoop_entries };
350
351 #define LFS_READWRITE
352 #include <ufs/lfs/ulfs_readwrite.c>
353 #undef LFS_READWRITE
354
355 /*
356 * Synch an open file.
357 */
358 /* ARGSUSED */
int
lfs_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int a_flags;
		off_t offlo;
		off_t offhi;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error, wait;
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;

	/* If we're mounted read-only, don't try to sync. */
	if (fs->lfs_ronly)
		return 0;

	/* If a removed vnode is being cleaned, no need to sync here. */
	if ((ap->a_flags & FSYNC_RECLAIM) != 0 && ip->i_mode == 0)
		return 0;

	/*
	 * Trickle sync simply adds this vnode to the pager list, as if
	 * the pagedaemon had requested a pageout.
	 * NOTE(review): IN_PAGING is tested/set on ip->i_flags here,
	 * while other IN_* bits in this file live in ip->i_flag --
	 * confirm which field IN_PAGING really belongs to.
	 */
	if (ap->a_flags & FSYNC_LAZY) {
		if (lfs_ignore_lazy_sync == 0) {
			mutex_enter(&lfs_lock);
			if (!(ip->i_flags & IN_PAGING)) {
				ip->i_flags |= IN_PAGING;
				TAILQ_INSERT_TAIL(&fs->lfs_pchainhd, ip,
						  i_lfs_pchain);
			}
			/* Prod the writer daemon to do the actual work. */
			wakeup(&lfs_writer_daemon);
			mutex_exit(&lfs_lock);
		}
		return 0;
	}

	/*
	 * If a vnode is being cleaned, flush it out before we try to
	 * reuse it. This prevents the cleaner from writing files twice
	 * in the same partial segment, causing an accounting underflow.
	 */
	if (ap->a_flags & FSYNC_RECLAIM && ip->i_flags & IN_CLEANING) {
		lfs_vflush(vp);
	}

	wait = (ap->a_flags & FSYNC_WAIT);
	/*
	 * Flush the requested range.  VOP_PUTPAGES returns EAGAIN when
	 * it cannot make progress; nap briefly (waiting on lfs_avail,
	 * i.e. for segment space to free up) and retry until it either
	 * succeeds or fails for a different reason.
	 */
	do {
		mutex_enter(vp->v_interlock);
		error = VOP_PUTPAGES(vp, trunc_page(ap->a_offlo),
				     round_page(ap->a_offhi),
				     PGO_CLEANIT | (wait ? PGO_SYNCIO : 0));
		if (error == EAGAIN) {
			mutex_enter(&lfs_lock);
			mtsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_fsync",
				hz / 100 + 1, &lfs_lock);
			mutex_exit(&lfs_lock);
		}
	} while (error == EAGAIN);
	if (error)
		return error;

	/* Unless only data was requested, also push the inode itself. */
	if ((ap->a_flags & FSYNC_DATAONLY) == 0)
		error = lfs_update(vp, NULL, NULL, wait ? UPDATE_WAIT : 0);

	/* FSYNC_CACHE: ask the underlying device to flush its cache too. */
	if (error == 0 && ap->a_flags & FSYNC_CACHE) {
		int l = 0;
		error = VOP_IOCTL(ip->i_devvp, DIOCCACHESYNC, &l, FWRITE,
				  curlwp->l_cred);
	}
	/* Still-dirty pages after a waited sync: remember to rewrite. */
	if (wait && !VPISEMPTY(vp))
		LFS_SET_UINO(ip, IN_MODIFIED);

	return error;
}
438
439 /*
440 * Take IN_ADIROP off, then call ulfs_inactive.
441 */
int
lfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
	} */ *ap = v;

	/* Drop the active-dirop accounting (IN_ADIROP) for this vnode. */
	lfs_unmark_vnode(ap->a_vp);

	/*
	 * The Ifile is only ever inactivated on unmount.
	 * Streamline this process by not giving it more dirty blocks.
	 */
	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM) {
		mutex_enter(&lfs_lock);
		LFS_CLR_UINO(VTOI(ap->a_vp), IN_ALLMOD);
		mutex_exit(&lfs_lock);
		VOP_UNLOCK(ap->a_vp);
		return 0;
	}

#ifdef DEBUG
	/*
	 * This might happen on unmount.
	 * XXX If it happens at any other time, it should be a panic.
	 */
	if (ap->a_vp->v_uflag & VU_DIROP) {
		struct inode *ip = VTOI(ap->a_vp);
		printf("lfs_inactive: inactivating VU_DIROP? ino = %d\n", (int)ip->i_number);
	}
#endif /* DEBUG */

	return ulfs_inactive(v);
}
476
/*
 * Begin a directory operation: reserve blocks for the dirop, then
 * acquire a dirop "slot" against the segment writer, and mark the
 * participating vnode(s) as dirop vnodes.  Both dvp and (if given)
 * vp must be locked by the caller.  On success the caller must later
 * undo this with UNMARK_VNODE + lfs_unset_dirop.
 */
int
lfs_set_dirop(struct vnode *dvp, struct vnode *vp)
{
	struct lfs *fs;
	int error;

	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(vp == NULL || VOP_ISLOCKED(vp));

	fs = VTOI(dvp)->i_lfs;

	ASSERT_NO_SEGLOCK(fs);
	/*
	 * LFS_NRESERVE calculates direct and indirect blocks as well
	 * as an inode block; an overestimate in most cases.
	 */
	if ((error = lfs_reserve(fs, dvp, vp, LFS_NRESERVE(fs))) != 0)
		return (error);

restart:
	mutex_enter(&lfs_lock);
	/* No dirops active: give the segment writer a chance to flush. */
	if (fs->lfs_dirops == 0) {
		mutex_exit(&lfs_lock);
		lfs_check(dvp, LFS_UNUSED_LBN, 0);
		mutex_enter(&lfs_lock);
	}
	/* New dirops may not start while the segment writer is active. */
	while (fs->lfs_writer) {
		error = mtsleep(&fs->lfs_dirops, (PRIBIO + 1) | PCATCH,
		    "lfs_sdirop", 0, &lfs_lock);
		if (error == EINTR) {
			mutex_exit(&lfs_lock);
			goto unreserve;
		}
	}
	/*
	 * Too many dirop vnodes outstanding and none currently active:
	 * wake the writer daemon to flush some out, yield, and retry.
	 */
	if (lfs_dirvcount > LFS_MAX_DIROP && fs->lfs_dirops == 0) {
		wakeup(&lfs_writer_daemon);
		mutex_exit(&lfs_lock);
		preempt();
		goto restart;
	}

	/*
	 * Otherwise wait for the count to drain.  PNORELOCK means
	 * lfs_lock is already released when mtsleep returns, so the
	 * error path may jump straight to unreserve.
	 */
	if (lfs_dirvcount > LFS_MAX_DIROP) {
		DLOG((DLOG_DIROP, "lfs_set_dirop: sleeping with dirops=%d, "
		      "dirvcount=%d\n", fs->lfs_dirops, lfs_dirvcount));
		if ((error = mtsleep(&lfs_dirvcount,
		    PCATCH | PUSER | PNORELOCK, "lfs_maxdirop", 0,
		    &lfs_lock)) != 0) {
			goto unreserve;
		}
		goto restart;
	}

	++fs->lfs_dirops;
	/* fs->lfs_doifile = 1; */ /* XXX why? --ks */
	mutex_exit(&lfs_lock);

	/* Hold a reference so SET_ENDOP will be happy */
	vref(dvp);
	if (vp) {
		vref(vp);
		MARK_VNODE(vp);
	}

	MARK_VNODE(dvp);
	return 0;

unreserve:
	/* Return the block reservation taken above. */
	lfs_reserve(fs, dvp, vp, -LFS_NRESERVE(fs));
	return error;
}
547
548 /*
549 * Opposite of lfs_set_dirop... mostly. For now at least must call
550 * UNMARK_VNODE(dvp) explicitly first. (XXX: clean that up)
551 */
552 void
553 lfs_unset_dirop(struct lfs *fs, struct vnode *dvp, const char *str)
554 {
555 mutex_enter(&lfs_lock);
556 --fs->lfs_dirops;
557 if (!fs->lfs_dirops) {
558 if (fs->lfs_nadirop) {
559 panic("lfs_unset_dirop: %s: no dirops but "
560 " nadirop=%d", str,
561 fs->lfs_nadirop);
562 }
563 wakeup(&fs->lfs_writer);
564 mutex_exit(&lfs_lock);
565 lfs_check(dvp, LFS_UNUSED_LBN, 0);
566 } else {
567 mutex_exit(&lfs_lock);
568 }
569 lfs_reserve(fs, dvp, NULL, -LFS_NRESERVE(fs));
570 }
571
/*
 * Mark a vnode as participating in an active directory operation:
 * set IN_ADIROP on the inode and, on first marking, put it on the
 * per-fs dirop chain with VU_DIROP set and an extra reference held.
 */
void
lfs_mark_vnode(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;

	mutex_enter(&lfs_lock);
	if (!(ip->i_flag & IN_ADIROP)) {
		if (!(vp->v_uflag & VU_DIROP)) {
			/*
			 * First time this vnode joins a dirop: take a
			 * reference for the dchain.  lfs_vref() needs
			 * v_interlock, which must be acquired without
			 * lfs_lock held -- hence the drop/reacquire.
			 */
			mutex_exit(&lfs_lock);
			mutex_enter(vp->v_interlock);
			if (lfs_vref(vp) != 0)
				panic("lfs_mark_vnode: could not vref");
			mutex_enter(&lfs_lock);
			++lfs_dirvcount;
			++fs->lfs_dirvcount;
			TAILQ_INSERT_TAIL(&fs->lfs_dchainhd, ip, i_lfs_dchain);
			vp->v_uflag |= VU_DIROP;
		}
		++fs->lfs_nadirop;
		ip->i_flag &= ~IN_CDIROP;
		ip->i_flag |= IN_ADIROP;
	} else
		KASSERT(vp->v_uflag & VU_DIROP);
	mutex_exit(&lfs_lock);
}
598
599 void
600 lfs_unmark_vnode(struct vnode *vp)
601 {
602 struct inode *ip = VTOI(vp);
603
604 mutex_enter(&lfs_lock);
605 if (ip && (ip->i_flag & IN_ADIROP)) {
606 KASSERT(vp->v_uflag & VU_DIROP);
607 --ip->i_lfs->lfs_nadirop;
608 ip->i_flag &= ~IN_ADIROP;
609 }
610 mutex_exit(&lfs_lock);
611 }
612
613 /*
614 * symlink -- make a symbolic link
615 */
int
ulfs_symlink(void *v)
{
	struct vop_symlink_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap = v;
	struct vnode *vp, **vpp;
	struct inode *ip;
	int len, error;
	struct ulfs_lookup_results *ulr;

	vpp = ap->a_vpp;

	/* XXX should handle this material another way */
	ulr = &VTOI(ap->a_dvp)->i_crap;
	ULFS_CHECK_CRAPCOUNTER(VTOI(ap->a_dvp));

	fstrans_start(ap->a_dvp->v_mount, FSTRANS_SHARED);
	/* Allocate the link's inode and enter it in the directory. */
	error = ulfs_makeinode(LFS_IFLNK | ap->a_vap->va_mode, ap->a_dvp, ulr,
			       vpp, ap->a_cnp);
	if (error)
		goto out;
	VN_KNOTE(ap->a_dvp, NOTE_WRITE);
	vp = *vpp;
	len = strlen(ap->a_target);
	ip = VTOI(vp);
	/*
	 * Short symlinks are stored directly in the inode ("fast"
	 * symlinks); longer targets are written as ordinary file data.
	 */
	if (len < ip->i_lfs->um_maxsymlinklen) {
		memcpy((char *)SHORTLINK(ip), ap->a_target, len);
		ip->i_size = len;
		DIP_ASSIGN(ip, size, len);
		uvm_vnp_setsize(vp, ip->i_size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (vp->v_mount->mnt_flag & MNT_RELATIME)
			ip->i_flag |= IN_ACCESS;
	} else
		error = vn_rdwr(UIO_WRITE, vp, ap->a_target, len, (off_t)0,
		    UIO_SYSSPACE, IO_NODELOCKED | IO_JOURNALLOCKED,
		    ap->a_cnp->cn_cred, NULL, NULL);
	VOP_UNLOCK(vp);
	/* On a failed data write, drop the newly created vnode. */
	if (error)
		vrele(vp);
out:
	fstrans_done(ap->a_dvp->v_mount);
	return (error);
}
665
666 int
667 lfs_symlink(void *v)
668 {
669 struct vop_symlink_v3_args /* {
670 struct vnode *a_dvp;
671 struct vnode **a_vpp;
672 struct componentname *a_cnp;
673 struct vattr *a_vap;
674 char *a_target;
675 } */ *ap = v;
676 struct lfs *fs;
677 struct vnode *dvp, **vpp;
678 int error;
679
680 dvp = ap->a_dvp;
681 vpp = ap->a_vpp;
682
683 KASSERT(vpp != NULL);
684 KASSERT(*vpp == NULL);
685
686 fs = VFSTOULFS(dvp->v_mount)->um_lfs;
687 ASSERT_NO_SEGLOCK(fs);
688 if (fs->lfs_ronly) {
689 return EROFS;
690 }
691
692 /*
693 * Get a new vnode *before* adjusting the dirop count, to
694 * avoid a deadlock in getnewvnode(), if we have a stacked
695 * filesystem mounted on top of us.
696 *
697 * NB: this means we have to destroy the new vnode on error.
698 */
699
700 error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, NULL, vpp);
701 if (error) {
702 DLOG((DLOG_ALLOC, "lfs_mkdir: dvp %p error %d\n", dvp, error));
703 return error;
704 }
705 KASSERT(*vpp != NULL);
706
707 error = lfs_set_dirop(dvp, NULL);
708 if (error) {
709 ungetnewvnode(*vpp);
710 *vpp = NULL;
711 return error;
712 }
713
714 error = ulfs_symlink(ap);
715
716 UNMARK_VNODE(dvp);
717 /* XXX: is it even possible for the symlink to get MARK'd? */
718 UNMARK_VNODE(*vpp);
719 if (!((*vpp)->v_uflag & VU_DIROP)) {
720 KASSERT(error != 0);
721 ungetnewvnode(*vpp);
722 *vpp = NULL;
723 }
724 else {
725 KASSERT(error == 0);
726 }
727 lfs_unset_dirop(fs, dvp, "symlink");
728
729 vrele(dvp);
730 return (error);
731 }
732
/*
 * LFS mknod: create the node via ulfs_makeinode() under dirop
 * accounting, record the device number, then force the vnode out to
 * disk and reload it via VFS_VGET so device aliasing is handled by
 * the inode cache.
 */
int
lfs_mknod(void *v)
{
	struct vop_mknod_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct lfs *fs;
	struct vnode *dvp, **vpp;
	struct vattr *vap;
	struct inode *ip;
	int error;
	struct mount	*mp;
	ino_t		ino;
	struct ulfs_lookup_results *ulr;

	dvp = ap->a_dvp;
	vpp = ap->a_vpp;
	vap = ap->a_vap;

	KASSERT(vpp != NULL);
	KASSERT(*vpp == NULL);

	/* XXX should handle this material another way */
	ulr = &VTOI(dvp)->i_crap;
	ULFS_CHECK_CRAPCOUNTER(VTOI(dvp));

	fs = VFSTOULFS(dvp->v_mount)->um_lfs;
	ASSERT_NO_SEGLOCK(fs);
	if (fs->lfs_ronly) {
		return EROFS;
	}

	/*
	 * Get a new vnode *before* adjusting the dirop count, to
	 * avoid a deadlock in getnewvnode(), if we have a stacked
	 * filesystem mounted on top of us.
	 *
	 * NB: this means we have to destroy the new vnode on error.
	 */

	error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, NULL, vpp);
	if (error) {
		DLOG((DLOG_ALLOC, "lfs_mknod: dvp %p error %d\n", dvp, error));
		return error;
	}
	KASSERT(*vpp != NULL);

	error = lfs_set_dirop(dvp, NULL);
	if (error) {
		ungetnewvnode(*vpp);
		*vpp = NULL;
		return error;
	}

	fstrans_start(ap->a_dvp->v_mount, FSTRANS_SHARED);
	error = ulfs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode),
			       dvp, ulr, vpp, ap->a_cnp);

	/* Either way we're done with the dirop at this point */
	/* NOTE(review): if makeinode failed and cleared *vpp, the
	 * UNMARK_VNODE/v_uflag accesses below touch a NULL vnode --
	 * confirm ulfs_makeinode's *vpp contract on error. */
	UNMARK_VNODE(dvp);
	UNMARK_VNODE(*vpp);
	if (!((*vpp)->v_uflag & VU_DIROP)) {
		KASSERT(error != 0);
		ungetnewvnode(*vpp);
		*vpp = NULL;
	}
	else {
		KASSERT(error == 0);
	}
	lfs_unset_dirop(fs, dvp, "mknod");
	/*
	 * XXX this is where this used to be (though inside some evil
	 * macros) but it clearly should be moved further down.
	 * - dholland 20140515
	 */
	vrele(dvp);

	if (error) {
		fstrans_done(ap->a_dvp->v_mount);
		*vpp = NULL;
		return (error);
	}

	VN_KNOTE(dvp, NOTE_WRITE);
	ip = VTOI(*vpp);
	mp  = (*vpp)->v_mount;
	ino = ip->i_number;
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	if (vap->va_rdev != VNOVAL) {
		struct ulfsmount *ump = ip->i_ump;
		KASSERT(fs == ip->i_lfs);
		/*
		 * Want to be able to use this to make badblock
		 * inodes, so don't truncate the dev number.
		 */
		if (ump->um_fstype == ULFS1)
			ip->i_ffs1_rdev = ulfs_rw32(vap->va_rdev,
			    ULFS_MPNEEDSWAP(fs));
		else
			ip->i_ffs2_rdev = ulfs_rw64(vap->va_rdev,
			    ULFS_MPNEEDSWAP(fs));
	}

	/*
	 * Call fsync to write the vnode so that we don't have to deal with
	 * flushing it when it's marked VU_DIROP or reclaiming.
	 *
	 * XXX KS - If we can't flush we also can't call vgone(), so must
	 * return.  But, that leaves this vnode in limbo, also not good.
	 * Can this ever happen (barring hardware failure)?
	 */
	if ((error = VOP_FSYNC(*vpp, NOCRED, FSYNC_WAIT, 0, 0)) != 0) {
		panic("lfs_mknod: couldn't fsync (ino %llu)",
		      (unsigned long long)ino);
		/* return (error); */
	}
	/*
	 * Remove vnode so that it will be reloaded by VFS_VGET and
	 * checked to see if it is an alias of an existing entry in
	 * the inode cache.
	 */
	/* Used to be vput, but that causes us to call VOP_INACTIVE twice. */

	(*vpp)->v_type = VNON;
	VOP_UNLOCK(*vpp);
	vgone(*vpp);
	error = VFS_VGET(mp, ino, vpp);

	fstrans_done(ap->a_dvp->v_mount);
	if (error != 0) {
		*vpp = NULL;
		return (error);
	}
	/* The v3 create protocol returns the new vnode unlocked. */
	VOP_UNLOCK(*vpp);
	return (0);
}
872
873 /*
874 * Create a regular file
875 */
876 int
877 ulfs_create(void *v)
878 {
879 struct vop_create_v3_args /* {
880 struct vnode *a_dvp;
881 struct vnode **a_vpp;
882 struct componentname *a_cnp;
883 struct vattr *a_vap;
884 } */ *ap = v;
885 int error;
886 struct vnode *dvp = ap->a_dvp;
887 struct ulfs_lookup_results *ulr;
888
889 /* XXX should handle this material another way */
890 ulr = &VTOI(dvp)->i_crap;
891 ULFS_CHECK_CRAPCOUNTER(VTOI(dvp));
892
893 fstrans_start(dvp->v_mount, FSTRANS_SHARED);
894 error =
895 ulfs_makeinode(MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode),
896 dvp, ulr, ap->a_vpp, ap->a_cnp);
897 if (error) {
898 fstrans_done(dvp->v_mount);
899 return (error);
900 }
901 fstrans_done(dvp->v_mount);
902 VN_KNOTE(dvp, NOTE_WRITE);
903 VOP_UNLOCK(*ap->a_vpp);
904 return (0);
905 }
906
/*
 * Create a regular file (LFS wrapper around ulfs_create()).
 *
 * The operation is bracketed by lfs_set_dirop()/lfs_unset_dirop() so
 * that the segment writer can commit the directory change atomically.
 * On success *ap->a_vpp holds the new vnode, unlocked; on failure the
 * preallocated vnode is returned to the free pool.
 */
int
lfs_create(void *v)
{
	struct vop_create_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct lfs *fs;
	struct vnode *dvp, **vpp;
	int error;

	dvp = ap->a_dvp;
	vpp = ap->a_vpp;

	KASSERT(vpp != NULL);
	KASSERT(*vpp == NULL);

	fs = VFSTOULFS(dvp->v_mount)->um_lfs;
	ASSERT_NO_SEGLOCK(fs);
	if (fs->lfs_ronly) {
		return EROFS;
	}

	/*
	 * Get a new vnode *before* adjusting the dirop count, to
	 * avoid a deadlock in getnewvnode(), if we have a stacked
	 * filesystem mounted on top of us.
	 *
	 * NB: this means we have to destroy the new vnode on error.
	 */

	error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, NULL, vpp);
	if (error) {
		DLOG((DLOG_ALLOC, "lfs_create: dvp %p error %d\n", dvp,error));
		return error;
	}
	error = lfs_set_dirop(dvp, NULL);
	if (error) {
		/* Nothing was created; give the preallocated vnode back. */
		ungetnewvnode(*vpp);
		*vpp = NULL;
		return error;
	}

	error = ulfs_create(ap);

	UNMARK_VNODE(dvp);
	UNMARK_VNODE(*vpp);
	/*
	 * VU_DIROP still set indicates the new vnode was entered in the
	 * directory and must be kept (the KASSERTs below tie this to the
	 * ulfs_create() result).
	 * NOTE(review): this assumes *vpp is never NULL after a failed
	 * ulfs_create() -- confirm against ulfs_makeinode()'s error path.
	 */
	if (!((*vpp)->v_uflag & VU_DIROP)) {
		KASSERT(error != 0);
		ungetnewvnode(*vpp);
		*vpp = NULL;
	}
	else {
		KASSERT(error == 0);
	}
	lfs_unset_dirop(fs, dvp, "create");

	vrele(dvp);
	return (error);
}
969
/*
 * Create a directory.
 *
 * The new inode is allocated by hand (rather than via ulfs_makeinode())
 * because its "." and ".." entries must be written out before the
 * directory itself is entered in its parent.  On success the new vnode
 * is returned unlocked in *ap->a_vpp.
 */
int
ulfs_mkdir(void *v)
{
	struct vop_mkdir_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct vnode *dvp = ap->a_dvp, *tvp;
	struct vattr *vap = ap->a_vap;
	struct componentname *cnp = ap->a_cnp;
	struct inode *ip, *dp = VTOI(dvp);
	struct buf *bp;
	struct lfs_dirtemplate dirtemplate;
	struct lfs_direct *newdir;
	int error, dmode;
	struct ulfsmount *ump = dp->i_ump;
	struct lfs *fs = ump->um_lfs;
	int dirblksiz = fs->um_dirblksiz;
	struct ulfs_lookup_results *ulr;

	fstrans_start(dvp->v_mount, FSTRANS_SHARED);

	/* XXX should handle this material another way */
	ulr = &dp->i_crap;
	ULFS_CHECK_CRAPCOUNTER(dp);

	/* The ".." entry will add a link to the parent; check the limit. */
	if ((nlink_t)dp->i_nlink >= LINK_MAX) {
		error = EMLINK;
		goto out;
	}
	dmode = vap->va_mode & ACCESSPERMS;
	dmode |= LFS_IFDIR;
	/*
	 * Must simulate part of ulfs_makeinode here to acquire the inode,
	 * but not have it entered in the parent directory. The entry is
	 * made later after writing "." and ".." entries.
	 */
	if ((error = lfs_valloc(dvp, dmode, cnp->cn_cred, ap->a_vpp)) != 0)
		goto out;

	tvp = *ap->a_vpp;
	ip = VTOI(tvp);

	/* Owner is the effective uid; group is inherited from the parent. */
	ip->i_uid = kauth_cred_geteuid(cnp->cn_cred);
	DIP_ASSIGN(ip, uid, ip->i_uid);
	ip->i_gid = dp->i_gid;
	DIP_ASSIGN(ip, gid, ip->i_gid);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	/*
	 * Charge the new inode to the quota; on failure undo the
	 * allocation.  Note this error path returns directly and
	 * therefore must do its own fstrans_done(), bypassing "out".
	 */
	if ((error = lfs_chkiq(ip, 1, cnp->cn_cred, 0))) {
		lfs_vfree(tvp, ip->i_number, dmode);
		fstrans_done(dvp->v_mount);
		vput(tvp);
		return (error);
	}
#endif
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	ip->i_mode = dmode;
	DIP_ASSIGN(ip, mode, dmode);
	tvp->v_type = VDIR;	/* Rest init'd in getnewvnode(). */
	ip->i_nlink = 2;	/* "." plus the entry in the parent */
	DIP_ASSIGN(ip, nlink, 2);
	if (cnp->cn_flags & ISWHITEOUT) {
		ip->i_flags |= UF_OPAQUE;
		DIP_ASSIGN(ip, flags, ip->i_flags);
	}

	/*
	 * Bump link count in parent directory to reflect work done below.
	 * Should be done before reference is created so cleanup is
	 * possible if we crash.
	 */
	dp->i_nlink++;
	DIP_ASSIGN(dp, nlink, dp->i_nlink);
	dp->i_flag |= IN_CHANGE;
	if ((error = lfs_update(dvp, NULL, NULL, UPDATE_DIROP)) != 0)
		goto bad;

	/*
	 * Initialize directory with "." and ".." from static template.
	 */
	dirtemplate = mastertemplate;
	dirtemplate.dotdot_reclen = dirblksiz - dirtemplate.dot_reclen;
	dirtemplate.dot_ino = ulfs_rw32(ip->i_number, ULFS_MPNEEDSWAP(fs));
	dirtemplate.dotdot_ino = ulfs_rw32(dp->i_number, ULFS_MPNEEDSWAP(fs));
	dirtemplate.dot_reclen = ulfs_rw16(dirtemplate.dot_reclen,
	    ULFS_MPNEEDSWAP(fs));
	dirtemplate.dotdot_reclen = ulfs_rw16(dirtemplate.dotdot_reclen,
	    ULFS_MPNEEDSWAP(fs));
	/*
	 * Old directory format (no maxsymlinklen): namlen and type share
	 * the same 16-bit field, so which byte holds namlen depends on
	 * the byte order of the filesystem being mounted.
	 */
	if (fs->um_maxsymlinklen <= 0) {
#if BYTE_ORDER == LITTLE_ENDIAN
		if (ULFS_MPNEEDSWAP(fs) == 0)
#else
		if (ULFS_MPNEEDSWAP(fs) != 0)
#endif
		{
			dirtemplate.dot_type = dirtemplate.dot_namlen;
			dirtemplate.dotdot_type = dirtemplate.dotdot_namlen;
			dirtemplate.dot_namlen = dirtemplate.dotdot_namlen = 0;
		} else
			dirtemplate.dot_type = dirtemplate.dotdot_type = 0;
	}
	if ((error = lfs_balloc(tvp, (off_t)0, dirblksiz, cnp->cn_cred,
	    B_CLRBUF, &bp)) != 0)
		goto bad;
	ip->i_size = dirblksiz;
	DIP_ASSIGN(ip, size, dirblksiz);
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	uvm_vnp_setsize(tvp, ip->i_size);
	memcpy((void *)bp->b_data, (void *)&dirtemplate, sizeof dirtemplate);

	/*
	 * Directory set up, now install it's entry in the parent directory.
	 * We must write out the buffer containing the new directory body
	 * before entering the new name in the parent.
	 */
	if ((error = VOP_BWRITE(bp->b_vp, bp)) != 0)
		goto bad;
	if ((error = lfs_update(tvp, NULL, NULL, UPDATE_DIROP)) != 0) {
		goto bad;
	}
	newdir = pool_cache_get(ulfs_direct_cache, PR_WAITOK);
	ulfs_makedirentry(ip, cnp, newdir);
	/*
	 * NOTE(review): bp was handed to VOP_BWRITE() above; confirm that
	 * ulfs_direnter() does not use its buffer argument after that.
	 */
	error = ulfs_direnter(dvp, ulr, tvp, newdir, cnp, bp);
	pool_cache_put(ulfs_direct_cache, newdir);
 bad:
	if (error == 0) {
		/* Success: notify watchers and return tvp unlocked. */
		VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
		VOP_UNLOCK(tvp);
	} else {
		/* Undo the parent's link count bump from above. */
		dp->i_nlink--;
		DIP_ASSIGN(dp, nlink, dp->i_nlink);
		dp->i_flag |= IN_CHANGE;
		/*
		 * No need to do an explicit lfs_truncate here, vrele will
		 * do this for us because we set the link count to 0.
		 */
		ip->i_nlink = 0;
		DIP_ASSIGN(ip, nlink, 0);
		ip->i_flag |= IN_CHANGE;
		/* If IN_ADIROP, account for it */
		lfs_unmark_vnode(tvp);
		vput(tvp);
	}
 out:
	fstrans_done(dvp->v_mount);
	return (error);
}
1119
/*
 * Create a directory (LFS wrapper around ulfs_mkdir()).
 *
 * Same dirop bracketing as lfs_create(): a vnode is preallocated, the
 * dirop count is raised, and on failure the vnode is returned to the
 * free pool.
 */
int
lfs_mkdir(void *v)
{
	struct vop_mkdir_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct lfs *fs;
	struct vnode *dvp, **vpp;
	int error;

	dvp = ap->a_dvp;
	vpp = ap->a_vpp;

	KASSERT(vpp != NULL);
	KASSERT(*vpp == NULL);

	fs = VFSTOULFS(dvp->v_mount)->um_lfs;
	ASSERT_NO_SEGLOCK(fs);
	if (fs->lfs_ronly) {
		return EROFS;
	}

	/*
	 * Get a new vnode *before* adjusting the dirop count, to
	 * avoid a deadlock in getnewvnode(), if we have a stacked
	 * filesystem mounted on top of us.
	 *
	 * NB: this means we have to destroy the new vnode on error.
	 */

	error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, NULL, vpp);
	if (error) {
		DLOG((DLOG_ALLOC, "lfs_mkdir: dvp %p error %d\n", dvp, error));
		return error;
	}
	error = lfs_set_dirop(dvp, NULL);
	if (error) {
		/* Nothing was created; give the preallocated vnode back. */
		ungetnewvnode(*vpp);
		*vpp = NULL;
		return error;
	}

	error = ulfs_mkdir(ap);

	UNMARK_VNODE(dvp);
	UNMARK_VNODE(*vpp);
	/*
	 * VU_DIROP still set indicates the directory was created and the
	 * vnode must be kept; otherwise ulfs_mkdir() failed and the
	 * preallocated vnode can be recycled.
	 * NOTE(review): assumes *vpp is non-NULL after a failed
	 * ulfs_mkdir() -- confirm against its error paths.
	 */
	if (!((*vpp)->v_uflag & VU_DIROP)) {
		KASSERT(error != 0);
		ungetnewvnode(*vpp);
		*vpp = NULL;
	}
	else {
		KASSERT(error == 0);
	}
	lfs_unset_dirop(fs, dvp, "mkdir");

	vrele(dvp);
	return (error);
}
1182
1183 int
1184 lfs_remove(void *v)
1185 {
1186 struct vop_remove_args /* {
1187 struct vnode *a_dvp;
1188 struct vnode *a_vp;
1189 struct componentname *a_cnp;
1190 } */ *ap = v;
1191 struct vnode *dvp, *vp;
1192 struct inode *ip;
1193 int error;
1194
1195 dvp = ap->a_dvp;
1196 vp = ap->a_vp;
1197 ip = VTOI(vp);
1198 if ((error = lfs_set_dirop(dvp, vp)) != 0) {
1199 if (dvp == vp)
1200 vrele(vp);
1201 else
1202 vput(vp);
1203 vput(dvp);
1204 return error;
1205 }
1206 error = ulfs_remove(ap);
1207 if (ip->i_nlink == 0)
1208 lfs_orphan(ip->i_lfs, ip->i_number);
1209
1210 UNMARK_VNODE(dvp);
1211 if (ap->a_vp) {
1212 UNMARK_VNODE(ap->a_vp);
1213 }
1214 lfs_unset_dirop(ip->i_lfs, dvp, "remove");
1215 vrele(dvp);
1216 if (ap->a_vp) {
1217 vrele(ap->a_vp);
1218 }
1219
1220 return (error);
1221 }
1222
1223 int
1224 lfs_rmdir(void *v)
1225 {
1226 struct vop_rmdir_args /* {
1227 struct vnodeop_desc *a_desc;
1228 struct vnode *a_dvp;
1229 struct vnode *a_vp;
1230 struct componentname *a_cnp;
1231 } */ *ap = v;
1232 struct vnode *vp;
1233 struct inode *ip;
1234 int error;
1235
1236 vp = ap->a_vp;
1237 ip = VTOI(vp);
1238 if ((error = lfs_set_dirop(ap->a_dvp, ap->a_vp)) != 0) {
1239 if (ap->a_dvp == vp)
1240 vrele(ap->a_dvp);
1241 else
1242 vput(ap->a_dvp);
1243 vput(vp);
1244 return error;
1245 }
1246 error = ulfs_rmdir(ap);
1247 if (ip->i_nlink == 0)
1248 lfs_orphan(ip->i_lfs, ip->i_number);
1249
1250 UNMARK_VNODE(ap->a_dvp);
1251 if (ap->a_vp) {
1252 UNMARK_VNODE(ap->a_vp);
1253 }
1254 lfs_unset_dirop(ip->i_lfs, ap->a_dvp, "rmdir");
1255 vrele(ap->a_dvp);
1256 if (ap->a_vp) {
1257 vrele(ap->a_vp);
1258 }
1259
1260 return (error);
1261 }
1262
/*
 * Create a hard link (LFS wrapper around ulfs_link()).
 *
 * Brackets the operation with lfs_set_dirop()/lfs_unset_dirop() so the
 * directory change is committed atomically by the segment writer.
 */
int
lfs_link(void *v)
{
	struct vop_link_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct lfs *fs;
	struct vnode *dvp;
	int error;

	dvp = ap->a_dvp;

	fs = VFSTOULFS(dvp->v_mount)->um_lfs;
	ASSERT_NO_SEGLOCK(fs);
	if (fs->lfs_ronly) {
		return EROFS;
	}

	error = lfs_set_dirop(dvp, NULL);
	if (error) {
		/*
		 * XXX dholland 20140515 this was here before but must
		 * be wrong.
		 */
		vput(dvp);

		return error;
	}

	error = ulfs_link(ap);

	UNMARK_VNODE(dvp);
	lfs_unset_dirop(fs, dvp, "link");
	vrele(dvp);

	return (error);
}
1302
1303 /* XXX hack to avoid calling ITIMES in getattr */
1304 int
1305 lfs_getattr(void *v)
1306 {
1307 struct vop_getattr_args /* {
1308 struct vnode *a_vp;
1309 struct vattr *a_vap;
1310 kauth_cred_t a_cred;
1311 } */ *ap = v;
1312 struct vnode *vp = ap->a_vp;
1313 struct inode *ip = VTOI(vp);
1314 struct vattr *vap = ap->a_vap;
1315 struct lfs *fs = ip->i_lfs;
1316
1317 fstrans_start(vp->v_mount, FSTRANS_SHARED);
1318 /*
1319 * Copy from inode table
1320 */
1321 vap->va_fsid = ip->i_dev;
1322 vap->va_fileid = ip->i_number;
1323 vap->va_mode = ip->i_mode & ~LFS_IFMT;
1324 vap->va_nlink = ip->i_nlink;
1325 vap->va_uid = ip->i_uid;
1326 vap->va_gid = ip->i_gid;
1327 vap->va_rdev = (dev_t)ip->i_ffs1_rdev;
1328 vap->va_size = vp->v_size;
1329 vap->va_atime.tv_sec = ip->i_ffs1_atime;
1330 vap->va_atime.tv_nsec = ip->i_ffs1_atimensec;
1331 vap->va_mtime.tv_sec = ip->i_ffs1_mtime;
1332 vap->va_mtime.tv_nsec = ip->i_ffs1_mtimensec;
1333 vap->va_ctime.tv_sec = ip->i_ffs1_ctime;
1334 vap->va_ctime.tv_nsec = ip->i_ffs1_ctimensec;
1335 vap->va_flags = ip->i_flags;
1336 vap->va_gen = ip->i_gen;
1337 /* this doesn't belong here */
1338 if (vp->v_type == VBLK)
1339 vap->va_blocksize = BLKDEV_IOSIZE;
1340 else if (vp->v_type == VCHR)
1341 vap->va_blocksize = MAXBSIZE;
1342 else
1343 vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize;
1344 vap->va_bytes = lfs_fsbtob(fs, (u_quad_t)ip->i_lfs_effnblks);
1345 vap->va_type = vp->v_type;
1346 vap->va_filerev = ip->i_modrev;
1347 fstrans_done(vp->v_mount);
1348 return (0);
1349 }
1350
1351 /*
1352 * Check to make sure the inode blocks won't choke the buffer
1353 * cache, then call ulfs_setattr as usual.
1354 */
1355 int
1356 lfs_setattr(void *v)
1357 {
1358 struct vop_setattr_args /* {
1359 struct vnode *a_vp;
1360 struct vattr *a_vap;
1361 kauth_cred_t a_cred;
1362 } */ *ap = v;
1363 struct vnode *vp = ap->a_vp;
1364
1365 lfs_check(vp, LFS_UNUSED_LBN, 0);
1366 return ulfs_setattr(v);
1367 }
1368
1369 /*
1370 * Release the block we hold on lfs_newseg wrapping. Called on file close,
1371 * or explicitly from LFCNWRAPGO. Called with the interlock held.
1372 */
1373 static int
1374 lfs_wrapgo(struct lfs *fs, struct inode *ip, int waitfor)
1375 {
1376 if (fs->lfs_stoplwp != curlwp)
1377 return EBUSY;
1378
1379 fs->lfs_stoplwp = NULL;
1380 cv_signal(&fs->lfs_stopcv);
1381
1382 KASSERT(fs->lfs_nowrap > 0);
1383 if (fs->lfs_nowrap <= 0) {
1384 return 0;
1385 }
1386
1387 if (--fs->lfs_nowrap == 0) {
1388 log(LOG_NOTICE, "%s: re-enabled log wrap\n", fs->lfs_fsmnt);
1389 wakeup(&fs->lfs_wrappass);
1390 lfs_wakeup_cleaner(fs);
1391 }
1392 if (waitfor) {
1393 mtsleep(&fs->lfs_nextseg, PCATCH | PUSER, "segment",
1394 0, &lfs_lock);
1395 }
1396
1397 return 0;
1398 }
1399
1400 /*
1401 * Close called.
1402 *
1403 * Update the times on the inode.
1404 */
1405 /* ARGSUSED */
1406 int
1407 lfs_close(void *v)
1408 {
1409 struct vop_close_args /* {
1410 struct vnode *a_vp;
1411 int a_fflag;
1412 kauth_cred_t a_cred;
1413 } */ *ap = v;
1414 struct vnode *vp = ap->a_vp;
1415 struct inode *ip = VTOI(vp);
1416 struct lfs *fs = ip->i_lfs;
1417
1418 if ((ip->i_number == ULFS_ROOTINO || ip->i_number == LFS_IFILE_INUM) &&
1419 fs->lfs_stoplwp == curlwp) {
1420 mutex_enter(&lfs_lock);
1421 log(LOG_NOTICE, "lfs_close: releasing log wrap control\n");
1422 lfs_wrapgo(fs, ip, 0);
1423 mutex_exit(&lfs_lock);
1424 }
1425
1426 if (vp == ip->i_lfs->lfs_ivnode &&
1427 vp->v_mount->mnt_iflag & IMNT_UNMOUNT)
1428 return 0;
1429
1430 fstrans_start(vp->v_mount, FSTRANS_SHARED);
1431 if (vp->v_usecount > 1 && vp != ip->i_lfs->lfs_ivnode) {
1432 LFS_ITIMES(ip, NULL, NULL, NULL);
1433 }
1434 fstrans_done(vp->v_mount);
1435 return (0);
1436 }
1437
1438 /*
1439 * Close wrapper for special devices.
1440 *
1441 * Update the times on the inode then do device close.
1442 */
1443 int
1444 lfsspec_close(void *v)
1445 {
1446 struct vop_close_args /* {
1447 struct vnode *a_vp;
1448 int a_fflag;
1449 kauth_cred_t a_cred;
1450 } */ *ap = v;
1451 struct vnode *vp;
1452 struct inode *ip;
1453
1454 vp = ap->a_vp;
1455 ip = VTOI(vp);
1456 if (vp->v_usecount > 1) {
1457 LFS_ITIMES(ip, NULL, NULL, NULL);
1458 }
1459 return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap));
1460 }
1461
1462 /*
1463 * Close wrapper for fifo's.
1464 *
1465 * Update the times on the inode then do device close.
1466 */
1467 int
1468 lfsfifo_close(void *v)
1469 {
1470 struct vop_close_args /* {
1471 struct vnode *a_vp;
1472 int a_fflag;
1473 kauth_cred_ a_cred;
1474 } */ *ap = v;
1475 struct vnode *vp;
1476 struct inode *ip;
1477
1478 vp = ap->a_vp;
1479 ip = VTOI(vp);
1480 if (ap->a_vp->v_usecount > 1) {
1481 LFS_ITIMES(ip, NULL, NULL, NULL);
1482 }
1483 return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap));
1484 }
1485
1486 /*
1487 * Reclaim an inode so that it can be used for other purposes.
1488 */
1489
1490 int
1491 lfs_reclaim(void *v)
1492 {
1493 struct vop_reclaim_args /* {
1494 struct vnode *a_vp;
1495 } */ *ap = v;
1496 struct vnode *vp = ap->a_vp;
1497 struct inode *ip = VTOI(vp);
1498 struct lfs *fs = ip->i_lfs;
1499 int error;
1500
1501 /*
1502 * The inode must be freed and updated before being removed
1503 * from its hash chain. Other threads trying to gain a hold
1504 * or lock on the inode will be stalled.
1505 */
1506 if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
1507 lfs_vfree(vp, ip->i_number, ip->i_omode);
1508
1509 mutex_enter(&lfs_lock);
1510 LFS_CLR_UINO(ip, IN_ALLMOD);
1511 mutex_exit(&lfs_lock);
1512 if ((error = ulfs_reclaim(vp)))
1513 return (error);
1514
1515 /*
1516 * Take us off the paging and/or dirop queues if we were on them.
1517 * We shouldn't be on them.
1518 */
1519 mutex_enter(&lfs_lock);
1520 if (ip->i_flags & IN_PAGING) {
1521 log(LOG_WARNING, "%s: reclaimed vnode is IN_PAGING\n",
1522 fs->lfs_fsmnt);
1523 ip->i_flags &= ~IN_PAGING;
1524 TAILQ_REMOVE(&fs->lfs_pchainhd, ip, i_lfs_pchain);
1525 }
1526 if (vp->v_uflag & VU_DIROP) {
1527 panic("reclaimed vnode is VU_DIROP");
1528 vp->v_uflag &= ~VU_DIROP;
1529 TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain);
1530 }
1531 mutex_exit(&lfs_lock);
1532
1533 pool_put(&lfs_dinode_pool, ip->i_din.ffs1_din);
1534 lfs_deregister_all(vp);
1535 pool_put(&lfs_inoext_pool, ip->inode_ext.lfs);
1536 ip->inode_ext.lfs = NULL;
1537 genfs_node_destroy(vp);
1538 pool_put(&lfs_inode_pool, vp->v_data);
1539 vp->v_data = NULL;
1540 return (0);
1541 }
1542
1543 /*
1544 * Read a block from a storage device.
1545 *
1546 * Calculate the logical to physical mapping if not done already,
1547 * then call the device strategy routine.
1548 *
1549 * In order to avoid reading blocks that are in the process of being
1550 * written by the cleaner---and hence are not mutexed by the normal
1551 * buffer cache / page cache mechanisms---check for collisions before
1552 * reading.
1553 *
1554 * We inline ulfs_strategy to make sure that the VOP_BMAP occurs *before*
1555 * the active cleaner test.
1556 *
1557 * XXX This code assumes that lfs_markv makes synchronous checkpoints.
1558 */
1559 int
1560 lfs_strategy(void *v)
1561 {
1562 struct vop_strategy_args /* {
1563 struct vnode *a_vp;
1564 struct buf *a_bp;
1565 } */ *ap = v;
1566 struct buf *bp;
1567 struct lfs *fs;
1568 struct vnode *vp;
1569 struct inode *ip;
1570 daddr_t tbn;
1571 #define MAXLOOP 25
1572 int i, sn, error, slept, loopcount;
1573
1574 bp = ap->a_bp;
1575 vp = ap->a_vp;
1576 ip = VTOI(vp);
1577 fs = ip->i_lfs;
1578
1579 /* lfs uses its strategy routine only for read */
1580 KASSERT(bp->b_flags & B_READ);
1581
1582 if (vp->v_type == VBLK || vp->v_type == VCHR)
1583 panic("lfs_strategy: spec");
1584 KASSERT(bp->b_bcount != 0);
1585 if (bp->b_blkno == bp->b_lblkno) {
1586 error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
1587 NULL);
1588 if (error) {
1589 bp->b_error = error;
1590 bp->b_resid = bp->b_bcount;
1591 biodone(bp);
1592 return (error);
1593 }
1594 if ((long)bp->b_blkno == -1) /* no valid data */
1595 clrbuf(bp);
1596 }
1597 if ((long)bp->b_blkno < 0) { /* block is not on disk */
1598 bp->b_resid = bp->b_bcount;
1599 biodone(bp);
1600 return (0);
1601 }
1602
1603 slept = 1;
1604 loopcount = 0;
1605 mutex_enter(&lfs_lock);
1606 while (slept && fs->lfs_seglock) {
1607 mutex_exit(&lfs_lock);
1608 /*
1609 * Look through list of intervals.
1610 * There will only be intervals to look through
1611 * if the cleaner holds the seglock.
1612 * Since the cleaner is synchronous, we can trust
1613 * the list of intervals to be current.
1614 */
1615 tbn = LFS_DBTOFSB(fs, bp->b_blkno);
1616 sn = lfs_dtosn(fs, tbn);
1617 slept = 0;
1618 for (i = 0; i < fs->lfs_cleanind; i++) {
1619 if (sn == lfs_dtosn(fs, fs->lfs_cleanint[i]) &&
1620 tbn >= fs->lfs_cleanint[i]) {
1621 DLOG((DLOG_CLEAN,
1622 "lfs_strategy: ino %d lbn %" PRId64
1623 " ind %d sn %d fsb %" PRIx32
1624 " given sn %d fsb %" PRIx64 "\n",
1625 ip->i_number, bp->b_lblkno, i,
1626 lfs_dtosn(fs, fs->lfs_cleanint[i]),
1627 fs->lfs_cleanint[i], sn, tbn));
1628 DLOG((DLOG_CLEAN,
1629 "lfs_strategy: sleeping on ino %d lbn %"
1630 PRId64 "\n", ip->i_number, bp->b_lblkno));
1631 mutex_enter(&lfs_lock);
1632 if (LFS_SEGLOCK_HELD(fs) && fs->lfs_iocount) {
1633 /*
1634 * Cleaner can't wait for itself.
1635 * Instead, wait for the blocks
1636 * to be written to disk.
1637 * XXX we need pribio in the test
1638 * XXX here.
1639 */
1640 mtsleep(&fs->lfs_iocount,
1641 (PRIBIO + 1) | PNORELOCK,
1642 "clean2", hz/10 + 1,
1643 &lfs_lock);
1644 slept = 1;
1645 ++loopcount;
1646 break;
1647 } else if (fs->lfs_seglock) {
1648 mtsleep(&fs->lfs_seglock,
1649 (PRIBIO + 1) | PNORELOCK,
1650 "clean1", 0,
1651 &lfs_lock);
1652 slept = 1;
1653 break;
1654 }
1655 mutex_exit(&lfs_lock);
1656 }
1657 }
1658 mutex_enter(&lfs_lock);
1659 if (loopcount > MAXLOOP) {
1660 printf("lfs_strategy: breaking out of clean2 loop\n");
1661 break;
1662 }
1663 }
1664 mutex_exit(&lfs_lock);
1665
1666 vp = ip->i_devvp;
1667 return VOP_STRATEGY(vp, bp);
1668 }
1669
1670 /*
1671 * Inline lfs_segwrite/lfs_writevnodes, but just for dirops.
1672 * Technically this is a checkpoint (the on-disk state is valid)
1673 * even though we are leaving out all the file data.
1674 */
1675 int
1676 lfs_flush_dirops(struct lfs *fs)
1677 {
1678 struct inode *ip, *nip;
1679 struct vnode *vp;
1680 extern int lfs_dostats;
1681 struct segment *sp;
1682 int flags = 0;
1683 int error = 0;
1684
1685 ASSERT_MAYBE_SEGLOCK(fs);
1686 KASSERT(fs->lfs_nadirop == 0);
1687
1688 if (fs->lfs_ronly)
1689 return EROFS;
1690
1691 mutex_enter(&lfs_lock);
1692 if (TAILQ_FIRST(&fs->lfs_dchainhd) == NULL) {
1693 mutex_exit(&lfs_lock);
1694 return 0;
1695 } else
1696 mutex_exit(&lfs_lock);
1697
1698 if (lfs_dostats)
1699 ++lfs_stats.flush_invoked;
1700
1701 lfs_imtime(fs);
1702 lfs_seglock(fs, flags);
1703 sp = fs->lfs_sp;
1704
1705 /*
1706 * lfs_writevnodes, optimized to get dirops out of the way.
1707 * Only write dirops, and don't flush files' pages, only
1708 * blocks from the directories.
1709 *
1710 * We don't need to vref these files because they are
1711 * dirops and so hold an extra reference until the
1712 * segunlock clears them of that status.
1713 *
1714 * We don't need to check for IN_ADIROP because we know that
1715 * no dirops are active.
1716 *
1717 */
1718 mutex_enter(&lfs_lock);
1719 for (ip = TAILQ_FIRST(&fs->lfs_dchainhd); ip != NULL; ip = nip) {
1720 nip = TAILQ_NEXT(ip, i_lfs_dchain);
1721 mutex_exit(&lfs_lock);
1722 vp = ITOV(ip);
1723 mutex_enter(vp->v_interlock);
1724
1725 KASSERT((ip->i_flag & IN_ADIROP) == 0);
1726 KASSERT(vp->v_uflag & VU_DIROP);
1727 KASSERT(vdead_check(vp, VDEAD_NOWAIT) == 0);
1728
1729 /*
1730 * All writes to directories come from dirops; all
1731 * writes to files' direct blocks go through the page
1732 * cache, which we're not touching. Reads to files
1733 * and/or directories will not be affected by writing
1734 * directory blocks inodes and file inodes. So we don't
1735 * really need to lock.
1736 */
1737 if (vdead_check(vp, VDEAD_NOWAIT) != 0) {
1738 mutex_exit(vp->v_interlock);
1739 mutex_enter(&lfs_lock);
1740 continue;
1741 }
1742 mutex_exit(vp->v_interlock);
1743 /* XXX see below
1744 * waslocked = VOP_ISLOCKED(vp);
1745 */
1746 if (vp->v_type != VREG &&
1747 ((ip->i_flag & IN_ALLMOD) || !VPISEMPTY(vp))) {
1748 error = lfs_writefile(fs, sp, vp);
1749 if (!VPISEMPTY(vp) && !WRITEINPROG(vp) &&
1750 !(ip->i_flag & IN_ALLMOD)) {
1751 mutex_enter(&lfs_lock);
1752 LFS_SET_UINO(ip, IN_MODIFIED);
1753 mutex_exit(&lfs_lock);
1754 }
1755 if (error && (sp->seg_flags & SEGM_SINGLE)) {
1756 mutex_enter(&lfs_lock);
1757 error = EAGAIN;
1758 break;
1759 }
1760 }
1761 KDASSERT(ip->i_number != LFS_IFILE_INUM);
1762 error = lfs_writeinode(fs, sp, ip);
1763 mutex_enter(&lfs_lock);
1764 if (error && (sp->seg_flags & SEGM_SINGLE)) {
1765 error = EAGAIN;
1766 break;
1767 }
1768
1769 /*
1770 * We might need to update these inodes again,
1771 * for example, if they have data blocks to write.
1772 * Make sure that after this flush, they are still
1773 * marked IN_MODIFIED so that we don't forget to
1774 * write them.
1775 */
1776 /* XXX only for non-directories? --KS */
1777 LFS_SET_UINO(ip, IN_MODIFIED);
1778 }
1779 mutex_exit(&lfs_lock);
1780 /* We've written all the dirops there are */
1781 ((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
1782 lfs_finalize_fs_seguse(fs);
1783 (void) lfs_writeseg(fs, sp);
1784 lfs_segunlock(fs);
1785
1786 return error;
1787 }
1788
1789 /*
1790 * Flush all vnodes for which the pagedaemon has requested pageouts.
1791 * Skip over any files that are marked VU_DIROP (since lfs_flush_dirop()
1792 * has just run, this would be an error). If we have to skip a vnode
1793 * for any reason, just skip it; if we have to wait for the cleaner,
1794 * abort. The writer daemon will call us again later.
1795 */
1796 int
1797 lfs_flush_pchain(struct lfs *fs)
1798 {
1799 struct inode *ip, *nip;
1800 struct vnode *vp;
1801 extern int lfs_dostats;
1802 struct segment *sp;
1803 int error, error2;
1804
1805 ASSERT_NO_SEGLOCK(fs);
1806
1807 if (fs->lfs_ronly)
1808 return EROFS;
1809
1810 mutex_enter(&lfs_lock);
1811 if (TAILQ_FIRST(&fs->lfs_pchainhd) == NULL) {
1812 mutex_exit(&lfs_lock);
1813 return 0;
1814 } else
1815 mutex_exit(&lfs_lock);
1816
1817 /* Get dirops out of the way */
1818 if ((error = lfs_flush_dirops(fs)) != 0)
1819 return error;
1820
1821 if (lfs_dostats)
1822 ++lfs_stats.flush_invoked;
1823
1824 /*
1825 * Inline lfs_segwrite/lfs_writevnodes, but just for pageouts.
1826 */
1827 lfs_imtime(fs);
1828 lfs_seglock(fs, 0);
1829 sp = fs->lfs_sp;
1830
1831 /*
1832 * lfs_writevnodes, optimized to clear pageout requests.
1833 * Only write non-dirop files that are in the pageout queue.
1834 * We're very conservative about what we write; we want to be
1835 * fast and async.
1836 */
1837 mutex_enter(&lfs_lock);
1838 top:
1839 for (ip = TAILQ_FIRST(&fs->lfs_pchainhd); ip != NULL; ip = nip) {
1840 nip = TAILQ_NEXT(ip, i_lfs_pchain);
1841 vp = ITOV(ip);
1842
1843 if (!(ip->i_flags & IN_PAGING))
1844 goto top;
1845
1846 mutex_enter(vp->v_interlock);
1847 if (vdead_check(vp, VDEAD_NOWAIT) != 0 ||
1848 (vp->v_uflag & VU_DIROP) != 0) {
1849 mutex_exit(vp->v_interlock);
1850 continue;
1851 }
1852 if (vp->v_type != VREG) {
1853 mutex_exit(vp->v_interlock);
1854 continue;
1855 }
1856 if (lfs_vref(vp))
1857 continue;
1858 mutex_exit(&lfs_lock);
1859
1860 if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_RETRY) != 0) {
1861 lfs_vunref(vp);
1862 mutex_enter(&lfs_lock);
1863 continue;
1864 }
1865
1866 error = lfs_writefile(fs, sp, vp);
1867 if (!VPISEMPTY(vp) && !WRITEINPROG(vp) &&
1868 !(ip->i_flag & IN_ALLMOD)) {
1869 mutex_enter(&lfs_lock);
1870 LFS_SET_UINO(ip, IN_MODIFIED);
1871 mutex_exit(&lfs_lock);
1872 }
1873 KDASSERT(ip->i_number != LFS_IFILE_INUM);
1874 error2 = lfs_writeinode(fs, sp, ip);
1875
1876 VOP_UNLOCK(vp);
1877 lfs_vunref(vp);
1878
1879 if (error == EAGAIN || error2 == EAGAIN) {
1880 lfs_writeseg(fs, sp);
1881 mutex_enter(&lfs_lock);
1882 break;
1883 }
1884 mutex_enter(&lfs_lock);
1885 }
1886 mutex_exit(&lfs_lock);
1887 (void) lfs_writeseg(fs, sp);
1888 lfs_segunlock(fs);
1889
1890 return 0;
1891 }
1892
1893 /*
1894 * Provide a fcntl interface to sys_lfs_{segwait,bmapv,markv}.
1895 */
1896 int
1897 lfs_fcntl(void *v)
1898 {
1899 struct vop_fcntl_args /* {
1900 struct vnode *a_vp;
1901 u_int a_command;
1902 void * a_data;
1903 int a_fflag;
1904 kauth_cred_t a_cred;
1905 } */ *ap = v;
1906 struct timeval tv;
1907 struct timeval *tvp;
1908 BLOCK_INFO *blkiov;
1909 CLEANERINFO *cip;
1910 SEGUSE *sup;
1911 int blkcnt, error;
1912 size_t fh_size;
1913 struct lfs_fcntl_markv blkvp;
1914 struct lwp *l;
1915 fsid_t *fsidp;
1916 struct lfs *fs;
1917 struct buf *bp;
1918 fhandle_t *fhp;
1919 daddr_t off;
1920 int oclean;
1921
1922 /* Only respect LFS fcntls on fs root or Ifile */
1923 if (VTOI(ap->a_vp)->i_number != ULFS_ROOTINO &&
1924 VTOI(ap->a_vp)->i_number != LFS_IFILE_INUM) {
1925 return ulfs_fcntl(v);
1926 }
1927
1928 /* Avoid locking a draining lock */
1929 if (ap->a_vp->v_mount->mnt_iflag & IMNT_UNMOUNT) {
1930 return ESHUTDOWN;
1931 }
1932
1933 /* LFS control and monitoring fcntls are available only to root */
1934 l = curlwp;
1935 if (((ap->a_command & 0xff00) >> 8) == 'L' &&
1936 (error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
1937 KAUTH_REQ_SYSTEM_LFS_FCNTL, NULL, NULL, NULL)) != 0)
1938 return (error);
1939
1940 fs = VTOI(ap->a_vp)->i_lfs;
1941 fsidp = &ap->a_vp->v_mount->mnt_stat.f_fsidx;
1942
1943 error = 0;
1944 switch ((int)ap->a_command) {
1945 case LFCNSEGWAITALL_COMPAT_50:
1946 case LFCNSEGWAITALL_COMPAT:
1947 fsidp = NULL;
1948 /* FALLSTHROUGH */
1949 case LFCNSEGWAIT_COMPAT_50:
1950 case LFCNSEGWAIT_COMPAT:
1951 {
1952 struct timeval50 *tvp50
1953 = (struct timeval50 *)ap->a_data;
1954 timeval50_to_timeval(tvp50, &tv);
1955 tvp = &tv;
1956 }
1957 goto segwait_common;
1958 case LFCNSEGWAITALL:
1959 fsidp = NULL;
1960 /* FALLSTHROUGH */
1961 case LFCNSEGWAIT:
1962 tvp = (struct timeval *)ap->a_data;
1963 segwait_common:
1964 mutex_enter(&lfs_lock);
1965 ++fs->lfs_sleepers;
1966 mutex_exit(&lfs_lock);
1967
1968 error = lfs_segwait(fsidp, tvp);
1969
1970 mutex_enter(&lfs_lock);
1971 if (--fs->lfs_sleepers == 0)
1972 wakeup(&fs->lfs_sleepers);
1973 mutex_exit(&lfs_lock);
1974 return error;
1975
1976 case LFCNBMAPV:
1977 case LFCNMARKV:
1978 blkvp = *(struct lfs_fcntl_markv *)ap->a_data;
1979
1980 blkcnt = blkvp.blkcnt;
1981 if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
1982 return (EINVAL);
1983 blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
1984 if ((error = copyin(blkvp.blkiov, blkiov,
1985 blkcnt * sizeof(BLOCK_INFO))) != 0) {
1986 lfs_free(fs, blkiov, LFS_NB_BLKIOV);
1987 return error;
1988 }
1989
1990 mutex_enter(&lfs_lock);
1991 ++fs->lfs_sleepers;
1992 mutex_exit(&lfs_lock);
1993 if (ap->a_command == LFCNBMAPV)
1994 error = lfs_bmapv(l->l_proc, fsidp, blkiov, blkcnt);
1995 else /* LFCNMARKV */
1996 error = lfs_markv(l->l_proc, fsidp, blkiov, blkcnt);
1997 if (error == 0)
1998 error = copyout(blkiov, blkvp.blkiov,
1999 blkcnt * sizeof(BLOCK_INFO));
2000 mutex_enter(&lfs_lock);
2001 if (--fs->lfs_sleepers == 0)
2002 wakeup(&fs->lfs_sleepers);
2003 mutex_exit(&lfs_lock);
2004 lfs_free(fs, blkiov, LFS_NB_BLKIOV);
2005 return error;
2006
2007 case LFCNRECLAIM:
2008 /*
2009 * Flush dirops and write Ifile, allowing empty segments
2010 * to be immediately reclaimed.
2011 */
2012 lfs_writer_enter(fs, "pndirop");
2013 off = fs->lfs_offset;
2014 lfs_seglock(fs, SEGM_FORCE_CKP | SEGM_CKP);
2015 lfs_flush_dirops(fs);
2016 LFS_CLEANERINFO(cip, fs, bp);
2017 oclean = cip->clean;
2018 LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);
2019 lfs_segwrite(ap->a_vp->v_mount, SEGM_FORCE_CKP);
2020 fs->lfs_sp->seg_flags |= SEGM_PROT;
2021 lfs_segunlock(fs);
2022 lfs_writer_leave(fs);
2023
2024 #ifdef DEBUG
2025 LFS_CLEANERINFO(cip, fs, bp);
2026 DLOG((DLOG_CLEAN, "lfs_fcntl: reclaim wrote %" PRId64
2027 " blocks, cleaned %" PRId32 " segments (activesb %d)\n",
2028 fs->lfs_offset - off, cip->clean - oclean,
2029 fs->lfs_activesb));
2030 LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
2031 #else
2032 __USE(oclean);
2033 __USE(off);
2034 #endif
2035
2036 return 0;
2037
2038 case LFCNIFILEFH_COMPAT:
2039 /* Return the filehandle of the Ifile */
2040 if ((error = kauth_authorize_system(l->l_cred,
2041 KAUTH_SYSTEM_FILEHANDLE, 0, NULL, NULL, NULL)) != 0)
2042 return (error);
2043 fhp = (struct fhandle *)ap->a_data;
2044 fhp->fh_fsid = *fsidp;
2045 fh_size = 16; /* former VFS_MAXFIDSIZ */
2046 return lfs_vptofh(fs->lfs_ivnode, &(fhp->fh_fid), &fh_size);
2047
2048 case LFCNIFILEFH_COMPAT2:
2049 case LFCNIFILEFH:
2050 /* Return the filehandle of the Ifile */
2051 fhp = (struct fhandle *)ap->a_data;
2052 fhp->fh_fsid = *fsidp;
2053 fh_size = sizeof(struct lfs_fhandle) -
2054 offsetof(fhandle_t, fh_fid);
2055 return lfs_vptofh(fs->lfs_ivnode, &(fhp->fh_fid), &fh_size);
2056
2057 case LFCNREWIND:
2058 /* Move lfs_offset to the lowest-numbered segment */
2059 return lfs_rewind(fs, *(int *)ap->a_data);
2060
2061 case LFCNINVAL:
2062 /* Mark a segment SEGUSE_INVAL */
2063 LFS_SEGENTRY(sup, fs, *(int *)ap->a_data, bp);
2064 if (sup->su_nbytes > 0) {
2065 brelse(bp, 0);
2066 lfs_unset_inval_all(fs);
2067 return EBUSY;
2068 }
2069 sup->su_flags |= SEGUSE_INVAL;
2070 VOP_BWRITE(bp->b_vp, bp);
2071 return 0;
2072
2073 case LFCNRESIZE:
2074 /* Resize the filesystem */
2075 return lfs_resize_fs(fs, *(int *)ap->a_data);
2076
2077 case LFCNWRAPSTOP:
2078 case LFCNWRAPSTOP_COMPAT:
2079 /*
2080 * Hold lfs_newseg at segment 0; if requested, sleep until
2081 * the filesystem wraps around. To support external agents
2082 * (dump, fsck-based regression test) that need to look at
2083 * a snapshot of the filesystem, without necessarily
2084 * requiring that all fs activity stops.
2085 */
2086 if (fs->lfs_stoplwp == curlwp)
2087 return EALREADY;
2088
2089 mutex_enter(&lfs_lock);
2090 while (fs->lfs_stoplwp != NULL)
2091 cv_wait(&fs->lfs_stopcv, &lfs_lock);
2092 fs->lfs_stoplwp = curlwp;
2093 if (fs->lfs_nowrap == 0)
2094 log(LOG_NOTICE, "%s: disabled log wrap\n", fs->lfs_fsmnt);
2095 ++fs->lfs_nowrap;
2096 if (*(int *)ap->a_data == 1
2097 || ap->a_command == LFCNWRAPSTOP_COMPAT) {
2098 log(LOG_NOTICE, "LFCNSTOPWRAP waiting for log wrap\n");
2099 error = mtsleep(&fs->lfs_nowrap, PCATCH | PUSER,
2100 "segwrap", 0, &lfs_lock);
2101 log(LOG_NOTICE, "LFCNSTOPWRAP done waiting\n");
2102 if (error) {
2103 lfs_wrapgo(fs, VTOI(ap->a_vp), 0);
2104 }
2105 }
2106 mutex_exit(&lfs_lock);
2107 return 0;
2108
2109 case LFCNWRAPGO:
2110 case LFCNWRAPGO_COMPAT:
2111 /*
2112 * Having done its work, the agent wakes up the writer.
2113 * If the argument is 1, it sleeps until a new segment
2114 * is selected.
2115 */
2116 mutex_enter(&lfs_lock);
2117 error = lfs_wrapgo(fs, VTOI(ap->a_vp),
2118 ap->a_command == LFCNWRAPGO_COMPAT ? 1 :
2119 *((int *)ap->a_data));
2120 mutex_exit(&lfs_lock);
2121 return error;
2122
2123 case LFCNWRAPPASS:
2124 if ((VTOI(ap->a_vp)->i_lfs_iflags & LFSI_WRAPWAIT))
2125 return EALREADY;
2126 mutex_enter(&lfs_lock);
2127 if (fs->lfs_stoplwp != curlwp) {
2128 mutex_exit(&lfs_lock);
2129 return EALREADY;
2130 }
2131 if (fs->lfs_nowrap == 0) {
2132 mutex_exit(&lfs_lock);
2133 return EBUSY;
2134 }
2135 fs->lfs_wrappass = 1;
2136 wakeup(&fs->lfs_wrappass);
2137 /* Wait for the log to wrap, if asked */
2138 if (*(int *)ap->a_data) {
2139 mutex_enter(ap->a_vp->v_interlock);
2140 if (lfs_vref(ap->a_vp) != 0)
2141 panic("LFCNWRAPPASS: lfs_vref failed");
2142 VTOI(ap->a_vp)->i_lfs_iflags |= LFSI_WRAPWAIT;
2143 log(LOG_NOTICE, "LFCNPASS waiting for log wrap\n");
2144 error = mtsleep(&fs->lfs_nowrap, PCATCH | PUSER,
2145 "segwrap", 0, &lfs_lock);
2146 log(LOG_NOTICE, "LFCNPASS done waiting\n");
2147 VTOI(ap->a_vp)->i_lfs_iflags &= ~LFSI_WRAPWAIT;
2148 lfs_vunref(ap->a_vp);
2149 }
2150 mutex_exit(&lfs_lock);
2151 return error;
2152
2153 case LFCNWRAPSTATUS:
2154 mutex_enter(&lfs_lock);
2155 *(int *)ap->a_data = fs->lfs_wrapstatus;
2156 mutex_exit(&lfs_lock);
2157 return 0;
2158
2159 default:
2160 return ulfs_fcntl(v);
2161 }
2162 return 0;
2163 }
2164
2165 /*
2166 * Return the last logical file offset that should be written for this file
2167 * if we're doing a write that ends at "size". If writing, we need to know
2168 * about sizes on disk, i.e. fragments if there are any; if reading, we need
2169 * to know about entire blocks.
2170 */
2171 void
2172 lfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
2173 {
2174 struct inode *ip = VTOI(vp);
2175 struct lfs *fs = ip->i_lfs;
2176 daddr_t olbn, nlbn;
2177
2178 olbn = lfs_lblkno(fs, ip->i_size);
2179 nlbn = lfs_lblkno(fs, size);
2180 if (!(flags & GOP_SIZE_MEM) && nlbn < ULFS_NDADDR && olbn <= nlbn) {
2181 *eobp = lfs_fragroundup(fs, size);
2182 } else {
2183 *eobp = lfs_blkroundup(fs, size);
2184 }
2185 }
2186
#ifdef DEBUG
void lfs_dump_vop(void *);

/*
 * Debug helper: print the vnode (when DDB is configured) and its
 * on-disk dinode, given a putpages-style argument structure.
 */
void
lfs_dump_vop(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

#ifdef DDB
	vfs_vnode_print(vp, 0, printf);
#endif
	lfs_dump_dinode(VTOI(vp)->i_din.ffs1_din);
}
#endif
2206
2207 int
2208 lfs_mmap(void *v)
2209 {
2210 struct vop_mmap_args /* {
2211 const struct vnodeop_desc *a_desc;
2212 struct vnode *a_vp;
2213 vm_prot_t a_prot;
2214 kauth_cred_t a_cred;
2215 } */ *ap = v;
2216
2217 if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM)
2218 return EOPNOTSUPP;
2219 return ulfs_mmap(v);
2220 }
2221
2222 static int
2223 lfs_openextattr(void *v)
2224 {
2225 struct vop_openextattr_args /* {
2226 struct vnode *a_vp;
2227 kauth_cred_t a_cred;
2228 struct proc *a_p;
2229 } */ *ap = v;
2230 struct inode *ip = VTOI(ap->a_vp);
2231 struct ulfsmount *ump = ip->i_ump;
2232 //struct lfs *fs = ip->i_lfs;
2233
2234 /* Not supported for ULFS1 file systems. */
2235 if (ump->um_fstype == ULFS1)
2236 return (EOPNOTSUPP);
2237
2238 /* XXX Not implemented for ULFS2 file systems. */
2239 return (EOPNOTSUPP);
2240 }
2241
2242 static int
2243 lfs_closeextattr(void *v)
2244 {
2245 struct vop_closeextattr_args /* {
2246 struct vnode *a_vp;
2247 int a_commit;
2248 kauth_cred_t a_cred;
2249 struct proc *a_p;
2250 } */ *ap = v;
2251 struct inode *ip = VTOI(ap->a_vp);
2252 struct ulfsmount *ump = ip->i_ump;
2253 //struct lfs *fs = ip->i_lfs;
2254
2255 /* Not supported for ULFS1 file systems. */
2256 if (ump->um_fstype == ULFS1)
2257 return (EOPNOTSUPP);
2258
2259 /* XXX Not implemented for ULFS2 file systems. */
2260 return (EOPNOTSUPP);
2261 }
2262
2263 static int
2264 lfs_getextattr(void *v)
2265 {
2266 struct vop_getextattr_args /* {
2267 struct vnode *a_vp;
2268 int a_attrnamespace;
2269 const char *a_name;
2270 struct uio *a_uio;
2271 size_t *a_size;
2272 kauth_cred_t a_cred;
2273 struct proc *a_p;
2274 } */ *ap = v;
2275 struct vnode *vp = ap->a_vp;
2276 struct inode *ip = VTOI(vp);
2277 struct ulfsmount *ump = ip->i_ump;
2278 //struct lfs *fs = ip->i_lfs;
2279 int error;
2280
2281 if (ump->um_fstype == ULFS1) {
2282 #ifdef LFS_EXTATTR
2283 fstrans_start(vp->v_mount, FSTRANS_SHARED);
2284 error = ulfs_getextattr(ap);
2285 fstrans_done(vp->v_mount);
2286 #else
2287 error = EOPNOTSUPP;
2288 #endif
2289 return error;
2290 }
2291
2292 /* XXX Not implemented for ULFS2 file systems. */
2293 return (EOPNOTSUPP);
2294 }
2295
2296 static int
2297 lfs_setextattr(void *v)
2298 {
2299 struct vop_setextattr_args /* {
2300 struct vnode *a_vp;
2301 int a_attrnamespace;
2302 const char *a_name;
2303 struct uio *a_uio;
2304 kauth_cred_t a_cred;
2305 struct proc *a_p;
2306 } */ *ap = v;
2307 struct vnode *vp = ap->a_vp;
2308 struct inode *ip = VTOI(vp);
2309 struct ulfsmount *ump = ip->i_ump;
2310 //struct lfs *fs = ip->i_lfs;
2311 int error;
2312
2313 if (ump->um_fstype == ULFS1) {
2314 #ifdef LFS_EXTATTR
2315 fstrans_start(vp->v_mount, FSTRANS_SHARED);
2316 error = ulfs_setextattr(ap);
2317 fstrans_done(vp->v_mount);
2318 #else
2319 error = EOPNOTSUPP;
2320 #endif
2321 return error;
2322 }
2323
2324 /* XXX Not implemented for ULFS2 file systems. */
2325 return (EOPNOTSUPP);
2326 }
2327
2328 static int
2329 lfs_listextattr(void *v)
2330 {
2331 struct vop_listextattr_args /* {
2332 struct vnode *a_vp;
2333 int a_attrnamespace;
2334 struct uio *a_uio;
2335 size_t *a_size;
2336 kauth_cred_t a_cred;
2337 struct proc *a_p;
2338 } */ *ap = v;
2339 struct vnode *vp = ap->a_vp;
2340 struct inode *ip = VTOI(vp);
2341 struct ulfsmount *ump = ip->i_ump;
2342 //struct lfs *fs = ip->i_lfs;
2343 int error;
2344
2345 if (ump->um_fstype == ULFS1) {
2346 #ifdef LFS_EXTATTR
2347 fstrans_start(vp->v_mount, FSTRANS_SHARED);
2348 error = ulfs_listextattr(ap);
2349 fstrans_done(vp->v_mount);
2350 #else
2351 error = EOPNOTSUPP;
2352 #endif
2353 return error;
2354 }
2355
2356 /* XXX Not implemented for ULFS2 file systems. */
2357 return (EOPNOTSUPP);
2358 }
2359
2360 static int
2361 lfs_deleteextattr(void *v)
2362 {
2363 struct vop_deleteextattr_args /* {
2364 struct vnode *a_vp;
2365 int a_attrnamespace;
2366 kauth_cred_t a_cred;
2367 struct proc *a_p;
2368 } */ *ap = v;
2369 struct vnode *vp = ap->a_vp;
2370 struct inode *ip = VTOI(vp);
2371 struct ulfsmount *ump = ip->i_ump;
2372 //struct fs *fs = ip->i_lfs;
2373 int error;
2374
2375 if (ump->um_fstype == ULFS1) {
2376 #ifdef LFS_EXTATTR
2377 fstrans_start(vp->v_mount, FSTRANS_SHARED);
2378 error = ulfs_deleteextattr(ap);
2379 fstrans_done(vp->v_mount);
2380 #else
2381 error = EOPNOTSUPP;
2382 #endif
2383 return error;
2384 }
2385
2386 /* XXX Not implemented for ULFS2 file systems. */
2387 return (EOPNOTSUPP);
2388 }
2389