/src/sys/ufs/ext2fs/ext2fs_bmap.c
    132: ext4_bmapext(struct vnode *vp, int32_t bn, int64_t *bnp, int *runp, int *runb)
    146: if (runp != NULL)
    147: *runp = 0;
    155: if (runp != NULL)
    156: *runp = path.ep_sparse_ext.e_len -
    172: if (runp != NULL)
    173: *runp = ep->e_len - (lbn - ep->e_blk) - 1;
    204: int *nump, int *runp)
    223: if (runp) {
    230: *runp = 0
    [all...]
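The ext4 extent path above derives the run directly from the matched extent: line 173 reports how many blocks of the extent remain after the queried logical block. A minimal standalone sketch of that arithmetic, where extent_stub and extent_run_after are illustrative stand-ins rather than the kernel's ext4_extent types:

/*
 * Simplified sketch (not the kernel code): given an extent that starts at
 * logical block e_blk, spans e_len blocks, and is known to contain lbn,
 * report how many further contiguous blocks follow lbn inside the extent.
 * Field names mirror the fields seen above; the struct is a stand-in.
 */
#include <stdint.h>
#include <stdio.h>

struct extent_stub {
    uint32_t e_blk;    /* first logical block covered by the extent */
    uint16_t e_len;    /* number of blocks in the extent */
};

static int
extent_run_after(const struct extent_stub *ep, uint32_t lbn)
{
    uint32_t off = lbn - ep->e_blk;    /* position of lbn inside the extent */

    /* blocks remaining in the extent after lbn itself */
    return (int)(ep->e_len - off - 1);
}

int
main(void)
{
    struct extent_stub e = { .e_blk = 100, .e_len = 8 };

    /* lbn 102 sits in [100..107], so 5 contiguous blocks follow it */
    printf("%d\n", extent_run_after(&e, 102));
    return 0;
}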
/src/sys/ufs/lfs/ulfs_bmap.c
    136: int *nump, int *runp, ulfs_issequential_callback_t is_sequential)
    155: if (runp) {
    162: *runp = 0;
    195: } else if (runp) {
    197: for (++bn; bn < ULFS_NDADDR && *runp < maxrun &&
    203: ++bn, ++*runp);
    205: for (++bn; bn < ULFS_NDADDR && *runp < maxrun &&
    211: ++bn, ++*runp);
    290: if (num == 1 && daddr && runp) {
    292: bn < MNINDIR(fs) && *runp < maxrun &
    [all...]
/src/sys/ufs/ufs/ufs_bmap.c
    116: int *nump, int *runp, ufs_issequential_callback_t is_sequential)
    133: if (runp) {
    140: *runp = 0;
    173: } else if (runp) {
    175: for (++bn; bn < UFS_NDADDR && *runp < maxrun &&
    181: ++bn, ++*runp);
    183: for (++bn; bn < UFS_NDADDR && *runp < maxrun &&
    189: ++bn, ++*runp);
    274: if (num == 1 && daddr && runp) {
    276: bn < MNINDIR(ump) && *runp < maxrun &
    [all...]
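ufs_bmaparray() and its ulfs/ext2fs siblings grow the run over the direct blocks with the loop matched at lines 175-189: keep advancing while a caller-supplied is_sequential callback says the next on-disk address follows the previous one, bounded by maxrun. A self-contained sketch of that idiom, where count_run, adjacent, and NDADDR_STUB are hypothetical stand-ins for the inode's direct-block array and the fs-specific callback:

/*
 * Sketch of the run-counting idiom from ufs_bmaparray(): starting at the
 * queried direct block, extend the run while the predicate says the next
 * on-disk address is physically sequential, and stop at maxrun.
 */
#include <stdbool.h>
#include <stdio.h>

typedef long long daddr_stub_t;
#define NDADDR_STUB 12    /* direct blocks per inode in this sketch */

typedef bool (*is_sequential_cb)(daddr_stub_t, daddr_stub_t);

static bool
adjacent(daddr_stub_t a, daddr_stub_t b)
{
    return b == a + 1;    /* real callbacks use the fs-specific block step */
}

static int
count_run(const daddr_stub_t db[NDADDR_STUB], int bn, int maxrun,
    is_sequential_cb is_sequential)
{
    int run = 0;

    for (++bn; bn < NDADDR_STUB && run < maxrun &&
        is_sequential(db[bn - 1], db[bn]);
        ++bn, ++run)
        continue;
    return run;
}

int
main(void)
{
    daddr_stub_t db[NDADDR_STUB] =
        { 50, 51, 52, 53, 90, 91, 0, 0, 0, 0, 0, 0 };

    /* from block 0, three more blocks (51, 52, 53) are contiguous */
    printf("%d\n", count_run(db, 0, 16, adjacent));
    return 0;
}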
/src/usr.bin/netstat/unix.c
    107: struct unpcb unp, runp;                    (local in function unixdomainpr)
    121: kread((u_long)unp.unp_conn, (char *)&runp, sizeof (runp)) == 0 &&
    122: runp.unp_addr &&
    123: kread((u_long)runp.unp_addr, (char *)&rsun, sizeof (rsun)) == 0 &&
|
/src/sys/rump/librump/rumpvfs/rumpvnode_if.c
    572: int *runp)
    577: error = VOP_BMAP(vp, bn, vpp, bnp, runp);
|
/src/sys/fs/nilfs/nilfs_vnops.c
    314: * If runp is not NULL, the number of contiguous blocks __starting from the
    315: * next block after the queried block__ will be returned in runp.
    332: int *runp = ap->a_runp;                    (local in function nilfs_trivial_bmap)
    368: if (runp)
    369: *runp = run;
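The nilfs comment at lines 314-315 states the contract every bmap implementation here follows: *runp is the number of contiguous blocks after the queried one, not counting it. A hedged caller-side sketch of how that hint would be consumed through VOP_BMAP(9), written against the kernel vnode interface rather than as a standalone program; map_and_size_cluster is a hypothetical helper, and the hole check assumes the usual *bnp == -1 convention:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>

/*
 * Sketch: translate logical block bn and size a clustered transfer from the
 * run hint. A run of N means logical blocks bn .. bn + N translate to
 * consecutive device blocks, so N + 1 blocks can go in one transfer.
 */
int
map_and_size_cluster(struct vnode *vp, daddr_t bn, struct vnode **devvpp,
    daddr_t *daddrp, int *nblksp)
{
    int run = 0;
    int error;

    error = VOP_BMAP(vp, bn, devvpp, daddrp, &run);
    if (error)
        return error;
    if (*daddrp == (daddr_t)-1) {
        /* hole: nothing on disk to cluster */
        *nblksp = 0;
        return 0;
    }
    /* the queried block itself plus `run` contiguous followers */
    *nblksp = 1 + run;
    return 0;
}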
|
/src/sys/fs/udf/udf_vnops.c
    443: int *runp = ap->a_runp;                    (local in function udf_trivial_bmap)
    459: if (runp)
    460: *runp = MAXPHYS / lb_size;      /* or with -1 ? */
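udf_trivial_bmap() takes a conservative shortcut: with a 1:1 block translation it simply advertises MAXPHYS / lb_size as the run (whether that should be one less is the source's own "/* or with -1 ? */" question). A simplified implementer-side sketch of that pattern; bmap_args_stub, trivial_bmap, and MAXPHYS_STUB are stand-ins, not the kernel's vop_bmap_args or MAXPHYS:

#include <stdint.h>

#define MAXPHYS_STUB    (64 * 1024)    /* stand-in for the kernel MAXPHYS */

struct bmap_args_stub {
    int64_t  a_bn;       /* logical block asked for */
    int64_t *a_bnp;      /* device block number, out */
    int     *a_runp;     /* contiguous blocks after a_bn, out */
};

static int
trivial_bmap(struct bmap_args_stub *ap, uint32_t lb_size)
{
    /* 1:1 translation: the device sees the same block number */
    *ap->a_bnp = ap->a_bn;

    /* conservative run: whatever still fits in one physical transfer
     * (the real code leaves open whether to subtract 1 for the block itself) */
    if (ap->a_runp != NULL)
        *ap->a_runp = (int)(MAXPHYS_STUB / lb_size);
    return 0;
}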
|
/src/sys/kern/vnode_if.c
    1686: int *runp)
    1697: a.a_runp = runp;
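The generated VOP_BMAP() stub in vnode_if.c packs its parameters into an argument structure (a.a_runp = runp at line 1697) and forwards the call to whichever bmap method the filesystem installed. A self-contained sketch of that dispatch pattern; all names below (vnode_stub, bmap_args_stub, toyfs_bmap) are simplified stand-ins, not the generated kernel code:

#include <stdio.h>

struct vnode_stub;

struct bmap_args_stub {
    struct vnode_stub *a_vp;
    long long  a_bn;
    long long *a_bnp;
    int       *a_runp;
};

struct vnode_stub {
    /* per-filesystem implementation, installed at mount time */
    int (*v_op_bmap)(struct bmap_args_stub *);
};

/* the wrapper every caller uses; mirrors the VOP_BMAP() stub's shape */
static int
vop_bmap_stub(struct vnode_stub *vp, long long bn, long long *bnp, int *runp)
{
    struct bmap_args_stub a;

    a.a_vp = vp;
    a.a_bn = bn;
    a.a_bnp = bnp;
    a.a_runp = runp;
    return vp->v_op_bmap(&a);
}

/* toy filesystem method: identity mapping, no read-ahead run */
static int
toyfs_bmap(struct bmap_args_stub *ap)
{
    *ap->a_bnp = ap->a_bn;
    if (ap->a_runp != NULL)
        *ap->a_runp = 0;
    return 0;
}

int
main(void)
{
    struct vnode_stub vn = { .v_op_bmap = toyfs_bmap };
    long long daddr;
    int run;

    vop_bmap_stub(&vn, 7, &daddr, &run);
    printf("daddr %lld run %d\n", daddr, run);
    return 0;
}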