/src/sys/arch/m68k/m68k/
copypage.s
     55  movw   #PAGE_SIZE/32-1,%d0    | number of 32 byte chunks - 1
     72  movw   #PAGE_SIZE/32-1,%d0    | number of 32 byte chunks - 1
     84  movw   #PAGE_SIZE/4-1,%d0     | number of 4 byte chunks - 1
     99  movql  #PAGE_SIZE/256-1,%d0   | number of 256 byte chunks - 1
    122  movw   #PAGE_SIZE/4-1,%d0     | number of 4 byte chunks - 1
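The loops above keep "number of chunks - 1" in %d0 because the m68k dbra/dbf
instruction counts down through zero and exits at -1. A rough C equivalent of
the 32-byte-chunk page copy, as a sketch only: the real routines use movem or
move16 where the CPU supports them, and the 4 KiB PAGE_SIZE is an assumption
here.

    #include <stdint.h>

    #define PAGE_SIZE 4096            /* assumed; m68k ports use 4 or 8 KiB */

    /*
     * Copy one page in 32-byte chunks, mirroring the
     * "movw #PAGE_SIZE/32-1,%d0" + dbf loop: the counter holds
     * chunks-1 and runs down to -1.
     */
    void
    copypage_by_chunks(void *dst, const void *src)
    {
            uint32_t *d = dst;
            const uint32_t *s = src;

            for (int n = PAGE_SIZE / 32 - 1; n >= 0; n--) {
                    /* eight 4-byte moves = one 32-byte chunk */
                    *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
                    *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
            }
    }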
/src/usr.bin/split/
split.c
     80  off_t chunks = 0;       /* Number of chunks to split into. */   [local in function main]
    126  case 'n':               /* Chunks. */
    128  (chunks = (size_t)strtoul(optarg, &ep, 10)) == 0 ||
    130  errx(EXIT_FAILURE, "%s: illegal number of chunks.", optarg);
    156  else if (bytecnt || chunks)
    159  if (bytecnt && chunks)
    164  else if (chunks)
    165  split3(chunks);
    289  * Split the input into specified number of chunks
    [all...]
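split3() (whose body is elided above) divides the input into the requested
number of chunks. A minimal, self-contained sketch of the arithmetic
involved; chunk_len() is a hypothetical helper, not split(1)'s actual code,
and it assumes the usual convention that the first chunks-1 pieces get
size/chunks bytes and the final piece absorbs the remainder.

    #include <sys/types.h>
    #include <stdio.h>

    /* Hypothetical: byte count of 0-based chunk i when splitting
     * `total` bytes into `chunks` pieces. */
    static off_t
    chunk_len(off_t total, off_t chunks, off_t i)
    {
            off_t base = total / chunks;

            return (i == chunks - 1) ? total - base * (chunks - 1) : base;
    }

    int
    main(void)
    {
            /* 10 bytes into 3 chunks -> 3, 3, 4 */
            for (off_t i = 0; i < 3; i++)
                    printf("%lld\n", (long long)chunk_len(10, 3, i));
            return 0;
    }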
/src/sys/external/bsd/drm/dist/shared-core/
radeon_cs.c
    163  chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
    176  parser.chunks = drm_calloc(parser.num_chunks, sizeof(struct drm_radeon_kernel_chunk), DRM_MEM_DRIVER);
    177  if (!parser.chunks) {
    191  parser.chunks[i].chunk_id = user_chunk.chunk_id;
    193  if (parser.chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS)
    196  if (parser.chunks[i].chunk_id == RADEON_CHUNK_ID_IB)
    199  if (parser.chunks[i].chunk_id == RADEON_CHUNK_ID_OLD) {
    204  parser.chunks[i].length_dw = user_chunk.length_dw;
    205  parser.chunks[i].chunk_data = (uint32_t *)(unsigned long)user_chunk.chunk_data;
    207  parser.chunks[i].kdata = NULL
    [all...]
radeon_drm.h
    775  uint64_t chunks;   /* this points to uint64_t * which point to   [member of struct drm_radeon_cs]
    776                        cs chunks */
radeon_drv.h
    255  struct drm_radeon_kernel_chunk *chunks;   [member of struct drm_radeon_cs_parser]
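The radeon_drm.h comment describes a two-level indirection: cs->chunks is a
uint64_t holding a user pointer to an array of uint64_t, each of which holds
a user pointer to one chunk descriptor. A self-contained model of how
radeon_cs.c unwinds it; in the kernel the inner reads are DRM_COPY_FROM_USER
calls with error checks, modeled here as plain dereferences.

    #include <stdint.h>
    #include <stdio.h>

    struct chunk_desc {
            uint32_t chunk_id;
            uint32_t length_dw;
            uint64_t chunk_data;      /* yet another user pointer */
    };

    int
    main(void)
    {
            /* "userspace": one chunk, a pointer table, and the handle */
            struct chunk_desc ib = { .chunk_id = 2, .length_dw = 16 };
            uint64_t ptrs[1] = { (uint64_t)(uintptr_t)&ib };
            uint64_t chunks = (uint64_t)(uintptr_t)ptrs;  /* like cs->chunks */

            /* "kernel": recover the array, then each descriptor */
            uint64_t *chunk_array_ptr = (uint64_t *)(uintptr_t)chunks;
            struct chunk_desc *c =
                (struct chunk_desc *)(uintptr_t)chunk_array_ptr[0];

            printf("chunk id %u, %u dwords\n", c->chunk_id, c->length_dw);
            return 0;
    }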
/src/sys/external/bsd/drm2/dist/drm/radeon/
radeon_cs.c
    299  /* get chunks */
    311  chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
    318  p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
    319  if (p->chunks == NULL) {
    332  p->chunks[i].length_dw = user_chunk.length_dw;
    334  p->chunk_relocs = &p->chunks[i];
    337  p->chunk_ib = &p->chunks[i];
    339  if (p->chunks[i].length_dw == 0)
    343  p->chunk_const_ib = &p->chunks[i];
    345  if (p->chunks[i].length_dw == 0
    [all...]
radeon.h
   1108  /* chunks */
   1110  struct radeon_cs_chunk *chunks;   [member of struct radeon_cs_parser]
   1120  /* indices of various chunks */
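The drm2 parser makes one pass over the chunk array and aims named pointers
(chunk_relocs, chunk_ib, chunk_const_ib) at the entries later stages need,
rejecting empty IBs early. A condensed, self-contained model of that
classification; the ID values mirror the uapi RADEON_CHUNK_ID_* constants.

    #include <stdint.h>
    #include <stddef.h>

    enum { CHUNK_ID_RELOCS = 0x01, CHUNK_ID_IB = 0x02, CHUNK_ID_CONST_IB = 0x04 };

    struct cs_chunk { uint32_t chunk_id; uint32_t length_dw; };

    struct cs_parser {
            struct cs_chunk *chunks;
            size_t nchunks;
            struct cs_chunk *chunk_relocs, *chunk_ib, *chunk_const_ib;
    };

    static int
    classify_chunks(struct cs_parser *p)
    {
            for (size_t i = 0; i < p->nchunks; i++) {
                    struct cs_chunk *c = &p->chunks[i];

                    switch (c->chunk_id) {
                    case CHUNK_ID_RELOCS:
                            p->chunk_relocs = c;
                            break;
                    case CHUNK_ID_IB:
                            p->chunk_ib = c;
                            if (c->length_dw == 0)
                                    return -1;   /* kernel: -EINVAL */
                            break;
                    case CHUNK_ID_CONST_IB:
                            p->chunk_const_ib = c;
                            if (c->length_dw == 0)
                                    return -1;
                            break;
                    }
            }
            return 0;
    }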
/src/sys/external/bsd/compiler_rt/dist/lib/sanitizer_common/
sanitizer_allocator_secondary.h
     17  // Fixed array to store LargeMmapAllocator chunks list, limited to 32K total
     18  // allocated chunks. To be used in memory constrained or not memory hungry cases
     29  // Much less restricted LargeMmapAllocator chunks list (comparing to
     30  // PtrArrayStatic). Backed by mmaped memory region and can hold up to 1M chunks.
     67  // This class can (de)allocate only large chunks of memory using mmap/unmap.
    207  Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);   [local in LargeMmapAllocator::EnsureSortedChunks]
    208  Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);
    210  AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;
    240  // There are 2 chunks left, choose one.
    274  // Iterate over all existing chunks
    278  const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);   [local in LargeMmapAllocator::ForEachChunk]
    [all...]
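EnsureSortedChunks (lines 207-210) sorts the header array by address and then
rewrites each header's chunk_idx, because deallocation deletes a chunk in
O(1) by swapping it with the last array slot, which scrambles the order. A
standalone rendering of that invariant, substituting qsort for the
sanitizer's internal Sort:

    #include <stdlib.h>
    #include <stdint.h>

    struct header {
            size_t chunk_idx;   /* this header's slot in the chunks array */
            /* ... map_beg, map_size, etc. in the real allocator ... */
    };

    static int
    cmp_by_addr(const void *a, const void *b)
    {
            uintptr_t x = (uintptr_t)*(struct header *const *)a;
            uintptr_t y = (uintptr_t)*(struct header *const *)b;

            return (x > y) - (x < y);
    }

    static void
    ensure_sorted(struct header **chunks, size_t n)
    {
            qsort(chunks, n, sizeof(chunks[0]), cmp_by_addr);
            /* chunk_idx must track the new positions for O(1) removal */
            for (size_t i = 0; i < n; i++)
                    chunks[i]->chunk_idx = i;
    }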
sanitizer_allocator_local_cache.h
     50  CompactPtrT chunk = c->chunks[--c->count];
     68  c->chunks[c->count++] = chunk;
     89  CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];   [member of struct SizeClassAllocator64LocalCache::PerClass]
    110  if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
    123  &c->chunks[first_idx_to_drain], count);
    229  // of the chunks. If using a separate size class, it will always be
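The per-class cache is a small stack of compact pointers: allocation pops
(line 50), deallocation pushes (line 68), and whole batches move to or from
the shared allocator only when the stack empties or fills. A hedged,
self-contained sketch; refill() and drain_half() are stand-ins for the
GetFromAllocator/ReturnToAllocator calls at lines 110 and 123, and the array
holds 2 * batch entries so a half drain still leaves a full batch cached.

    #include <stdint.h>

    enum { BATCH = 64 };                  /* illustrative batch size */

    struct per_class {
            unsigned count;
            uint32_t chunks[2 * BATCH];   /* compact (offset) pointers */
    };

    static void
    refill(struct per_class *c)
    {
            c->count = BATCH;             /* stub: batch from shared allocator */
    }

    static void
    drain_half(struct per_class *c)
    {
            c->count -= BATCH;            /* stub: return a batch */
    }

    static uint32_t
    cache_pop(struct per_class *c)
    {
            if (c->count == 0)
                    refill(c);
            return c->chunks[--c->count];
    }

    static void
    cache_push(struct per_class *c, uint32_t chunk)
    {
            if (c->count == 2 * BATCH)
                    drain_half(c);
            c->chunks[c->count++] = chunk;
    }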
sanitizer_allocator_primary64.h
     35  // FreeArray is an array free-d chunks (stored as 4-byte offsets)
    111  const CompactPtrT *chunks, uptr n_chunks) {
    129  free_array[old_num_chunks + i] = chunks[i];
    137  CompactPtrT *chunks, uptr n_chunks) {
    152  chunks[i] = free_array[base_idx + i];
    291  // Iterate over all existing chunks.
    445  // chunks only and returns these pages back to OS.
    455  // Figure out the number of chunks per page and whether we can take a fast
    456  // path (the number of chunks per page is the same for all pages).
    460  // Same number of chunks per page, no cross overs
    [all...]
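The "stored as 4-byte offsets" note at line 35 is the space saver: every
chunk of the 64-bit primary allocator lives inside one large reserved
region, so a 32-bit scaled offset from the region base names any chunk. A
sketch of that compaction; the base address and scale are illustrative, not
the sanitizer's exact constants.

    #include <stdint.h>

    #define SPACE_BEG     0x600000000000ULL   /* assumed region base */
    #define COMPACT_SHIFT 4                   /* chunks >= 16-byte aligned */

    typedef uint32_t compact_ptr;

    static compact_ptr
    compact(uintptr_t p)
    {
            return (compact_ptr)((p - SPACE_BEG) >> COMPACT_SHIFT);
    }

    static uintptr_t
    expand(compact_ptr c)
    {
            return SPACE_BEG + ((uintptr_t)c << COMPACT_SHIFT);
    }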
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_cs.c
    143  /* get chunks */
    144  chunk_array_user = u64_to_user_ptr(cs->in.chunks);
    152  p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
    154  if (!p->chunks) {
    171  p->chunks[i].chunk_id = user_chunk.chunk_id;
    172  p->chunks[i].length_dw = user_chunk.length_dw;
    174  size = p->chunks[i].length_dw;
    177  p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
    178  if (p->chunks[i].kdata == NULL) {
    184  if (copy_from_user(p->chunks[i].kdata, cdata, size))
    [all...]
amdgpu.h
    487  /* chunks */
    489  struct amdgpu_cs_chunk *chunks;   [member of struct amdgpu_cs_parser]
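amdgpu copies every chunk's payload into kernel memory up front so later
stages never touch user pointers. A self-contained model of the loop body at
lines 174-184: length_dw counts dwords, hence the multiply by
sizeof(uint32_t) before the copy; malloc/memcpy stand in for
kvmalloc_array/copy_from_user (kvmalloc_array also guards the size
multiplication against overflow).

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct cs_chunk {
            uint32_t length_dw;
            uint32_t *kdata;
    };

    static int
    copy_chunk_payload(struct cs_chunk *c, const uint32_t *cdata)
    {
            size_t size = c->length_dw;

            c->kdata = malloc(size * sizeof(uint32_t));
            if (c->kdata == NULL)
                    return -1;                 /* kernel: -ENOMEM */
            size *= sizeof(uint32_t);
            memcpy(c->kdata, cdata, size);     /* kernel: copy_from_user */
            return 0;
    }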
/src/lib/libc/stdlib/
jemalloc.c
     44  * + Memory is managed in chunks and runs (chunks can be split into runs),
     93  * Huge : Each allocation is backed by a dedicated contiguous set of chunks.
    302  * Size and alignment of memory chunks that are allocated by the OS's virtual
    413  /* Number of chunks that were allocated. */
    416  /* High-water mark for number of chunks allocated. */
    420  * Current number of chunks allocated. This value isn't maintained for
    434  /* Tree of chunks. */
    496  * detection of empty chunks fast.
    576  * almost-empty chunks
    617  rb_tree_t chunks;   [member of struct arena_s]
    [all...]
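Chunks in this allocator are naturally aligned to the chunk size, so the
owning chunk of any interior pointer falls out of a mask; this is what keeps
the chunk trees cheap to key. A sketch of the arithmetic, assuming 1 MiB
chunks (this jemalloc's historical default), not a quote of its macros.

    #include <stdint.h>

    #define CHUNK_SHIFT 20
    #define CHUNKSIZE   ((uintptr_t)1 << CHUNK_SHIFT)

    /* round an interior pointer down to its chunk header */
    static void *
    chunk_for_ptr(const void *p)
    {
            return (void *)((uintptr_t)p & ~(CHUNKSIZE - 1));
    }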
/src/sys/external/gpl2/dts/dist/arch/arm/boot/dts/
intel-ixp4xx.dtsi
     65  * done in 4 chunks of 16MB each.
/src/share/mk/
bsd.lua.mk
     21  # and installed as precompiled chunks for faster loading. Note
/src/sys/arch/arm/arm32/
arm32_kvminit.c
    530  pv_addr_t chunks[__arraycount(bmi->bmi_l2pts) + 11];   [local in arm32_kernel_vm_init]
    552  for (size_t i = 0; i < __arraycount(chunks); i++) {
    553  SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list);
    810  VPRINTF("Listing Chunks\n");
    819  VPRINTF("\nMapping Chunks\n");
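Lines 530-553 seed a bootstrap free list: a fixed array of pv_addr_t records
is threaded onto bmi_freechunks, and later mapping code pops entries off as
it needs chunks. The idiom, reduced to a sketch with a stand-in pv_addr_t
that carries only the list linkage (the kernel's also holds the chunk's
physical and virtual addresses):

    #include <sys/queue.h>
    #include <stddef.h>

    typedef struct pv_addr {
            SLIST_ENTRY(pv_addr) pv_list;
    } pv_addr_t;

    SLIST_HEAD(pv_addr_head, pv_addr);

    static void
    seed_freechunks(struct pv_addr_head *h, pv_addr_t *chunks, size_t n)
    {
            SLIST_INIT(h);
            for (size_t i = 0; i < n; i++)
                    SLIST_INSERT_HEAD(h, &chunks[i], pv_list);
    }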
/src/sys/external/bsd/compiler_rt/dist/lib/sanitizer_common/tests/
sanitizer_allocator_test.cc
    210  // Allocate a bunch of chunks.
    459  uint32_t chunks[kNumChunks];   [local in TEST]
    460  a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
    531  uint32_t chunks[kNumChunks];   [local in TEST]
    534  if (!a.GetFromAllocator(&stats, 52, chunks, kNumChunks)) {
    921  // Allocate a bunch of chunks.
   1049  // In a world where regions are small and chunks are huge...
   1299  // Verify that there are no released pages touched by used chunks and all
   1300  // ranges of free chunks big enough to contain the entire memory pages had
   1331  // chunks were released
    [all...]
/src/sys/arch/sparc64/sparc64/
copy.S
    113  ! XXX should do this in bigger chunks when possible
    160  ! XXX should do this in bigger chunks when possible
/src/sys/external/bsd/drm2/dist/include/uapi/drm/
amdgpu_drm.h
    560  /** this points to __u64 * which point to cs chunks */
    561  __u64 chunks;   [member of struct drm_amdgpu_cs_in]
radeon_drm.h
    988  /* this points to __u64 * which point to cs chunks */
    989  __u64 chunks;   [member of struct drm_radeon_cs]
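Both uapi structs end in the same shape: a __u64 holding a user pointer to
an array of __u64, each holding a pointer to one chunk descriptor. A hedged
userspace sketch of filling the amdgpu variant with a single IB chunk,
assuming libdrm's uapi headers are available; the double cast through
uintptr_t is the portable way to stuff a pointer into a fixed-width __u64.

    #include <stdint.h>
    #include <drm/amdgpu_drm.h>

    static void
    build_cs(struct drm_amdgpu_cs_in *in,
        struct drm_amdgpu_cs_chunk *chunk, uint64_t *chunk_ptrs,
        const struct drm_amdgpu_cs_chunk_ib *ib)
    {
            chunk->chunk_id   = AMDGPU_CHUNK_ID_IB;
            chunk->length_dw  = sizeof(*ib) / 4;               /* dwords */
            chunk->chunk_data = (uint64_t)(uintptr_t)ib;

            chunk_ptrs[0]  = (uint64_t)(uintptr_t)chunk;       /* __u64 -> chunk */
            in->chunks     = (uint64_t)(uintptr_t)chunk_ptrs;  /* __u64 -> array */
            in->num_chunks = 1;
    }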
/src/sys/arch/m68k/060sp/dist/
isp.s
   3233  # operands. A misaligned operand must be written in aligned chunks or #
   3850  # operand. A misaligned operand must be written in aligned chunks or #
/src/sys/arch/sparc/sparc/
locore.s
   4714  ! XXX should do this in bigger chunks when possible
/src/sys/external/isc/libsodium/dist/m4/
libtool.m4
    298  # tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS