/src/sys/rump/net/lib/libshmif/
  shmif_busops.c
     66  size_t chunk;    local in function:shmif_busread
     69  chunk = MIN(len, BUSMEM_DATASIZE - off);
     70  memcpy(dest, busmem->shm_data + off, chunk);
     71  len -= chunk;
     73  if (off + chunk == BUSMEM_DATASIZE)
     77  return (off + chunk) % BUSMEM_DATASIZE;
     81  memcpy((uint8_t *)dest + chunk, busmem->shm_data, len);
    101  size_t chunk;    local in function:shmif_buswrite
    106  chunk = MIN(len, BUSMEM_DATASIZE - off);
    107  len -= chunk;
    [all...]
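The shmif_busread/shmif_buswrite hits above show the usual split copy over a ring buffer: take as much as fits before the end, then wrap around to the start. A minimal stand-alone sketch of that pattern, assuming a hypothetical BUF_SIZE and ring_read rather than the actual shmif API:

#include <stddef.h>
#include <string.h>

#define BUF_SIZE 4096			/* stands in for BUSMEM_DATASIZE */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/*
 * Copy len bytes out of a circular buffer starting at off; the copy is
 * split in two when it wraps past the end.  Returns the new read offset.
 */
static size_t
ring_read(void *dest, const unsigned char *buf, size_t off, size_t len)
{
	size_t chunk;

	chunk = MIN(len, BUF_SIZE - off);
	memcpy(dest, buf + off, chunk);
	len -= chunk;
	if (len == 0)
		return (off + chunk) % BUF_SIZE;
	/* wrapped: the remainder comes from the start of the buffer */
	memcpy((unsigned char *)dest + chunk, buf, len);
	return len;
}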
/src/sys/dev/acpi/
  apei_mapreg.c
    158  uint64_t chunk;    local in function:apei_mapreg_read
    184  chunk = chunk32;
    190  chunk = *((volatile const uint8_t *)map + i);
    193  chunk = *((volatile const uint16_t *)map + i);
    196  chunk = *((volatile const uint32_t *)map + i);
    199  chunk = *((volatile const uint64_t *)map + i);
    208  v |= chunk << (i*chunkbits);
    229  uint64_t chunk = v >> (i*chunkbits);    local in function:apei_mapreg_write
    250  rv = AcpiOsWritePort(addr, chunk,
    259  *((volatile uint8_t *)map + i) = chunk;
    [all...]
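apei_mapreg_read above assembles a wide register value from narrower accesses by shifting each piece into place (v |= chunk << (i*chunkbits)); apei_mapreg_write does the reverse. A sketch of just that assembly step, with read_chunk as a hypothetical stand-in for the actual port or MMIO access:

#include <stdint.h>

/*
 * Build a wide value out of nchunks reads of chunkbits each, least
 * significant chunk first.  Assumes nchunks * chunkbits <= 64.
 */
static uint64_t
read_wide(unsigned nchunks, unsigned chunkbits,
    uint64_t (*read_chunk)(unsigned))
{
	uint64_t v = 0;
	unsigned i;

	for (i = 0; i < nchunks; i++)
		v |= read_chunk(i) << (i * chunkbits);
	return v;
}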
/src/sys/arch/ia64/stand/common/
  misc.c
    106  size_t chunk, resid;    local in function:kern_bzero
    111  chunk = min(sizeof(buf), resid);
    112  archsw.arch_copyin(buf, dest, chunk);
    113  resid -= chunk;
    114  dest += chunk;
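kern_bzero above zeroes a region it can only reach through the loader's copy routine, pushing a small all-zero buffer out one chunk at a time. A sketch of the same loop, with copy_out as a hypothetical stand-in for archsw.arch_copyin:

#include <stddef.h>

/* Zero len bytes at dest using only a bounce buffer and a copy routine. */
static void
chunked_bzero(void (*copy_out)(const void *, unsigned long, size_t),
    unsigned long dest, size_t len)
{
	static const char buf[256];	/* static, so implicitly all zeroes */
	size_t chunk, resid;

	for (resid = len; resid > 0; resid -= chunk, dest += chunk) {
		chunk = resid < sizeof(buf) ? resid : sizeof(buf);
		copy_out(buf, dest, chunk);
	}
}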
/src/sys/external/bsd/compiler_rt/dist/lib/asan/
  asan_debugging.cc
     52  AsanChunkView chunk = FindHeapChunkByAddress(addr);    local in function:__anon4e3fe3010110::AsanGetStack
     53  if (!chunk.IsValid()) return 0;
     57  if (chunk.AllocTid() == kInvalidTid) return 0;
     58  stack = chunk.GetAllocStack();
     59  if (thread_id) *thread_id = chunk.AllocTid();
     61  if (chunk.FreeTid() == kInvalidTid) return 0;
     62  stack = chunk.GetFreeStack();
     63  if (thread_id) *thread_id = chunk.FreeTid();
  asan_descriptions.cc
    106  AsanChunkView chunk, uptr addr,
    109  if (chunk.AddrIsAtLeft(addr, access_size, &descr->offset)) {
    111  } else if (chunk.AddrIsAtRight(addr, access_size, &descr->offset)) {
    117  } else if (chunk.AddrIsInside(addr, access_size, &descr->offset)) {
    122  descr->chunk_begin = chunk.Beg();
    123  descr->chunk_size = chunk.UsedSize();
    124  descr->user_requested_alignment = chunk.UserRequestedAlignment();
    125  descr->alloc_type = chunk.GetAllocType();
    159  AsanChunkView chunk = FindHeapChunkByAddress(addr);    local in function:__asan::GetHeapAddressInformation
    160  if (!chunk.IsValid())
    [all...]
/src/sys/external/bsd/drm2/dist/drm/vboxvideo/
  vbva_base.c
     43  /* Chunk will not cross buffer boundary. */
     46  /* Chunk crosses buffer boundary. */
     83  u32 chunk = len;    local in function:vbva_write
     85  if (chunk >= available) {
     90  if (chunk >= available) {
     95  chunk = available - vbva->partial_write_tresh;
     98  vbva_buffer_place_data_at(vbva_ctx, p, chunk,
    101  vbva->free_offset = (vbva->free_offset + chunk) %
    103  record->len_and_flags += chunk;
    104  available -= chunk;
    [all...]
/src/sys/external/bsd/drm2/dist/drm/qxl/
  qxl_image.c
     43  struct qxl_drm_chunk *chunk;    local in function:qxl_allocate_chunk
     46  chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
     47  if (!chunk)
     50  ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
     52  kfree(chunk);
     56  list_add_tail(&chunk->head, &image->chunk_list);
     93  struct qxl_drm_chunk *chunk, *tmp;    local in function:qxl_image_free_objects
     95  list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
     96  qxl_bo_unref(&chunk->bo);
     97  kfree(chunk);
    115  struct qxl_data_chunk *chunk;    local in function:qxl_image_init_helper
    [all...]
/src/sys/arch/arm/at91/
  at91spi.c
    268  struct spi_chunk *chunk;    local in function:at91spi_xfer
    289  if ((chunk = sc->sc_rchunk) != NULL) {
    290  if ((len = chunk->chunk_rresid) > HALF_BUF_SIZE)
    292  if (chunk->chunk_rptr && len > 0) {
    293  memcpy(chunk->chunk_rptr, (const uint8_t *)sc->sc_dmapage + offs, len);
    294  chunk->chunk_rptr += len;
    296  if ((chunk->chunk_rresid -= len) <= 0) {
    297  // done with this chunk, get next
    298  sc->sc_rchunk = chunk->chunk_next;
    303  /* start transmitting next chunk: *
    [all...]
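at91spi_xfer above, like the other SPI drivers further down (mvspi.c, bcm2835_spi.c, auspi.c, imxspi.c), walks a linked list of struct spi_chunk, consuming chunk_wresid/chunk_rresid bytes through chunk_wptr/chunk_rptr and following chunk_next once a chunk is exhausted. A simplified sketch of the transmit-side walk, with FIFO flow control omitted and put_fifo as a hypothetical register write:

#include <stdint.h>

/* Cut-down spi_chunk carrying only the transmit-side fields. */
struct spi_chunk {
	struct spi_chunk *chunk_next;
	const uint8_t *chunk_wptr;	/* NULL means send dummy bytes */
	int chunk_wresid;		/* bytes left to transmit */
};

static void
spi_send(struct spi_chunk **wchunk, void (*put_fifo)(uint8_t))
{
	struct spi_chunk *chunk;

	while ((chunk = *wchunk) != NULL) {
		while (chunk->chunk_wresid) {
			uint8_t data = 0;

			if (chunk->chunk_wptr)
				data = *chunk->chunk_wptr++;
			chunk->chunk_wresid--;
			put_fifo(data);
		}
		/* done with this chunk, get next */
		*wchunk = chunk->chunk_next;
	}
}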
/src/sys/external/bsd/compiler_rt/dist/lib/sanitizer_common/
  sanitizer_allocator_local_cache.h
     50  CompactPtrT chunk = c->chunks[--c->count];    local in function:SizeClassAllocator64LocalCache::Allocate
     53  allocator->GetRegionBeginBySizeClass(class_id), chunk));
     65  CompactPtrT chunk = allocator->PointerToCompactPtr(    local in function:SizeClassAllocator64LocalCache::Deallocate
     68  c->chunks[c->count++] = chunk;
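The local-cache hits above pop a compacted chunk pointer off a per-class array on Allocate and push it back on Deallocate. A toy version of that push/pop cache, detached from the sanitizer allocator types (names and sizes are assumptions):

#include <stddef.h>

#define CACHE_MAX 64

struct chunk_cache {
	size_t count;
	void *chunks[CACHE_MAX];
};

/* Pop a cached chunk; NULL means the caller must refill from the
 * backing allocator. */
static void *
cache_alloc(struct chunk_cache *c)
{
	if (c->count == 0)
		return NULL;
	return c->chunks[--c->count];
}

/* Push a freed chunk back; -1 means the cache is full and the caller
 * should drain it to the backing allocator. */
static int
cache_free(struct chunk_cache *c, void *chunk)
{
	if (c->count == CACHE_MAX)
		return -1;
	c->chunks[c->count++] = chunk;
	return 0;
}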
/src/usr.sbin/acpitools/aml/
  aml_memman.c
     65  static unsigned int memman_guess_memid(struct memman *memman, void *chunk);
    140  void *chunk, *block;    local in function:memman_alloc
    173  chunk = memnode->node;
    178  return (chunk);
    279  memman_guess_memid(struct memman *memman, void *chunk)
    293  if (memnode->node == chunk) {
    302  memman_free(struct memman *memman, unsigned int memid, void *chunk)
    313  id = memman_guess_memid(memman, chunk);
    328  if (memnode->node == chunk) {
    366  memman_free_flexsize(struct memman *memman, void *chunk)
    388  void *chunk;    local in function:memman_freeall
    [all...]
/src/sys/arch/atari/dev/
  md_root.c
     53  #define RAMD_CHUNK (9 * 512)	/* Chunk-size for auto-load */
     92  int chunk;	/* chunk size on input medium */    member in struct:read_info
    176  rs.chunk = RAMD_CHUNK;
    189  rs.chunk = dl.d_secsize * dl.d_secpercyl;
    222  bp->b_bcount = uimin(rsp->chunk, bytes_left);
    297  bp->b_bcount = uimin(rsp->chunk, nbyte);
/src/sbin/nvmectl/
  firmware.c
    121  void *chunk;    local in function:update_firmware
    126  if ((chunk = aligned_alloc(PAGE_SIZE, NVME_MAX_XFER_SIZE)) == NULL)
    132  memcpy(chunk, payload + off, size);
    138  pt.buf = chunk;
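update_firmware above stages the image through one page-aligned buffer of at most NVME_MAX_XFER_SIZE bytes per pass-through command. A sketch of that download loop, with XFER_MAX, PAGE_SZ and send_chunk as hypothetical stand-ins for the nvmectl constants and ioctl:

#include <stdlib.h>
#include <string.h>

#define PAGE_SZ		4096
#define XFER_MAX	(128 * 1024)

static int
upload(const char *payload, size_t len,
    int (*send_chunk)(const void *, size_t, size_t))
{
	void *chunk;
	size_t off, size;
	int error = 0;

	if ((chunk = aligned_alloc(PAGE_SZ, XFER_MAX)) == NULL)
		return -1;
	for (off = 0; off < len && error == 0; off += size) {
		size = len - off < XFER_MAX ? len - off : XFER_MAX;
		memcpy(chunk, payload + off, size);	/* stage into aligned buffer */
		error = send_chunk(chunk, size, off);
	}
	free(chunk);
	return error;
}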
/src/sys/dev/marvell/
  mvxpbm.c
    278  struct mvxpbm_chunk *chunk;    local in function:mvxpbm_alloc_buffer
    280  /* initialize chunk */
    282  chunk = (struct mvxpbm_chunk *)ptr;
    283  chunk->m = NULL;
    284  chunk->sc = sc;
    285  chunk->off = (ptr - sc->sc_buf);
    286  chunk->pa = (paddr_t)(sc->sc_buf_pa + chunk->off);
    287  chunk->buf_off = (ptr_data - sc->sc_buf);
    288  chunk->buf_pa = (paddr_t)(sc->sc_buf_pa + chunk->buf_off)
    318  struct mvxpbm_chunk *chunk = (struct mvxpbm_chunk *)arg;    local in function:mvxpbm_free_mbuf
    369  struct mvxpbm_chunk *chunk;    local in function:mvxpbm_alloc
    [all...]
  mvspi.c
    290  struct spi_chunk *chunk;    local in function:mvspi_sched
    305  chunk = st->st_chunks;
    310  for (i = chunk->chunk_wresid; i > 0; i--) {
    316  if (chunk->chunk_wptr){
    317  byte = *chunk->chunk_wptr;
    318  chunk->chunk_wptr++;
    342  if (chunk->chunk_rptr) {
    343  *chunk->chunk_rptr =
    345  chunk->chunk_rptr++;
    351  chunk = chunk->chunk_next
    [all...]
/src/sys/arch/arm/broadcom/
  bcm2835_spi.c
    246  struct spi_chunk *chunk;    local in function:bcmspi_send
    248  while ((chunk = sc->sc_wchunk) != NULL) {
    249  while (chunk->chunk_wresid) {
    253  if (chunk->chunk_wptr) {
    254  fd = *chunk->chunk_wptr++;
    259  chunk->chunk_wresid--;
    270  struct spi_chunk *chunk;    local in function:bcmspi_recv
    272  while ((chunk = sc->sc_rchunk) != NULL) {
    273  while (chunk->chunk_rresid) {
    278  if (chunk->chunk_rptr)
    [all...]
/src/sys/arch/mips/alchemy/dev/
  auspi.c
    252  struct spi_chunk *chunk;    local in function:auspi_send
    255  while ((chunk = sc->sc_wchunk) != NULL) {
    257  while (chunk->chunk_wresid) {
    264  if (chunk->chunk_wptr) {
    265  data = *chunk->chunk_wptr++;
    269  chunk->chunk_wresid--;
    272  if ((chunk->chunk_wresid == 0) &&
    273  (chunk->chunk_next == NULL)) {
    288  struct spi_chunk *chunk;    local in function:auspi_recv
    290  while ((chunk = sc->sc_rchunk) != NULL)
    [all...]
/src/sys/arch/arm/imx/
  imxspi.c
    237  struct spi_chunk *chunk;    local in function:imxspi_send
    240  while ((chunk = sc->sc_wchunk) != NULL) {
    241  while (chunk->chunk_wresid) {
    246  if (chunk->chunk_wptr) {
    247  data = *chunk->chunk_wptr;
    248  chunk->chunk_wptr++;
    252  chunk->chunk_wresid--;
    268  struct spi_chunk *chunk;    local in function:imxspi_recv
    270  while ((chunk = sc->sc_rchunk) != NULL) {
    271  while (chunk->chunk_rresid)
    [all...]
/src/lib/librumpuser/
  rumpuser_dl.c
     82  size_t chunk, newsize;    local in function:reservespace
     85  chunk = *storesize - storeoff;
     87  if (chunk >= required)
     90  newsize = *storesize + ((size_t)required - chunk);
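reservespace above only grows its store when the space still free in it (chunk) cannot hold the next required bytes. A simplified sketch of that check, using realloc where the original grows its own backing store (helper name and growth policy are assumptions, not the rumpuser code):

#include <stdlib.h>

static void *
reserve(void *store, size_t *storesize, size_t storeoff, size_t required)
{
	size_t chunk = *storesize - storeoff;	/* space still unused */
	size_t newsize;
	void *p;

	if (chunk >= required)
		return store;			/* nothing to do */
	newsize = *storesize + (required - chunk);
	if ((p = realloc(store, newsize)) != NULL)
		*storesize = newsize;
	return p;
}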
/src/sys/dev/ieee1394/
  fwcrom.c
    399  crom_add_quad(struct crom_chunk *chunk, uint32_t entry)
    403  index = chunk->data.crc_len;
    405  printf("too large chunk %d\n", index);
    408  chunk->data.buf[index] = entry;
    409  chunk->data.crc_len++;
    414  crom_add_entry(struct crom_chunk *chunk, int key, int val)
    424  return crom_add_quad(chunk, foo.i);
    450  struct crom_chunk *chunk, const char *buf)
    463  tl = (struct csrtext *) &chunk->data;
    473  return crom_add_chunk(src, parent, chunk, CROM_TEXTLEAF)
    492  struct crom_chunk *chunk, *parent;    local in function:crom_load
    [all...]
/src/sys/external/bsd/compiler_rt/dist/lib/lsan/
  lsan_allocator.cc
    232  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));    local in function:__lsan::PointsIntoChunk
    233  if (!chunk) return 0;
    234  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
    236  if (addr < chunk) return 0;
    237  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
    241  if (addr < chunk + m->requested_size)
    242  return chunk;
    243  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    244  return chunk;
    248  uptr GetUserBegin(uptr chunk) {
    282  void *chunk = allocator.GetBlockBegin(p);    local in function:__lsan::IgnoreObjectLocked
    [all...]
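PointsIntoChunk above reports the chunk base only when the address falls inside the user-requested part of the block. The core containment test restated as a stand-alone helper (names are hypothetical and the operator-new special case is dropped):

#include <stddef.h>
#include <stdint.h>

/* Return the chunk base if addr points into [chunk, chunk + requested_size),
 * otherwise 0. */
static uintptr_t
points_into_chunk(uintptr_t chunk, size_t requested_size, uintptr_t addr)
{
	if (chunk == 0 || addr < chunk)
		return 0;
	if (addr < chunk + requested_size)
		return chunk;
	return 0;
}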
/src/sys/external/bsd/drm2/dist/drm/radeon/
  radeon_cs.c
     88  struct radeon_cs_chunk *chunk;    local in function:radeon_cs_parser_relocs
     97  chunk = p->chunk_relocs;
    100  p->nrelocs = chunk->length_dw / 4;
    114  r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
    866  DRM_ERROR("No relocation chunk !\n");
    884  DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
/src/sys/uvm/
  uvm_loan.c
    566  int ndone, error, chunk;    local in function:uvm_loanuobjpages
    571  for (ndone = 0; ndone < npages; ndone += chunk) {
    572  chunk = MIN(UVM_LOAN_GET_CHUNK, npages - ndone);
    574  chunk, pgpp + ndone);
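uvm_loanuobjpages above walks npages entries in batches of at most UVM_LOAN_GET_CHUNK. The loop shape on its own, with CHUNK_MAX and do_batch as hypothetical stand-ins:

#define CHUNK_MAX 16
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Process npages items at most CHUNK_MAX at a time; stop on the first
 * batch that fails. */
static int
for_each_chunk(int npages, int (*do_batch)(int start, int n))
{
	int ndone, chunk, error;

	for (ndone = 0; ndone < npages; ndone += chunk) {
		chunk = MIN(CHUNK_MAX, npages - ndone);
		if ((error = do_batch(ndone, chunk)) != 0)
			return error;
	}
	return 0;
}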
/src/common/dist/zlib/examples/
  gun.c
    175  if (chunk > have) { \
    176  chunk -= have; \
    180  chunk--; \
    181  if (chunk > have) { \
    182  chunk = have = 0; \
    186  have -= chunk; \
    187  next += chunk; \
    188  chunk = 0; \
    204  unsigned chunk;	/* bytes left in current chunk */    local in function:lunpipe
    [all...]
/src/sys/arch/sun3/sun3/
  machdep.c
    530  int psize, todo, chunk;    local in function:dumpsys
    611  * The first chunk is "unmanaged" (by the VM code) and its
    615  * chunk is done the normal way, using pmap_enter.
    620  /* Do the first chunk (0 <= PA < avail_start) */
    622  chunk = btoc(avail_start);
    623  if (chunk > todo)
    624  chunk = todo;
    635  } while (--chunk > 0);
    637  /* Do the second chunk (avail_start <= PA < dumpsize) */
/src/bin/cp/
  utils.c
    217  ssize_t chunk;    local in function:copy_file
    219  chunk = (remainder > MMAP_MAX_WRITE) ?
    222  chunk) != chunk) {
    227  remainder -= chunk;
    228  ptotal += chunk;
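copy_file above writes the mapped source out in chunks capped at MMAP_MAX_WRITE, checking each write for a short count. A sketch of that loop, with WRITE_MAX as a hypothetical cap:

#include <sys/types.h>
#include <unistd.h>

#define WRITE_MAX	(8 * 1024 * 1024)

/* Write remainder bytes from p to fd in bounded chunks. */
static int
write_all(int fd, const char *p, size_t remainder)
{
	size_t chunk;

	while (remainder > 0) {
		chunk = remainder > WRITE_MAX ? WRITE_MAX : remainder;
		if (write(fd, p, chunk) != (ssize_t)chunk)
			return -1;	/* short or failed write */
		remainder -= chunk;
		p += chunk;
	}
	return 0;
}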