    Searched refs:segs (Results 1 - 25 of 226) sorted by relevancy


  /src/sys/dev/bus_dma/
bus_dmamem_common.c 56 bus_dma_segment_t *segs,
83 lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
84 segs[curseg].ds_len = PAGE_SIZE;
92 segs[curseg].ds_len += PAGE_SIZE;
95 segs[curseg].ds_addr = curaddr;
96 segs[curseg].ds_len = PAGE_SIZE;
113 bus_dma_segment_t *segs,
123 for (addr = segs[curseg].ds_addr;
124 addr < (segs[curseg].ds_addr + segs[curseg].ds_len)
    [all...]
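
The first hit shows the machine-independent allocator filling a caller-supplied segs[] array one page at a time, merging physically contiguous pages into a single segment. Below is a simplified, kernel-context sketch of that coalescing step; the paddrs[]/npages inputs are hypothetical stand-ins for the uvm page list the real code walks, and npages is assumed to be at least 1.

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/bus.h>

/*
 * Merge physically contiguous pages into bus_dma segments, starting a
 * new segment whenever contiguity breaks, and failing with EFBIG when
 * the caller's segment array runs out.
 */
static int
coalesce_pages_into_segs(const paddr_t *paddrs, int npages,
    bus_dma_segment_t *segs, int nsegs, int *rsegs)
{
	paddr_t lastaddr, curaddr;
	int curseg, i;

	curseg = 0;
	lastaddr = segs[curseg].ds_addr = paddrs[0];
	segs[curseg].ds_len = PAGE_SIZE;

	for (i = 1; i < npages; i++) {
		curaddr = paddrs[i];
		if (curaddr == lastaddr + PAGE_SIZE) {
			/* contiguous: grow the current segment */
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			/* gap: start a new segment, if one is left */
			if (++curseg >= nsegs)
				return EFBIG;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}
	*rsegs = curseg + 1;
	return 0;
}

The real allocators additionally honour the caller's alignment and boundary constraints while coalescing, as the xen_bus_dma.c hit further down shows.
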
  /src/sys/dev/hyperv/
hyperv_common.c 120 KASSERT(dma->segs == NULL);
123 dma->segs = kmem_zalloc(sizeof(*dma->segs) * nsegs, KM_SLEEP);
126 error = bus_dmamem_alloc(dmat, size, alignment, boundary, dma->segs,
133 error = bus_dmamem_map(dmat, dma->segs, rseg, size, &dma->addr,
164 fail2: bus_dmamem_free(dmat, dma->segs, rseg);
165 fail1: kmem_free(dma->segs, sizeof(*dma->segs) * nsegs);
166 dma->segs = NULL;
181 bus_dmamem_free(dmat, dma->segs, rsegs)
    [all...]
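
The hyperv hit above is a typical driver-side consumer of that allocator: size a segs array, back it with DMA-safe memory, map it into kernel VA, and unwind in reverse order on failure. A minimal sketch of that pattern follows; struct mydma, mydma_alloc(), and the alignment/flag choices are hypothetical and illustrative only.

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kmem.h>

struct mydma {
	bus_dma_segment_t *segs;
	int rsegs;
	void *kva;
};

static int
mydma_alloc(bus_dma_tag_t dmat, struct mydma *dma, bus_size_t size, int nsegs)
{
	int error;

	/* segment array sized by the caller's worst case */
	dma->segs = kmem_zalloc(sizeof(*dma->segs) * nsegs, KM_SLEEP);

	error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0,
	    dma->segs, nsegs, &dma->rsegs, BUS_DMA_WAITOK);
	if (error)
		goto fail1;

	error = bus_dmamem_map(dmat, dma->segs, dma->rsegs, size,
	    &dma->kva, BUS_DMA_WAITOK);
	if (error)
		goto fail2;

	return 0;

fail2:	bus_dmamem_free(dmat, dma->segs, dma->rsegs);
fail1:	kmem_free(dma->segs, sizeof(*dma->segs) * nsegs);
	dma->segs = NULL;
	return error;
}
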
  /src/sys/external/bsd/drm2/include/drm/
bus_dma_hacks.h 143 bus_dma_segment_t *segs; local in function:bus_dmamap_load_pages
155 KASSERT(nsegs <= (SIZE_MAX / sizeof(segs[0])));
167 segs = kmem_alloc((nsegs * sizeof(segs[0])), kmflags);
168 if (segs == NULL)
171 segs = stacksegs;
179 segs[seg].ds_addr = baddr;
180 segs[seg].ds_len = PAGE_SIZE;
183 error = bus_dmamap_load_raw(tag, map, segs, nsegs, size, flags);
194 out: if (segs != stacksegs)
    [all...]
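
The bus_dma_hacks.h hit builds the segment list itself (one PAGE_SIZE segment per page) and hands it to bus_dmamap_load_raw(), using a small on-stack array unless the request is too large. Here is a sketch of that stack-or-heap pattern; the page_to_baddr() callback, its cookie argument, and NSTACKSEGS are hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/stdint.h>
#include <sys/bus.h>
#include <sys/kmem.h>

#define	NSTACKSEGS	16	/* illustrative stack-array size */

static int
load_pages_raw(bus_dma_tag_t tag, bus_dmamap_t map,
    bus_addr_t (*page_to_baddr)(void *, unsigned), void *cookie,
    unsigned nsegs, bus_size_t size, int flags)
{
	bus_dma_segment_t stacksegs[NSTACKSEGS];
	bus_dma_segment_t *segs;
	unsigned seg;
	int error;

	KASSERT(nsegs <= SIZE_MAX / sizeof(segs[0]));

	/* small maps stay on the stack, large ones fall back to kmem */
	if (nsegs > NSTACKSEGS)
		segs = kmem_alloc(nsegs * sizeof(segs[0]), KM_SLEEP);
	else
		segs = stacksegs;

	/* one single-page segment per page */
	for (seg = 0; seg < nsegs; seg++) {
		segs[seg].ds_addr = (*page_to_baddr)(cookie, seg);
		segs[seg].ds_len = PAGE_SIZE;
	}

	error = bus_dmamap_load_raw(tag, map, segs, nsegs, size, flags);

	if (segs != stacksegs)
		kmem_free(segs, nsegs * sizeof(segs[0]));
	return error;
}

Freeing only when segs != stacksegs keeps the common small-map case allocation-free, which is the point of the on-stack array in the original.
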
  /src/libexec/ld.elf_so/
map_object.c 74 Elf_Phdr **segs = NULL; local in function:_rtld_map_object
190 segs = xmalloc(sizeof(segs[0]) * ehdr->e_phnum);
191 if (segs == NULL) {
192 _rtld_error("No memory for segs");
207 segs[++nsegs] = phdr;
208 if ((segs[nsegs]->p_align & (_rtld_pagesz - 1)) != 0) {
214 if ((segs[nsegs]->p_flags & PF_X) == PF_X) {
216 round_up(segs[nsegs]->p_vaddr +
217 segs[nsegs]->p_memsz))
    [all...]
  /src/sys/arch/evbsh3/evbsh3/
bus_dma.c 142 bus_dma_segment_t * const segs = map->dm_segs; local in function:_bus_dmamap_load_paddr
180 segs[nseg].ds_addr = SH3_PHYS_TO_P2SEG(paddr);
181 segs[nseg].ds_len = sgsize;
182 segs[nseg]._ds_vaddr = vaddr;
185 && (segs[nseg].ds_len + sgsize <= map->_dm_maxsegsz)
187 (segs[nseg].ds_addr & bmask) == (paddr & bmask)))
191 segs[nseg].ds_len += sgsize;
200 segs[nseg].ds_addr = SH3_PHYS_TO_P2SEG(paddr);
201 segs[nseg].ds_len = sgsize;
202 segs[nseg]._ds_vaddr = vaddr
    [all...]
  /src/sys/arch/landisk/landisk/
bus_dma.c 142 bus_dma_segment_t * const segs = map->dm_segs; local in function:_bus_dmamap_load_paddr
180 segs[nseg].ds_addr = SH3_PHYS_TO_P2SEG(paddr);
181 segs[nseg].ds_len = sgsize;
182 segs[nseg]._ds_vaddr = vaddr;
185 && (segs[nseg].ds_len + sgsize <= map->_dm_maxsegsz)
187 (segs[nseg].ds_addr & bmask) == (paddr & bmask)))
191 segs[nseg].ds_len += sgsize;
200 segs[nseg].ds_addr = SH3_PHYS_TO_P2SEG(paddr);
201 segs[nseg].ds_len = sgsize;
202 segs[nseg]._ds_vaddr = vaddr
    [all...]
  /src/sys/external/bsd/drm2/dist/drm/i915/gem/
i915_gem_region.c 68 bus_dma_segment_t *segs = NULL; local in function:i915_gem_object_get_pages_buddy
76 nsegs >= SIZE_MAX/sizeof(segs[0]))
80 segs = kmem_zalloc(nsegs * sizeof(segs[0]), KM_SLEEP);
88 segs[i].ds_addr = mem->region.start + offset;
89 segs[i].ds_len = block_size;
94 ret = sg_alloc_table_from_bus_dmamem(st, dmat, segs, nsegs,
110 ret = -bus_dmamap_load_raw(dmat, sg->sg_dmamap, segs, nsegs, size,
116 kmem_free(segs, nsegs * sizeof(segs[0]))
    [all...]
i915_gem_internal.c 54 KASSERT(obj->mm.u.internal.segs == NULL);
57 nsegs > SIZE_MAX/sizeof(obj->mm.u.internal.segs[0])) {
61 obj->mm.u.internal.segs = kmem_alloc(
62 nsegs * sizeof(obj->mm.u.internal.segs[0]),
64 if (obj->mm.u.internal.segs == NULL) {
72 obj->mm.u.internal.segs, nsegs, &obj->mm.u.internal.rsegs,
82 if (sg_alloc_table_from_bus_dmamem(sgt, dmat, obj->mm.u.internal.segs,
109 bus_dmamem_free(dmat, obj->mm.u.internal.segs,
114 kmem_free(obj->mm.u.internal.segs,
116 sizeof(obj->mm.u.internal.segs[0])))
    [all...]
  /src/sys/arch/arm/at91/
at91pdc.c 10 bus_dma_segment_t segs; local in function:at91pdc_alloc_fifo
27 err = bus_dmamem_alloc(dmat, size, 0, size, &segs, 1, &rsegs,
34 err = bus_dmamem_map(dmat, &segs, 1, size, &fifo->f_buf,
59 bus_dmamem_free(dmat, &segs, rsegs);
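
The at91pdc hit allocates and maps a single-segment FIFO; releasing it runs in the reverse order, as in this minimal sketch (function and parameter names are hypothetical).

#include <sys/param.h>
#include <sys/bus.h>

/* Tear down a mapping made with bus_dmamem_alloc() + bus_dmamem_map(). */
static void
fifo_dma_release(bus_dma_tag_t dmat, void *kva, bus_size_t size,
    bus_dma_segment_t *segs, int rsegs)
{
	bus_dmamem_unmap(dmat, kva, size);	/* drop the KVA mapping first */
	bus_dmamem_free(dmat, segs, rsegs);	/* then release the pages */
}
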
  /src/sys/arch/mvme68k/mvme68k/
bus_dma.c 364 bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
389 map->dm_segs[i] = segs[i];
543 bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags)
576 segs[curseg].ds_addr = segs[curseg]._ds_cpuaddr = lastaddr;
577 segs[curseg].ds_len = PAGE_SIZE;
578 segs[curseg]._ds_flags = 0;
602 segs[curseg].ds_len += PAGE_SIZE;
605 segs[curseg].ds_addr =
606 segs[curseg]._ds_cpuaddr = curaddr
    [all...]
  /src/sys/arch/virt68k/virt68k/
bus_dma.c 363 bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
388 map->dm_segs[i] = segs[i];
542 bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags)
575 segs[curseg].ds_addr = segs[curseg]._ds_cpuaddr = lastaddr;
576 segs[curseg].ds_len = PAGE_SIZE;
577 segs[curseg]._ds_flags = 0;
601 segs[curseg].ds_len += PAGE_SIZE;
604 segs[curseg].ds_addr =
605 segs[curseg]._ds_cpuaddr = curaddr
    [all...]
  /src/sys/arch/arc/arc/
bus_dma.c 376 _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
558 bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
563 segs, nsegs, rsegs, flags, pmap_limits.avail_start,
573 bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
600 lastaddr = segs[curseg]._ds_paddr = VM_PAGE_TO_PHYS(m);
601 segs[curseg].ds_addr = segs[curseg]._ds_paddr + t->dma_offset;
602 segs[curseg].ds_len = PAGE_SIZE;
615 segs[curseg].ds_len += PAGE_SIZE;
618 segs[curseg].ds_addr = curaddr + t->dma_offset
    [all...]
  /src/sys/arch/powerpc/powerpc/
bus_dma.c 409 bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
423 busaddr = segs[iseg].ds_addr;
424 isgsize = segs[iseg].ds_len;
487 busaddr = segs[iseg].ds_addr;
488 isgsize = segs[iseg].ds_len;
640 _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags)
654 return _bus_dmamem_alloc_range(t, size, alignment, boundary, segs,
663 _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
675 for (addr = BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr);
676 addr < (BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr
    [all...]
  /src/sys/arch/x86/x86/
db_memrw.c 219 if (bootspace.segs[i].type != BTSEG_TEXT &&
220 bootspace.segs[i].type != BTSEG_RODATA) {
223 if (addr >= bootspace.segs[i].va &&
224 addr < (bootspace.segs[i].va + bootspace.segs[i].sz)) {
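The x86 hits use segs differently: bootspace.segs[] describes the kernel's boot-time segments, and db_memrw.c walks it to decide whether an address lies in a read-only (text or rodata) segment. A sketch of that lookup follows, assuming only the fields and constants visible in these hits; the include path and the extern declaration site are assumptions.

#include <sys/types.h>
#include <x86/bootspace.h>	/* struct bootspace, BTSEG_*, BTSPACE_NSEGS (path assumed) */

extern struct bootspace bootspace;	/* kernel boot-layout descriptor (declaration site assumed) */

/* Does addr fall inside a text or rodata boot segment? */
static bool
addr_in_ro_bootseg(vaddr_t addr)
{
	size_t i;

	for (i = 0; i < BTSPACE_NSEGS; i++) {
		if (bootspace.segs[i].type != BTSEG_TEXT &&
		    bootspace.segs[i].type != BTSEG_RODATA)
			continue;
		if (addr >= bootspace.segs[i].va &&
		    addr < bootspace.segs[i].va + bootspace.segs[i].sz)
			return true;
	}
	return false;
}
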
  /src/sys/dev/ieee1394/
fwdma.c 66 bus_dma_segment_t segs; local in function:fwdma_malloc
71 err = bus_dmamem_alloc(dmat, size, alignment, 0, &segs, 1,
78 err = bus_dmamem_map(dmat, &segs, nsegs, size, &v_addr, flags);
81 bus_dmamem_free(dmat, &segs, nsegs);
92 bus_dmamem_free(dmat, &segs, nsegs);
102 bus_dmamem_free(dmat, &segs, nsegs);
112 bus_dma_segment_t *segs; local in function:fwdma_free
114 /* XXX we shouldn't pass around the segs in the dmamap */
118 segs = kmem_alloc(segssz, KM_SLEEP);
119 memcpy(segs, dmamap->dm_segs, segssz)
    [all...]
  /src/sys/arch/vax/vax/
bus_dma.c 326 _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
379 bus_size_t boundary, bus_dma_segment_t *segs,
385 segs, nsegs, rsegs, flags, round_page(avail_start),
395 _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
403 printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
411 for (addr = segs[curseg].ds_addr;
412 addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
426 _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs
    [all...]
  /src/sys/rump/dev/lib/libpci/
rumpdev_bus_dma.c 409 bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
446 bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
449 vaddr_t vacookie = segs[0]._ds_vacookie;
450 bus_size_t sizecookie = segs[0]._ds_sizecookie;
463 bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
477 dss[i].ds_pa = segs[i].ds_addr;
478 dss[i].ds_len = segs[i].ds_len;
479 dss[i].ds_vacookie = segs[i]._ds_vacookie;
499 bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
512 bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs
    [all...]
  /src/sys/arch/m68k/m68k/
bus_dma.c 373 bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
399 map->dm_segs[i] = segs[i];
643 bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
671 lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
672 segs[curseg].ds_len = PAGE_SIZE;
685 segs[curseg].ds_len += PAGE_SIZE;
688 segs[curseg].ds_addr = curaddr;
689 segs[curseg].ds_len = PAGE_SIZE;
704 _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
716 for (addr = segs[curseg].ds_addr
    [all...]
  /src/sys/arch/mips/mips/
bus_dma.c 664 bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
677 for (; error == 0 && nsegs-- > 0; segs++) {
682 segs->ds_addr);
685 segs->ds_addr);
688 if (segs->ds_addr >= MIPS_PHYS_MASK)
691 kva = (void *)MIPS_PHYS_TO_KSEG0(segs->ds_addr);
693 kva = (void *)MIPS_PHYS_TO_KSEG1(segs->ds_addr);
696 mapsize += segs->ds_len;
697 error = _bus_dmamap_load_buffer(t, map, kva, segs->ds_len,
700 lastvaddr = (vaddr_t)kva + segs->ds_len
    [all...]
  /src/sys/arch/arc/jazz/
bus_dma_jazz.c 115 jazz_bus_dmamap_alloc_sgmap(bus_dma_tag_t t, bus_dma_segment_t *segs,
124 off = jazz_dma_page_offs(segs[i]._ds_paddr);
125 npte = jazz_dma_page_round(segs[i].ds_len + off) /
130 segs[i].ds_addr = addr + off;
132 jazz_dmatlb_map_pa(segs[i]._ds_paddr, segs[i].ds_len, dmapte);
138 jazz_bus_dmamap_free_sgmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
144 addr = (segs[i].ds_addr - t->dma_offset) & JAZZ_DMA_PAGE_NUM;
145 npte = jazz_dma_page_round(segs[i].ds_len +
146 jazz_dma_page_offs(segs[i].ds_addr)) / JAZZ_DMA_PAGE_SIZE
    [all...]
  /src/sys/arch/hpcmips/include/
bus_dma_hpcmips.h 66 int _dm_segcnt; /* number of segs this map can map */
78 bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
  /src/sys/arch/x86/include/
bootspace.h 87 } segs[BTSPACE_NSEGS]; member in struct:bootspace
  /src/sys/external/bsd/drm/dist/bsd-core/
drm_pci.c 39 drm_pci_busdma_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
47 dmah->busaddr = segs[0].ds_addr;
117 dmah->segs, 1, &nsegs, BUS_DMA_WAITOK)) != 0) {
127 bus_dmamem_free(dmah->tag, dmah->segs, 1);
132 if ((ret = bus_dmamem_map(dmah->tag, dmah->segs, nsegs, size,
135 bus_dmamem_free(dmah->tag, dmah->segs, 1);
144 bus_dmamem_free(dmah->tag, dmah->segs, 1);
154 bus_dmamem_free(dmah->tag, dmah->segs, 1);
184 bus_dmamem_free(dmah->tag, dmah->segs, 1);
  /src/sys/arch/xen/x86/
xen_bus_dma.c 214 bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
259 curaddr = lastaddr = segs[curseg].ds_addr = _BUS_VM_PAGE_TO_BUS(m);
262 segs[curseg].ds_len = PAGE_SIZE;
264 if ((segs[curseg].ds_addr & (alignment - 1)) != 0)
273 segs[curseg].ds_len += PAGE_SIZE;
283 segs[curseg].ds_addr = curaddr;
284 segs[curseg].ds_len = PAGE_SIZE;
320 segs[curseg].ds_addr = 0;
321 segs[curseg].ds_len = 0;
  /src/sys/arch/amd64/stand/prekern/
mm.c 141 if (bootspace.segs[i].type == BTSEG_TEXT) {
143 } else if (bootspace.segs[i].type == BTSEG_RODATA) {
148 mm_mprotect(bootspace.segs[i].va, bootspace.segs[i].sz, prot);
221 if (bootspace.segs[i].type == BTSEG_NONE) {
224 sva = bootspace.segs[i].va;
225 eva = sva + bootspace.segs[i].sz;
257 if (bootspace.segs[i].type == BTSEG_NONE) {
260 pa = bootspace.segs[i].pa + bootspace.segs[i].sz
    [all...]

