/*	$NetBSD: vmalloc.h,v 1.8 2021/12/19 00:59:01 riastradh Exp $	*/

/*-
 * Copyright (c) 2013, 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_VMALLOC_H_
#define _LINUX_VMALLOC_H_

#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <linux/mm_types.h>
#include <linux/overflow.h>

#include <asm/page.h>

/*
 * XXX vmalloc and kmalloc both use malloc(9).  If you change this, be
 * sure to update kmalloc in <linux/slab.h> and kvfree in <linux/mm.h>.
 */

/*
 * is_vmalloc_addr(addr)
 *
 *	True if addr came from vmalloc.  Since kmalloc and vmalloc are
 *	both backed by malloc(9) here, every allocation qualifies, so
 *	this always answers true.
 */
static inline bool
is_vmalloc_addr(void *addr)
{
	return true;
}

/*
 * vmalloc(size)
 *
 *	Allocate size bytes of kernel memory, sleeping if necessary.
 */
static inline void *
vmalloc(unsigned long size)
{
	return malloc(size, M_TEMP, M_WAITOK);
}

/*
 * vmalloc_user(size)
 *
 *	Like vmalloc, but zeroed, so the memory is safe to expose to
 *	userland.
 */
static inline void *
vmalloc_user(unsigned long size)
{
	return malloc(size, M_TEMP, (M_WAITOK | M_ZERO));
}

/*
 * vzalloc(size)
 *
 *	Allocate size bytes of zeroed kernel memory, sleeping if
 *	necessary.
 */
static inline void *
vzalloc(unsigned long size)
{
	return malloc(size, M_TEMP, (M_WAITOK | M_ZERO));
}

/*
 * vfree(ptr)
 *
 *	Free memory allocated with vmalloc, vmalloc_user, or vzalloc.
 *	If ptr is NULL, do nothing.
 */
static inline void
vfree(void *ptr)
{
	if (ptr == NULL)
		return;
	free(ptr, M_TEMP);
}
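
/*
 * Example usage (illustrative sketch only; `n' and `struct foo' are
 * hypothetical caller-side names, not part of this header):
 *
 *	struct foo *tbl;
 *
 *	tbl = vzalloc(n * sizeof(*tbl));
 *	if (tbl == NULL)
 *		return -ENOMEM;
 *	...use tbl[0] through tbl[n - 1], already zero-initialized...
 *	vfree(tbl);
 */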

#define	PAGE_KERNEL	UVM_PROT_RW

/*
 * vmap(pages, npages, flags, prot)
 *
 *	Map pages[0], pages[1], ..., pages[npages-1] into contiguous
 *	kernel virtual address space with the specified protection, and
 *	return a KVA pointer to the start.
 *
 *	prot may be a bitwise ior of UVM_PROT_READ/WRITE/EXEC and
 *	PMAP_* cache flags accepted by pmap_enter().
 */
static inline void *
vmap(struct page **pages, unsigned npages, unsigned long flags,
    pgprot_t protflags)
{
	vm_prot_t justprot = protflags & UVM_PROT_ALL;
	vaddr_t va;
	unsigned i;

	/* The Linux VM_* flags have no analogue here and are ignored.  */

	/* Allocate some KVA, or return NULL if we can't.  */
	va = uvm_km_alloc(kernel_map, (vsize_t)npages << PAGE_SHIFT, PAGE_SIZE,
	    UVM_KMF_VAONLY|UVM_KMF_NOWAIT);
	if (va == 0)
		return NULL;

	/* Ask pmap to map the KVA to the specified page addresses.  */
	for (i = 0; i < npages; i++) {
		pmap_kenter_pa(va + i*PAGE_SIZE, page_to_phys(pages[i]),
		    justprot, protflags);
	}

	/* Commit the pmap updates.  */
	pmap_update(pmap_kernel());

	return (void *)va;
}

/*
 * vunmap(ptr, npages)
 *
 *	Unmap the KVA pages starting at ptr that were mapped by a call
 *	to vmap with the same npages parameter.
 */
static inline void
vunmap(void *ptr, unsigned npages)
{
	vaddr_t va = (vaddr_t)ptr;

	/* Ask pmap to unmap the KVA.  */
	pmap_kremove(va, (vsize_t)npages << PAGE_SHIFT);

	/* Commit the pmap updates.  */
	pmap_update(pmap_kernel());

	/*
	 * Now that the pmap is no longer mapping the KVA we allocated
	 * on any CPU, it is safe to free the KVA.
	 */
	uvm_km_free(kernel_map, va, (vsize_t)npages << PAGE_SHIFT,
	    UVM_KMF_VAONLY);
}
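
/*
 * Example of pairing vmap with vunmap (illustrative sketch only;
 * `pages' and `npages' are hypothetical caller-side names, and
 * PMAP_NOCACHE is a port-specific pmap_enter(9) cache flag that may
 * not exist on every port):
 *
 *	void *kva;
 *
 *	kva = vmap(pages, npages, 0, PAGE_KERNEL | PMAP_NOCACHE);
 *	if (kva == NULL)
 *		return -ENOMEM;
 *	...access npages*PAGE_SIZE bytes starting at kva...
 *	vunmap(kva, npages);	(same npages as the vmap call)
 */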

#endif  /* _LINUX_VMALLOC_H_ */