/*	$NetBSD: vmalloc.h,v 1.12 2022/02/26 15:57:22 rillig Exp $	*/

/*-
 * Copyright (c) 2013, 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_VMALLOC_H_
#define _LINUX_VMALLOC_H_

#include <uvm/uvm_extern.h>

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/overflow.h>
#include <linux/slab.h>

#include <asm/page.h>

struct notifier_block;

/*
 * XXX vmalloc and kvmalloc both use kmalloc.  If you change that, be
 * sure to update this so kvfree in <linux/mm.h> still works on vmalloc
 * addresses.
 */

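/*
 * is_vmalloc_addr(addr)
 *
 *	True if addr came from vmalloc.  Trivially true in this port:
 *	vmalloc is backed by kmalloc (see XXX above), so it is always
 *	safe for kvfree in <linux/mm.h> to treat a pointer as a
 *	vmalloc address.
 */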
static inline bool
is_vmalloc_addr(void *addr)
{
	return true;
}

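/*
 * vmalloc(size)
 *
 *	Allocate size bytes of kernel memory, or return NULL on
 *	failure.  Backed by kmalloc in this port.
 */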
static inline void *
vmalloc(unsigned long size)
{
	return kmalloc(size, GFP_KERNEL);
}

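/*
 * vmalloc_user(size)
 *
 *	Like vmalloc, but zero the memory.  Linux uses this for
 *	allocations that may be exposed to userspace, which must not
 *	see stale kernel data.
 */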
static inline void *
vmalloc_user(unsigned long size)
{
	return kzalloc(size, GFP_KERNEL);
}

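/*
 * vzalloc(size)
 *
 *	Like vmalloc, but zero the memory.
 */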
static inline void *
vzalloc(unsigned long size)
{
	return kzalloc(size, GFP_KERNEL);
}

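/*
 * vfree(ptr)
 *
 *	Free memory allocated with vmalloc, vmalloc_user, or vzalloc.
 */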
static inline void
vfree(void *ptr)
{
	kfree(ptr);
}
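
/*
 * Example (sketch): a typical allocate/use/free pairing.  The struct
 * name foo here is hypothetical.
 *
 *	struct foo *f = vzalloc(sizeof(*f));
 *	if (f == NULL)
 *		return -ENOMEM;
 *	...use f...
 *	vfree(f);
 */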

#define	PAGE_KERNEL	UVM_PROT_RW

/*
 * vmap(pages, npages, flags, prot)
 *
 *	Map pages[0], pages[1], ..., pages[npages-1] into contiguous
 *	kernel virtual address space with the specified protection, and
 *	return a KVA pointer to the start.
 *
 *	prot may be a bitwise ior of UVM_PROT_READ/WRITE/EXEC and
 *	PMAP_* cache flags accepted by pmap_enter().
 */
static inline void *
vmap(struct page **pages, unsigned npages, unsigned long flags,
    pgprot_t protflags)
{
	vm_prot_t justprot = protflags & UVM_PROT_ALL;
	vaddr_t va;
	unsigned i;

	/* Allocate some KVA, or return NULL if we can't.  */
	va = uvm_km_alloc(kernel_map, (vsize_t)npages << PAGE_SHIFT, PAGE_SIZE,
	    UVM_KMF_VAONLY|UVM_KMF_NOWAIT);
	if (va == 0)
		return NULL;

	/* Ask pmap to map the KVA to the specified page addresses.  */
	for (i = 0; i < npages; i++) {
		pmap_kenter_pa(va + i*PAGE_SIZE, page_to_phys(pages[i]),
		    justprot, protflags);
	}

	/* Commit the pmap updates.  */
	pmap_update(pmap_kernel());

	return (void *)va;
}

/*
 * vunmap(ptr, npages)
 *
 *	Unmap the KVA pages starting at ptr that were mapped by a call
 *	to vmap with the same npages parameter.
 */
static inline void
vunmap(void *ptr, unsigned npages)
{
	vaddr_t va = (vaddr_t)ptr;

	/* Ask pmap to unmap the KVA.  */
	pmap_kremove(va, (vsize_t)npages << PAGE_SHIFT);

	/* Commit the pmap updates.  */
	pmap_update(pmap_kernel());

	/*
	 * Now that the pmap is no longer mapping the KVA we allocated
	 * on any CPU, it is safe to free the KVA.
	 */
	uvm_km_free(kernel_map, va, (vsize_t)npages << PAGE_SHIFT,
	    UVM_KMF_VAONLY);
}
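
/*
 * Example (sketch): map npages pages read/write, zero them through
 * the new mapping, then tear the mapping down.  The pages array is
 * assumed to be filled in by the caller.
 *
 *	void *kva = vmap(pages, npages, 0, PAGE_KERNEL);
 *	if (kva == NULL)
 *		...fail...
 *	memset(kva, 0, (size_t)npages << PAGE_SHIFT);
 *	vunmap(kva, npages);
 */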
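/*
 * register_vmap_purge_notifier(nb)
 * unregister_vmap_purge_notifier(nb)
 *
 *	No-op stubs.  vunmap above unmaps and frees KVA immediately,
 *	so there is no lazy purging of vmap areas and hence no purge
 *	event to deliver.
 */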
static inline int
register_vmap_purge_notifier(struct notifier_block *nb __unused)
{
	return 0;
}

static inline int
unregister_vmap_purge_notifier(struct notifier_block *nb __unused)
{
	return 0;
}

#endif  /* _LINUX_VMALLOC_H_ */