/* mm.h revision 1.23 (source-browser navigation chrome removed) */
      1 /*	$NetBSD: mm.h,v 1.23 2021/12/19 12:07:55 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2013 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Taylor R. Campbell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #ifndef _LINUX_MM_H_
     33 #define _LINUX_MM_H_
     34 
     35 #include <uvm/uvm_extern.h>
     36 #include <uvm/uvm_object.h>
     37 
     38 #include <asm/page.h>
     39 #include <linux/shrinker.h>
     40 #include <linux/slab.h>
     41 #include <linux/sizes.h>
     42 
     43 struct file;
     44 
     45 /* XXX Ugh bletch!  Whattakludge!  Linux's sense is reversed...  */
     46 #undef	PAGE_MASK
     47 #define	PAGE_MASK	(~(PAGE_SIZE-1))
     48 
     49 #define	PAGE_ALIGN(x)		(((x) + (PAGE_SIZE-1)) & ~(PAGE_SIZE-1))
     50 #define	offset_in_page(x)	((uintptr_t)(x) & (PAGE_SIZE-1))
     51 
     52 #define	untagged_addr(x)	(x)
     53 
/*
 * Linux-style memory statistics, filled in by si_meminfo below.
 * Only the fields the drm code actually consumes are provided.
 */
struct sysinfo {
	unsigned long totalram;		/* total RAM, in mem_unit units (uvmexp.npages) */
	unsigned long totalhigh;	/* "high" memory; here kernel_map size in pages */
	uint32_t mem_unit;		/* size in bytes of one unit (PAGE_SIZE) */
};
     59 
     60 static inline void
     61 si_meminfo(struct sysinfo *si)
     62 {
     63 
     64 	si->totalram = uvmexp.npages;
     65 	si->totalhigh = kernel_map->size >> PAGE_SHIFT;
     66 	si->mem_unit = PAGE_SIZE;
     67 	/* XXX Fill in more as needed.  */
     68 }
     69 
     70 static inline size_t
     71 si_mem_available(void)
     72 {
     73 
     74 	/* XXX ? */
     75 	return uvmexp.free;
     76 }
     77 
     78 static inline unsigned long
     79 vm_mmap(struct file *file __unused, unsigned long base __unused,
     80     unsigned long size __unused, unsigned long prot __unused,
     81     unsigned long flags __unused, unsigned long token __unused)
     82 {
     83 
     84 	return -ENODEV;
     85 }
     86 
     87 static inline unsigned long
     88 totalram_pages(void)
     89 {
     90 
     91 	return uvmexp.npages;
     92 }
     93 
     94 static inline unsigned long
     95 get_num_physpages(void)
     96 {
     97 
     98 	return uvmexp.npages;
     99 }
    100 
    101 static inline void *
    102 kvmalloc(size_t size, gfp_t gfp)
    103 {
    104 
    105 	return kmalloc(size, gfp);
    106 }
    107 
    108 static inline void *
    109 kvzalloc(size_t size, gfp_t gfp)
    110 {
    111 
    112 	return kmalloc(size, gfp | __GFP_ZERO);
    113 }
    114 
    115 static inline void *
    116 kvcalloc(size_t nelem, size_t elemsize, gfp_t gfp)
    117 {
    118 
    119 	KASSERT(elemsize > 0);
    120 	if (SIZE_MAX/elemsize < nelem)
    121 		return NULL;
    122 	return kvzalloc(nelem * elemsize, gfp);
    123 }
    124 
    125 static inline void *
    126 kvmalloc_array(size_t nelem, size_t elemsize, gfp_t gfp)
    127 {
    128 
    129 	KASSERT(elemsize != 0);
    130 	if (nelem > SIZE_MAX/elemsize)
    131 		return NULL;
    132 	return kmalloc(nelem * elemsize, gfp);
    133 }
    134 
    135 /*
    136  * XXX kvfree must additionally work on kmalloc (linux/slab.h) and
    137  * vmalloc (linux/vmalloc.h).  If you change either of those, be sure
    138  * to change this too.
    139  */
    140 
/*
 * kvfree: release memory from kvmalloc/kvzalloc/kvcalloc.  All of
 * those are backed by kmalloc in this port, so kfree handles it.
 */
static inline void
kvfree(void *ptr)
{

	kfree(ptr);
}
    146 
    147 static inline void
    148 set_page_dirty(struct page *page)
    149 {
    150 	struct vm_page *pg = &page->p_vmp;
    151 
    152 	/* XXX */
    153 	if (pg->uobject != NULL) {
    154 		rw_enter(pg->uobject->vmobjlock, RW_WRITER);
    155 		uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
    156 		rw_exit(pg->uobject->vmobjlock);
    157 	} else {
    158 		uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
    159 	}
    160 }
    161 
    162 #endif  /* _LINUX_MM_H_ */
    163