/*	$NetBSD: linux_io_mapping.c,v 1.1 2021/12/19 12:28:04 riastradh Exp $	*/

/*-
 * Copyright (c) 2013-2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_io_mapping.c,v 1.1 2021/12/19 12:28:04 riastradh Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/mman.h>

#include <uvm/uvm_extern.h>

#include <linux/io-mapping.h>

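/*
 * bus_space_io_mapping_init_wc(bst, mapping, addr, size)
 *
 *	Initialize mapping to cover the page-aligned region of bus
 *	space [addr, addr + size) on the tag bst for write-combining
 *	(prefetchable) access.  Verify that every page of the region
 *	can be mapped, record the parameters, and preallocate one page
 *	of KVA for later atomic mappings.  Return true on success,
 *	false if any page of the region is not mappable.
 */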
bool
bus_space_io_mapping_init_wc(bus_space_tag_t bst, struct io_mapping *mapping,
    bus_addr_t addr, bus_size_t size)
{
	bus_size_t offset;

	KASSERT(PAGE_SIZE <= size);
	KASSERT(0 == (size & (PAGE_SIZE - 1)));
	KASSERT(__type_fit(off_t, size));

	/*
	 * XXX For x86: Reserve the region (bus_space_reserve) and set
	 * an MTRR to make it write-combining.  Doesn't matter if we
	 * have PAT and we use pmap_kenter_pa, but matters if we don't
	 * have PAT or if we later make this use direct map.
	 */

	/* Make sure the region is mappable.  */
	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		if (bus_space_mmap(bst, addr, offset, PROT_READ|PROT_WRITE,
			BUS_SPACE_MAP_LINEAR|BUS_SPACE_MAP_PREFETCHABLE)
		    == (paddr_t)-1)
			return false;
	}

	/* Initialize the mapping record.  */
	mapping->diom_bst = bst;
	mapping->base = addr;
	mapping->size = size;
	mapping->diom_atomic = false;

	/* Allocate kva for one page.  */
	mapping->diom_va = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	KASSERT(mapping->diom_va != 0);

	return true;
}

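/*
 * io_mapping_fini(mapping)
 *
 *	Release the page of KVA reserved by
 *	bus_space_io_mapping_init_wc.  The caller must not have an
 *	atomic mapping outstanding.
 */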
void
io_mapping_fini(struct io_mapping *mapping)
{

	KASSERT(!mapping->diom_atomic);

	uvm_km_free(kernel_map, mapping->diom_va, PAGE_SIZE, UVM_KMF_VAONLY);
	mapping->diom_va = 0;	/* paranoia */
}

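/*
 * bus_space_io_mapping_create_wc(bst, addr, size)
 *
 *	Allocate an io_mapping and initialize it as in
 *	bus_space_io_mapping_init_wc.  Return NULL on failure.  The
 *	caller must free the result with io_mapping_free.
 */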
struct io_mapping *
bus_space_io_mapping_create_wc(bus_space_tag_t bst, bus_addr_t addr,
    bus_size_t size)
{
	struct io_mapping *mapping;

	mapping = kmem_alloc(sizeof(*mapping), KM_SLEEP);
	if (!bus_space_io_mapping_init_wc(bst, mapping, addr, size)) {
		kmem_free(mapping, sizeof(*mapping));
		return NULL;
	}

	return mapping;
}

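/*
 * io_mapping_free(mapping)
 *
 *	Finalize and free an io_mapping allocated with
 *	bus_space_io_mapping_create_wc.
 */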
void
io_mapping_free(struct io_mapping *mapping)
{

	io_mapping_fini(mapping);
	kmem_free(mapping, sizeof(*mapping));
}

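/*
 * io_mapping_map_wc(mapping, offset, size)
 *
 *	Map size bytes of the region starting at the page-aligned
 *	offset into freshly allocated KVA, one page at a time, with
 *	write-combining (prefetchable) mappings.  May sleep waiting
 *	for KVA.  The result must be unmapped with io_mapping_unmap.
 */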
void *
io_mapping_map_wc(struct io_mapping *mapping, bus_addr_t offset,
    bus_size_t size)
{
	bus_size_t pg, npgs = size >> PAGE_SHIFT;
	vaddr_t va;
	paddr_t cookie;

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));
	KASSERT(PAGE_SIZE <= mapping->size);
	KASSERT(offset <= (mapping->size - PAGE_SIZE));
	KASSERT(__type_fit(off_t, offset));

	va = uvm_km_alloc(kernel_map, size, PAGE_SIZE,
	    UVM_KMF_VAONLY|UVM_KMF_WAITVA);
	KASSERT(va != mapping->diom_va);
	for (pg = 0; pg < npgs; pg++) {
		cookie = bus_space_mmap(mapping->diom_bst, mapping->base,
		    offset + pg*PAGE_SIZE,
		    PROT_READ|PROT_WRITE,
		    BUS_SPACE_MAP_LINEAR|BUS_SPACE_MAP_PREFETCHABLE);
		KASSERT(cookie != (paddr_t)-1);

		pmap_kenter_pa(va + pg*PAGE_SIZE, pmap_phys_address(cookie),
		    PROT_READ|PROT_WRITE, pmap_mmap_flags(cookie));
	}
	pmap_update(pmap_kernel());

	return (void *)va;
}

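/*
 * io_mapping_unmap(mapping, ptr, size)
 *
 *	Undo a mapping of size bytes at ptr created with
 *	io_mapping_map_wc, and release the KVA.
 */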
void
io_mapping_unmap(struct io_mapping *mapping, void *ptr, bus_size_t size)
{
	vaddr_t va = (vaddr_t)ptr;

	KASSERT(mapping->diom_va != va);

	pmap_kremove(va, size);
	pmap_update(pmap_kernel());

	uvm_km_free(kernel_map, va, size, UVM_KMF_VAONLY);
}

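/*
 * io_mapping_map_atomic_wc(mapping, offset)
 *
 *	Map the single page at the page-aligned offset into the KVA
 *	page preallocated at initialization time, without allocating
 *	anything.  Only one atomic mapping may be outstanding at a
 *	time; undo it with io_mapping_unmap_atomic.
 */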
void *
io_mapping_map_atomic_wc(struct io_mapping *mapping, bus_addr_t offset)
{
	paddr_t cookie;

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));
	KASSERT(PAGE_SIZE <= mapping->size);
	KASSERT(offset <= (mapping->size - PAGE_SIZE));
	KASSERT(__type_fit(off_t, offset));
	KASSERT(!mapping->diom_atomic);

	cookie = bus_space_mmap(mapping->diom_bst, mapping->base, offset,
	    PROT_READ|PROT_WRITE,
	    BUS_SPACE_MAP_LINEAR|BUS_SPACE_MAP_PREFETCHABLE);
	KASSERT(cookie != (paddr_t)-1);

	pmap_kenter_pa(mapping->diom_va, pmap_phys_address(cookie),
	    PROT_READ|PROT_WRITE, pmap_mmap_flags(cookie));
	pmap_update(pmap_kernel());

	mapping->diom_atomic = true;
	return (void *)mapping->diom_va;
}

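/*
 * io_mapping_unmap_atomic(mapping, ptr)
 *
 *	Undo the atomic mapping at ptr created with
 *	io_mapping_map_atomic_wc.
 */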
void
io_mapping_unmap_atomic(struct io_mapping *mapping, void *ptr __diagused)
{

	KASSERT(mapping->diom_atomic);
	KASSERT(mapping->diom_va == (vaddr_t)ptr);

	pmap_kremove(mapping->diom_va, PAGE_SIZE);
	pmap_update(pmap_kernel());

	mapping->diom_atomic = false;
}
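
/*
 * Example usage (illustrative sketch only, compiled out): a
 * hypothetical caller that has already obtained a bus space tag
 * `memt' and a page-aligned, page-sized-multiple aperture
 * `memaddr'/`memsize' (e.g. from pci_mapreg_info(9)) could use the
 * write-combining mapping like this.
 */
#if 0
static void
example_io_mapping_usage(bus_space_tag_t memt, bus_addr_t memaddr,
    bus_size_t memsize)
{
	struct io_mapping *im;
	uint32_t *p;

	/* Cover the whole aperture with one io_mapping.  */
	im = bus_space_io_mapping_create_wc(memt, memaddr, memsize);
	if (im == NULL)
		return;		/* some page of the region was unmappable */

	/* Briefly map the first page and write through it.  */
	p = io_mapping_map_atomic_wc(im, 0);
	p[0] = 0xdeadbeef;
	io_mapping_unmap_atomic(im, p);

	/* Release the KVA and the io_mapping record.  */
	io_mapping_free(im);
}
#endif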