/*	$NetBSD: pmap_pvt.c,v 1.9 2020/03/16 19:56:39 ad Exp $	*/

/*-
 * Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pmap_pvt.c,v 1.9 2020/03/16 19:56:39 ad Exp $");

#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/pserialize.h>

#include <uvm/uvm.h>
#include <uvm/pmap/pmap_pvt.h>

/*
 * unmanaged pv-tracked ranges
 *
 * This is a linear list for now because the only users are the DRM
 * graphics drivers, each with a single tracked range per device for
 * the graphics aperture, so there are expected to be few of them.
 *
 * This is used only after the VM system is initialized well enough
 * that we can use kmem_alloc.
 */
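
/*
 * Example usage, as a sketch only (the names "aperture_pa" and
 * "aperture_size" are hypothetical, not taken from any real driver):
 *
 *	At attach time, once the VM system is up:
 *		pmap_pv_track(aperture_pa, aperture_size);
 *
 *	To look up the tracking state of one page:
 *		struct pmap_page *pp = pmap_pv_tracked(trunc_page(pa));
 *
 *	At detach time, after all mappings have been removed:
 *		pmap_pv_untrack(aperture_pa, aperture_size);
 */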

struct pv_track {
	paddr_t			pvt_start;	/* start of tracked range */
	psize_t			pvt_size;	/* size of range in bytes */
	struct pv_track		*pvt_next;	/* next range in the list */
	struct pmap_page	pvt_pages[];	/* one entry per page */
};

static struct {
	kmutex_t	lock;	/* serializes track/untrack */
	pserialize_t	psz;	/* defers frees until readers drain */
	struct pv_track	*list;	/* head of the tracked-range list */
} pv_unmanaged __cacheline_aligned;

void
pmap_pv_init(void)
{

	mutex_init(&pv_unmanaged.lock, MUTEX_DEFAULT, IPL_NONE);
	pv_unmanaged.psz = pserialize_create();
	pv_unmanaged.list = NULL;
}

void
pmap_pv_track(paddr_t start, psize_t size)
{
	struct pv_track *pvt;
	size_t npages;
#ifdef PMAP_PAGE_INIT
	size_t i;
#endif

	KASSERT(start == trunc_page(start));
	KASSERT(size == trunc_page(size));

	/* We may sleep for allocation.  */
	ASSERT_SLEEPABLE();

	npages = size >> PAGE_SHIFT;
	pvt = kmem_zalloc(offsetof(struct pv_track, pvt_pages[npages]),
	    KM_SLEEP);
	pvt->pvt_start = start;
	pvt->pvt_size = size;

#ifdef PMAP_PAGE_INIT
	for (i = 0; i < npages; i++)
		PMAP_PAGE_INIT(&pvt->pvt_pages[i]);
#endif

	/*
	 * Publish the fully initialized record at the head of the
	 * list.  The release store pairs with the consume load in
	 * pmap_pv_tracked, so readers see either the old head or the
	 * new record with all of its fields visible.
	 */
	mutex_enter(&pv_unmanaged.lock);
	pvt->pvt_next = pv_unmanaged.list;
	atomic_store_release(&pv_unmanaged.list, pvt);
	mutex_exit(&pv_unmanaged.lock);
}

void
pmap_pv_untrack(paddr_t start, psize_t size)
{
	struct pv_track **pvtp, *pvt;
	size_t npages;

	KASSERT(start == trunc_page(start));
	KASSERT(size == trunc_page(size));

	/* We may sleep for pserialize_perform.  */
	ASSERT_SLEEPABLE();

	mutex_enter(&pv_unmanaged.lock);
	for (pvtp = &pv_unmanaged.list;
	     (pvt = *pvtp) != NULL;
	     pvtp = &pvt->pvt_next) {
		if (pvt->pvt_start != start)
			continue;
		if (pvt->pvt_size != size)
			panic("pmap_pv_untrack: pv-tracking at 0x%"PRIxPADDR
			    ": 0x%"PRIxPSIZE" bytes, not 0x%"PRIxPSIZE" bytes",
			    pvt->pvt_start, pvt->pvt_size, size);

		/*
		 * Remove from list.  Readers can safely see the old
		 * and new states of the list.
		 */
		atomic_store_relaxed(pvtp, pvt->pvt_next);

		/* Wait for readers who can see the old state to finish.  */
		pserialize_perform(pv_unmanaged.psz);

		/*
		 * We now have exclusive access to pvt and can destroy
		 * it.  Poison it to catch bugs.
		 */
		explicit_memset(&pvt->pvt_next, 0x1a, sizeof pvt->pvt_next);
		goto out;
	}
	panic("pmap_pv_untrack: pages not pv-tracked at 0x%"PRIxPADDR
	    " (0x%"PRIxPSIZE" bytes)",
	    start, size);
out:	mutex_exit(&pv_unmanaged.lock);

	npages = size >> PAGE_SHIFT;
	kmem_free(pvt, offsetof(struct pv_track, pvt_pages[npages]));
}
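
/*
 * The unlink/wait/free sequence in pmap_pv_untrack above is the
 * standard pserialize(9) reclamation protocol.  Condensed, with a
 * hypothetical element "e" on a hypothetical list "head":
 *
 *	atomic_store_relaxed(&head, e->next);	unpublish the element
 *	pserialize_perform(psz);		wait out current readers
 *	kmem_free(e, sizeof(*e));		now exclusive; safe to free
 */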

struct pmap_page *
pmap_pv_tracked(paddr_t pa)
{
	struct pv_track *pvt;
	size_t pgno;
	int s;

	KASSERT(pa == trunc_page(pa));

	s = pserialize_read_enter();
	for (pvt = atomic_load_consume(&pv_unmanaged.list);
	     pvt != NULL;
	     pvt = pvt->pvt_next) {
		if ((pvt->pvt_start <= pa) &&
		    ((pa - pvt->pvt_start) < pvt->pvt_size))
			break;
	}
	pserialize_read_exit(s);

	if (pvt == NULL)
		return NULL;
	KASSERT(pvt->pvt_start <= pa);
	KASSERT((pa - pvt->pvt_start) < pvt->pvt_size);
	pgno = (pa - pvt->pvt_start) >> PAGE_SHIFT;
	return &pvt->pvt_pages[pgno];
}
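
/*
 * Worked example of the lookup arithmetic, assuming 4 KiB pages
 * (PAGE_SHIFT == 12; the actual value is machine-dependent): after
 * pmap_pv_track(0x80000000, 0x4000), a call to
 * pmap_pv_tracked(0x80002000) finds the range, computes
 * pgno = (0x80002000 - 0x80000000) >> 12 = 2, and returns
 * &pvt->pvt_pages[2].
 */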