/*	$NetBSD: drm_cache.c,v 1.15 2021/12/19 00:49:36 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_cache.c,v 1.15 2021/12/19 00:49:36 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/xcall.h>

#include <uvm/uvm_extern.h>

#include <linux/mm_types.h>

#include <drm/drmP.h>
#include <drm/drm_cache.h>

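/*
 * Machine-dependent CPU cache flushing for DRM.  These routines write
 * dirty CPU cache lines back to memory (and invalidate them) so that
 * a device doing non-coherent DMA observes what the CPU wrote.  A
 * typical caller -- this is an illustrative sketch, not taken from
 * any particular driver -- fills a buffer with CPU stores and flushes
 * it before handing it to the GPU:
 *
 *	memcpy(vaddr, data, nbytes);
 *	drm_clflush_virt_range(vaddr, nbytes);
 */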
#if !defined(__arm__) && !defined(__aarch64__)
#define DRM_CLFLUSH	1
#endif

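/*
 * When DRM_CLFLUSH is not defined (arm and aarch64, which have no MD
 * implementation here), the public entry points below compile to
 * no-ops.  Otherwise they are backed by the MD helpers declared next:
 * begin/commit bracket a batch of per-page or per-range flushes,
 * finegrained_p reports whether per-line flushing is available, and
 * clflush_all is the whole-cache fallback.
 */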
#if defined(DRM_CLFLUSH)
static bool		drm_md_clflush_finegrained_p(void);
static void		drm_md_clflush_all(void);
static void		drm_md_clflush_begin(void);
static void		drm_md_clflush_commit(void);
static void		drm_md_clflush_page(struct page *);
static void		drm_md_clflush_virt_range(const void *, size_t);
#endif

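/*
 * drm_clflush_pages(pages, npages)
 *
 *	Flush the npages pages in the array from the CPU caches, page
 *	by page if the MD code can flush at that granularity, and by
 *	flushing the whole cache otherwise.
 */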
void
drm_clflush_pages(struct page **pages, unsigned long npages)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		drm_md_clflush_begin();
		while (npages--)
			drm_md_clflush_page(pages[npages]);
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}

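/*
 * drm_clflush_pglist(list)
 *
 *	Flush every page on the uvm page list from the CPU caches.
 */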
void
drm_clflush_pglist(struct pglist *list)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		struct vm_page *page;

		drm_md_clflush_begin();
		TAILQ_FOREACH(page, list, pageq.queue)
			drm_md_clflush_page(container_of(page, struct page,
				p_vmp));
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}

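/*
 * drm_clflush_page(page)
 *
 *	Flush a single page from the CPU caches.
 */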
void
drm_clflush_page(struct page *page)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		drm_md_clflush_begin();
		drm_md_clflush_page(page);
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}

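/*
 * drm_clflush_virt_range(vaddr, nbytes)
 *
 *	Flush the nbytes of memory starting at the kernel virtual
 *	address vaddr from the CPU caches.
 */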
void
drm_clflush_virt_range(const void *vaddr, size_t nbytes)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		drm_md_clflush_begin();
		drm_md_clflush_virt_range(vaddr, nbytes);
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}

#if defined(__i386__) || defined(__x86_64__)

#include <machine/cpufunc.h>

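/*
 * True if the CPU advertises CLFLUSH in its CPUID feature bits
 * (CPUID leaf 1, %edx bit CLFSH), so individual cache lines can be
 * flushed.
 */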
static bool
drm_md_clflush_finegrained_p(void)
{
	return ISSET(cpu_info_primary.ci_feat_val[0], CPUID_CLFSH);
}

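/*
 * Cross-call handler: write back and invalidate the entire cache of
 * whichever CPU this runs on, with WBINVD.
 */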
static void
drm_x86_clflush_xc(void *arg0 __unused, void *arg1 __unused)
{
	wbinvd();
}

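/*
 * Broadcast the WBINVD cross-call to every CPU and wait until they
 * have all completed it.
 */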
static void
drm_md_clflush_all(void)
{
	xc_wait(xc_broadcast(0, &drm_x86_clflush_xc, NULL, NULL));
}

static void
drm_md_clflush_begin(void)
{
	/* Support for CLFLUSH implies support for MFENCE.  */
	x86_mfence();
}

static void
drm_md_clflush_commit(void)
{
	x86_mfence();
}

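/*
 * Map the page into kernel virtual address space, flush it line by
 * line, and unmap it again.
 */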
static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

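/*
 * Round the range out to cache line boundaries -- CLFLUSH takes any
 * byte address and flushes the whole line containing it -- and issue
 * CLFLUSH for each line in turn.
 */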
static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
	const unsigned clflush_size = cpu_info_primary.ci_cflush_lsize;
	const vaddr_t vaddr = (vaddr_t)ptr;
	const vaddr_t start = rounddown(vaddr, clflush_size);
	const vaddr_t end = roundup(vaddr + nbytes, clflush_size);
	vaddr_t va;

	for (va = start; va < end; va += clflush_size)
		asm volatile ("clflush %0" : : "m" (*(const char *)va));
}

#elif defined(__sparc__) || defined(__sparc64__)

#ifdef __sparc64__
#include <sparc64/sparc64/cache.h>
#else
#include <sparc/sparc/cache.h>
#endif

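/*
 * The sparc MD cache routines can always flush at page granularity,
 * so fine-grained flushing is always available.
 */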
static bool
drm_md_clflush_finegrained_p(void)
{
	return true;
}

static void
drm_md_clflush_all(void)
{
	panic("don't know how to flush entire cache on sparc");
}

static void
drm_md_clflush_begin(void)
{
	membar_Sync();		/* unsure if needed */
}

static void
drm_md_clflush_commit(void)
{
	membar_Sync();		/* unsure if needed */
}

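/*
 * On sparc64, flush the page by its physical address; on 32-bit
 * sparc, map it into kernel virtual address space and flush by
 * virtual address.
 */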
static void
drm_md_clflush_page(struct page *page)
{
#ifdef __sparc64__
	paddr_t pa = VM_PAGE_TO_PHYS(&page->p_vmp);

	cache_flush_phys(pa, PAGE_SIZE, 0);
#else
	void *const vaddr = kmap_atomic(page);

	cache_flush(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
#endif
}

static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
#ifdef __sparc64__
	/* XXX Mega-kludge -- doesn't seem to be a way to flush by vaddr.  */
	blast_dcache();
#else
	cache_flush(ptr, nbytes);
#endif
}

#elif defined(__powerpc__)

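/*
 * powerpc can flush individual data cache blocks with dcbf, so
 * fine-grained flushing is always available.
 */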
static bool
drm_md_clflush_finegrained_p(void)
{
	return true;
}

static void
drm_md_clflush_all(void)
{
	panic("don't know how to flush entire cache on powerpc");
}

static void
drm_md_clflush_begin(void)
{
}

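/*
 * Issue a sync so the preceding dcbf instructions complete before any
 * subsequent access to the flushed memory.
 */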
static void
drm_md_clflush_commit(void)
{
	asm volatile ("sync" ::: "memory");
}

static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
	const unsigned dcsize = curcpu()->ci_ci.dcache_line_size;
	vaddr_t va = (vaddr_t)ptr;
	vaddr_t start = rounddown(va, dcsize);
	vaddr_t end = roundup(va + nbytes, dcsize);
	vsize_t len = end - start;
	vsize_t off;

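	/* dcbf: write back and invalidate the block at start + off.  */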
	for (off = 0; off < len; off += dcsize)
		asm volatile ("dcbf\t%0,%1" : : "b"(start), "r"(off));
}

#endif