/*	$NetBSD: drm_cache.c,v 1.19 2022/07/19 23:19:27 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_cache.c,v 1.19 2022/07/19 23:19:27 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/xcall.h>

#include <uvm/uvm_extern.h>

#include <linux/highmem.h>
#include <linux/mm_types.h>

#include <drm/drm_cache.h>

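/*
 * Machine-dependent cache flushing is implemented below only for
 * x86, sparc/sparc64, and powerpc.  arm, aarch64, and alpha are
 * excluded here, so on those platforms the drm_clflush_* entry
 * points compile to no-ops.
 */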
#if !defined(__arm__) && !defined(__aarch64__) && !defined(__alpha__)
#define DRM_CLFLUSH	1
#endif

#if defined(DRM_CLFLUSH)
static bool		drm_md_clflush_finegrained_p(void);
static void		drm_md_clflush_all(void);
static void		drm_md_clflush_begin(void);
static void		drm_md_clflush_commit(void);
static void		drm_md_clflush_page(struct page *);
static void		drm_md_clflush_virt_range(const void *, size_t);
#endif

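/*
 * drm_clflush_pages(pages, npages)
 *
 *	Flush any CPU cache lines covering each of the npages pages,
 *	e.g. before the pages are handed to a device that does not
 *	snoop the CPU cache.  On platforms that cannot flush a single
 *	page, fall back to flushing every CPU's whole cache.
 */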
void
drm_clflush_pages(struct page **pages, unsigned long npages)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		drm_md_clflush_begin();
		while (npages--)
			drm_md_clflush_page(pages[npages]);
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}

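/*
 * drm_clflush_sg(sgt)
 *
 *	Flush any CPU cache lines covering all the pages in the
 *	scatter/gather table sgt.
 */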
void
drm_clflush_sg(struct sg_table *sgt)
{
	drm_clflush_pages(sgt->sgl->sg_pgs, sgt->sgl->sg_npgs);
}

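/*
 * drm_clflush_virt_range(vaddr, nbytes)
 *
 *	Flush any CPU cache lines covering the nbytes of virtual
 *	memory starting at vaddr.  A hypothetical caller (names made
 *	up for illustration) might use this to push CPU writes out to
 *	memory before device access:
 *
 *		memcpy(kva, buf, len);
 *		drm_clflush_virt_range(kva, len);
 */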
void
drm_clflush_virt_range(void *vaddr, unsigned long nbytes)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		drm_md_clflush_begin();
		drm_md_clflush_virt_range(vaddr, nbytes);
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}

#if defined(__i386__) || defined(__x86_64__)

#include <machine/cpufunc.h>

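/*
 * x86 implementation: CLFLUSH individual cache lines when the CPU
 * advertises CLFLUSH in CPUID; otherwise fall back to WBINVD,
 * broadcast by cross-call to every CPU.
 */

/*
 * drm_md_clflush_finegrained_p()
 *
 *	True if the primary CPU's CPUID feature bits advertise
 *	CLFLUSH, so we can flush individual cache lines.
 */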
static bool
drm_md_clflush_finegrained_p(void)
{
	return ISSET(cpu_info_primary.ci_feat_val[0], CPUID_CLFSH);
}

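/*
 * drm_x86_clflush_xc(arg0, arg1)
 *
 *	Cross-call handler: write back and invalidate the local
 *	CPU's caches with WBINVD.
 */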
static void
drm_x86_clflush_xc(void *arg0 __unused, void *arg1 __unused)
{
	wbinvd();
}

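/*
 * drm_md_clflush_all()
 *
 *	Broadcast the WBINVD cross-call to every CPU and wait for it
 *	to complete.
 */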
static void
drm_md_clflush_all(void)
{
	xc_wait(xc_broadcast(0, &drm_x86_clflush_xc, NULL, NULL));
}

static void
drm_md_clflush_begin(void)
{
	/* Support for CLFLUSH implies support for MFENCE.  */
	x86_mfence();
}

static void
drm_md_clflush_commit(void)
{
	x86_mfence();
}

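/*
 * drm_md_clflush_page(page)
 *
 *	CLFLUSH every cache line of the page, via a temporary kernel
 *	mapping.
 */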
static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

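/*
 * drm_md_clflush_virt_range(ptr, nbytes)
 *
 *	CLFLUSH every cache line overlapping [ptr, ptr + nbytes),
 *	after rounding the range out to cache-line boundaries.
 */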
static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
	const unsigned clflush_size = cpu_info_primary.ci_cflush_lsize;
	const vaddr_t vaddr = (vaddr_t)ptr;
	const vaddr_t start = rounddown(vaddr, clflush_size);
	const vaddr_t end = roundup(vaddr + nbytes, clflush_size);
	vaddr_t va;

	for (va = start; va < end; va += clflush_size)
		asm volatile ("clflush %0" : : "m" (*(const char *)va));
}

#elif defined(__sparc__) || defined(__sparc64__)

#ifdef __sparc64__
#include <sparc64/sparc64/cache.h>
#else
#include <sparc/sparc/cache.h>
#endif

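/*
 * sparc/sparc64 implementation: flush with the machine-dependent
 * cache_flush routines.  sparc64 can flush a page by its physical
 * address, but apparently offers no way to flush by virtual address
 * or to flush every cache.
 */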
static bool
drm_md_clflush_finegrained_p(void)
{
	return true;
}

static void
drm_md_clflush_all(void)
{
	panic("don't know how to flush entire cache on sparc");
}

static void
drm_md_clflush_begin(void)
{
	membar_Sync();		/* unsure if needed */
}

static void
drm_md_clflush_commit(void)
{
	membar_Sync();		/* unsure if needed */
}

static void
drm_md_clflush_page(struct page *page)
{
#ifdef __sparc64__
	paddr_t pa = VM_PAGE_TO_PHYS(&page->p_vmp);

	cache_flush_phys(pa, PAGE_SIZE, 0);
#else
	void *const vaddr = kmap_atomic(page);

	cache_flush(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
#endif
}

static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
#ifdef __sparc64__
	/* XXX Mega-kludge -- doesn't seem to be a way to flush by vaddr.  */
	blast_dcache();
#else
	cache_flush(ptr, nbytes);
#endif
}

#elif defined(__powerpc__)

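/*
 * powerpc implementation: flush one data cache block at a time with
 * dcbf, and make the flushes globally visible with sync.
 */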
static bool
drm_md_clflush_finegrained_p(void)
{
	return true;
}

static void
drm_md_clflush_all(void)
{
	panic("don't know how to flush entire cache on powerpc");
}

static void
drm_md_clflush_begin(void)
{
}

static void
drm_md_clflush_commit(void)
{
	asm volatile ("sync" ::: "memory");
}

static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

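/*
 * drm_md_clflush_virt_range(ptr, nbytes)
 *
 *	dcbf (data cache block flush) every cache block overlapping
 *	[ptr, ptr + nbytes); the sync in drm_md_clflush_commit then
 *	orders the flushes.
 */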
static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
	const unsigned dcsize = curcpu()->ci_ci.dcache_line_size;
	vaddr_t va = (vaddr_t)ptr;
	vaddr_t start = rounddown(va, dcsize);
	vaddr_t end = roundup(va + nbytes, dcsize);
	vsize_t len = end - start;
	vsize_t off;

	for (off = 0; off < len; off += dcsize)
		asm volatile ("dcbf\t%0,%1" : : "b"(start), "r"(off));
}

#endif