/* drm_cache.c, revision 1.9 (CVS annotate header removed) */
      1  1.9  riastrad /*	$NetBSD: drm_cache.c,v 1.9 2018/08/27 15:11:46 riastradh Exp $	*/
      2  1.2  riastrad 
      3  1.2  riastrad /*-
      4  1.2  riastrad  * Copyright (c) 2013 The NetBSD Foundation, Inc.
      5  1.2  riastrad  * All rights reserved.
      6  1.2  riastrad  *
      7  1.2  riastrad  * This code is derived from software contributed to The NetBSD Foundation
      8  1.2  riastrad  * by Taylor R. Campbell.
      9  1.2  riastrad  *
     10  1.2  riastrad  * Redistribution and use in source and binary forms, with or without
     11  1.2  riastrad  * modification, are permitted provided that the following conditions
     12  1.2  riastrad  * are met:
     13  1.2  riastrad  * 1. Redistributions of source code must retain the above copyright
     14  1.2  riastrad  *    notice, this list of conditions and the following disclaimer.
     15  1.2  riastrad  * 2. Redistributions in binary form must reproduce the above copyright
     16  1.2  riastrad  *    notice, this list of conditions and the following disclaimer in the
     17  1.2  riastrad  *    documentation and/or other materials provided with the distribution.
     18  1.2  riastrad  *
     19  1.2  riastrad  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  1.2  riastrad  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  1.2  riastrad  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  1.2  riastrad  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  1.2  riastrad  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  1.2  riastrad  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  1.2  riastrad  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  1.2  riastrad  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  1.2  riastrad  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  1.2  riastrad  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  1.2  riastrad  * POSSIBILITY OF SUCH DAMAGE.
     30  1.2  riastrad  */
     31  1.2  riastrad 
     32  1.2  riastrad #include <sys/cdefs.h>
     33  1.9  riastrad __KERNEL_RCSID(0, "$NetBSD: drm_cache.c,v 1.9 2018/08/27 15:11:46 riastradh Exp $");
     34  1.2  riastrad 
     35  1.8  jmcneill #include <sys/param.h>
     36  1.2  riastrad #include <sys/types.h>
     37  1.2  riastrad #include <sys/xcall.h>
     38  1.2  riastrad 
     39  1.2  riastrad #include <uvm/uvm_extern.h>
     40  1.2  riastrad 
     41  1.2  riastrad #include <linux/mm_types.h>
     42  1.2  riastrad 
     43  1.2  riastrad #include <drm/drmP.h>
     44  1.2  riastrad 
/*
 * DRM_CLFLUSH is defined on architectures that provide the MD cache
 * flush primitives declared below; on the others (currently arm) the
 * public drm_clflush_* entry points compile to no-ops.
 */
#if !defined(__arm__)
#define DRM_CLFLUSH	1
#endif

#if defined(DRM_CLFLUSH)
static bool		drm_md_clflush_finegrained_p(void);
static void		drm_md_clflush_all(void);
static void		drm_md_clflush_page(struct page *);
static void		drm_md_clflush_virt_range(const void *, size_t);
#endif
     55  1.2  riastrad 
/*
 * drm_clflush_pages: Flush the CPU cache for each of the npages pages
 * in the array pages.  Uses fine-grained per-page flushes when the MD
 * layer supports them, otherwise flushes the entire cache.  No-op on
 * architectures without DRM_CLFLUSH support.
 */
void
drm_clflush_pages(struct page **pages, unsigned long npages)
{
#if defined(DRM_CLFLUSH)
	if (!drm_md_clflush_finegrained_p()) {
		drm_md_clflush_all();
		return;
	}
	while (npages--)
		drm_md_clflush_page(pages[npages]);
#endif
}
     68  1.3  riastrad 
/*
 * drm_clflush_pglist: Flush the CPU cache for every uvm vm_page on the
 * queue list.  Falls back to a whole-cache flush when fine-grained
 * flushing is unavailable; no-op without DRM_CLFLUSH support.
 */
void
drm_clflush_pglist(struct pglist *list)
{
#if defined(DRM_CLFLUSH)
	struct vm_page *pg;

	if (!drm_md_clflush_finegrained_p()) {
		drm_md_clflush_all();
		return;
	}
	TAILQ_FOREACH(pg, list, pageq.queue)
		drm_md_clflush_page(container_of(pg, struct page, p_vmp));
#endif
}
     84  1.2  riastrad 
/*
 * drm_clflush_page: Flush the CPU cache for the single page page.
 * Falls back to a whole-cache flush when fine-grained flushing is
 * unavailable; no-op without DRM_CLFLUSH support.
 */
void
drm_clflush_page(struct page *page)
{
#if defined(DRM_CLFLUSH)
	if (!drm_md_clflush_finegrained_p()) {
		drm_md_clflush_all();
		return;
	}
	drm_md_clflush_page(page);
#endif
}
     95  1.2  riastrad 
     96  1.2  riastrad void
     97  1.2  riastrad drm_clflush_virt_range(const void *vaddr, size_t nbytes)
     98  1.2  riastrad {
     99  1.8  jmcneill #if defined(DRM_CLFLUSH)
    100  1.2  riastrad 	if (drm_md_clflush_finegrained_p())
    101  1.2  riastrad 		drm_md_clflush_virt_range(vaddr, nbytes);
    102  1.2  riastrad 	else
    103  1.2  riastrad 		drm_md_clflush_all();
    104  1.8  jmcneill #endif
    105  1.2  riastrad }
    106  1.2  riastrad 
    107  1.2  riastrad #if defined(__i386__) || defined(__x86_64__)
    108  1.2  riastrad 
    109  1.7  jmcneill #include <machine/cpufunc.h>
    110  1.7  jmcneill 
/*
 * True iff the boot CPU advertises the CLFLUSH instruction in its
 * CPUID feature bits, i.e. we can flush individual cache lines
 * instead of resorting to a whole-cache wbinvd.
 */
static bool
drm_md_clflush_finegrained_p(void)
{
	return ISSET(cpu_info_primary.ci_feat_val[0], CPUID_CFLUSH);
}
    116  1.2  riastrad 
/*
 * Execute CLFLUSH on the cache line containing vaddr.  The "m"
 * operand makes the line's memory an input so the compiler orders the
 * flush after prior stores to it.
 */
static void
drm_x86_clflush(const void *vaddr)
{
	asm volatile ("clflush %0" : : "m" (*(const char *)vaddr));
}
    122  1.2  riastrad 
/*
 * Return the CLFLUSH line size in bytes, as reported by CPUID and
 * recorded for the boot CPU.  Only meaningful when CLFLUSH exists,
 * hence the assertion.
 */
static size_t
drm_x86_clflush_size(void)
{
	KASSERT(drm_md_clflush_finegrained_p());
	return cpu_info_primary.ci_cflush_lsize;
}
    129  1.2  riastrad 
/*
 * Cross-call handler: write back and invalidate the entire cache of
 * the CPU this runs on (wbinvd).  Both arguments are unused; the
 * signature is dictated by the xcall(9) interface.
 */
static void
drm_x86_clflush_xc(void *arg0 __unused, void *arg1 __unused)
{
	wbinvd();
}
    135  1.2  riastrad 
/*
 * Flush the entire cache on every CPU: broadcast a wbinvd cross-call
 * and wait for all CPUs to complete it.
 */
static void
drm_md_clflush_all(void)
{
	xc_wait(xc_broadcast(0, &drm_x86_clflush_xc, NULL, NULL));
}
    141  1.2  riastrad 
/*
 * Flush the cache lines of a single page by temporarily mapping it
 * into kernel virtual address space and flushing the whole mapped
 * range.
 */
static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}
    151  1.2  riastrad 
/*
 * Flush every cache line overlapping the virtual address range
 * [vaddr, vaddr + nbytes) with CLFLUSH, bracketed by MFENCE.  Per the
 * Intel SDM, CLFLUSH is only ordered with respect to other memory
 * traffic by a fence, hence the MFENCE before and after the loop.
 */
static void
drm_md_clflush_virt_range(const void *vaddr, size_t nbytes)
{
	const unsigned clflush_size = drm_x86_clflush_size();
	const vaddr_t va = (vaddr_t)vaddr;
	/* Round outward to cache-line boundaries so the range is covered. */
	const char *const start = (const void *)rounddown(va, clflush_size);
	const char *const end = (const void *)roundup(va + nbytes,
	    clflush_size);
	const char *p;

	/* Support for CLFLUSH implies support for MFENCE.  */
	KASSERT(drm_md_clflush_finegrained_p());
	x86_mfence();
	for (p = start; p < end; p += clflush_size)
		drm_x86_clflush(p);
	x86_mfence();
}
    169  1.2  riastrad 
    170  1.9  riastrad #elif defined(__powerpc__)
    171  1.9  riastrad 
/*
 * Issue dcbf (data cache block flush) for the cache line at va + off.
 * The base/index operand pair matches dcbf's RA,RB addressing form
 * ("b" keeps va out of r0, which dcbf reads as zero).
 */
static void
drm_ppc_dcbf(vaddr_t va, vsize_t off)
{
	asm volatile ("dcbf\t%0,%1" : : "b"(va), "r"(off));
}
    177  1.9  riastrad 
/*
 * dcbf can always flush individual cache lines on powerpc, so
 * fine-grained flushing is unconditionally available.
 */
static bool
drm_md_clflush_finegrained_p(void)
{
	return true;
}
    183  1.9  riastrad 
/*
 * Whole-cache flush is unreachable on powerpc because
 * drm_md_clflush_finegrained_p() always returns true; getting here
 * indicates a bug in a caller.
 */
static void
drm_md_clflush_all(void)
{
	panic("don't know how to flush entire cache on powerpc");
}
    189  1.9  riastrad 
/*
 * Flush the cache lines of a single page by temporarily mapping it
 * into kernel virtual address space and flushing the whole mapped
 * range.
 */
static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}
    199  1.9  riastrad 
/*
 * Flush every data-cache line overlapping [ptr, ptr + nbytes), one
 * dcbf per cache line, using this CPU's data-cache line size.
 *
 * NOTE(review): there is no trailing `sync' here; the Power ISA
 * requires a sync after dcbf to guarantee the flushed data is visible
 * to other mechanisms -- confirm whether callers supply the barrier
 * or whether one should be added.
 */
static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
	const unsigned dcsize = curcpu()->ci_ci.dcache_line_size;
	vaddr_t va = (vaddr_t)ptr;
	/* Round outward to cache-line boundaries so the range is covered. */
	vaddr_t start = rounddown(va, dcsize);
	vaddr_t end = roundup(va + nbytes, dcsize);
	vsize_t len = end - start;
	vsize_t off;

	for (off = 0; off < len; off += dcsize)
		drm_ppc_dcbf(start, off);
}
    213  1.9  riastrad 
    214  1.9  riastrad #endif
    215