Home | History | Annotate | Line # | Download | only in drm
      1 /*	$NetBSD: drm_cache.c,v 1.4 2021/12/19 01:24:25 riastradh Exp $	*/
      2 
      3 /**************************************************************************
      4  *
      5  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
      6  * All Rights Reserved.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the
     10  * "Software"), to deal in the Software without restriction, including
     11  * without limitation the rights to use, copy, modify, merge, publish,
     12  * distribute, sub license, and/or sell copies of the Software, and to
     13  * permit persons to whom the Software is furnished to do so, subject to
     14  * the following conditions:
     15  *
     16  * The above copyright notice and this permission notice (including the
     17  * next paragraph) shall be included in all copies or substantial portions
     18  * of the Software.
     19  *
     20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     22  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     23  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     24  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     25  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     26  * USE OR OTHER DEALINGS IN THE SOFTWARE.
     27  *
     28  **************************************************************************/
     29 /*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: drm_cache.c,v 1.4 2021/12/19 01:24:25 riastradh Exp $");
     35 
     36 #include <linux/export.h>
     37 #include <linux/highmem.h>
     38 
     39 #include <drm/drm_cache.h>
     40 
     41 #if defined(CONFIG_X86)
     42 #include <asm/smp.h>
     43 
     44 /*
     45  * clflushopt is an unordered instruction which needs fencing with mfence or
     46  * sfence to avoid ordering issues.  For drm_clflush_page this fencing happens
     47  * in the caller.
     48  */
     49 static void
     50 drm_clflush_page(struct page *page)
     51 {
     52 	uint8_t *page_virtual;
     53 	unsigned int i;
     54 	const int size = boot_cpu_data.x86_clflush_size;
     55 
     56 	if (unlikely(page == NULL))
     57 		return;
     58 
     59 	page_virtual = kmap_atomic(page);
     60 	for (i = 0; i < PAGE_SIZE; i += size)
     61 		clflushopt(page_virtual + i);
     62 	kunmap_atomic(page_virtual);
     63 }
     64 
/* Flush an array of pages, bracketed by full barriers to order clflushopt. */
static void drm_cache_flush_clflush(struct page *pages[],
				    unsigned long num_pages)
{
	unsigned long idx;

	mb(); /* full barrier before, so CLFLUSH is ordered with prior stores */
	for (idx = 0; idx < num_pages; idx++)
		drm_clflush_page(pages[idx]);
	mb(); /* full barrier after, so every flushed line has been pushed out */
}
     75 #endif
     76 
/**
 * drm_clflush_pages - Flush dcache lines of a set of pages.
 * @pages: List of pages to be flushed.
 * @num_pages: Number of pages in the array.
 *
 * Flush every data cache line entry that points to an address belonging
 * to a page in the array.
 */
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		drm_cache_flush_clflush(pages, num_pages);
		return;
	}

	/* No CLFLUSH: fall back to a global write-back + invalidate. */
	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");

#elif defined(__powerpc__)
	unsigned long idx;

	for (idx = 0; idx < num_pages; idx++) {
		struct page *const p = pages[idx];
		void *va;

		if (unlikely(p == NULL))
			continue;

		/* Map the page and write back its dcache lines. */
		va = kmap_atomic(p);
		flush_dcache_range((unsigned long)va,
				   (unsigned long)va + PAGE_SIZE);
		kunmap_atomic(va);
	}
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
    119 
    120 /**
 * drm_clflush_sg - Flush dcache lines pointing to a scatter-gather.
    122  * @st: struct sg_table.
    123  *
    124  * Flush every data cache line entry that points to an address in the
    125  * sg.
    126  */
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		struct sg_page_iter iter;

		mb(); /* CLFLUSH is ordered only by full memory barriers */
		for_each_sg_page(st->sgl, &iter, st->nents, 0)
			drm_clflush_page(sg_page_iter_page(&iter));
		mb(); /* make sure every flushed line has been pushed out */

		return;
	}

	/* No CLFLUSH: fall back to a global write-back + invalidate. */
	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);
    150 
/**
 * drm_clflush_virt_range - Flush dcache lines of a region
 * @addr: Initial kernel memory address.
 * @length: Region size.
 *
 * Flush every data cache line entry that points to an address in the
 * region requested.
 */
void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
#ifdef __NetBSD__
		const int size = cpu_info_primary.ci_cflush_lsize;
#else
		const int size = boot_cpu_data.x86_clflush_size;
#endif
		void *const end = addr + length;

		/* Round down to the start of the first cache line. */
		addr = (void *)(((unsigned long)addr) & -size);
		mb(); /* CLFLUSH is only ordered with a full memory barrier */
		while (addr < end) {
			clflushopt(addr);
			addr += size;
		}
		clflushopt(end - 1); /* force serialisation */
		mb(); /* ensure that every data cache line entry is flushed */
		return;
	}

	/* No CLFLUSH: fall back to a global write-back + invalidate. */
	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);
    188