/*	$NetBSD: drm_cache.c,v 1.19 2022/07/19 23:19:27 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_cache.c,v 1.19 2022/07/19 23:19:27 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/xcall.h>

#include <uvm/uvm_extern.h>

#include <linux/highmem.h>
#include <linux/mm_types.h>

#include <drm/drm_cache.h>

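/*
 * The machine-dependent cache flush operations below are implemented
 * only for x86, sparc, and powerpc.  On arm, aarch64, and alpha,
 * DRM_CLFLUSH is left undefined and the drm_clflush_* entry points
 * compile to no-ops.
 */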
#if !defined(__arm__) && !defined(__aarch64__) && !defined(__alpha__)
#define	DRM_CLFLUSH	1
#endif

#if defined(DRM_CLFLUSH)
static bool drm_md_clflush_finegrained_p(void);
static void drm_md_clflush_all(void);
static void drm_md_clflush_begin(void);
static void drm_md_clflush_commit(void);
static void drm_md_clflush_page(struct page *);
static void drm_md_clflush_virt_range(const void *, size_t);
#endif

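/*
 * drm_clflush_pages(pages, npages)
 *
 *	Flush any cached lines of the npages pages at pages[0],
 *	pages[1], ..., pages[npages - 1] out to memory.  Falls back to
 *	flushing the entire cache if the CPU cannot flush individual
 *	cache lines.
 */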
void
drm_clflush_pages(struct page **pages, unsigned long npages)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		drm_md_clflush_begin();
		while (npages--)
			drm_md_clflush_page(pages[npages]);
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}

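/*
 * drm_clflush_sg(sgt)
 *
 *	Flush any cached lines of the pages in the scatter-gather
 *	table sgt out to memory.
 */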
void
drm_clflush_sg(struct sg_table *sgt)
{
	drm_clflush_pages(sgt->sgl->sg_pgs, sgt->sgl->sg_npgs);
}

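/*
 * drm_clflush_virt_range(vaddr, nbytes)
 *
 *	Flush any cached lines of the nbytes bytes of virtual memory
 *	starting at vaddr out to memory.
 */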
void
drm_clflush_virt_range(void *vaddr, unsigned long nbytes)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		drm_md_clflush_begin();
		drm_md_clflush_virt_range(vaddr, nbytes);
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}

#if defined(__i386__) || defined(__x86_64__)

#include <machine/cpufunc.h>

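/*
 * drm_md_clflush_finegrained_p()
 *
 *	True if the CPU advertises CLFLUSH, i.e. can flush individual
 *	cache lines by virtual address.
 */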
static bool
drm_md_clflush_finegrained_p(void)
{
	return ISSET(cpu_info_primary.ci_feat_val[0], CPUID_CLFSH);
}

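/*
 * drm_x86_clflush_xc(arg0, arg1)
 *
 *	Cross-call handler: write back and invalidate the entire cache
 *	on the current CPU with WBINVD.
 */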
static void
drm_x86_clflush_xc(void *arg0 __unused, void *arg1 __unused)
{
	wbinvd();
}

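/*
 * drm_md_clflush_all()
 *
 *	Broadcast the WBINVD cross-call to every CPU and wait for all
 *	of them to finish.
 */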
static void
drm_md_clflush_all(void)
{
	xc_wait(xc_broadcast(0, &drm_x86_clflush_xc, NULL, NULL));
}

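/*
 * drm_md_clflush_begin()
 *
 *	Order prior memory operations before the sequence of CLFLUSHes
 *	that follows.
 */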
static void
drm_md_clflush_begin(void)
{
	/* Support for CLFLUSH implies support for MFENCE. */
	x86_mfence();
}

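/*
 * drm_md_clflush_commit()
 *
 *	Wait for the preceding sequence of CLFLUSHes to complete before
 *	any subsequent memory operations.
 */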
static void
drm_md_clflush_commit(void)
{
	x86_mfence();
}

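/*
 * drm_md_clflush_page(page)
 *
 *	Flush any cached lines of the page, by temporarily mapping it
 *	and flushing the mapped virtual range.
 */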
static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

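/*
 * drm_md_clflush_virt_range(ptr, nbytes)
 *
 *	Issue CLFLUSH for each cache line in the range, rounding the
 *	start down and the end up to cache line boundaries.
 */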
static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
	const unsigned clflush_size = cpu_info_primary.ci_cflush_lsize;
	const vaddr_t vaddr = (vaddr_t)ptr;
	const vaddr_t start = rounddown(vaddr, clflush_size);
	const vaddr_t end = roundup(vaddr + nbytes, clflush_size);
	vaddr_t va;

	for (va = start; va < end; va += clflush_size)
		asm volatile ("clflush %0" : : "m" (*(const char *)va));
}

#elif defined(__sparc__) || defined(__sparc64__)

#ifdef __sparc64__
#include <sparc64/sparc64/cache.h>
#else
#include <sparc/sparc/cache.h>
#endif

static bool
drm_md_clflush_finegrained_p(void)
{
	return true;
}

static void
drm_md_clflush_all(void)
{
	panic("don't know how to flush entire cache on sparc");
}

static void
drm_md_clflush_begin(void)
{
	membar_Sync();	/* unsure if needed */
}

static void
drm_md_clflush_commit(void)
{
	membar_Sync();	/* unsure if needed */
}

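/*
 * drm_md_clflush_page(page)
 *
 *	Flush any cached lines of the page: by physical address on
 *	sparc64, or through a temporary mapping on sparc.
 */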
static void
drm_md_clflush_page(struct page *page)
{
#ifdef __sparc64__
	paddr_t pa = VM_PAGE_TO_PHYS(&page->p_vmp);

	cache_flush_phys(pa, PAGE_SIZE, 0);
#else
	void *const vaddr = kmap_atomic(page);

	cache_flush(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
#endif
}

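/*
 * drm_md_clflush_virt_range(ptr, nbytes)
 *
 *	Flush any cached lines of the virtual range: sparc64 has no
 *	flush-by-virtual-address primitive, so the whole D-cache is
 *	blasted instead.
 */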
static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
#ifdef __sparc64__
	/* XXX Mega-kludge -- doesn't seem to be a way to flush by vaddr. */
	blast_dcache();
#else
	cache_flush(ptr, nbytes);
#endif
}

#elif defined(__powerpc__)

static bool
drm_md_clflush_finegrained_p(void)
{
	return true;
}

static void
drm_md_clflush_all(void)
{
	panic("don't know how to flush entire cache on powerpc");
}

static void
drm_md_clflush_begin(void)
{
}

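/*
 * drm_md_clflush_commit()
 *
 *	Issue sync to wait until the preceding dcbf instructions have
 *	completed.
 */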
static void
drm_md_clflush_commit(void)
{
	asm volatile ("sync" ::: "memory");
}

static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

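/*
 * drm_md_clflush_virt_range(ptr, nbytes)
 *
 *	Issue dcbf (data cache block flush) for each cache line in the
 *	range, rounding to cache line boundaries.
 */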
static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
	const unsigned dcsize = curcpu()->ci_ci.dcache_line_size;
	vaddr_t va = (vaddr_t)ptr;
	vaddr_t start = rounddown(va, dcsize);
	vaddr_t end = roundup(va + nbytes, dcsize);
	vsize_t len = end - start;
	vsize_t off;

	for (off = 0; off < len; off += dcsize)
		asm volatile ("dcbf\t%0,%1" : : "b"(start), "r"(off));
}

#endif