/*	$NetBSD: drm_cache.c,v 1.15 2021/12/19 00:49:36 riastradh Exp $	*/
2 1.2 riastrad
3 1.2 riastrad /*-
4 1.2 riastrad * Copyright (c) 2013 The NetBSD Foundation, Inc.
5 1.2 riastrad * All rights reserved.
6 1.2 riastrad *
7 1.2 riastrad * This code is derived from software contributed to The NetBSD Foundation
8 1.2 riastrad * by Taylor R. Campbell.
9 1.2 riastrad *
10 1.2 riastrad * Redistribution and use in source and binary forms, with or without
11 1.2 riastrad * modification, are permitted provided that the following conditions
12 1.2 riastrad * are met:
13 1.2 riastrad * 1. Redistributions of source code must retain the above copyright
14 1.2 riastrad * notice, this list of conditions and the following disclaimer.
15 1.2 riastrad * 2. Redistributions in binary form must reproduce the above copyright
16 1.2 riastrad * notice, this list of conditions and the following disclaimer in the
17 1.2 riastrad * documentation and/or other materials provided with the distribution.
18 1.2 riastrad *
19 1.2 riastrad * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.2 riastrad * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.2 riastrad * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.2 riastrad * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.2 riastrad * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.2 riastrad * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.2 riastrad * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.2 riastrad * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.2 riastrad * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.2 riastrad * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.2 riastrad * POSSIBILITY OF SUCH DAMAGE.
30 1.2 riastrad */
31 1.2 riastrad
32 1.2 riastrad #include <sys/cdefs.h>
33 1.15 riastrad __KERNEL_RCSID(0, "$NetBSD: drm_cache.c,v 1.15 2021/12/19 00:49:36 riastradh Exp $");
34 1.2 riastrad
35 1.8 jmcneill #include <sys/param.h>
36 1.2 riastrad #include <sys/types.h>
37 1.2 riastrad #include <sys/xcall.h>
38 1.2 riastrad
39 1.2 riastrad #include <uvm/uvm_extern.h>
40 1.2 riastrad
41 1.2 riastrad #include <linux/mm_types.h>
42 1.2 riastrad
43 1.2 riastrad #include <drm/drmP.h>
44 1.15 riastrad #include <drm/drm_cache.h>
45 1.2 riastrad
46 1.13 jmcneill #if !defined(__arm__) && !defined(__aarch64__)
47 1.8 jmcneill #define DRM_CLFLUSH 1
48 1.8 jmcneill #endif
49 1.8 jmcneill
50 1.8 jmcneill #if defined(DRM_CLFLUSH)
51 1.2 riastrad static bool drm_md_clflush_finegrained_p(void);
52 1.2 riastrad static void drm_md_clflush_all(void);
53 1.11 riastrad static void drm_md_clflush_begin(void);
54 1.10 riastrad static void drm_md_clflush_commit(void);
55 1.2 riastrad static void drm_md_clflush_page(struct page *);
56 1.2 riastrad static void drm_md_clflush_virt_range(const void *, size_t);
57 1.8 jmcneill #endif
58 1.2 riastrad
/*
 * drm_clflush_pages(pages, npages)
 *
 *	Write back and invalidate any cached data for each of the
 *	npages pages in the array.  Falls back to a whole-cache flush
 *	when the CPU cannot flush individual lines; no-op on platforms
 *	without DRM_CLFLUSH support.
 */
void
drm_clflush_pages(struct page **pages, unsigned long npages)
{
#if defined(DRM_CLFLUSH)
	unsigned long i;

	if (!drm_md_clflush_finegrained_p()) {
		drm_md_clflush_all();
		return;
	}

	drm_md_clflush_begin();
	for (i = 0; i < npages; i++)
		drm_md_clflush_page(pages[i]);
	drm_md_clflush_commit();
#endif
}
73 1.3 riastrad
/*
 * drm_clflush_pglist(list)
 *
 *	Write back and invalidate any cached data for every page on the
 *	uvm page list.  Falls back to a whole-cache flush when the CPU
 *	cannot flush individual lines; no-op on platforms without
 *	DRM_CLFLUSH support.
 */
void
drm_clflush_pglist(struct pglist *list)
{
#if defined(DRM_CLFLUSH)
	struct vm_page *vmp;

	if (!drm_md_clflush_finegrained_p()) {
		drm_md_clflush_all();
		return;
	}

	drm_md_clflush_begin();
	TAILQ_FOREACH(vmp, list, pageq.queue)
		drm_md_clflush_page(container_of(vmp, struct page, p_vmp));
	drm_md_clflush_commit();
#endif
}
91 1.2 riastrad
/*
 * drm_clflush_page(page)
 *
 *	Write back and invalidate any cached data for the single page.
 *	Falls back to a whole-cache flush when the CPU cannot flush
 *	individual lines; no-op on platforms without DRM_CLFLUSH
 *	support.
 */
void
drm_clflush_page(struct page *page)
{
#if defined(DRM_CLFLUSH)
	if (!drm_md_clflush_finegrained_p()) {
		drm_md_clflush_all();
		return;
	}

	drm_md_clflush_begin();
	drm_md_clflush_page(page);
	drm_md_clflush_commit();
#endif
}
105 1.2 riastrad
106 1.2 riastrad void
107 1.2 riastrad drm_clflush_virt_range(const void *vaddr, size_t nbytes)
108 1.2 riastrad {
109 1.8 jmcneill #if defined(DRM_CLFLUSH)
110 1.10 riastrad if (drm_md_clflush_finegrained_p()) {
111 1.11 riastrad drm_md_clflush_begin();
112 1.2 riastrad drm_md_clflush_virt_range(vaddr, nbytes);
113 1.10 riastrad drm_md_clflush_commit();
114 1.10 riastrad } else {
115 1.2 riastrad drm_md_clflush_all();
116 1.10 riastrad }
117 1.8 jmcneill #endif
118 1.2 riastrad }
119 1.2 riastrad
120 1.2 riastrad #if defined(__i386__) || defined(__x86_64__)
121 1.2 riastrad
122 1.7 jmcneill #include <machine/cpufunc.h>
123 1.7 jmcneill
/*
 * True if the CPU advertises CLFLUSH (CPUID feature bit CLFSH), i.e.
 * we can flush individual cache lines; false means the fallback is a
 * whole-cache WBINVD via drm_md_clflush_all().
 */
static bool
drm_md_clflush_finegrained_p(void)
{
	return ISSET(cpu_info_primary.ci_feat_val[0], CPUID_CLFSH);
}
129 1.2 riastrad
/*
 * Cross-call handler: write back and invalidate the entire cache of
 * the CPU this runs on (WBINVD).  Both arguments are unused; the
 * signature is dictated by the xcall(9) interface.
 */
static void
drm_x86_clflush_xc(void *arg0 __unused, void *arg1 __unused)
{
	wbinvd();
}
135 1.2 riastrad
/*
 * Flush the entire cache on every CPU by broadcasting a WBINVD
 * cross-call, and wait for all CPUs to finish before returning.
 */
static void
drm_md_clflush_all(void)
{
	xc_wait(xc_broadcast(0, &drm_x86_clflush_xc, NULL, NULL));
}
141 1.2 riastrad
/*
 * Issue a full memory barrier before a batch of CLFLUSHes so prior
 * stores are globally visible before we start flushing lines.
 */
static void
drm_md_clflush_begin(void)
{
	/* Support for CLFLUSH implies support for MFENCE. */
	x86_mfence();
}
148 1.2 riastrad
/*
 * Issue a full memory barrier after a batch of CLFLUSHes; CLFLUSH is
 * not ordered with respect to other memory traffic on its own, so the
 * MFENCE makes the flushes complete before subsequent accesses.
 */
static void
drm_md_clflush_commit(void)
{
	x86_mfence();
}
154 1.10 riastrad
/*
 * Flush all cache lines of one page: temporarily map it with
 * kmap_atomic, flush the mapped range, and unmap.
 */
static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}
164 1.2 riastrad
/*
 * CLFLUSH every cache line overlapping [ptr, ptr + nbytes).  The range
 * is widened to cache-line boundaries (ci_cflush_lsize) on both ends.
 * Callers bracket this with drm_md_clflush_begin()/_commit(), which
 * supply the MFENCEs that order CLFLUSH against other memory accesses.
 */
static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
	const unsigned clflush_size = cpu_info_primary.ci_cflush_lsize;
	const vaddr_t vaddr = (vaddr_t)ptr;
	const vaddr_t start = rounddown(vaddr, clflush_size);
	const vaddr_t end = roundup(vaddr + nbytes, clflush_size);
	vaddr_t va;

	for (va = start; va < end; va += clflush_size)
		asm volatile ("clflush %0" : : "m" (*(const char *)va));
}
177 1.2 riastrad
178 1.12 riastrad #elif defined(__sparc__) || defined(__sparc64__)
179 1.12 riastrad
180 1.12 riastrad #ifdef __sparc64__
181 1.12 riastrad #include <sparc64/sparc64/cache.h>
182 1.12 riastrad #else
183 1.12 riastrad #include <sparc/sparc/cache.h>
184 1.12 riastrad #endif
185 1.12 riastrad
/*
 * Always true on sparc/sparc64: we can flush by page or address range,
 * so the whole-cache fallback (drm_md_clflush_all) is never taken.
 */
static bool
drm_md_clflush_finegrained_p(void)
{
	return true;
}
191 1.12 riastrad
/*
 * Whole-cache flush fallback.  Unreachable in practice because
 * drm_md_clflush_finegrained_p() always returns true here; panic if
 * it is ever called.
 *
 * Fix: this block is compiled for both __sparc__ and __sparc64__, but
 * the old panic message claimed "sparc64" unconditionally; use an
 * architecture-neutral message.
 */
static void
drm_md_clflush_all(void)
{
	panic("don't know how to flush entire cache on sparc");
}
197 1.12 riastrad
/*
 * Barrier before a batch of flushes.
 */
static void
drm_md_clflush_begin(void)
{
	membar_Sync();		/* unsure if needed */
}
203 1.12 riastrad
/*
 * Barrier after a batch of flushes.
 */
static void
drm_md_clflush_commit(void)
{
	membar_Sync();		/* unsure if needed */
}
209 1.12 riastrad
/*
 * Flush one page from the cache.  On sparc64 we can flush by physical
 * address directly; on 32-bit sparc we need a virtual mapping, so map
 * the page temporarily and flush by virtual address.
 */
static void
drm_md_clflush_page(struct page *page)
{
#ifdef __sparc64__
	paddr_t pa = VM_PAGE_TO_PHYS(&page->p_vmp);

	cache_flush_phys(pa, PAGE_SIZE, 0);
#else
	void *const vaddr = kmap_atomic(page);

	cache_flush(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
#endif
}
225 1.12 riastrad
/*
 * Flush a virtual address range from the cache.  sparc64 has no
 * flush-by-vaddr primitive here, so we (wastefully) blast the whole
 * data cache instead; 32-bit sparc can flush the exact range.
 */
static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
#ifdef __sparc64__
	/* XXX Mega-kludge -- doesn't seem to be a way to flush by vaddr. */
	blast_dcache();
#else
	cache_flush(ptr, nbytes);
#endif
}
236 1.12 riastrad
237 1.9 riastrad #elif defined(__powerpc__)
238 1.9 riastrad
/*
 * Always true on powerpc: dcbf flushes individual cache blocks, so the
 * whole-cache fallback (drm_md_clflush_all) is never taken.
 */
static bool
drm_md_clflush_finegrained_p(void)
{
	return true;
}
244 1.9 riastrad
/*
 * Whole-cache flush fallback.  Unreachable in practice because
 * drm_md_clflush_finegrained_p() always returns true here; panic if
 * it is ever called.
 */
static void
drm_md_clflush_all(void)
{
	panic("don't know how to flush entire cache on powerpc");
}
250 1.9 riastrad
/*
 * No barrier needed before a batch of dcbf flushes on powerpc; the
 * sync in drm_md_clflush_commit() orders the batch afterward.
 */
static void
drm_md_clflush_begin(void)
{
}
255 1.11 riastrad
/*
 * Issue a sync after a batch of dcbf instructions so the flushes
 * complete before subsequent memory accesses.
 */
static void
drm_md_clflush_commit(void)
{
	asm volatile ("sync" ::: "memory");
}
261 1.10 riastrad
/*
 * Flush all cache lines of one page: temporarily map it with
 * kmap_atomic, flush the mapped range, and unmap.
 */
static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}
271 1.9 riastrad
/*
 * dcbf (data cache block flush) every cache block overlapping
 * [ptr, ptr + nbytes).  The range is widened to cache-block
 * boundaries (dcache_line_size) on both ends; dcbf computes the
 * effective address as base ("b") plus offset ("r").
 *
 * NOTE(review): the asm has no "memory" clobber; presumably the
 * compiler barrier in drm_md_clflush_commit()'s sync suffices --
 * confirm against callers' ordering requirements.
 */
static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
	const unsigned dcsize = curcpu()->ci_ci.dcache_line_size;
	vaddr_t va = (vaddr_t)ptr;
	vaddr_t start = rounddown(va, dcsize);
	vaddr_t end = roundup(va + nbytes, dcsize);
	vsize_t len = end - start;
	vsize_t off;

	for (off = 0; off < len; off += dcsize)
		asm volatile ("dcbf\t%0,%1" : : "b"(start), "r"(off));
}
285 1.9 riastrad
286 1.9 riastrad #endif
287