/*	$NetBSD: drm_cache.c,v 1.12 2018/08/27 15:29:19 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_cache.c,v 1.12 2018/08/27 15:29:19 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/xcall.h>

#include <uvm/uvm_extern.h>

#include <linux/mm_types.h>

#include <drm/drmP.h>

#if !defined(__arm__)
#define DRM_CLFLUSH	1
#endif

#if defined(DRM_CLFLUSH)
static bool drm_md_clflush_finegrained_p(void);
static void drm_md_clflush_all(void);
static void drm_md_clflush_begin(void);
static void drm_md_clflush_commit(void);
static void drm_md_clflush_page(struct page *);
static void drm_md_clflush_virt_range(const void *, size_t);
#endif
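
/*
 * Each machine-dependent section below supplies either fine-grained
 * flushes (drm_md_clflush_page/_virt_range, bracketed by
 * drm_md_clflush_begin and drm_md_clflush_commit) or a whole-cache
 * flush via drm_md_clflush_all; the public entry points choose between
 * the two with drm_md_clflush_finegrained_p.
 */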

void
drm_clflush_pages(struct page **pages, unsigned long npages)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		drm_md_clflush_begin();
		while (npages--)
			drm_md_clflush_page(pages[npages]);
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}

void
drm_clflush_pglist(struct pglist *list)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		struct vm_page *page;

		drm_md_clflush_begin();
		TAILQ_FOREACH(page, list, pageq.queue)
			drm_md_clflush_page(container_of(page, struct page,
				p_vmp));
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}

void
drm_clflush_page(struct page *page)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		drm_md_clflush_begin();
		drm_md_clflush_page(page);
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}

void
drm_clflush_virt_range(const void *vaddr, size_t nbytes)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		drm_md_clflush_begin();
		drm_md_clflush_virt_range(vaddr, nbytes);
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}
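
/*
 * Example (hypothetical caller): a driver handing a CPU-written buffer
 * to a device that does not snoop the CPU cache might do
 *
 *	memcpy(bo_kvaddr, src, len);
 *	drm_clflush_virt_range(bo_kvaddr, len);
 *
 * before triggering the device's read of the buffer.
 */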

#if defined(__i386__) || defined(__x86_64__)

#include <machine/cpufunc.h>

static bool
drm_md_clflush_finegrained_p(void)
{
	return ISSET(cpu_info_primary.ci_feat_val[0], CPUID_CFLUSH);
}

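/*
 * wbinvd writes back and invalidates all cache levels, but only on the
 * local CPU, so drm_md_clflush_all cross-calls it on every CPU and
 * waits for the broadcast to complete.
 */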
static void
drm_x86_clflush_xc(void *arg0 __unused, void *arg1 __unused)
{
	wbinvd();
}

static void
drm_md_clflush_all(void)
{
	xc_wait(xc_broadcast(0, &drm_x86_clflush_xc, NULL, NULL));
}

static void
drm_md_clflush_begin(void)
{
	/* Support for CLFLUSH implies support for MFENCE.  */
	x86_mfence();
}

static void
drm_md_clflush_commit(void)
{
	x86_mfence();
}

static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

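/*
 * Flush each cache line overlapping [ptr, ptr + nbytes).  E.g., with
 * 64-byte lines, an 8-byte range starting at 0x1f3c spans the lines at
 * 0x1f00 and 0x1f40, so both get flushed.
 */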
static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
	const unsigned clflush_size = cpu_info_primary.ci_cflush_lsize;
	const vaddr_t vaddr = (vaddr_t)ptr;
	const vaddr_t start = rounddown(vaddr, clflush_size);
	const vaddr_t end = roundup(vaddr + nbytes, clflush_size);
	vaddr_t va;

	for (va = start; va < end; va += clflush_size)
		asm volatile ("clflush %0" : : "m" (*(const char *)va));
}

#elif defined(__sparc__) || defined(__sparc64__)

#ifdef __sparc64__
#include <sparc64/sparc64/cache.h>
#else
#include <sparc/sparc/cache.h>
#endif

static bool
drm_md_clflush_finegrained_p(void)
{
	return true;
}

static void
drm_md_clflush_all(void)
{
	panic("don't know how to flush entire cache on sparc64");
}

static void
drm_md_clflush_begin(void)
{
	membar_Sync();		/* unsure if needed */
}

static void
drm_md_clflush_commit(void)
{
	membar_Sync();		/* unsure if needed */
}

static void
drm_md_clflush_page(struct page *page)
{
#ifdef __sparc64__
	paddr_t pa = VM_PAGE_TO_PHYS(&page->p_vmp);

	cache_flush_phys(pa, PAGE_SIZE, 0);
#else
	void *const vaddr = kmap_atomic(page);

	cache_flush(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
#endif
}

static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
#ifdef __sparc64__
	/* XXX Mega-kludge -- doesn't seem to be a way to flush by vaddr. */
	blast_dcache();
#else
	cache_flush(ptr, nbytes);
#endif
}

#elif defined(__powerpc__)

static bool
drm_md_clflush_finegrained_p(void)
{
	return true;
}

static void
drm_md_clflush_all(void)
{
	panic("don't know how to flush entire cache on powerpc");
}

static void
drm_md_clflush_begin(void)
{
}

static void
drm_md_clflush_commit(void)
{
	asm volatile ("sync" ::: "memory");
}

static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

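/*
 * dcbf (Data Cache Block Flush) writes the block back to memory if it
 * is modified and invalidates it; the sync in drm_md_clflush_commit
 * then waits for the flushes to complete.
 */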
static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
	const unsigned dcsize = curcpu()->ci_ci.dcache_line_size;
	vaddr_t va = (vaddr_t)ptr;
	vaddr_t start = rounddown(va, dcsize);
	vaddr_t end = roundup(va + nbytes, dcsize);
	vsize_t len = end - start;
	vsize_t off;

	for (off = 0; off < len; off += dcsize)
		asm volatile ("dcbf\t%0,%1" : : "b"(start), "r"(off));
}

#endif