/*	$NetBSD: drm_cache.c,v 1.11 2018/08/27 15:24:27 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
31 1.2 riastrad
32 1.2 riastrad #include <sys/cdefs.h>
33 1.9 riastrad __KERNEL_RCSID(0, "$NetBSD: drm_cache.c,v 1.11 2018/08/27 15:24:27 riastradh Exp $");
34 1.2 riastrad
35 1.8 jmcneill #include <sys/param.h>
36 1.2 riastrad #include <sys/types.h>
37 1.2 riastrad #include <sys/xcall.h>
38 1.2 riastrad
39 1.2 riastrad #include <uvm/uvm_extern.h>
40 1.2 riastrad
41 1.2 riastrad #include <linux/mm_types.h>
42 1.2 riastrad
43 1.2 riastrad #include <drm/drmP.h>
44 1.2 riastrad
45 1.8 jmcneill #if !defined(__arm__)
46 1.8 jmcneill #define DRM_CLFLUSH 1
47 1.8 jmcneill #endif
48 1.8 jmcneill
49 1.8 jmcneill #if defined(DRM_CLFLUSH)
50 1.2 riastrad static bool drm_md_clflush_finegrained_p(void);
51 1.2 riastrad static void drm_md_clflush_all(void);
52 1.11 riastrad static void drm_md_clflush_begin(void);
53 1.10 riastrad static void drm_md_clflush_commit(void);
54 1.2 riastrad static void drm_md_clflush_page(struct page *);
55 1.2 riastrad static void drm_md_clflush_virt_range(const void *, size_t);
56 1.8 jmcneill #endif
57 1.2 riastrad
/*
 * drm_clflush_pages: Flush the CPU data cache for an array of npages
 * pages.  Uses per-cache-line flushes when the CPU supports them,
 * otherwise flushes the entire cache.  No-op without DRM_CLFLUSH.
 */
void
drm_clflush_pages(struct page **pages, unsigned long npages)
{
#if defined(DRM_CLFLUSH)
	if (!drm_md_clflush_finegrained_p()) {
		drm_md_clflush_all();
		return;
	}

	drm_md_clflush_begin();
	/* Same order as the original: last page first. */
	while (npages > 0)
		drm_md_clflush_page(pages[--npages]);
	drm_md_clflush_commit();
#endif
}
72 1.3 riastrad
/*
 * drm_clflush_pglist: Flush the CPU data cache for every page on the
 * uvm page list, or the whole cache if fine-grained flushing is not
 * available.  No-op without DRM_CLFLUSH.
 */
void
drm_clflush_pglist(struct pglist *list)
{
#if defined(DRM_CLFLUSH)
	struct vm_page *pg;

	if (!drm_md_clflush_finegrained_p()) {
		drm_md_clflush_all();
		return;
	}

	drm_md_clflush_begin();
	TAILQ_FOREACH(pg, list, pageq.queue)
		drm_md_clflush_page(container_of(pg, struct page, p_vmp));
	drm_md_clflush_commit();
#endif
}
90 1.2 riastrad
/*
 * drm_clflush_page: Flush the CPU data cache for a single page, or
 * the whole cache if fine-grained flushing is not available.  No-op
 * without DRM_CLFLUSH.
 */
void
drm_clflush_page(struct page *page)
{
#if defined(DRM_CLFLUSH)
	if (!drm_md_clflush_finegrained_p()) {
		drm_md_clflush_all();
		return;
	}

	drm_md_clflush_begin();
	drm_md_clflush_page(page);
	drm_md_clflush_commit();
#endif
}
104 1.2 riastrad
105 1.2 riastrad void
106 1.2 riastrad drm_clflush_virt_range(const void *vaddr, size_t nbytes)
107 1.2 riastrad {
108 1.8 jmcneill #if defined(DRM_CLFLUSH)
109 1.10 riastrad if (drm_md_clflush_finegrained_p()) {
110 1.11 riastrad drm_md_clflush_begin();
111 1.2 riastrad drm_md_clflush_virt_range(vaddr, nbytes);
112 1.10 riastrad drm_md_clflush_commit();
113 1.10 riastrad } else {
114 1.2 riastrad drm_md_clflush_all();
115 1.10 riastrad }
116 1.8 jmcneill #endif
117 1.2 riastrad }
118 1.2 riastrad
119 1.2 riastrad #if defined(__i386__) || defined(__x86_64__)
120 1.2 riastrad
121 1.7 jmcneill #include <machine/cpufunc.h>
122 1.7 jmcneill
123 1.2 riastrad static bool
124 1.2 riastrad drm_md_clflush_finegrained_p(void)
125 1.2 riastrad {
126 1.2 riastrad return ISSET(cpu_info_primary.ci_feat_val[0], CPUID_CFLUSH);
127 1.2 riastrad }
128 1.2 riastrad
129 1.2 riastrad static void
130 1.11 riastrad drm_x86_clflush_xc(void *arg0 __unused, void *arg1 __unused)
131 1.2 riastrad {
132 1.11 riastrad wbinvd();
133 1.2 riastrad }
134 1.2 riastrad
135 1.2 riastrad static void
136 1.11 riastrad drm_md_clflush_all(void)
137 1.2 riastrad {
138 1.11 riastrad xc_wait(xc_broadcast(0, &drm_x86_clflush_xc, NULL, NULL));
139 1.2 riastrad }
140 1.2 riastrad
/*
 * Order prior memory operations before the CLFLUSHes that follow.
 * Support for CLFLUSH implies support for MFENCE.
 */
static void
drm_md_clflush_begin(void)
{
	x86_mfence();
}
147 1.2 riastrad
/*
 * Order the preceding CLFLUSHes before subsequent memory operations.
 */
static void
drm_md_clflush_commit(void)
{
	x86_mfence();
}
153 1.10 riastrad
154 1.10 riastrad static void
155 1.2 riastrad drm_md_clflush_page(struct page *page)
156 1.2 riastrad {
157 1.2 riastrad void *const vaddr = kmap_atomic(page);
158 1.2 riastrad
159 1.2 riastrad drm_md_clflush_virt_range(vaddr, PAGE_SIZE);
160 1.2 riastrad
161 1.2 riastrad kunmap_atomic(vaddr);
162 1.2 riastrad }
163 1.2 riastrad
164 1.2 riastrad static void
165 1.11 riastrad drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
166 1.2 riastrad {
167 1.11 riastrad const unsigned clflush_size = cpu_info_primary.ci_cflush_lsize;
168 1.11 riastrad const vaddr_t vaddr = (vaddr_t)ptr;
169 1.11 riastrad const vaddr_t start = rounddown(vaddr, clflush_size);
170 1.11 riastrad const vaddr_t end = roundup(vaddr + nbytes, clflush_size);
171 1.11 riastrad vaddr_t va;
172 1.2 riastrad
173 1.11 riastrad for (va = start; va < end; va += clflush_size)
174 1.11 riastrad asm volatile ("clflush %0" : : "m" (*(const char *)va));
175 1.2 riastrad }
176 1.2 riastrad
177 1.9 riastrad #elif defined(__powerpc__)
178 1.9 riastrad
179 1.9 riastrad static bool
180 1.9 riastrad drm_md_clflush_finegrained_p(void)
181 1.9 riastrad {
182 1.9 riastrad return true;
183 1.9 riastrad }
184 1.9 riastrad
/*
 * Whole-cache flush is never needed here because
 * drm_md_clflush_finegrained_p always returns true; panic if we are
 * ever asked for it anyway.
 */
static void
drm_md_clflush_all(void)
{
	panic("don't know how to flush entire cache on powerpc");
}
190 1.9 riastrad
/*
 * Nothing to do before issuing dcbf on powerpc; ordering is handled
 * by the sync in drm_md_clflush_commit.
 */
static void
drm_md_clflush_begin(void)
{
}
195 1.11 riastrad
196 1.11 riastrad static void
197 1.10 riastrad drm_md_clflush_commit(void)
198 1.10 riastrad {
199 1.11 riastrad asm volatile ("sync" ::: "memory");
200 1.10 riastrad }
201 1.10 riastrad
202 1.10 riastrad static void
203 1.9 riastrad drm_md_clflush_page(struct page *page)
204 1.9 riastrad {
205 1.9 riastrad void *const vaddr = kmap_atomic(page);
206 1.9 riastrad
207 1.9 riastrad drm_md_clflush_virt_range(vaddr, PAGE_SIZE);
208 1.9 riastrad
209 1.9 riastrad kunmap_atomic(vaddr);
210 1.9 riastrad }
211 1.9 riastrad
212 1.9 riastrad static void
213 1.9 riastrad drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
214 1.9 riastrad {
215 1.9 riastrad const unsigned dcsize = curcpu()->ci_ci.dcache_line_size;
216 1.9 riastrad vaddr_t va = (vaddr_t)ptr;
217 1.9 riastrad vaddr_t start = rounddown(va, dcsize);
218 1.9 riastrad vaddr_t end = roundup(va + nbytes, dcsize);
219 1.9 riastrad vsize_t len = end - start;
220 1.9 riastrad vsize_t off;
221 1.9 riastrad
222 1.9 riastrad for (off = 0; off < len; off += dcsize)
223 1.11 riastrad asm volatile ("dcbf\t%0,%1" : : "b"(start), "r"(off));
224 1.9 riastrad }
225 1.9 riastrad
226 1.9 riastrad #endif
227