/*	$NetBSD: drm_cache.c,v 1.3.6.1 2015/04/06 15:18:17 skrll Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_cache.c,v 1.3.6.1 2015/04/06 15:18:17 skrll Exp $");

#include <sys/types.h>
#include <sys/xcall.h>

#include <uvm/uvm_extern.h>

#include <machine/cpufunc.h>

#include <linux/mm_types.h>

#include <drm/drmP.h>

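/*
 * Machine-dependent cache flush primitives, implemented for x86 below.
 */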
static bool	drm_md_clflush_finegrained_p(void);
static void	drm_md_clflush_all(void);
static void	drm_md_clflush_page(struct page *);
static void	drm_md_clflush_virt_range(const void *, size_t);

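/*
 * drm_clflush_pages: Flush the data cache for each of the npages pages
 * in the array.  If the CPU cannot flush individual cache lines, flush
 * the entire cache on every CPU instead.
 */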
void
drm_clflush_pages(struct page **pages, unsigned long npages)
{

	if (drm_md_clflush_finegrained_p()) {
		while (npages--)
			drm_md_clflush_page(pages[npages]);
	} else {
		drm_md_clflush_all();
	}
}

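/*
 * drm_clflush_pglist: Flush the data cache for each vm_page on the
 * pglist, via the struct page wrapper that embeds it at p_vmp.
 */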
void
drm_clflush_pglist(struct pglist *list)
{

	if (drm_md_clflush_finegrained_p()) {
		struct vm_page *page;

		TAILQ_FOREACH(page, list, pageq.queue)
			drm_md_clflush_page(container_of(page, struct page,
				p_vmp));
	} else {
		drm_md_clflush_all();
	}
}

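/*
 * drm_clflush_page: Flush the data cache for a single page.
 */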
void
drm_clflush_page(struct page *page)
{

	if (drm_md_clflush_finegrained_p())
		drm_md_clflush_page(page);
	else
		drm_md_clflush_all();
}

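/*
 * drm_clflush_virt_range: Flush the data cache for the nbytes bytes of
 * kernel virtual memory starting at vaddr.
 */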
void
drm_clflush_virt_range(const void *vaddr, size_t nbytes)
{

	if (drm_md_clflush_finegrained_p())
		drm_md_clflush_virt_range(vaddr, nbytes);
	else
		drm_md_clflush_all();
}

#if defined(__i386__) || defined(__x86_64__)

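/*
 * drm_md_clflush_finegrained_p: True if the boot CPU advertises the
 * CLFLUSH instruction in its CPUID feature flags.
 */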
static bool
drm_md_clflush_finegrained_p(void)
{
	return ISSET(cpu_info_primary.ci_feat_val[0], CPUID_CFLUSH);
}

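/*
 * drm_x86_clflush: Flush the cache line containing vaddr on the
 * calling CPU.
 */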
static void
drm_x86_clflush(const void *vaddr)
{
	asm volatile ("clflush %0" : : "m" (*(const char *)vaddr));
}

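/*
 * drm_x86_clflush_size: Return the cache-line size, in bytes, that
 * CLFLUSH operates on, as recorded for the boot CPU.
 */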
static size_t
drm_x86_clflush_size(void)
{
	KASSERT(drm_md_clflush_finegrained_p());
	return cpu_info_primary.ci_cflush_lsize;
}

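/*
 * drm_x86_clflush_xc: Cross-call handler to write back and invalidate
 * the whole data cache on the CPU it runs on.
 */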
static void
drm_x86_clflush_xc(void *arg0 __unused, void *arg1 __unused)
{
	wbinvd();
}

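/*
 * drm_md_clflush_all: Broadcast a cross-call to run WBINVD on every
 * CPU and wait for all of them to finish.
 */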
static void
drm_md_clflush_all(void)
{
	xc_wait(xc_broadcast(0, &drm_x86_clflush_xc, NULL, NULL));
}

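/*
 * drm_md_clflush_page: Temporarily map the page into kernel virtual
 * address space and flush each cache line in it.
 */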
static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

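/*
 * drm_md_clflush_virt_range: Flush every cache line overlapping the
 * nbytes bytes starting at vaddr, with memory fences on either side to
 * order the flushes against surrounding loads and stores.
 */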
static void
drm_md_clflush_virt_range(const void *vaddr, size_t nbytes)
{
	const unsigned clflush_size = drm_x86_clflush_size();
	const vaddr_t va = (vaddr_t)vaddr;
	const char *const start = (const void *)rounddown(va, clflush_size);
	const char *const end = (const void *)roundup(va + nbytes,
	    clflush_size);
	const char *p;

	/* Support for CLFLUSH implies support for MFENCE.   */
	KASSERT(drm_md_clflush_finegrained_p());
	x86_mfence();
	for (p = start; p < end; p += clflush_size)
		drm_x86_clflush(p);
	x86_mfence();
}

#endif	/* defined(__i386__) || defined(__x86_64__) */