/*	$NetBSD: cache.h,v 1.17 2025/05/03 02:00:46 riastradh Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MIPS_CACHE_H_
#define	_MIPS_CACHE_H_

#ifdef _KERNEL_OPT
#include "opt_cputype.h"
#endif

#include <sys/types.h>
/*
 * Cache operations.
 *
 * We define the following primitives:
 *
 * --- Instruction cache synchronization (mandatory):
 *
 *	icache_sync_all		Synchronize I-cache
 *
 *	icache_sync_range	Synchronize I-cache range
 *
 *	icache_sync_range_index	(index ops)
 *
 * --- Primary data cache (mandatory):
 *
 *	pdcache_wbinv_all	Write-back Invalidate primary D-cache
 *
 *	pdcache_wbinv_range	Write-back Invalidate primary D-cache range
 *
 *	pdcache_wbinv_range_index (index ops)
 *
 *	pdcache_inv_range	Invalidate primary D-cache range
 *
 *	pdcache_wb_range	Write-back primary D-cache range
 *
 * --- Secondary data cache (optional):
 *
 *	sdcache_wbinv_all	Write-back Invalidate secondary D-cache
 *
 *	sdcache_wbinv_range	Write-back Invalidate secondary D-cache range
 *
 *	sdcache_wbinv_range_index (index ops)
 *
 *	sdcache_inv_range	Invalidate secondary D-cache range
 *
 *	sdcache_wb_range	Write-back secondary D-cache range
 *
 * There are some rules that must be followed:
 *
 *	I-cache Synch (all or range):
 *		The goal is to synchronize the instruction stream,
 *		so you may need to write back dirty data cache
 *		blocks first.  If a range is requested, and you
 *		can't synchronize just a range, you have to hit
 *		the whole thing.
 *
 *	D-cache Write-back Invalidate range:
 *		If you can't WB-Inv a range, you must WB-Inv the
 *		entire D-cache.
 *
 *	D-cache Invalidate:
 *		If you can't Inv the D-cache without doing a
 *		Write-back, YOU MUST PANIC.  This is to catch
 *		errors in calling code.  Callers must be aware
 *		of this scenario, and must handle it appropriately
 *		(consider the bus_dma(9) operations).
 *
 *	D-cache Write-back:
 *		If you can't Write-back without doing an invalidate,
 *		that's fine.  Then treat this as a WB-Inv.  Skipping
 *		the invalidate is merely an optimization.
 *
 *	All operations:
 *		Valid virtual addresses must be passed to the
 *		cache operation.
 *
 * Finally, these primitives are grouped together in reasonable
 * ways.  For all operations described here, first the primary
 * cache is frobbed, then the secondary cache is frobbed, if the
 * operation for the secondary cache exists.
 *
 *	mips_icache_sync_all		Synchronize I-cache
 *
 *	mips_icache_sync_range		Synchronize I-cache range
 *
 *	mips_icache_sync_range_index	(index ops)
 *
 *	mips_dcache_wbinv_all		Write-back Invalidate D-cache
 *
 *	mips_dcache_wbinv_range		Write-back Invalidate D-cache range
 *
 *	mips_dcache_wbinv_range_index	(index ops)
 *
 *	mips_dcache_inv_range		Invalidate D-cache range
 *
 *	mips_dcache_wb_range		Write-back D-cache range
 */
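
/*
 * Illustrative only (not part of this interface): a minimal sketch of how
 * a bus_dma(9)-style consumer might bracket a device DMA transfer with the
 * grouped operations above.  The buffer name and length are hypothetical.
 *
 *	// device will read from memory: push dirty lines out first
 *	mips_dcache_wb_range(bufva, buflen);
 *	... start DMA; device reads from bufva ...
 *
 *	... DMA completes; device has written to bufva ...
 *	// discard stale lines so the CPU sees the device's data
 *	mips_dcache_inv_range(bufva, buflen);
 *
 * Per the rules above, an implementation that cannot invalidate without
 * first writing back must panic rather than risk overwriting the data
 * just written by the device.
 */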

struct mips_cache_ops {
	void	(*mco_icache_sync_all)(void);
	void	(*mco_icache_sync_range)(register_t, vsize_t);
	void	(*mco_icache_sync_range_index)(vaddr_t, vsize_t);

	void	(*mco_pdcache_wbinv_all)(void);
	void	(*mco_pdcache_wbinv_range)(register_t, vsize_t);
	void	(*mco_pdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_pdcache_inv_range)(register_t, vsize_t);
	void	(*mco_pdcache_wb_range)(register_t, vsize_t);

	/* These are called only by the (mipsNN) icache functions. */
	void	(*mco_intern_icache_sync_range_index)(vaddr_t, vsize_t);
	void	(*mco_intern_icache_sync_range)(register_t, vsize_t);
	void	(*mco_intern_pdcache_sync_all)(void);
	void	(*mco_intern_pdcache_sync_range_index)(vaddr_t, vsize_t);
	void	(*mco_intern_pdcache_sync_range)(register_t, vsize_t);
	/* This is used internally by the (mipsNN) pdcache functions. */
	void	(*mco_intern_pdcache_wbinv_range_index)(vaddr_t, vsize_t);

	void	(*mco_sdcache_wbinv_all)(void);
	void	(*mco_sdcache_wbinv_range)(register_t, vsize_t);
	void	(*mco_sdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_sdcache_inv_range)(register_t, vsize_t);
	void	(*mco_sdcache_wb_range)(register_t, vsize_t);

	/* These are called only by the (mipsNN) icache functions. */
	void	(*mco_intern_sdcache_sync_all)(void);
	void	(*mco_intern_sdcache_sync_range_index)(vaddr_t, vsize_t);
	void	(*mco_intern_sdcache_sync_range)(register_t, vsize_t);

	/* This is used internally by the (mipsNN) sdcache functions. */
	void	(*mco_intern_sdcache_wbinv_range_index)(vaddr_t, vsize_t);
};

extern struct mips_cache_ops mips_cache_ops;
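
/*
 * Illustrative only: mips_config_cache() (declared near the end of this
 * header) is expected to fill in the mips_cache_ops table with routines
 * for the CPU detected at boot.  A minimal sketch, using hypothetical
 * backend names, might look like:
 *
 *	mips_cache_ops.mco_icache_sync_all    = cpu_icache_sync_all;
 *	mips_cache_ops.mco_icache_sync_range  = cpu_icache_sync_range;
 *	mips_cache_ops.mco_pdcache_wbinv_all  = cpu_pdcache_wbinv_all;
 *	...
 *	// leave the secondary-cache hooks NULL when there is no L2;
 *	// the __mco_*() wrappers below test them before calling
 *	mips_cache_ops.mco_sdcache_wbinv_all  = NULL;
 */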

/* PRIMARY CACHE VARIABLES */
struct mips_cache_info {
	u_int	mci_picache_size;
	u_int	mci_picache_line_size;
	u_int	mci_picache_ways;
	u_int	mci_picache_way_size;
	u_int	mci_picache_way_mask;
	bool	mci_picache_vivt;	/* virtually indexed and tagged */

	u_int	mci_pdcache_size;	/* and unified */
	u_int	mci_pdcache_line_size;
	u_int	mci_pdcache_ways;
	u_int	mci_pdcache_way_size;
	u_int	mci_pdcache_way_mask;
	bool	mci_pdcache_write_through;

	bool	mci_pcache_unified;

	/* SECONDARY CACHE VARIABLES */
	u_int	mci_sicache_size;
	u_int	mci_sicache_line_size;
	u_int	mci_sicache_ways;
	u_int	mci_sicache_way_size;
	u_int	mci_sicache_way_mask;

	u_int	mci_sdcache_size;	/* and unified */
	u_int	mci_sdcache_line_size;
	u_int	mci_sdcache_ways;
	u_int	mci_sdcache_way_size;
	u_int	mci_sdcache_way_mask;
	bool	mci_sdcache_write_through;

	bool	mci_scache_unified;

	/* TERTIARY CACHE VARIABLES */
	u_int	mci_tcache_size;	/* always unified */
	u_int	mci_tcache_line_size;
	u_int	mci_tcache_ways;
	u_int	mci_tcache_way_size;
	u_int	mci_tcache_way_mask;
	bool	mci_tcache_write_through;

	/*
	 * These two variables inform the rest of the kernel about the
	 * size of the largest D-cache line present in the system.  The
	 * mask can be used to determine if a region of memory is cache
	 * line size aligned (see the sketch following this structure).
	 *
	 * Whenever any code updates a data cache line size, it should
	 * call mips_dcache_compute_align() to recompute these values.
	 */
	u_int	mci_dcache_align;
	u_int	mci_dcache_align_mask;

	u_int	mci_cache_prefer_mask;
	u_int	mci_cache_alias_mask;
	u_int	mci_icache_alias_mask;

	bool	mci_cache_virtual_alias;
	bool	mci_icache_virtual_alias;
};
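
/*
 * Illustrative only: a minimal sketch of the alignment check mentioned in
 * the mci_dcache_align comment above, assuming mci_dcache_align_mask is
 * (mci_dcache_align - 1).  "va" and "len" are hypothetical names.
 *
 *	if ((((vaddr_t)va) | len) & mips_cache_info.mci_dcache_align_mask)
 *		... region is not aligned to the largest D-cache line ...
 */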


#if (MIPS1 + MIPS64_RMIXL + MIPS64R2_RMIXL + MIPS64_OCTEON) > 0 && \
    (MIPS3 + MIPS4) == 0 \
    && !defined(_MODULE)
#define	MIPS_CACHE_ALIAS_MASK		0
#define	MIPS_CACHE_VIRTUAL_ALIAS	false
#else
#define	MIPS_CACHE_ALIAS_MASK		mips_cache_info.mci_cache_alias_mask
#define	MIPS_CACHE_VIRTUAL_ALIAS	mips_cache_info.mci_cache_virtual_alias
#endif
#if (MIPS1 + MIPS64_RMIXL + MIPS64_OCTEON) > 0 && \
    (MIPS3 + MIPS4) == 0 \
    && !defined(_MODULE)
#define	MIPS_ICACHE_ALIAS_MASK		0
#define	MIPS_ICACHE_VIRTUAL_ALIAS	false
#else
#define	MIPS_ICACHE_ALIAS_MASK		mips_cache_info.mci_icache_alias_mask
#define	MIPS_ICACHE_VIRTUAL_ALIAS	mips_cache_info.mci_icache_virtual_alias
#endif

extern struct mips_cache_info mips_cache_info;


/*
 * XXX XXX XXX THIS SHOULD NOT EXIST XXX XXX XXX
 */
#define	mips_cache_indexof(x)	(((vaddr_t)(x)) & MIPS_CACHE_ALIAS_MASK)
#define	mips_cache_badalias(x,y) (((vaddr_t)(x)^(vaddr_t)(y)) & MIPS_CACHE_ALIAS_MASK)
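
/*
 * Illustrative only: pmap-style code might use mips_cache_badalias() to
 * decide whether two virtual mappings of the same physical page index to
 * different cache lines and therefore need flushing.  "va1" and "va2" are
 * hypothetical names.
 *
 *	if (MIPS_CACHE_VIRTUAL_ALIAS && mips_cache_badalias(va1, va2))
 *		mips_dcache_wbinv_range_index(va1, PAGE_SIZE);
 */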

#define	__mco_noargs(prefix, x)						\
do {									\
	(*mips_cache_ops.mco_ ## prefix ## p ## x )();			\
	if (*mips_cache_ops.mco_ ## prefix ## s ## x )			\
		(*mips_cache_ops.mco_ ## prefix ## s ## x )();		\
} while (/*CONSTCOND*/0)

#define	__mco_2args(prefix, x, a, b)					\
do {									\
	(*mips_cache_ops.mco_ ## prefix ## p ## x )((a), (b));		\
	if (*mips_cache_ops.mco_ ## prefix ## s ## x )			\
		(*mips_cache_ops.mco_ ## prefix ## s ## x )((a), (b));	\
} while (/*CONSTCOND*/0)
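
/*
 * For reference: __mco_2args() pastes the (possibly empty) prefix plus
 * "p" or "s" onto the member name, so mips_dcache_wbinv_range(v, s) below
 * expands roughly to:
 *
 *	(*mips_cache_ops.mco_pdcache_wbinv_range)((v), (s));
 *	if (*mips_cache_ops.mco_sdcache_wbinv_range)
 *		(*mips_cache_ops.mco_sdcache_wbinv_range)((v), (s));
 *
 * i.e. the primary cache is always operated on, and the secondary cache
 * only when a backend routine has been installed.
 */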

#define	mips_icache_sync_all()						\
	(*mips_cache_ops.mco_icache_sync_all)()

#define	mips_icache_sync_range(v, s)					\
	(*mips_cache_ops.mco_icache_sync_range)((v), (s))

#define	mips_icache_sync_range_index(v, s)				\
	(*mips_cache_ops.mco_icache_sync_range_index)((v), (s))

#define	mips_dcache_wbinv_all()						\
	__mco_noargs(, dcache_wbinv_all)

#define	mips_dcache_wbinv_range(v, s)					\
	__mco_2args(, dcache_wbinv_range, (v), (s))

#define	mips_dcache_wbinv_range_index(v, s)				\
	__mco_2args(, dcache_wbinv_range_index, (v), (s))

#define	mips_dcache_inv_range(v, s)					\
	__mco_2args(, dcache_inv_range, (v), (s))

#define	mips_dcache_wb_range(v, s)					\
	__mco_2args(, dcache_wb_range, (v), (s))


/*
 * Private D-cache functions only called from (currently only the
 * mipsNN) I-cache functions.
 */
#define	mips_intern_dcache_sync_all()					\
	__mco_noargs(intern_, dcache_sync_all)

#define	mips_intern_dcache_sync_range_index(v, s)			\
	__mco_2args(intern_, dcache_sync_range_index, (v), (s))

#define	mips_intern_dcache_sync_range(v, s)				\
	__mco_2args(intern_, dcache_sync_range, (v), (s))

#define	mips_intern_pdcache_wbinv_range_index(v, s)			\
	(*mips_cache_ops.mco_intern_pdcache_wbinv_range_index)((v), (s))

#define	mips_intern_sdcache_wbinv_range_index(v, s)			\
	(*mips_cache_ops.mco_intern_sdcache_wbinv_range_index)((v), (s))

#define	mips_intern_icache_sync_range(v, s)				\
	(*mips_cache_ops.mco_intern_icache_sync_range)((v), (s))

#define	mips_intern_icache_sync_range_index(v, s)			\
	(*mips_cache_ops.mco_intern_icache_sync_range_index)((v), (s))

void	mips_config_cache(void);
void	mips_dcache_compute_align(void);

#include <mips/cache_mipsNN.h>

#endif /* _MIPS_CACHE_H_ */