/* Blackfin Memory Management Unit (MMU) model.
2
3 Copyright (C) 2010-2023 Free Software Foundation, Inc.
4 Contributed by Analog Devices, Inc.
5
6 This file is part of simulators.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 /* This must come before any other includes. */
22 #include "defs.h"
23
24 #include "sim-main.h"
25 #include "sim-options.h"
26 #include "devices.h"
27 #include "dv-bfin_mmu.h"
28 #include "dv-bfin_cec.h"
29
30 /* XXX: Should this really be two blocks of registers ? PRM describes
31 these as two Content Addressable Memory (CAM) blocks. */
32
struct bfin_mmu
{
  /* MMR window base address this device was attached at
     (set by attach_bfin_mmu_regs).  */
  bu32 base;

  /* Order after here is important -- matches hardware MMR layout.
     The _dpad*/_ipad* filler arrays keep every field at its hardware
     byte offset relative to sram_base_address; data-side registers
     occupy offsets 0x000-0xfff, instruction-side 0x1000 onward.  */
  bu32 sram_base_address;

  bu32 dmem_control, dcplb_fault_status, dcplb_fault_addr;
  char _dpad0[0x100 - 0x0 - (4 * 4)];
  bu32 dcplb_addr[16];
  char _dpad1[0x200 - 0x100 - (4 * 16)];
  bu32 dcplb_data[16];
  char _dpad2[0x300 - 0x200 - (4 * 16)];
  bu32 dtest_command;
  char _dpad3[0x400 - 0x300 - (4 * 1)];
  bu32 dtest_data[2];

  char _dpad4[0x1000 - 0x400 - (4 * 2)];

  bu32 idk;	/* Filler MMR; hardware simply ignores.  */
  bu32 imem_control, icplb_fault_status, icplb_fault_addr;
  char _ipad0[0x100 - 0x0 - (4 * 4)];
  bu32 icplb_addr[16];
  char _ipad1[0x200 - 0x100 - (4 * 16)];
  bu32 icplb_data[16];
  char _ipad2[0x300 - 0x200 - (4 * 16)];
  bu32 itest_command;
  char _ipad3[0x400 - 0x300 - (4 * 1)];
  bu32 itest_data[2];
};
/* mmr_base() is the struct offset of the first hardware-visible MMR;
   mmr_offset() converts a field to its byte offset within the MMR
   window; mmr_idx() to its 32-bit register index.  */
#define mmr_base() offsetof(struct bfin_mmu, sram_base_address)
#define mmr_offset(mmr) (offsetof(struct bfin_mmu, mmr) - mmr_base())
#define mmr_idx(mmr) (mmr_offset (mmr) / 4)

/* Register names for trace output, indexed by mmr_idx().  Slots that
   fall in padding stay NULL and are rendered as "<INV>" by mmr_name().  */
static const char * const mmr_names[BFIN_COREMMR_MMU_SIZE / 4] =
{
  "SRAM_BASE_ADDRESS", "DMEM_CONTROL", "DCPLB_FAULT_STATUS", "DCPLB_FAULT_ADDR",
  [mmr_idx (dcplb_addr[0])] = "DCPLB_ADDR0",
  "DCPLB_ADDR1", "DCPLB_ADDR2", "DCPLB_ADDR3", "DCPLB_ADDR4", "DCPLB_ADDR5",
  "DCPLB_ADDR6", "DCPLB_ADDR7", "DCPLB_ADDR8", "DCPLB_ADDR9", "DCPLB_ADDR10",
  "DCPLB_ADDR11", "DCPLB_ADDR12", "DCPLB_ADDR13", "DCPLB_ADDR14", "DCPLB_ADDR15",
  [mmr_idx (dcplb_data[0])] = "DCPLB_DATA0",
  "DCPLB_DATA1", "DCPLB_DATA2", "DCPLB_DATA3", "DCPLB_DATA4", "DCPLB_DATA5",
  "DCPLB_DATA6", "DCPLB_DATA7", "DCPLB_DATA8", "DCPLB_DATA9", "DCPLB_DATA10",
  "DCPLB_DATA11", "DCPLB_DATA12", "DCPLB_DATA13", "DCPLB_DATA14", "DCPLB_DATA15",
  [mmr_idx (dtest_command)] = "DTEST_COMMAND",
  [mmr_idx (dtest_data[0])] = "DTEST_DATA0", "DTEST_DATA1",
  [mmr_idx (imem_control)] = "IMEM_CONTROL", "ICPLB_FAULT_STATUS", "ICPLB_FAULT_ADDR",
  [mmr_idx (icplb_addr[0])] = "ICPLB_ADDR0",
  "ICPLB_ADDR1", "ICPLB_ADDR2", "ICPLB_ADDR3", "ICPLB_ADDR4", "ICPLB_ADDR5",
  "ICPLB_ADDR6", "ICPLB_ADDR7", "ICPLB_ADDR8", "ICPLB_ADDR9", "ICPLB_ADDR10",
  "ICPLB_ADDR11", "ICPLB_ADDR12", "ICPLB_ADDR13", "ICPLB_ADDR14", "ICPLB_ADDR15",
  [mmr_idx (icplb_data[0])] = "ICPLB_DATA0",
  "ICPLB_DATA1", "ICPLB_DATA2", "ICPLB_DATA3", "ICPLB_DATA4", "ICPLB_DATA5",
  "ICPLB_DATA6", "ICPLB_DATA7", "ICPLB_DATA8", "ICPLB_DATA9", "ICPLB_DATA10",
  "ICPLB_DATA11", "ICPLB_DATA12", "ICPLB_DATA13", "ICPLB_DATA14", "ICPLB_DATA15",
  [mmr_idx (itest_command)] = "ITEST_COMMAND",
  [mmr_idx (itest_data[0])] = "ITEST_DATA0", "ITEST_DATA1",
};
/* Trace name for an MMR byte offset; NULL slots become "<INV>".  */
#define mmr_name(off) (mmr_names[(off) / 4] ? : "<INV>")
93
/* Set by the --mmu-skip-cplbs command line option (see bfin_mmu_options);
   when true, CPLB table walks are bypassed in address checking.  */
static bool bfin_mmu_skip_cplbs = false;
95
96 static unsigned
97 bfin_mmu_io_write_buffer (struct hw *me, const void *source,
98 int space, address_word addr, unsigned nr_bytes)
99 {
100 struct bfin_mmu *mmu = hw_data (me);
101 bu32 mmr_off;
102 bu32 value;
103 bu32 *valuep;
104
105 /* Invalid access mode is higher priority than missing register. */
106 if (!dv_bfin_mmr_require_32 (me, addr, nr_bytes, true))
107 return 0;
108
109 value = dv_load_4 (source);
110
111 mmr_off = addr - mmu->base;
112 valuep = (void *)((uintptr_t)mmu + mmr_base() + mmr_off);
113
114 HW_TRACE_WRITE ();
115
116 switch (mmr_off)
117 {
118 case mmr_offset(dmem_control):
119 case mmr_offset(imem_control):
120 /* XXX: IMC/DMC bit should add/remove L1 cache regions ... */
121 case mmr_offset(dtest_data[0]) ... mmr_offset(dtest_data[1]):
122 case mmr_offset(itest_data[0]) ... mmr_offset(itest_data[1]):
123 case mmr_offset(dcplb_addr[0]) ... mmr_offset(dcplb_addr[15]):
124 case mmr_offset(dcplb_data[0]) ... mmr_offset(dcplb_data[15]):
125 case mmr_offset(icplb_addr[0]) ... mmr_offset(icplb_addr[15]):
126 case mmr_offset(icplb_data[0]) ... mmr_offset(icplb_data[15]):
127 *valuep = value;
128 break;
129 case mmr_offset(sram_base_address):
130 case mmr_offset(dcplb_fault_status):
131 case mmr_offset(dcplb_fault_addr):
132 case mmr_offset(idk):
133 case mmr_offset(icplb_fault_status):
134 case mmr_offset(icplb_fault_addr):
135 /* Discard writes to these. */
136 break;
137 case mmr_offset(itest_command):
138 /* XXX: Not supported atm. */
139 if (value)
140 hw_abort (me, "ITEST_COMMAND unimplemented");
141 break;
142 case mmr_offset(dtest_command):
143 /* Access L1 memory indirectly. */
144 *valuep = value;
145 if (value)
146 {
147 bu32 addr = mmu->sram_base_address |
148 ((value >> (26 - 11)) & (1 << 11)) | /* addr bit 11 (Way0/Way1) */
149 ((value >> (24 - 21)) & (1 << 21)) | /* addr bit 21 (Data/Inst) */
150 ((value >> (23 - 15)) & (1 << 15)) | /* addr bit 15 (Data Bank) */
151 ((value >> (16 - 12)) & (3 << 12)) | /* addr bits 13:12 (Subbank) */
152 (value & 0x47F8); /* addr bits 14 & 10:3 */
153
154 if (!(value & TEST_DATA_ARRAY))
155 hw_abort (me, "DTEST_COMMAND tag array unimplemented");
156 if (value & 0xfa7cb801)
157 hw_abort (me, "DTEST_COMMAND bits undefined");
158
159 if (value & TEST_WRITE)
160 sim_write (hw_system (me), addr, mmu->dtest_data, 8);
161 else
162 sim_read (hw_system (me), addr, mmu->dtest_data, 8);
163 }
164 break;
165 default:
166 dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
167 return 0;
168 }
169
170 return nr_bytes;
171 }
172
173 static unsigned
174 bfin_mmu_io_read_buffer (struct hw *me, void *dest,
175 int space, address_word addr, unsigned nr_bytes)
176 {
177 struct bfin_mmu *mmu = hw_data (me);
178 bu32 mmr_off;
179 bu32 *valuep;
180
181 /* Invalid access mode is higher priority than missing register. */
182 if (!dv_bfin_mmr_require_32 (me, addr, nr_bytes, false))
183 return 0;
184
185 mmr_off = addr - mmu->base;
186 valuep = (void *)((uintptr_t)mmu + mmr_base() + mmr_off);
187
188 HW_TRACE_READ ();
189
190 switch (mmr_off)
191 {
192 case mmr_offset(dmem_control):
193 case mmr_offset(imem_control):
194 case mmr_offset(dtest_command):
195 case mmr_offset(dtest_data[0]) ... mmr_offset(dtest_data[2]):
196 case mmr_offset(itest_command):
197 case mmr_offset(itest_data[0]) ... mmr_offset(itest_data[2]):
198 /* XXX: should do something here. */
199 case mmr_offset(dcplb_addr[0]) ... mmr_offset(dcplb_addr[15]):
200 case mmr_offset(dcplb_data[0]) ... mmr_offset(dcplb_data[15]):
201 case mmr_offset(icplb_addr[0]) ... mmr_offset(icplb_addr[15]):
202 case mmr_offset(icplb_data[0]) ... mmr_offset(icplb_data[15]):
203 case mmr_offset(sram_base_address):
204 case mmr_offset(dcplb_fault_status):
205 case mmr_offset(dcplb_fault_addr):
206 case mmr_offset(idk):
207 case mmr_offset(icplb_fault_status):
208 case mmr_offset(icplb_fault_addr):
209 dv_store_4 (dest, *valuep);
210 break;
211 default:
212 dv_bfin_mmr_invalid (me, addr, nr_bytes, false);
213 return 0;
214 }
215
216 return nr_bytes;
217 }
218
219 static void
220 attach_bfin_mmu_regs (struct hw *me, struct bfin_mmu *mmu)
221 {
222 address_word attach_address;
223 int attach_space;
224 unsigned attach_size;
225 reg_property_spec reg;
226
227 if (hw_find_property (me, "reg") == NULL)
228 hw_abort (me, "Missing \"reg\" property");
229
230 if (!hw_find_reg_array_property (me, "reg", 0, ®))
231 hw_abort (me, "\"reg\" property must contain three addr/size entries");
232
233 hw_unit_address_to_attach_address (hw_parent (me),
234 ®.address,
235 &attach_space, &attach_address, me);
236 hw_unit_size_to_attach_size (hw_parent (me), ®.size, &attach_size, me);
237
238 if (attach_size != BFIN_COREMMR_MMU_SIZE)
239 hw_abort (me, "\"reg\" size must be %#x", BFIN_COREMMR_MMU_SIZE);
240
241 hw_attach_address (hw_parent (me),
242 0, attach_space, attach_address, attach_size, me);
243
244 mmu->base = attach_address;
245 }
246
/* Device instantiation hook: allocate state, register the MMR I/O
   callbacks, attach the register window, and set power-on defaults.  */
static void
bfin_mmu_finish (struct hw *me)
{
  struct bfin_mmu *mmu;

  mmu = HW_ZALLOC (me, struct bfin_mmu);

  set_hw_data (me, mmu);
  set_hw_io_read_buffer (me, bfin_mmu_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_mmu_io_write_buffer);

  attach_bfin_mmu_regs (me, mmu);

  /* Initialize the MMU.  */
  /* NOTE(review): the per-core offset is disabled ("- 0"); the commented
     expression suggests each core was meant to get a 4MiB-spaced L1 base
     -- confirm before enabling for multi-core parts.  */
  mmu->sram_base_address = 0xff800000 - 0;
		/*(4 * 1024 * 1024 * CPU_INDEX (hw_system_cpu (me)));*/
  mmu->dmem_control = 0x00000001;
  mmu->imem_control = 0x00000001;
}
266
/* Device table entry: a "bfin_mmu" node in the device tree is
   instantiated via bfin_mmu_finish.  */
const struct hw_descriptor dv_bfin_mmu_descriptor[] =
{
  {"bfin_mmu", bfin_mmu_finish,},
  {NULL, NULL},
};
272
273 /* Device option parsing. */
275
static DECLARE_OPTION_HANDLER (bfin_mmu_option_handler);

/* Option codes; OPTION_START keeps us clear of the common sim options.  */
enum {
  OPTION_MMU_SKIP_TABLES = OPTION_START,
};

/* Command line options registered by sim_install_bfin_mmu below.  */
static const OPTION bfin_mmu_options[] =
{
  { {"mmu-skip-cplbs", no_argument, NULL, OPTION_MMU_SKIP_TABLES },
    '\0', NULL, "Skip parsing of CPLB tables (big speed increase)",
    bfin_mmu_option_handler, NULL },

  { {NULL, no_argument, NULL, 0}, '\0', NULL, NULL, NULL, NULL }
};
290
291 static SIM_RC
292 bfin_mmu_option_handler (SIM_DESC sd, sim_cpu *current_cpu, int opt,
293 char *arg, int is_command)
294 {
295 switch (opt)
296 {
297 case OPTION_MMU_SKIP_TABLES:
298 bfin_mmu_skip_cplbs = true;
299 return SIM_RC_OK;
300
301 default:
302 sim_io_eprintf (sd, "Unknown Blackfin MMU option %d\n", opt);
303 return SIM_RC_FAIL;
304 }
305 }
306
/* Provide a prototype to silence -Wmissing-prototypes.  */
extern MODULE_INIT_FN sim_install_bfin_mmu;

/* Module init hook: register the Blackfin MMU options with SD.  */
SIM_RC
sim_install_bfin_mmu (SIM_DESC sd)
{
  SIM_ASSERT (STATE_MAGIC (sd) == SIM_MAGIC_NUMBER);
  return sim_add_option_table (sd, NULL, bfin_mmu_options);
}
316
/* Per-cpu cached handle on the MMU device state.  */
#define MMU_STATE(cpu) DV_STATE_CACHED (cpu, mmu)

/* Latch an instruction-side fault: record the faulting PC and the
   supervisor flag (bit 17 of ICPLB_FAULT_STATUS).  */
static void
_mmu_log_ifault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 pc, bool supv)
{
  mmu->icplb_fault_addr = pc;
  mmu->icplb_fault_status = supv << 17;
}
326
/* Public hook: log an instruction fault at the current PC.  The
   supervisor flag is taken from whether an interrupt level is active.  */
void
mmu_log_ifault (SIM_CPU *cpu)
{
  _mmu_log_ifault (cpu, MMU_STATE (cpu), PCREG, cec_get_ivg (cpu) >= 0);
}
332
333 static void
334 _mmu_log_fault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 addr, bool write,
335 bool inst, bool miss, bool supv, bool dag1, bu32 faults)
336 {
337 bu32 *fault_status, *fault_addr;
338
339 /* No logging in non-OS mode. */
340 if (!mmu)
341 return;
342
343 fault_status = inst ? &mmu->icplb_fault_status : &mmu->dcplb_fault_status;
344 fault_addr = inst ? &mmu->icplb_fault_addr : &mmu->dcplb_fault_addr;
345 /* ICPLB regs always get updated. */
346 if (!inst)
347 _mmu_log_ifault (cpu, mmu, PCREG, supv);
348
349 *fault_addr = addr;
350 *fault_status =
351 (miss << 19) |
352 (dag1 << 18) |
353 (supv << 17) |
354 (write << 16) |
355 faults;
356 }
357
358 static void
359 _mmu_process_fault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 addr, bool write,
360 bool inst, bool unaligned, bool miss, bool supv, bool dag1)
361 {
362 int excp;
363
364 /* See order in mmu_check_addr() */
365 if (unaligned)
366 excp = inst ? VEC_MISALI_I : VEC_MISALI_D;
367 else if (addr >= BFIN_SYSTEM_MMR_BASE)
368 excp = VEC_ILL_RES;
369 else if (!mmu)
370 excp = inst ? VEC_CPLB_I_M : VEC_CPLB_M;
371 else
372 {
373 /* Misses are hardware errors. */
374 cec_hwerr (cpu, HWERR_EXTERN_ADDR);
375 return;
376 }
377
378 _mmu_log_fault (cpu, mmu, addr, write, inst, miss, supv, dag1, 0);
379 cec_exception (cpu, excp);
380 }
381
382 void
383 mmu_process_fault (SIM_CPU *cpu, bu32 addr, bool write, bool inst,
384 bool unaligned, bool miss)
385 {
386 SIM_DESC sd = CPU_STATE (cpu);
387 struct bfin_mmu *mmu;
388
389 if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT)
390 mmu = NULL;
391 else
392 mmu = MMU_STATE (cpu);
393
394 _mmu_process_fault (cpu, mmu, addr, write, inst, unaligned, miss,
395 cec_is_supervisor_mode (cpu),
396 BFIN_CPU_STATE.multi_pc == PCREG + 6);
397 }
398
399 /* Return values:
400 -2: no known problems
401 -1: valid
402 0: miss
403 1: protection violation
404 2: multiple hits
405 3: unaligned
406 4: miss; hwerr */
407 static int
408 mmu_check_implicit_addr (SIM_CPU *cpu, bu32 addr, bool inst, int size,
409 bool supv, bool dag1)
410 {
411 bool l1 = ((addr & 0xFF000000) == 0xFF000000);
412 bu32 amask = (addr & 0xFFF00000);
413
414 if (addr & (size - 1))
415 return 3;
416
417 /* MMRs may never be executable or accessed from usermode. */
418 if (addr >= BFIN_SYSTEM_MMR_BASE)
419 {
420 if (inst)
421 return 0;
422 else if (!supv || dag1)
423 return 1;
424 else
425 return -1;
426 }
427 else if (inst)
428 {
429 /* Some regions are not executable. */
430 /* XXX: Should this be in the model data ? Core B 561 ? */
431 if (l1)
432 return (amask == 0xFFA00000) ? -1 : 1;
433 }
434 else
435 {
436 /* Some regions are not readable. */
437 /* XXX: Should this be in the model data ? Core B 561 ? */
438 if (l1)
439 return (amask != 0xFFA00000) ? -1 : 4;
440 }
441
442 return -2;
443 }
444
/* Exception order per the PRM (first has highest):
    Inst Multiple CPLB Hits
    Inst Misaligned Access
    Inst Protection Violation
    Inst CPLB Miss
   Only the alignment matters in non-OS mode though.

   Returns 0 when the access is OK, otherwise the exception vector
   (VEC_CPLB_* / VEC_MISALI_*) that should be raised for ADDR.  */
static int
_mmu_check_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst, int size)
{
  SIM_DESC sd = CPU_STATE (cpu);
  struct bfin_mmu *mmu;
  /* NOTE(review): fault_status/fault_addr are computed below but never
     used here -- _mmu_log_fault selects them itself.  Left as-is.  */
  bu32 *fault_status, *fault_addr, *mem_control, *cplb_addr, *cplb_data;
  bu32 faults;
  bool supv, do_excp, dag1;
  int i, hits;

  supv = cec_is_supervisor_mode (cpu);
  /* NOTE(review): presumably multi_pc == PC+6 marks the DAG1 slot of a
     parallel instruction -- confirm against the decoder.  */
  dag1 = (BFIN_CPU_STATE.multi_pc == PCREG + 6);

  /* Non-OS mode (or explicit skip): only the implicit map applies.  */
  if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT || bfin_mmu_skip_cplbs)
    {
      int ret = mmu_check_implicit_addr (cpu, addr, inst, size, supv, dag1);
      /* Valid hits and misses are OK in non-OS envs.  */
      if (ret < 0)
	return 0;
      /* Raises the fault itself; presumably does not return here.  */
      _mmu_process_fault (cpu, NULL, addr, write, inst, (ret == 3), false, supv, dag1);
    }

  mmu = MMU_STATE (cpu);
  /* Select the instruction- or data-side register bank.  */
  fault_status = inst ? &mmu->icplb_fault_status : &mmu->dcplb_fault_status;
  fault_addr = inst ? &mmu->icplb_fault_addr : &mmu->dcplb_fault_addr;
  mem_control = inst ? &mmu->imem_control : &mmu->dmem_control;
  cplb_addr = inst ? &mmu->icplb_addr[0] : &mmu->dcplb_addr[0];
  cplb_data = inst ? &mmu->icplb_data[0] : &mmu->dcplb_data[0];

  faults = 0;
  hits = 0;
  do_excp = false;

  /* CPLBs disabled -> little to do.  */
  if (!(*mem_control & ENCPLB))
    {
      hits = 1;
      goto implicit_check;
    }

  /* Check all the CPLBs first.  */
  for (i = 0; i < 16; ++i)
    {
      /* Page sizes by PAGE_SIZE encoding: 1KB, 4KB, 1MB, 4MB.  */
      const bu32 pages[4] = { 0x400, 0x1000, 0x100000, 0x400000 };
      bu32 addr_lo, addr_hi;

      /* Skip invalid entries.  */
      if (!(cplb_data[i] & CPLB_VALID))
	continue;

      /* See if this entry covers this address.  */
      addr_lo = cplb_addr[i];
      addr_hi = cplb_addr[i] + pages[(cplb_data[i] & PAGE_SIZE) >> 16];
      if (addr < addr_lo || addr >= addr_hi)
	continue;

      ++hits;
      faults |= (1 << i);	/* Remember which entries matched.  */
      if (write)
	{
	  /* Write permission depends on the privilege level.  */
	  if (!supv && !(cplb_data[i] & CPLB_USER_WR))
	    do_excp = true;
	  if (supv && !(cplb_data[i] & CPLB_SUPV_WR))
	    do_excp = true;
	  /* Cacheable, non-write-through page without DIRTY set also
	     faults (so software can mark the page dirty).  */
	  if ((cplb_data[i] & (CPLB_WT | CPLB_L1_CHBL | CPLB_DIRTY)) == CPLB_L1_CHBL)
	    do_excp = true;
	}
      else
	{
	  if (!supv && !(cplb_data[i] & CPLB_USER_RD))
	    do_excp = true;
	}
    }

  /* Handle default/implicit CPLBs.  */
  if (!do_excp && hits < 2)
    {
      int ihits;
 implicit_check:
      ihits = mmu_check_implicit_addr (cpu, addr, inst, size, supv, dag1);
      switch (ihits)
	{
	/* No faults and one match -> good to go.  */
	case -1: return 0;
	case -2:
	  if (hits == 1)
	    return 0;
	  break;
	case 4:
	  /* Implicit-map miss that the hardware reports as a bus error.  */
	  cec_hwerr (cpu, HWERR_EXTERN_ADDR);
	  return 0;
	default:
	  hits = ihits;
	}
    }
  else
    /* Normalize hit count so hits==2 is always multiple hit exception.  */
    hits = min (2, hits);

  _mmu_log_fault (cpu, mmu, addr, write, inst, hits == 0, supv, dag1, faults);

  /* Map normalized code (0=miss, 1=protection, 2=multi-hit,
     3=misaligned) to the corresponding exception vector.  */
  if (inst)
    {
      int iexcps[] = { VEC_CPLB_I_M, VEC_CPLB_I_VL, VEC_CPLB_I_MHIT, VEC_MISALI_I };
      return iexcps[hits];
    }
  else
    {
      int dexcps[] = { VEC_CPLB_M, VEC_CPLB_VL, VEC_CPLB_MHIT, VEC_MISALI_D };
      return dexcps[hits];
    }
}
563
564 void
565 mmu_check_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst, int size)
566 {
567 int excp = _mmu_check_addr (cpu, addr, write, inst, size);
568 if (excp)
569 cec_exception (cpu, excp);
570 }
571
572 void
573 mmu_check_cache_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst)
574 {
575 bu32 cacheaddr;
576 int excp;
577
578 cacheaddr = addr & ~(BFIN_L1_CACHE_BYTES - 1);
579 excp = _mmu_check_addr (cpu, cacheaddr, write, inst, BFIN_L1_CACHE_BYTES);
580 if (excp == 0)
581 return;
582
583 /* Most exceptions are ignored with cache funcs. */
584 /* XXX: Not sure if we should be ignoring CPLB misses. */
585 if (inst)
586 {
587 if (excp == VEC_CPLB_I_VL)
588 return;
589 }
590 else
591 {
592 if (excp == VEC_CPLB_VL)
593 return;
594 }
595 cec_exception (cpu, excp);
596 }
597