/* frv simulator support code
2 Copyright (C) 1998-2019 Free Software Foundation, Inc.
3 Contributed by Red Hat.
4
5 This file is part of the GNU simulators.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #define WANT_CPU
21 #define WANT_CPU_FRVBF
22
23 #include "sim-main.h"
24 #include "cgen-mem.h"
25 #include "cgen-ops.h"
26 #include "cgen-engine.h"
27 #include "cgen-par.h"
28 #include "bfd.h"
29 #include "gdb/sim-frv.h"
30 #include <math.h>
31
/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by JMPL, JMPIL, and CALL
   insns.  */
int frvbf_write_next_vliw_addr_to_LR;
36
/* The contents of BUF are in target byte order.  */

/* Fetch register RN (a gdb register number, see gdb/sim-frv.h) into BUF.
   Returns LEN on success.  Returns 0 when the register bank is not
   available on the simulated machine or the SPR is unimplemented; an
   unknown RN also returns 0 after filling BUF with the marker value
   0xdeadbeef.  */
int
frvbf_fetch_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      /* GR0-GR31 require the "lo" half, GR32-GR63 the "hi" half; either
         half may be absent on a given machine variant.  */
      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
        return 0;
      else
        SETTSI (buf, GET_H_GR (grn));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
        return 0;
      else
        SETTSI (buf, GET_H_FR (frn));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SETTSI (buf, GET_H_PC ());
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
        return 0;
      SETTSI (buf, GET_H_SPR (spr));
    }
  else
    {
      /* Unknown register: leave a recognizable pattern in BUF.  */
      SETTSI (buf, 0xdeadbeef);
      return 0;
    }

  return len;
}
84
/* The contents of BUF are in target byte order.  */

/* Store BUF into register RN (a gdb register number).  Returns LEN on
   success, 0 if the register bank is unavailable on this machine, the
   SPR is unimplemented, or RN is unknown.  */
int
frvbf_store_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      /* GR0-GR31 require the "lo" half, GR32-GR63 the "hi" half.  */
      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
        return 0;
      else
        SET_H_GR (grn, GETTSI (buf));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
        return 0;
      else
        SET_H_FR (frn, GETTSI (buf));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SET_H_PC (GETTSI (buf));
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
        return 0;
      SET_H_SPR (spr, GETTSI (buf));
    }
  else
    return 0;

  return len;
}
130
/* Cover fns to access the general registers.  */

/* Read GR.  frv_check_gr_access validates that GR exists on the
   current machine (it may queue an exception if not) before the raw
   value is returned.  */
USI
frvbf_h_gr_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  frv_check_gr_access (current_cpu, gr);
  return CPU (h_gr[gr]);
}
139
140 void
141 frvbf_h_gr_set_handler (SIM_CPU *current_cpu, UINT gr, USI newval)
142 {
143 frv_check_gr_access (current_cpu, gr);
144
145 if (gr == 0)
146 return; /* Storing into gr0 has no effect. */
147
148 CPU (h_gr[gr]) = newval;
149 }
150
/* Cover fns to access the floating point registers.  */

/* Read FR after validating access to it on the current machine.  */
SF
frvbf_h_fr_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  frv_check_fr_access (current_cpu, fr);
  return CPU (h_fr[fr]);
}
159
/* Write NEWVAL to FR after validating access to it.  */
void
frvbf_h_fr_set_handler (SIM_CPU *current_cpu, UINT fr, SF newval)
{
  frv_check_fr_access (current_cpu, fr);
  CPU (h_fr[fr]) = newval;
}
166
/* Cover fns to access the general registers as double words.  */

/* Check that register number REG has none of the bits in ALIGN_MASK
   set.  If it does, queue the interrupt appropriate to the simulated
   machine and return REG with the offending bits cleared; otherwise
   return REG unchanged.  */
static UINT
check_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
          /* Note: there is a discrepancy between V2.2 of the FR400
             instruction manual and the various FR4xx LSI specs.
             The former claims that unaligned registers cause a
             register_exception while the latter say it's an
             illegal_instruction.  The LSI specs appear to be
             correct; in fact, the FR4xx series is not documented
             as having a register_exception.  */
        case bfd_mach_fr400:
        case bfd_mach_fr450:
        case bfd_mach_fr550:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_register_exception_interrupt (current_cpu,
                                                  FRV_REC_UNALIGNED);
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}
204
/* As check_register_alignment, but for floating-point registers: the
   fr500 family signals misalignment as an FP exception (invalid FR)
   rather than a register exception.  Returns REG with the misaligned
   bits cleared.  */
static UINT
check_fr_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
          /* See comment in check_register_alignment().  */
        case bfd_mach_fr400:
        case bfd_mach_fr450:
        case bfd_mach_fr550:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          {
            struct frv_fp_exception_info fp_info = {
              FSR_NO_EXCEPTION, FTT_INVALID_FR
            };
            frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
          }
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}
238
/* Check that ADDRESS has none of the bits in ALIGN_MASK set.  If it
   does, queue the machine-appropriate data-access interrupt and return
   ADDRESS with the offending bits cleared; otherwise return ADDRESS
   unchanged.  */
static UINT
check_memory_alignment (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
          /* See comment in check_register_alignment().  */
        case bfd_mach_fr400:
        case bfd_mach_fr450:
          frv_queue_data_access_error_interrupt (current_cpu, address);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
          break;
        default:
          break;
        }

      address &= ~align_mask;
    }

  return address;
}
266
/* Read the register pair GR/GR+1 as a 64-bit value, GR supplying the
   high word.  GR must be even (an exception is queued and GR rounded
   down if not).  GR0 reads as zero.  */
DI
frvbf_h_gr_double_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  DI value;

  if (gr == 0)
    return 0; /* gr0 is always 0.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  value = GET_H_GR (gr);
  value <<= 32;
  value |= (USI) GET_H_GR (gr + 1);
  return value;
}
283
/* Write the 64-bit NEWVAL to the register pair GR/GR+1, GR receiving
   the high word.  GR must be even; writes to GR0 are discarded.  */
void
frvbf_h_gr_double_set_handler (SIM_CPU *current_cpu, UINT gr, DI newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  SET_H_GR (gr , (newval >> 32) & 0xffffffff);
  SET_H_GR (gr + 1, (newval ) & 0xffffffff);
}
296
/* Cover fns to access the floating point register as double words.  */

/* Read the register pair FR/FR+1 as a DF value, with FR supplying the
   most significant word.  The union plus the host-endian test place
   the two SF words so that the assembled DF matches the target's
   big-endian register ordering.  FR must be even.  */
DF
frvbf_h_fr_double_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  if (HOST_BYTE_ORDER == BFD_ENDIAN_LITTLE)
    {
      /* On a little-endian host the high word lives in as_sf[1].  */
      value.as_sf[1] = GET_H_FR (fr);
      value.as_sf[0] = GET_H_FR (fr + 1);
    }
  else
    {
      value.as_sf[0] = GET_H_FR (fr);
      value.as_sf[1] = GET_H_FR (fr + 1);
    }

  return value.as_df;
}
323
/* Write the DF value NEWVAL to the register pair FR/FR+1, FR receiving
   the most significant word (inverse of
   frvbf_h_fr_double_get_handler).  FR must be even.  */
void
frvbf_h_fr_double_set_handler (SIM_CPU *current_cpu, UINT fr, DF newval)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  value.as_df = newval;
  if (HOST_BYTE_ORDER == BFD_ENDIAN_LITTLE)
    {
      /* On a little-endian host the high word lives in as_sf[1].  */
      SET_H_FR (fr , value.as_sf[1]);
      SET_H_FR (fr + 1, value.as_sf[0]);
    }
  else
    {
      SET_H_FR (fr , value.as_sf[0]);
      SET_H_FR (fr + 1, value.as_sf[1]);
    }
}
347
/* Cover fns to access the floating point register as integer words.  */

/* Read FR reinterpreted as a 32-bit integer.  The union preserves the
   raw bit pattern (a type pun, not a float-to-int conversion).  */
USI
frvbf_h_fr_int_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

  value.as_sf = GET_H_FR (fr);
  return value.as_usi;
}
361
/* Write the 32-bit integer NEWVAL to FR, reinterpreting its bit
   pattern as an SF (a type pun, not an int-to-float conversion).  */
void
frvbf_h_fr_int_set_handler (SIM_CPU *current_cpu, UINT fr, USI newval)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

  value.as_usi = newval;
  SET_H_FR (fr, value.as_sf);
}
373
/* Cover fns to access the coprocessor registers as double words.  */

/* Read the coprocessor register pair CPR/CPR+1 as a 64-bit value, CPR
   supplying the high word.  CPR must be even.  */
DI
frvbf_h_cpr_double_get_handler (SIM_CPU *current_cpu, UINT cpr)
{
  DI value;

  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  value = GET_H_CPR (cpr);
  value <<= 32;
  value |= (USI) GET_H_CPR (cpr + 1);
  return value;
}
389
/* Write the 64-bit NEWVAL to the coprocessor register pair CPR/CPR+1,
   CPR receiving the high word.  CPR must be even.  */
void
frvbf_h_cpr_double_set_handler (SIM_CPU *current_cpu, UINT cpr, DI newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  SET_H_CPR (cpr , (newval >> 32) & 0xffffffff);
  SET_H_CPR (cpr + 1, (newval ) & 0xffffffff);
}
399
/* Cover fns to write registers as quad words.  */

/* Write the four words NEWVAL[0..3] to GR..GR+3.  GR must be a
   multiple of 4; writes to GR0 are discarded.  */
void
frvbf_h_gr_quad_set_handler (SIM_CPU *current_cpu, UINT gr, SI *newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 3);

  SET_H_GR (gr , newval[0]);
  SET_H_GR (gr + 1, newval[1]);
  SET_H_GR (gr + 2, newval[2]);
  SET_H_GR (gr + 3, newval[3]);
}
416
/* Write the four words NEWVAL[0..3] to FR..FR+3.  FR must be a
   multiple of 4.  */
void
frvbf_h_fr_quad_set_handler (SIM_CPU *current_cpu, UINT fr, SI *newval)
{
  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 3);

  SET_H_FR (fr , newval[0]);
  SET_H_FR (fr + 1, newval[1]);
  SET_H_FR (fr + 2, newval[2]);
  SET_H_FR (fr + 3, newval[3]);
}
428
/* Write the four words NEWVAL[0..3] to CPR..CPR+3.  CPR must be a
   multiple of 4.  */
void
frvbf_h_cpr_quad_set_handler (SIM_CPU *current_cpu, UINT cpr, SI *newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 3);

  SET_H_CPR (cpr , newval[0]);
  SET_H_CPR (cpr + 1, newval[1]);
  SET_H_CPR (cpr + 2, newval[2]);
  SET_H_CPR (cpr + 3, newval[3]);
}
440
441 /* Cover fns to access the special purpose registers. */
443 USI
444 frvbf_h_spr_get_handler (SIM_CPU *current_cpu, UINT spr)
445 {
446 /* Check access restrictions. */
447 frv_check_spr_read_access (current_cpu, spr);
448
449 switch (spr)
450 {
451 case H_SPR_PSR:
452 return spr_psr_get_handler (current_cpu);
453 case H_SPR_TBR:
454 return spr_tbr_get_handler (current_cpu);
455 case H_SPR_BPSR:
456 return spr_bpsr_get_handler (current_cpu);
457 case H_SPR_CCR:
458 return spr_ccr_get_handler (current_cpu);
459 case H_SPR_CCCR:
460 return spr_cccr_get_handler (current_cpu);
461 case H_SPR_SR0:
462 case H_SPR_SR1:
463 case H_SPR_SR2:
464 case H_SPR_SR3:
465 return spr_sr_get_handler (current_cpu, spr);
466 break;
467 default:
468 return CPU (h_spr[spr]);
469 }
470 return 0;
471 }
472
/* Write NEWVAL to SPR.  Access restrictions are checked first (which
   may queue an interrupt).  Bits marked read-only for this machine are
   preserved from the current value before the store.  Registers kept
   as separate component fields are dispatched to their dedicated
   handlers; IHSR8 additionally triggers an insn-cache reconfigure.  */
void
frvbf_h_spr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  FRV_REGISTER_CONTROL *control;
  USI mask;
  USI oldval;

  /* Check access restrictions.  */
  frv_check_spr_write_access (current_cpu, spr);

  /* Only set those fields which are writeable.  */
  control = CPU_REGISTER_CONTROL (current_cpu);
  mask = control->spr[spr].read_only_mask;
  oldval = GET_H_SPR (spr);

  /* Keep the read-only bits from OLDVAL, take the rest from NEWVAL.  */
  newval = (newval & ~mask) | (oldval & mask);

  /* Some registers are represented by individual components which are
     referenced more often than the register itself.  */
  switch (spr)
    {
    case H_SPR_PSR:
      spr_psr_set_handler (current_cpu, newval);
      break;
    case H_SPR_TBR:
      spr_tbr_set_handler (current_cpu, newval);
      break;
    case H_SPR_BPSR:
      spr_bpsr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCR:
      spr_ccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCCR:
      spr_cccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      spr_sr_set_handler (current_cpu, spr, newval);
      break;
    case H_SPR_IHSR8:
      /* IHSR8 describes the insn cache geometry; changing it requires
         reconfiguring the simulated cache.  */
      frv_cache_reconfigure (current_cpu, CPU_INSN_CACHE (current_cpu));
      break;
    default:
      CPU (h_spr[spr]) = newval;
      break;
    }
}
523
524 /* Cover fns to access the gr_hi and gr_lo registers. */
526 UHI
527 frvbf_h_gr_hi_get_handler (SIM_CPU *current_cpu, UINT gr)
528 {
529 return (GET_H_GR(gr) >> 16) & 0xffff;
530 }
531
532 void
533 frvbf_h_gr_hi_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
534 {
535 USI value = (GET_H_GR (gr) & 0xffff) | (newval << 16);
536 SET_H_GR (gr, value);
537 }
538
539 UHI
540 frvbf_h_gr_lo_get_handler (SIM_CPU *current_cpu, UINT gr)
541 {
542 return GET_H_GR(gr) & 0xffff;
543 }
544
545 void
546 frvbf_h_gr_lo_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
547 {
548 USI value = (GET_H_GR (gr) & 0xffff0000) | (newval & 0xffff);
549 SET_H_GR (gr, value);
550 }
551
552 /* Cover fns to access the tbr bits. */
554 USI
555 spr_tbr_get_handler (SIM_CPU *current_cpu)
556 {
557 int tbr = ((GET_H_TBR_TBA () & 0xfffff) << 12) |
558 ((GET_H_TBR_TT () & 0xff) << 4);
559
560 return tbr;
561 }
562
563 void
564 spr_tbr_set_handler (SIM_CPU *current_cpu, USI newval)
565 {
566 int tbr = newval;
567
568 SET_H_TBR_TBA ((tbr >> 12) & 0xfffff) ;
569 SET_H_TBR_TT ((tbr >> 4) & 0xff) ;
570 }
571
572 /* Cover fns to access the bpsr bits. */
574 USI
575 spr_bpsr_get_handler (SIM_CPU *current_cpu)
576 {
577 int bpsr = ((GET_H_BPSR_BS () & 0x1) << 12) |
578 ((GET_H_BPSR_BET () & 0x1) );
579
580 return bpsr;
581 }
582
583 void
584 spr_bpsr_set_handler (SIM_CPU *current_cpu, USI newval)
585 {
586 int bpsr = newval;
587
588 SET_H_BPSR_BS ((bpsr >> 12) & 1);
589 SET_H_BPSR_BET ((bpsr ) & 1);
590 }
591
/* Cover fns to access the psr bits.  */

/* Assemble the PSR value from its individually-maintained component
   fields (IMPLE, VER, ICE, NEM, CM, BE, ESR, EF, EM, PIL, S, PS, ET),
   each shifted to its architectural bit position.  */
USI
spr_psr_get_handler (SIM_CPU *current_cpu)
{
  int psr = ((GET_H_PSR_IMPLE () & 0xf) << 28) |
            ((GET_H_PSR_VER () & 0xf) << 24) |
            ((GET_H_PSR_ICE () & 0x1) << 16) |
            ((GET_H_PSR_NEM () & 0x1) << 14) |
            ((GET_H_PSR_CM () & 0x1) << 13) |
            ((GET_H_PSR_BE () & 0x1) << 12) |
            ((GET_H_PSR_ESR () & 0x1) << 11) |
            ((GET_H_PSR_EF () & 0x1) << 8) |
            ((GET_H_PSR_EM () & 0x1) << 7) |
            ((GET_H_PSR_PIL () & 0xf) << 3) |
            ((GET_H_PSR_S () & 0x1) << 2) |
            ((GET_H_PSR_PS () & 0x1) << 1) |
            ((GET_H_PSR_ET () & 0x1) );

  return psr;
}
613
/* Distribute NEWVAL into the PSR's individually-maintained component
   fields.  PSR.S must be stored first: its handler consults PSR.ESR
   when deciding whether to swap the supervisor/user register context,
   so it must run before ESR is overwritten below.  */
void
spr_psr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
     first.  */
  SET_H_PSR_S ((newval >> 2) & 1);

  SET_H_PSR_IMPLE ((newval >> 28) & 0xf);
  SET_H_PSR_VER ((newval >> 24) & 0xf);
  SET_H_PSR_ICE ((newval >> 16) & 1);
  SET_H_PSR_NEM ((newval >> 14) & 1);
  SET_H_PSR_CM ((newval >> 13) & 1);
  SET_H_PSR_BE ((newval >> 12) & 1);
  SET_H_PSR_ESR ((newval >> 11) & 1);
  SET_H_PSR_EF ((newval >> 8) & 1);
  SET_H_PSR_EM ((newval >> 7) & 1);
  SET_H_PSR_PIL ((newval >> 3) & 0xf);
  SET_H_PSR_PS ((newval >> 1) & 1);
  SET_H_PSR_ET ((newval ) & 1);
}
634
/* Set PSR.S (supervisor mode bit).  When the bit actually changes,
   swap the supervisor/user register context before recording the new
   value.  */
void
frvbf_h_psr_s_set_handler (SIM_CPU *current_cpu, BI newval)
{
  /* If switching from user to supervisor mode, or vice-versa, then switch
     the supervisor/user context.  */
  int psr_s = GET_H_PSR_S ();
  if (psr_s != (newval & 1))
    {
      frvbf_switch_supervisor_user_context (current_cpu);
      CPU (h_psr_s) = newval & 1;
    }
}
647
/* Cover fns to access the ccr bits.  */

/* Assemble the CCR value from the four ICC (integer) and four FCC
   (floating-point) condition-code fields, 4 bits each: ICC3..ICC0 in
   bits 31-16, FCC3..FCC0 in bits 15-0.  */
USI
spr_ccr_get_handler (SIM_CPU *current_cpu)
{
  int ccr = ((GET_H_ICCR (H_ICCR_ICC3) & 0xf) << 28) |
            ((GET_H_ICCR (H_ICCR_ICC2) & 0xf) << 24) |
            ((GET_H_ICCR (H_ICCR_ICC1) & 0xf) << 20) |
            ((GET_H_ICCR (H_ICCR_ICC0) & 0xf) << 16) |
            ((GET_H_FCCR (H_FCCR_FCC3) & 0xf) << 12) |
            ((GET_H_FCCR (H_FCCR_FCC2) & 0xf) << 8) |
            ((GET_H_FCCR (H_FCCR_FCC1) & 0xf) << 4) |
            ((GET_H_FCCR (H_FCCR_FCC0) & 0xf) );

  return ccr;
}
664
665 void
666 spr_ccr_set_handler (SIM_CPU *current_cpu, USI newval)
667 {
668 int ccr = newval;
669
670 SET_H_ICCR (H_ICCR_ICC3, (newval >> 28) & 0xf);
671 SET_H_ICCR (H_ICCR_ICC2, (newval >> 24) & 0xf);
672 SET_H_ICCR (H_ICCR_ICC1, (newval >> 20) & 0xf);
673 SET_H_ICCR (H_ICCR_ICC0, (newval >> 16) & 0xf);
674 SET_H_FCCR (H_FCCR_FCC3, (newval >> 12) & 0xf);
675 SET_H_FCCR (H_FCCR_FCC2, (newval >> 8) & 0xf);
676 SET_H_FCCR (H_FCCR_FCC1, (newval >> 4) & 0xf);
677 SET_H_FCCR (H_FCCR_FCC0, (newval ) & 0xf);
678 }
679
680 QI
682 frvbf_set_icc_for_shift_right (
683 SIM_CPU *current_cpu, SI value, SI shift, QI icc
684 )
685 {
686 /* Set the C flag of the given icc to the logical OR of the bits shifted
687 out. */
688 int mask = (1 << shift) - 1;
689 if ((value & mask) != 0)
690 return icc | 0x1;
691
692 return icc & 0xe;
693 }
694
695 QI
696 frvbf_set_icc_for_shift_left (
697 SIM_CPU *current_cpu, SI value, SI shift, QI icc
698 )
699 {
700 /* Set the V flag of the given icc to the logical OR of the bits shifted
701 out. */
702 int mask = ((1 << shift) - 1) << (32 - shift);
703 if ((value & mask) != 0)
704 return icc | 0x2;
705
706 return icc & 0xd;
707 }
708
/* Cover fns to access the cccr bits.  */

/* Assemble the CCCR value from the eight 2-bit CC fields, CC7 in bits
   15-14 down to CC0 in bits 1-0.  */
USI
spr_cccr_get_handler (SIM_CPU *current_cpu)
{
  int cccr = ((GET_H_CCCR (H_CCCR_CC7) & 0x3) << 14) |
             ((GET_H_CCCR (H_CCCR_CC6) & 0x3) << 12) |
             ((GET_H_CCCR (H_CCCR_CC5) & 0x3) << 10) |
             ((GET_H_CCCR (H_CCCR_CC4) & 0x3) << 8) |
             ((GET_H_CCCR (H_CCCR_CC3) & 0x3) << 6) |
             ((GET_H_CCCR (H_CCCR_CC2) & 0x3) << 4) |
             ((GET_H_CCCR (H_CCCR_CC1) & 0x3) << 2) |
             ((GET_H_CCCR (H_CCCR_CC0) & 0x3) );

  return cccr;
}
725
726 void
727 spr_cccr_set_handler (SIM_CPU *current_cpu, USI newval)
728 {
729 int cccr = newval;
730
731 SET_H_CCCR (H_CCCR_CC7, (newval >> 14) & 0x3);
732 SET_H_CCCR (H_CCCR_CC6, (newval >> 12) & 0x3);
733 SET_H_CCCR (H_CCCR_CC5, (newval >> 10) & 0x3);
734 SET_H_CCCR (H_CCCR_CC4, (newval >> 8) & 0x3);
735 SET_H_CCCR (H_CCCR_CC3, (newval >> 6) & 0x3);
736 SET_H_CCCR (H_CCCR_CC2, (newval >> 4) & 0x3);
737 SET_H_CCCR (H_CCCR_CC1, (newval >> 2) & 0x3);
738 SET_H_CCCR (H_CCCR_CC0, (newval ) & 0x3);
739 }
740
/* Cover fns to access the sr bits.  */

/* Read SR0-SR3.  When PSR.ESR is clear the SRs shadow GR4-GR7, so the
   value comes from the general registers; otherwise it comes from the
   SPR array.  */
USI
spr_sr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    return GET_H_GR (4 + (spr - H_SPR_SR0));

  return CPU (h_spr[spr]);
}
754
/* Write NEWVAL to SR0-SR3.  When PSR.ESR is clear the SRs shadow
   GR4-GR7, so the store goes to the general registers; otherwise it
   goes to the SPR array.  */
void
spr_sr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    SET_H_GR (4 + (spr - H_SPR_SR0), newval);
  else
    CPU (h_spr[spr]) = newval;
}
766
/* Switch SR0-SR3 with GR4-GR7 if PSR.ESR is set.  Called when PSR.S
   changes so that the supervisor and user register banks swap.  */
void
frvbf_switch_supervisor_user_context (SIM_CPU *current_cpu)
{
  if (GET_H_PSR_ESR ())
    {
      /* We need to be in supervisor mode to swap the registers.  Access the
         PSR.S directly in order to avoid recursive context switches.  */
      int i;
      int save_psr_s = CPU (h_psr_s);
      CPU (h_psr_s) = 1;
      for (i = 0; i < 4; ++i)
        {
          int gr = i + 4;
          int spr = i + H_SPR_SR0;
          SI tmp = GET_H_SPR (spr);
          SET_H_SPR (spr, GET_H_GR (gr));
          SET_H_GR (gr, tmp);
        }
      CPU (h_psr_s) = save_psr_s;
    }
}
790
/* Handle load/store of quad registers.  */

/* Load four consecutive words at ADDRESS into GR TARG_IX..TARG_IX+3.
   ADDRESS is forced to 16-byte alignment first (queueing an exception
   if misaligned).  The register write is queued rather than performed
   immediately, for parallel-insn semantics.  */
void
frvbf_load_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_gr_quad_set_handler, targ_ix,
                             value);
    }
}
821
/* Store GR SRC_IX..SRC_IX+3 as four consecutive words at ADDRESS.
   Both the register index (multiple of 4) and the address (16-byte)
   are alignment-checked.  The memory write is queued; when the data
   cache is enabled (HSR0.DCE) it goes through the cache handler.  */
void
frvbf_store_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    {
      /* GR0 is always 0.  */
      if (src_ix == 0)
        value[i] = 0;
      else
        value[i] = GET_H_GR (src_ix + i);
    }
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}
847
/* Load four consecutive words at ADDRESS into FR TARG_IX..TARG_IX+3.
   Same structure as frvbf_load_quad_GR, but targets the FP register
   file.  */
void
frvbf_load_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_fr_quad_set_handler, targ_ix,
                             value);
    }
}
876
/* Store FR SRC_IX..SRC_IX+3 as four consecutive words at ADDRESS.
   Same structure as frvbf_store_quad_GR, but sources the FP register
   file (no GR0 special case).  */
void
frvbf_store_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_fr_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_FR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}
897
/* Load four consecutive words at ADDRESS into CPR TARG_IX..TARG_IX+3.
   Same structure as frvbf_load_quad_GR, but targets the coprocessor
   register file.  */
void
frvbf_load_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_cpr_quad_set_handler, targ_ix,
                             value);
    }
}
926
/* Store CPR SRC_IX..SRC_IX+3 as four consecutive words at ADDRESS.
   Same structure as frvbf_store_quad_GR, but sources the coprocessor
   register file (no GR0 special case).  */
void
frvbf_store_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_CPR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}
947
/* Perform the signed division ARG1 / ARG2 into GR TARGET_INDEX,
   detecting overflow (0x80000000 / -1) and division by zero.
   NON_EXCEPTING is non-zero for the non-excepting insn variants, which
   record errors in the GNER NE flags instead of trapping.  */
void
frvbf_signed_integer_divide (
  SIM_CPU *current_cpu, SI arg1, SI arg2, int target_index, int non_excepting
)
{
  enum frv_dtt dtt = FRV_DTT_NO_EXCEPTION;
  if (arg1 == 0x80000000 && arg2 == -1)
    {
      /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set
         otherwise it may result in 0x7fffffff (sparc compatibility) or
         0x80000000 (C language compatibility).  */
      USI isr;
      dtt = FRV_DTT_OVERFLOW;

      isr = GET_ISR ();
      if (GET_ISR_EDE (isr))
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x7fffffff);
      else
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x80000000);
      frvbf_force_update (current_cpu); /* Force update of target register.  */
    }
  else if (arg2 == 0)
    dtt = FRV_DTT_DIVISION_BY_ZERO;
  else
    sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                           arg1 / arg2);

  /* Check for exceptions.  */
  if (dtt != FRV_DTT_NO_EXCEPTION)
    dtt = frvbf_division_exception (current_cpu, dtt, target_index,
                                    non_excepting);
  if (non_excepting && dtt == FRV_DTT_NO_EXCEPTION)
    {
      /* Non excepting instruction.  Clear the NE flag for the target
         register.  */
      SI NE_flags[2];
      GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
      CLEAR_NE_FLAG (NE_flags, target_index);
      SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
    }
}
992
/* Perform the unsigned division ARG1 / ARG2 into GR TARGET_INDEX.
   Only division by zero can fail; overflow is impossible for unsigned
   operands.  NON_EXCEPTING as in frvbf_signed_integer_divide.  */
void
frvbf_unsigned_integer_divide (
  SIM_CPU *current_cpu, USI arg1, USI arg2, int target_index, int non_excepting
)
{
  if (arg2 == 0)
    frvbf_division_exception (current_cpu, FRV_DTT_DIVISION_BY_ZERO,
                              target_index, non_excepting);
  else
    {
      sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                             arg1 / arg2);
      if (non_excepting)
        {
          /* Non excepting instruction.  Clear the NE flag for the target
             register.  */
          SI NE_flags[2];
          GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
          CLEAR_NE_FLAG (NE_flags, target_index);
          SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
        }
    }
}
1016
/* Clear accumulators.  */

/* Implement the MCLRACC insn: clear accumulator ACC_IX, or all
   implemented accumulators when A is set and ACC_IX is 0.  ACC_MASK
   encodes which accumulator numbers exist on the simulated machine;
   an unimplemented accumulator makes the insn a nop.  */
void
frvbf_clear_accumulators (SIM_CPU *current_cpu, SI acc_ix, int A)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  int acc_mask =
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450) ? 11 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400) ? 3 :
    63;
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);

  /* Record the operands for the profiling model.  */
  ps->mclracc_acc = acc_ix;
  ps->mclracc_A = A;
  if (A == 0 || acc_ix != 0) /* Clear 1 accumulator?  */
    {
      /* This instruction is a nop if the referenced accumulator is not
         implemented.  */
      if ((acc_ix & acc_mask) == acc_ix)
        sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, acc_ix, 0);
    }
  else
    {
      /* Clear all implemented accumulators.  */
      int i;
      for (i = 0; i <= acc_mask; ++i)
        if ((i & acc_mask) == i)
          sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, i, 0);
    }
}
1049
1050 /* Functions to aid insn semantics. */
1052
1053 /* Compute the result of the SCAN and SCANI insns after the shift and xor. */
1054 SI
1055 frvbf_scan_result (SIM_CPU *current_cpu, SI value)
1056 {
1057 SI i;
1058 SI mask;
1059
1060 if (value == 0)
1061 return 63;
1062
1063 /* Find the position of the first non-zero bit.
1064 The loop will terminate since there is guaranteed to be at least one
1065 non-zero bit. */
1066 mask = 1 << (sizeof (mask) * 8 - 1);
1067 for (i = 0; (value & mask) == 0; ++i)
1068 value <<= 1;
1069
1070 return i;
1071 }
1072
1073 /* Compute the result of the cut insns. */
1074 SI
1075 frvbf_cut (SIM_CPU *current_cpu, SI reg1, SI reg2, SI cut_point)
1076 {
1077 SI result;
1078 cut_point &= 0x3f;
1079 if (cut_point < 32)
1080 {
1081 result = reg1 << cut_point;
1082 result |= (reg2 >> (32 - cut_point)) & ((1 << cut_point) - 1);
1083 }
1084 else
1085 result = reg2 << (cut_point - 32);
1086
1087 return result;
1088 }
1089
/* Compute the result of the media cut insns (MCUT): extract a 32-bit
   field from the 40-bit accumulator ACC, positioned by the signed
   6-bit CUT_POINT relative to bit 40.  The left shift by 24 places the
   40-bit accumulator at the top of the 64-bit value; the arithmetic
   right shifts then select and sign-extend the field.  */
SI
frvbf_media_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  /* The cut_point is relative to bit 40 of 64 bits.  */
  if (cut_point >= 0)
    return (acc << (cut_point + 24)) >> 32;

  /* Extend the sign bit (bit 40) for negative cuts.  */
  if (cut_point == -32)
    return (acc << 24) >> 63; /* Special case for full shiftout.  */

  return (acc << 24) >> (32 + -cut_point);
}
1107
/* Compute the result of the saturating media cut insns (MCUTSS): as
   frvbf_media_cut, but a positive cut that would shift significant
   bits out saturates to 0x7fffffff / 0x80000000 according to the sign
   of ACC.  */
SI
frvbf_media_cut_ss (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  if (cut_point >= 0)
    {
      /* The cut_point is relative to bit 40 of 64 bits.  */
      DI shifted = acc << (cut_point + 24);
      DI unshifted = shifted >> (cut_point + 24);

      /* The result will be saturated if significant bits are shifted out:
         shifting back fails to reproduce ACC exactly.  */
      if (unshifted != acc)
        {
          if (acc < 0)
            return 0x80000000;
          return 0x7fffffff;
        }
    }

  /* The result will not be saturated, so use the code for the normal cut.  */
  return frvbf_media_cut (current_cpu, acc, cut_point);
}
1133
/* Compute the result of int accumulator cut (SCUTSS): extract a
   saturated, rounded 32-bit field from the 64-bit accumulator ACC,
   positioned by the signed 7-bit CUT_POINT.  */
SI
frvbf_iacc_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  DI lower, upper;

  /* The cut point is the lower 7 bits (signed) of what we are passed.  */
  cut_point = cut_point << 25 >> 25;

  /* Conceptually, the operation is on a 128-bit sign-extension of ACC.
     The top bit of the return value corresponds to bit (63 - CUT_POINT)
     of this 128-bit value.

     Since we can't deal with 128-bit values very easily, convert the
     operation into an equivalent 64-bit one.  */
  if (cut_point < 0)
    {
      /* Avoid an undefined shift operation.  */
      if (cut_point == -64)
        acc >>= 63;
      else
        acc >>= -cut_point;
      cut_point = 0;
    }

  /* Get the shifted but unsaturated result.  Set LOWER to the lowest
     32 bits of the result and UPPER to the result >> 31.  */
  if (cut_point < 32)
    {
      /* The cut loses the (32 - CUT_POINT) least significant bits.
         Round the result up if the most significant of these lost bits
         is 1.  */
      lower = acc >> (32 - cut_point);
      if (lower < 0x7fffffff)
        if (acc & LSBIT64 (32 - cut_point - 1))
          lower++;
      upper = lower >> 31;
    }
  else
    {
      lower = acc << (cut_point - 32);
      upper = acc >> (63 - cut_point);
    }

  /* Saturate the result: UPPER holds the bits above the 31-bit result
     field and must be all zeros (non-negative) or all ones (negative)
     for the value to fit.  */
  if (upper < -1)
    return ~0x7fffffff;
  else if (upper > 0)
    return 0x7fffffff;
  else
    return lower;
}
1186
1187 /* Compute the result of shift-left-arithmetic-with-saturation (SLASS). */
1188 SI
1189 frvbf_shift_left_arith_saturate (SIM_CPU *current_cpu, SI arg1, SI arg2)
1190 {
1191 int neg_arg1;
1192
1193 /* FIXME: what to do with negative shift amt? */
1194 if (arg2 <= 0)
1195 return arg1;
1196
1197 if (arg1 == 0)
1198 return 0;
1199
1200 /* Signed shift by 31 or greater saturates by definition. */
1201 if (arg2 >= 31)
1202 if (arg1 > 0)
1203 return (SI) 0x7fffffff;
1204 else
1205 return (SI) 0x80000000;
1206
1207 /* OK, arg2 is between 1 and 31. */
1208 neg_arg1 = (arg1 < 0);
1209 do {
1210 arg1 <<= 1;
1211 /* Check for sign bit change (saturation). */
1212 if (neg_arg1 && (arg1 >= 0))
1213 return (SI) 0x80000000;
1214 else if (!neg_arg1 && (arg1 < 0))
1215 return (SI) 0x7fffffff;
1216 } while (--arg2 > 0);
1217
1218 return arg1;
1219 }
1220
1221 /* Simulate the media custom insns. */
1222 void
1223 frvbf_media_cop (SIM_CPU *current_cpu, int cop_num)
1224 {
1225 /* The semantics of the insn are a nop, since it is implementation defined.
1226 We do need to check whether it's implemented and set up for MTRAP
1227 if it's not. */
1228 USI msr0 = GET_MSR (0);
1229 if (GET_MSR_EMCI (msr0) == 0)
1230 {
1231 /* no interrupt queued at this time. */
1232 frv_set_mp_exception_registers (current_cpu, MTT_UNIMPLEMENTED_MPOP, 0);
1233 }
1234 }
1235
1236 /* Simulate the media average (MAVEH) insn. */
1237 static HI
1238 do_media_average (SIM_CPU *current_cpu, HI arg1, HI arg2)
1239 {
1240 SIM_DESC sd = CPU_STATE (current_cpu);
1241 SI sum = (arg1 + arg2);
1242 HI result = sum >> 1;
1243 int rounding_value;
1244
1245 /* On fr4xx and fr550, check the rounding mode. On other machines
1246 rounding is always toward negative infinity and the result is
1247 already correctly rounded. */
1248 switch (STATE_ARCHITECTURE (sd)->mach)
1249 {
1250 /* Need to check rounding mode. */
1251 case bfd_mach_fr400:
1252 case bfd_mach_fr450:
1253 case bfd_mach_fr550:
1254 /* Check whether rounding will be required. Rounding will be required
1255 if the sum is an odd number. */
1256 rounding_value = sum & 1;
1257 if (rounding_value)
1258 {
1259 USI msr0 = GET_MSR (0);
1260 /* Check MSR0.SRDAV to determine which bits control the rounding. */
1261 if (GET_MSR_SRDAV (msr0))
1262 {
1263 /* MSR0.RD controls rounding. */
1264 switch (GET_MSR_RD (msr0))
1265 {
1266 case 0:
1267 /* Round to nearest. */
1268 if (result >= 0)
1269 ++result;
1270 break;
1271 case 1:
1272 /* Round toward 0. */
1273 if (result < 0)
1274 ++result;
1275 break;
1276 case 2:
1277 /* Round toward positive infinity. */
1278 ++result;
1279 break;
1280 case 3:
1281 /* Round toward negative infinity. The result is already
1282 correctly rounded. */
1283 break;
1284 default:
1285 abort ();
1286 break;
1287 }
1288 }
1289 else
1290 {
1291 /* MSR0.RDAV controls rounding. If set, round toward positive
1292 infinity. Otherwise the result is already rounded correctly
1293 toward negative infinity. */
1294 if (GET_MSR_RDAV (msr0))
1295 ++result;
1296 }
1297 }
1298 break;
1299 default:
1300 break;
1301 }
1302
1303 return result;
1304 }
1305
1306 SI
1307 frvbf_media_average (SIM_CPU *current_cpu, SI reg1, SI reg2)
1308 {
1309 SI result;
1310 result = do_media_average (current_cpu, reg1 & 0xffff, reg2 & 0xffff);
1311 result &= 0xffff;
1312 result |= do_media_average (current_cpu, (reg1 >> 16) & 0xffff,
1313 (reg2 >> 16) & 0xffff) << 16;
1314 return result;
1315 }
1316
/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by JMPL, JMPIL, and CALL.
   VALUE is the new flag state (nonzero to request the write).  */
void
frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU *current_cpu, int value)
{
  frvbf_write_next_vliw_addr_to_LR = value;
}
1324
/* Record INDEX as the target FR register of a non-excepting insn and
   clear that register's NE (non-excepting) flag in FNER0/FNER1.  */
void
frvbf_set_ne_index (SIM_CPU *current_cpu, int index)
{
  USI NE_flags[2];

  /* Save the target register so interrupt processing can set its NE flag
     in the event of an exception.  */
  frv_interrupt_state.ne_index = index;

  /* Clear the NE flag of the target register.  It will be reset if necessary
     in the event of an exception.  */
  GET_NE_FLAGS (NE_flags, H_SPR_FNER0);
  CLEAR_NE_FLAG (NE_flags, index);
  SET_NE_FLAGS (H_SPR_FNER0, NE_flags);
}
1340
1341 void
1342 frvbf_force_update (SIM_CPU *current_cpu)
1343 {
1344 CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
1345 int ix = CGEN_WRITE_QUEUE_INDEX (q);
1346 if (ix > 0)
1347 {
1348 CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix - 1);
1349 item->flags |= FRV_WRITE_QUEUE_FORCE_WRITE;
1350 }
1351 }
1352
/* Condition code logic. */

/* The condition-register combining operations; num_cr_ops is a sentinel
   giving the first dimension of the cr_logic table below.  */
enum cr_ops {
  andcr, orcr, xorcr, nandcr, norcr, andncr, orncr, nandncr, norncr,
  num_cr_ops
};

/* A condition-register operand/result value: 0 and 1 both encode
   "undefined", 2 encodes false, 3 encodes true.  */
enum cr_result {cr_undefined, cr_undefined1, cr_false, cr_true};
1361
/* Truth tables for the condition-register logic insns, indexed as
   cr_logic[operation][arg1][arg2], where OPERATION follows enum cr_ops
   and the operand/result values follow enum cr_result.  */
static enum cr_result
cr_logic[num_cr_ops][4][4] = {
  /* andcr */
  {
    /*           undefined       undefined     false         true       */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_false,     cr_true     }
  },
  /* orcr */
  {
    /*           undefined       undefined     false         true       */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_true     }
  },
  /* xorcr */
  {
    /*           undefined       undefined     false         true       */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  },
  /* nandcr */
  {
    /*           undefined       undefined     false         true       */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_true,      cr_false    }
  },
  /* norcr */
  {
    /*           undefined       undefined     false         true       */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_false    },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_false    }
  },
  /* andncr -- first operand is negated before the AND */
  {
    /*           undefined       undefined     false         true       */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* orncr -- first operand is negated before the OR */
  {
    /*           undefined       undefined     false         true       */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_true     },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_true     }
  },
  /* nandncr */
  {
    /*           undefined       undefined     false         true       */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* norncr */
  {
    /*           undefined       undefined     false         true       */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_false    },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  }
};
1437
/* Apply condition-register logic operation OPERATION (an enum cr_ops
   value) to the cr_result-encoded operands ARG1 and ARG2.  Returns the
   cr_result-encoded result.  No range checking is performed.  */
UQI
frvbf_cr_logic (SIM_CPU *current_cpu, SI operation, UQI arg1, UQI arg2)
{
  return cr_logic[operation][arg1][arg2];
}
1443
1444 /* Cache Manipulation. */
1446 void
1447 frvbf_insn_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
1448 {
1449 /* If we need to count cycles, then the cache operation will be
1450 initiated from the model profiling functions.
1451 See frvbf_model_.... */
1452 int hsr0 = GET_HSR0 ();
1453 if (GET_HSR0_ICE (hsr0))
1454 {
1455 if (model_insn)
1456 {
1457 CPU_LOAD_ADDRESS (current_cpu) = address;
1458 CPU_LOAD_LENGTH (current_cpu) = length;
1459 CPU_LOAD_LOCK (current_cpu) = lock;
1460 }
1461 else
1462 {
1463 FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
1464 frv_cache_preload (cache, address, length, lock);
1465 }
1466 }
1467 }
1468
1469 void
1470 frvbf_data_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
1471 {
1472 /* If we need to count cycles, then the cache operation will be
1473 initiated from the model profiling functions.
1474 See frvbf_model_.... */
1475 int hsr0 = GET_HSR0 ();
1476 if (GET_HSR0_DCE (hsr0))
1477 {
1478 if (model_insn)
1479 {
1480 CPU_LOAD_ADDRESS (current_cpu) = address;
1481 CPU_LOAD_LENGTH (current_cpu) = length;
1482 CPU_LOAD_LOCK (current_cpu) = lock;
1483 }
1484 else
1485 {
1486 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
1487 frv_cache_preload (cache, address, length, lock);
1488 }
1489 }
1490 }
1491
1492 void
1493 frvbf_insn_cache_unlock (SIM_CPU *current_cpu, SI address)
1494 {
1495 /* If we need to count cycles, then the cache operation will be
1496 initiated from the model profiling functions.
1497 See frvbf_model_.... */
1498 int hsr0 = GET_HSR0 ();
1499 if (GET_HSR0_ICE (hsr0))
1500 {
1501 if (model_insn)
1502 CPU_LOAD_ADDRESS (current_cpu) = address;
1503 else
1504 {
1505 FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
1506 frv_cache_unlock (cache, address);
1507 }
1508 }
1509 }
1510
1511 void
1512 frvbf_data_cache_unlock (SIM_CPU *current_cpu, SI address)
1513 {
1514 /* If we need to count cycles, then the cache operation will be
1515 initiated from the model profiling functions.
1516 See frvbf_model_.... */
1517 int hsr0 = GET_HSR0 ();
1518 if (GET_HSR0_DCE (hsr0))
1519 {
1520 if (model_insn)
1521 CPU_LOAD_ADDRESS (current_cpu) = address;
1522 else
1523 {
1524 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
1525 frv_cache_unlock (cache, address);
1526 }
1527 }
1528 }
1529
1530 void
1531 frvbf_insn_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
1532 {
1533 /* Make sure the insn was specified properly. -1 will be passed for ALL
1534 for a icei with A=0. */
1535 if (all == -1)
1536 {
1537 frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
1538 return;
1539 }
1540
1541 /* If we need to count cycles, then the cache operation will be
1542 initiated from the model profiling functions.
1543 See frvbf_model_.... */
1544 if (model_insn)
1545 {
1546 /* Record the all-entries flag for use in profiling. */
1547 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
1548 ps->all_cache_entries = all;
1549 CPU_LOAD_ADDRESS (current_cpu) = address;
1550 }
1551 else
1552 {
1553 FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
1554 if (all)
1555 frv_cache_invalidate_all (cache, 0/* flush? */);
1556 else
1557 frv_cache_invalidate (cache, address, 0/* flush? */);
1558 }
1559 }
1560
1561 void
1562 frvbf_data_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
1563 {
1564 /* Make sure the insn was specified properly. -1 will be passed for ALL
1565 for a dcei with A=0. */
1566 if (all == -1)
1567 {
1568 frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
1569 return;
1570 }
1571
1572 /* If we need to count cycles, then the cache operation will be
1573 initiated from the model profiling functions.
1574 See frvbf_model_.... */
1575 if (model_insn)
1576 {
1577 /* Record the all-entries flag for use in profiling. */
1578 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
1579 ps->all_cache_entries = all;
1580 CPU_LOAD_ADDRESS (current_cpu) = address;
1581 }
1582 else
1583 {
1584 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
1585 if (all)
1586 frv_cache_invalidate_all (cache, 0/* flush? */);
1587 else
1588 frv_cache_invalidate (cache, address, 0/* flush? */);
1589 }
1590 }
1591
1592 void
1593 frvbf_data_cache_flush (SIM_CPU *current_cpu, SI address, int all)
1594 {
1595 /* Make sure the insn was specified properly. -1 will be passed for ALL
1596 for a dcef with A=0. */
1597 if (all == -1)
1598 {
1599 frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
1600 return;
1601 }
1602
1603 /* If we need to count cycles, then the cache operation will be
1604 initiated from the model profiling functions.
1605 See frvbf_model_.... */
1606 if (model_insn)
1607 {
1608 /* Record the all-entries flag for use in profiling. */
1609 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
1610 ps->all_cache_entries = all;
1611 CPU_LOAD_ADDRESS (current_cpu) = address;
1612 }
1613 else
1614 {
1615 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
1616 if (all)
1617 frv_cache_invalidate_all (cache, 1/* flush? */);
1618 else
1619 frv_cache_invalidate (cache, address, 1/* flush? */);
1620 }
1621 }
1622