Home | History | Annotate | Line # | Download | only in frv
frv.c revision 1.1
      1 /* frv simulator support code
      2    Copyright (C) 1998, 1999, 2000, 2001, 2003, 2004, 2007, 2008, 2009, 2010,
      3    2011 Free Software Foundation, Inc.
      4    Contributed by Red Hat.
      5 
      6 This file is part of the GNU simulators.
      7 
      8 This program is free software; you can redistribute it and/or modify
      9 it under the terms of the GNU General Public License as published by
     10 the Free Software Foundation; either version 3 of the License, or
     11 (at your option) any later version.
     12 
     13 This program is distributed in the hope that it will be useful,
     14 but WITHOUT ANY WARRANTY; without even the implied warranty of
     15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     16 GNU General Public License for more details.
     17 
     18 You should have received a copy of the GNU General Public License
     19 along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
     20 
     21 #define WANT_CPU
     22 #define WANT_CPU_FRVBF
     23 
     24 #include "sim-main.h"
     25 #include "cgen-mem.h"
     26 #include "cgen-ops.h"
     27 #include "cgen-engine.h"
     28 #include "cgen-par.h"
     29 #include "bfd.h"
     30 #include "gdb/sim-frv.h"
     31 #include <math.h>
     32 
     33 /* Maintain a flag in order to know when to write the address of the next
     34    VLIW instruction into the LR register.  Used by JMPL. JMPIL, and CALL
     35    insns.  */
     36 int frvbf_write_next_vliw_addr_to_LR;
     37 
/* Fetch register RN into BUF; the contents of BUF are in target byte
   order.  Returns LEN on success, 0 if the register is not present or
   not implemented on the simulated CPU variant.  */
int
frvbf_fetch_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      /* GR0-GR31 require the low register bank, GR32-GR63 the high one.  */
      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
	return 0;
      else
	SETTSI (buf, GET_H_GR (grn));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      /* Same bank split for the floating point registers.  */
      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
	return 0;
      else
	SETTSI (buf, GET_H_FR (frn));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SETTSI (buf, GET_H_PC ());
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
	return 0;
      SETTSI (buf, GET_H_SPR (spr));
    }
  else
    {
      /* Unknown register: hand back a recognizable poison value.  */
      SETTSI (buf, 0xdeadbeef);
      return 0;
    }

  return len;
}
     85 
/* Store register RN from BUF; the contents of BUF are in target byte
   order.  Returns LEN on success, 0 if the register is not present or
   not implemented on the simulated CPU variant.  */

int
frvbf_store_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      /* GR0-GR31 require the low register bank, GR32-GR63 the high one.  */
      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
	return 0;
      else
	SET_H_GR (grn, GETTSI (buf));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      /* Same bank split for the floating point registers.  */
      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
	return 0;
      else
	SET_H_FR (frn, GETTSI (buf));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SET_H_PC (GETTSI (buf));
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
	return 0;
      SET_H_SPR (spr, GETTSI (buf));
    }
  else
    return 0;

  return len;
}
    131 
    132 /* Cover fns to access the general registers.  */
/* Read general register GR.  frv_check_gr_access validates that the
   bank holding GR exists on the simulated variant before the read.  */
USI
frvbf_h_gr_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  frv_check_gr_access (current_cpu, gr);
  return CPU (h_gr[gr]);
}
    140 
    141 void
    142 frvbf_h_gr_set_handler (SIM_CPU *current_cpu, UINT gr, USI newval)
    143 {
    144   frv_check_gr_access (current_cpu, gr);
    145 
    146   if (gr == 0)
    147     return; /* Storing into gr0 has no effect.  */
    148 
    149   CPU (h_gr[gr]) = newval;
    150 }
    151 
    152 /* Cover fns to access the floating point registers.  */
/* Read floating point register FR.  frv_check_fr_access validates that
   the bank holding FR exists on the simulated variant.  */
SF
frvbf_h_fr_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  frv_check_fr_access (current_cpu, fr);
  return CPU (h_fr[fr]);
}
    160 
/* Write floating point register FR after validating access.  */
void
frvbf_h_fr_set_handler (SIM_CPU *current_cpu, UINT fr, SF newval)
{
  frv_check_fr_access (current_cpu, fr);
  CPU (h_fr[fr]) = newval;
}
    167 
    168 /* Cover fns to access the general registers as double words.  */
/* If REG is not aligned according to ALIGN_MASK, queue the interrupt
   appropriate for the simulated machine and force alignment by clearing
   the offending low bits.  Returns the possibly-adjusted register
   number.  */
static UINT
check_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
	{
	  /* Note: there is a discrepancy between V2.2 of the FR400
	     instruction manual and the various FR4xx LSI specs.
	     The former claims that unaligned registers cause a
	     register_exception while the latter say it's an
	     illegal_instruction.  The LSI specs appear to be
	     correct; in fact, the FR4xx series is not documented
	     as having a register_exception.  */
	case bfd_mach_fr400:
	case bfd_mach_fr450:
	case bfd_mach_fr550:
	  frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
	  break;
	case bfd_mach_frvtomcat:
	case bfd_mach_fr500:
	case bfd_mach_frv:
	  frv_queue_register_exception_interrupt (current_cpu,
						  FRV_REC_UNALIGNED);
	  break;
	default:
	  break;
	}

      reg &= ~align_mask;
    }

  return reg;
}
    205 
/* FR-register variant of check_register_alignment: misalignment on the
   FR500 family queues an FP exception (FTT_INVALID_FR) instead of a
   register exception.  Returns the possibly-adjusted register number.  */
static UINT
check_fr_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
	{
	  /* See comment in check_register_alignment().  */
	case bfd_mach_fr400:
	case bfd_mach_fr450:
	case bfd_mach_fr550:
	  frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
	  break;
	case bfd_mach_frvtomcat:
	case bfd_mach_fr500:
	case bfd_mach_frv:
	  {
	    struct frv_fp_exception_info fp_info = {
	      FSR_NO_EXCEPTION, FTT_INVALID_FR
	    };
	    frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
	  }
	  break;
	default:
	  break;
	}

      reg &= ~align_mask;
    }

  return reg;
}
    239 
/* If ADDRESS is not aligned according to ALIGN_MASK, queue the interrupt
   appropriate for the simulated machine and force alignment by clearing
   the offending low bits.  Returns the possibly-adjusted address.  */
static UINT
check_memory_alignment (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
	{
	  /* See comment in check_register_alignment().  */
	case bfd_mach_fr400:
	case bfd_mach_fr450:
	  frv_queue_data_access_error_interrupt (current_cpu, address);
	  break;
	case bfd_mach_frvtomcat:
	case bfd_mach_fr500:
	case bfd_mach_frv:
	  frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
	  break;
	default:
	  break;
	}

      address &= ~align_mask;
    }

  return address;
}
    267 
    268 DI
    269 frvbf_h_gr_double_get_handler (SIM_CPU *current_cpu, UINT gr)
    270 {
    271   DI value;
    272 
    273   if (gr == 0)
    274     return 0; /* gr0 is always 0.  */
    275 
    276   /* Check the register alignment.  */
    277   gr = check_register_alignment (current_cpu, gr, 1);
    278 
    279   value = GET_H_GR (gr);
    280   value <<= 32;
    281   value |=  (USI) GET_H_GR (gr + 1);
    282   return value;
    283 }
    284 
    285 void
    286 frvbf_h_gr_double_set_handler (SIM_CPU *current_cpu, UINT gr, DI newval)
    287 {
    288   if (gr == 0)
    289     return; /* Storing into gr0 has no effect.  */
    290 
    291   /* Check the register alignment.  */
    292   gr = check_register_alignment (current_cpu, gr, 1);
    293 
    294   SET_H_GR (gr    , (newval >> 32) & 0xffffffff);
    295   SET_H_GR (gr + 1, (newval      ) & 0xffffffff);
    296 }
    297 
    298 /* Cover fns to access the floating point register as double words.  */
    300 DF
    301 frvbf_h_fr_double_get_handler (SIM_CPU *current_cpu, UINT fr)
    302 {
    303   union {
    304     SF as_sf[2];
    305     DF as_df;
    306   } value;
    307 
    308   /* Check the register alignment.  */
    309   fr = check_fr_register_alignment (current_cpu, fr, 1);
    310 
    311   if (CURRENT_HOST_BYTE_ORDER == LITTLE_ENDIAN)
    312     {
    313       value.as_sf[1] = GET_H_FR (fr);
    314       value.as_sf[0] = GET_H_FR (fr + 1);
    315     }
    316   else
    317     {
    318       value.as_sf[0] = GET_H_FR (fr);
    319       value.as_sf[1] = GET_H_FR (fr + 1);
    320     }
    321 
    322   return value.as_df;
    323 }
    324 
    325 void
    326 frvbf_h_fr_double_set_handler (SIM_CPU *current_cpu, UINT fr, DF newval)
    327 {
    328   union {
    329     SF as_sf[2];
    330     DF as_df;
    331   } value;
    332 
    333   /* Check the register alignment.  */
    334   fr = check_fr_register_alignment (current_cpu, fr, 1);
    335 
    336   value.as_df = newval;
    337   if (CURRENT_HOST_BYTE_ORDER == LITTLE_ENDIAN)
    338     {
    339       SET_H_FR (fr    , value.as_sf[1]);
    340       SET_H_FR (fr + 1, value.as_sf[0]);
    341     }
    342   else
    343     {
    344       SET_H_FR (fr    , value.as_sf[0]);
    345       SET_H_FR (fr + 1, value.as_sf[1]);
    346     }
    347 }
    348 
    349 /* Cover fns to access the floating point register as integer words.  */
    351 USI
    352 frvbf_h_fr_int_get_handler (SIM_CPU *current_cpu, UINT fr)
    353 {
    354   union {
    355     SF  as_sf;
    356     USI as_usi;
    357   } value;
    358 
    359   value.as_sf = GET_H_FR (fr);
    360   return value.as_usi;
    361 }
    362 
    363 void
    364 frvbf_h_fr_int_set_handler (SIM_CPU *current_cpu, UINT fr, USI newval)
    365 {
    366   union {
    367     SF  as_sf;
    368     USI as_usi;
    369   } value;
    370 
    371   value.as_usi = newval;
    372   SET_H_FR (fr, value.as_sf);
    373 }
    374 
    375 /* Cover fns to access the coprocessor registers as double words.  */
    377 DI
    378 frvbf_h_cpr_double_get_handler (SIM_CPU *current_cpu, UINT cpr)
    379 {
    380   DI value;
    381 
    382   /* Check the register alignment.  */
    383   cpr = check_register_alignment (current_cpu, cpr, 1);
    384 
    385   value = GET_H_CPR (cpr);
    386   value <<= 32;
    387   value |=  (USI) GET_H_CPR (cpr + 1);
    388   return value;
    389 }
    390 
    391 void
    392 frvbf_h_cpr_double_set_handler (SIM_CPU *current_cpu, UINT cpr, DI newval)
    393 {
    394   /* Check the register alignment.  */
    395   cpr = check_register_alignment (current_cpu, cpr, 1);
    396 
    397   SET_H_CPR (cpr    , (newval >> 32) & 0xffffffff);
    398   SET_H_CPR (cpr + 1, (newval      ) & 0xffffffff);
    399 }
    400 
    401 /* Cover fns to write registers as quad words.  */
    403 void
    404 frvbf_h_gr_quad_set_handler (SIM_CPU *current_cpu, UINT gr, SI *newval)
    405 {
    406   if (gr == 0)
    407     return; /* Storing into gr0 has no effect.  */
    408 
    409   /* Check the register alignment.  */
    410   gr = check_register_alignment (current_cpu, gr, 3);
    411 
    412   SET_H_GR (gr    , newval[0]);
    413   SET_H_GR (gr + 1, newval[1]);
    414   SET_H_GR (gr + 2, newval[2]);
    415   SET_H_GR (gr + 3, newval[3]);
    416 }
    417 
    418 void
    419 frvbf_h_fr_quad_set_handler (SIM_CPU *current_cpu, UINT fr, SI *newval)
    420 {
    421   /* Check the register alignment.  */
    422   fr = check_fr_register_alignment (current_cpu, fr, 3);
    423 
    424   SET_H_FR (fr    , newval[0]);
    425   SET_H_FR (fr + 1, newval[1]);
    426   SET_H_FR (fr + 2, newval[2]);
    427   SET_H_FR (fr + 3, newval[3]);
    428 }
    429 
    430 void
    431 frvbf_h_cpr_quad_set_handler (SIM_CPU *current_cpu, UINT cpr, SI *newval)
    432 {
    433   /* Check the register alignment.  */
    434   cpr = check_register_alignment (current_cpu, cpr, 3);
    435 
    436   SET_H_CPR (cpr    , newval[0]);
    437   SET_H_CPR (cpr + 1, newval[1]);
    438   SET_H_CPR (cpr + 2, newval[2]);
    439   SET_H_CPR (cpr + 3, newval[3]);
    440 }
    441 
    442 /* Cover fns to access the special purpose registers.  */
    444 USI
    445 frvbf_h_spr_get_handler (SIM_CPU *current_cpu, UINT spr)
    446 {
    447   /* Check access restrictions.  */
    448   frv_check_spr_read_access (current_cpu, spr);
    449 
    450   switch (spr)
    451     {
    452     case H_SPR_PSR:
    453       return spr_psr_get_handler (current_cpu);
    454     case H_SPR_TBR:
    455       return spr_tbr_get_handler (current_cpu);
    456     case H_SPR_BPSR:
    457       return spr_bpsr_get_handler (current_cpu);
    458     case H_SPR_CCR:
    459       return spr_ccr_get_handler (current_cpu);
    460     case H_SPR_CCCR:
    461       return spr_cccr_get_handler (current_cpu);
    462     case H_SPR_SR0:
    463     case H_SPR_SR1:
    464     case H_SPR_SR2:
    465     case H_SPR_SR3:
    466       return spr_sr_get_handler (current_cpu, spr);
    467       break;
    468     default:
    469       return CPU (h_spr[spr]);
    470     }
    471   return 0;
    472 }
    473 
/* Write special purpose register SPR.  Bits marked read-only in the
   register control's mask keep their old value; registers mirrored by
   individually-maintained fields are scattered by their dedicated
   handlers.  Writing IHSR8 additionally reconfigures the insn cache.  */
void
frvbf_h_spr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  FRV_REGISTER_CONTROL *control;
  USI mask;
  USI oldval;

  /* Check access restrictions.  */
  frv_check_spr_write_access (current_cpu, spr);

  /* Only set those fields which are writeable.  */
  control = CPU_REGISTER_CONTROL (current_cpu);
  mask = control->spr[spr].read_only_mask;
  oldval = GET_H_SPR (spr);

  /* Merge: writable bits from NEWVAL, read-only bits from OLDVAL.  */
  newval = (newval & ~mask) | (oldval & mask);

  /* Some registers are represented by individual components which are
     referenced more often than the register itself.  */
  switch (spr)
    {
    case H_SPR_PSR:
      spr_psr_set_handler (current_cpu, newval);
      break;
    case H_SPR_TBR:
      spr_tbr_set_handler (current_cpu, newval);
      break;
    case H_SPR_BPSR:
      spr_bpsr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCR:
      spr_ccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCCR:
      spr_cccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      spr_sr_set_handler (current_cpu, spr, newval);
      break;
    case H_SPR_IHSR8:
      frv_cache_reconfigure (current_cpu, CPU_INSN_CACHE (current_cpu));
      break;
    default:
      CPU (h_spr[spr]) = newval;
      break;
    }
}
    524 
    525 /* Cover fns to access the gr_hi and gr_lo registers.  */
/* Return the high 16 bits of GR.  */
UHI
frvbf_h_gr_hi_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return (GET_H_GR(gr) >> 16) & 0xffff;
}
    532 
    533 void
    534 frvbf_h_gr_hi_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
    535 {
    536   USI value = (GET_H_GR (gr) & 0xffff) | (newval << 16);
    537   SET_H_GR (gr, value);
    538 }
    539 
/* Return the low 16 bits of GR.  */
UHI
frvbf_h_gr_lo_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return GET_H_GR(gr) & 0xffff;
}
    545 
    546 void
    547 frvbf_h_gr_lo_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
    548 {
    549   USI value = (GET_H_GR (gr) & 0xffff0000) | (newval & 0xffff);
    550   SET_H_GR (gr, value);
    551 }
    552 
    553 /* Cover fns to access the tbr bits.  */
    555 USI
    556 spr_tbr_get_handler (SIM_CPU *current_cpu)
    557 {
    558   int tbr = ((GET_H_TBR_TBA () & 0xfffff) << 12) |
    559             ((GET_H_TBR_TT  () &  0xff) <<  4);
    560 
    561   return tbr;
    562 }
    563 
    564 void
    565 spr_tbr_set_handler (SIM_CPU *current_cpu, USI newval)
    566 {
    567   int tbr = newval;
    568 
    569   SET_H_TBR_TBA ((tbr >> 12) & 0xfffff) ;
    570   SET_H_TBR_TT  ((tbr >>  4) & 0xff) ;
    571 }
    572 
    573 /* Cover fns to access the bpsr bits.  */
    575 USI
    576 spr_bpsr_get_handler (SIM_CPU *current_cpu)
    577 {
    578   int bpsr = ((GET_H_BPSR_BS  () & 0x1) << 12) |
    579              ((GET_H_BPSR_BET () & 0x1)      );
    580 
    581   return bpsr;
    582 }
    583 
    584 void
    585 spr_bpsr_set_handler (SIM_CPU *current_cpu, USI newval)
    586 {
    587   int bpsr = newval;
    588 
    589   SET_H_BPSR_BS  ((bpsr >> 12) & 1);
    590   SET_H_BPSR_BET ((bpsr      ) & 1);
    591 }
    592 
    593 /* Cover fns to access the psr bits.  */
    595 USI
    596 spr_psr_get_handler (SIM_CPU *current_cpu)
    597 {
    598   int psr = ((GET_H_PSR_IMPLE () & 0xf) << 28) |
    599             ((GET_H_PSR_VER   () & 0xf) << 24) |
    600             ((GET_H_PSR_ICE   () & 0x1) << 16) |
    601             ((GET_H_PSR_NEM   () & 0x1) << 14) |
    602             ((GET_H_PSR_CM    () & 0x1) << 13) |
    603             ((GET_H_PSR_BE    () & 0x1) << 12) |
    604             ((GET_H_PSR_ESR   () & 0x1) << 11) |
    605             ((GET_H_PSR_EF    () & 0x1) <<  8) |
    606             ((GET_H_PSR_EM    () & 0x1) <<  7) |
    607             ((GET_H_PSR_PIL   () & 0xf) <<  3) |
    608             ((GET_H_PSR_S     () & 0x1) <<  2) |
    609             ((GET_H_PSR_PS    () & 0x1) <<  1) |
    610             ((GET_H_PSR_ET    () & 0x1)      );
    611 
    612   return psr;
    613 }
    614 
/* Scatter NEWVAL into the individually-maintained PSR fields.  */
void
spr_psr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
     first.  */
  SET_H_PSR_S ((newval >>  2) & 1);

  SET_H_PSR_IMPLE ((newval >> 28) & 0xf);
  SET_H_PSR_VER   ((newval >> 24) & 0xf);
  SET_H_PSR_ICE   ((newval >> 16) & 1);
  SET_H_PSR_NEM   ((newval >> 14) & 1);
  SET_H_PSR_CM    ((newval >> 13) & 1);
  SET_H_PSR_BE    ((newval >> 12) & 1);
  SET_H_PSR_ESR   ((newval >> 11) & 1);
  SET_H_PSR_EF    ((newval >>  8) & 1);
  SET_H_PSR_EM    ((newval >>  7) & 1);
  SET_H_PSR_PIL   ((newval >>  3) & 0xf);
  SET_H_PSR_PS    ((newval >>  1) & 1);
  SET_H_PSR_ET    ((newval      ) & 1);
}
    635 
/* Setter for the PSR.S (supervisor) bit.  The register context must be
   swapped before the new mode is recorded; writing the current value is
   a no-op.  */
void
frvbf_h_psr_s_set_handler (SIM_CPU *current_cpu, BI newval)
{
  /* If switching from user to supervisor mode, or vice-versa, then switch
     the supervisor/user context.  */
  int psr_s = GET_H_PSR_S ();
  if (psr_s != (newval & 1))
    {
      frvbf_switch_supervisor_user_context (current_cpu);
      CPU (h_psr_s) = newval & 1;
    }
}
    648 
    649 /* Cover fns to access the ccr bits.  */
    651 USI
    652 spr_ccr_get_handler (SIM_CPU *current_cpu)
    653 {
    654   int ccr = ((GET_H_ICCR (H_ICCR_ICC3) & 0xf) << 28) |
    655             ((GET_H_ICCR (H_ICCR_ICC2) & 0xf) << 24) |
    656             ((GET_H_ICCR (H_ICCR_ICC1) & 0xf) << 20) |
    657             ((GET_H_ICCR (H_ICCR_ICC0) & 0xf) << 16) |
    658             ((GET_H_FCCR (H_FCCR_FCC3) & 0xf) << 12) |
    659             ((GET_H_FCCR (H_FCCR_FCC2) & 0xf) <<  8) |
    660             ((GET_H_FCCR (H_FCCR_FCC1) & 0xf) <<  4) |
    661             ((GET_H_FCCR (H_FCCR_FCC0) & 0xf)      );
    662 
    663   return ccr;
    664 }
    665 
    666 void
    667 spr_ccr_set_handler (SIM_CPU *current_cpu, USI newval)
    668 {
    669   int ccr = newval;
    670 
    671   SET_H_ICCR (H_ICCR_ICC3, (newval >> 28) & 0xf);
    672   SET_H_ICCR (H_ICCR_ICC2, (newval >> 24) & 0xf);
    673   SET_H_ICCR (H_ICCR_ICC1, (newval >> 20) & 0xf);
    674   SET_H_ICCR (H_ICCR_ICC0, (newval >> 16) & 0xf);
    675   SET_H_FCCR (H_FCCR_FCC3, (newval >> 12) & 0xf);
    676   SET_H_FCCR (H_FCCR_FCC2, (newval >>  8) & 0xf);
    677   SET_H_FCCR (H_FCCR_FCC1, (newval >>  4) & 0xf);
    678   SET_H_FCCR (H_FCCR_FCC0, (newval      ) & 0xf);
    679 }
    680 
    681 QI
    683 frvbf_set_icc_for_shift_right (
    684   SIM_CPU *current_cpu, SI value, SI shift, QI icc
    685 )
    686 {
    687   /* Set the C flag of the given icc to the logical OR of the bits shifted
    688      out.  */
    689   int mask = (1 << shift) - 1;
    690   if ((value & mask) != 0)
    691     return icc | 0x1;
    692 
    693   return icc & 0xe;
    694 }
    695 
    696 QI
    697 frvbf_set_icc_for_shift_left (
    698   SIM_CPU *current_cpu, SI value, SI shift, QI icc
    699 )
    700 {
    701   /* Set the V flag of the given icc to the logical OR of the bits shifted
    702      out.  */
    703   int mask = ((1 << shift) - 1) << (32 - shift);
    704   if ((value & mask) != 0)
    705     return icc | 0x2;
    706 
    707   return icc & 0xd;
    708 }
    709 
    710 /* Cover fns to access the cccr bits.  */
    712 USI
    713 spr_cccr_get_handler (SIM_CPU *current_cpu)
    714 {
    715   int cccr = ((GET_H_CCCR (H_CCCR_CC7) & 0x3) << 14) |
    716              ((GET_H_CCCR (H_CCCR_CC6) & 0x3) << 12) |
    717              ((GET_H_CCCR (H_CCCR_CC5) & 0x3) << 10) |
    718              ((GET_H_CCCR (H_CCCR_CC4) & 0x3) <<  8) |
    719              ((GET_H_CCCR (H_CCCR_CC3) & 0x3) <<  6) |
    720              ((GET_H_CCCR (H_CCCR_CC2) & 0x3) <<  4) |
    721              ((GET_H_CCCR (H_CCCR_CC1) & 0x3) <<  2) |
    722              ((GET_H_CCCR (H_CCCR_CC0) & 0x3)      );
    723 
    724   return cccr;
    725 }
    726 
    727 void
    728 spr_cccr_set_handler (SIM_CPU *current_cpu, USI newval)
    729 {
    730   int cccr = newval;
    731 
    732   SET_H_CCCR (H_CCCR_CC7, (newval >> 14) & 0x3);
    733   SET_H_CCCR (H_CCCR_CC6, (newval >> 12) & 0x3);
    734   SET_H_CCCR (H_CCCR_CC5, (newval >> 10) & 0x3);
    735   SET_H_CCCR (H_CCCR_CC4, (newval >>  8) & 0x3);
    736   SET_H_CCCR (H_CCCR_CC3, (newval >>  6) & 0x3);
    737   SET_H_CCCR (H_CCCR_CC2, (newval >>  4) & 0x3);
    738   SET_H_CCCR (H_CCCR_CC1, (newval >>  2) & 0x3);
    739   SET_H_CCCR (H_CCCR_CC0, (newval      ) & 0x3);
    740 }
    741 
    742 /* Cover fns to access the sr bits.  */
    744 USI
    745 spr_sr_get_handler (SIM_CPU *current_cpu, UINT spr)
    746 {
    747   /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
    748      otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
    749   int psr_esr = GET_H_PSR_ESR ();
    750   if (! psr_esr)
    751     return GET_H_GR (4 + (spr - H_SPR_SR0));
    752 
    753   return CPU (h_spr[spr]);
    754 }
    755 
    756 void
    757 spr_sr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
    758 {
    759   /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
    760      otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
    761   int psr_esr = GET_H_PSR_ESR ();
    762   if (! psr_esr)
    763     SET_H_GR (4 + (spr - H_SPR_SR0), newval);
    764   else
    765     CPU (h_spr[spr]) = newval;
    766 }
    767 
/* Switch SR0-SR3 with GR4-GR7 if PSR.ESR is set; with PSR.ESR clear the
   two register sets alias each other and there is nothing to swap.  */
void
frvbf_switch_supervisor_user_context (SIM_CPU *current_cpu)
{
  if (GET_H_PSR_ESR ())
    {
      /* We need to be in supervisor mode to swap the registers. Access the
	 PSR.S directly in order to avoid recursive context switches.  */
      int i;
      int save_psr_s = CPU (h_psr_s);
      CPU (h_psr_s) = 1;
      for (i = 0; i < 4; ++i)
	{
	  int gr = i + 4;
	  int spr = i + H_SPR_SR0;
	  SI tmp = GET_H_SPR (spr);
	  SET_H_SPR (spr, GET_H_GR (gr));
	  SET_H_GR (gr, tmp);
	}
      /* Restore the mode that was in effect on entry.  */
      CPU (h_psr_s) = save_psr_s;
    }
}
    791 
    792 /* Handle load/store of quad registers.  */
/* Load four consecutive words at ADDRESS into GR TARG_IX..TARG_IX+3.
   When the timing model is active (model_insn), only record the access;
   the model profiling functions initiate the cache operation.  */
void
frvbf_load_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
	{
	  value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
	  address += 4;
	}
      /* Queue the register write so it commits with the rest of the
	 VLIW instruction's side effects.  */
      sim_queue_fn_xi_write (current_cpu, frvbf_h_gr_quad_set_handler, targ_ix,
			     value);
    }
}
    822 
    823 void
    824 frvbf_store_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
    825 {
    826   int i;
    827   SI value[4];
    828   USI hsr0;
    829 
    830   /* Check register and memory alignment.  */
    831   src_ix = check_register_alignment (current_cpu, src_ix, 3);
    832   address = check_memory_alignment (current_cpu, address, 0xf);
    833 
    834   for (i = 0; i < 4; ++i)
    835     {
    836       /* GR0 is always 0.  */
    837       if (src_ix == 0)
    838 	value[i] = 0;
    839       else
    840 	value[i] = GET_H_GR (src_ix + i);
    841     }
    842   hsr0 = GET_HSR0 ();
    843   if (GET_HSR0_DCE (hsr0))
    844     sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
    845   else
    846     sim_queue_mem_xi_write (current_cpu, address, value);
    847 }
    848 
/* Load four consecutive words at ADDRESS into FR TARG_IX..TARG_IX+3.
   When the timing model is active (model_insn), only record the access;
   the model profiling functions initiate the cache operation.  */
void
frvbf_load_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
	{
	  value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
	  address += 4;
	}
      /* Queue the register write so it commits with the rest of the
	 VLIW instruction's side effects.  */
      sim_queue_fn_xi_write (current_cpu, frvbf_h_fr_quad_set_handler, targ_ix,
			     value);
    }
}
    877 
    878 void
    879 frvbf_store_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
    880 {
    881   int i;
    882   SI value[4];
    883   USI hsr0;
    884 
    885   /* Check register and memory alignment.  */
    886   src_ix = check_fr_register_alignment (current_cpu, src_ix, 3);
    887   address = check_memory_alignment (current_cpu, address, 0xf);
    888 
    889   for (i = 0; i < 4; ++i)
    890     value[i] = GET_H_FR (src_ix + i);
    891 
    892   hsr0 = GET_HSR0 ();
    893   if (GET_HSR0_DCE (hsr0))
    894     sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
    895   else
    896     sim_queue_mem_xi_write (current_cpu, address, value);
    897 }
    898 
/* Load four consecutive words at ADDRESS into CPR TARG_IX..TARG_IX+3.
   When the timing model is active (model_insn), only record the access;
   the model profiling functions initiate the cache operation.  */
void
frvbf_load_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
	{
	  value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
	  address += 4;
	}
      /* Queue the register write so it commits with the rest of the
	 VLIW instruction's side effects.  */
      sim_queue_fn_xi_write (current_cpu, frvbf_h_cpr_quad_set_handler, targ_ix,
			     value);
    }
}
    927 
    928 void
    929 frvbf_store_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
    930 {
    931   int i;
    932   SI value[4];
    933   USI hsr0;
    934 
    935   /* Check register and memory alignment.  */
    936   src_ix = check_register_alignment (current_cpu, src_ix, 3);
    937   address = check_memory_alignment (current_cpu, address, 0xf);
    938 
    939   for (i = 0; i < 4; ++i)
    940     value[i] = GET_H_CPR (src_ix + i);
    941 
    942   hsr0 = GET_HSR0 ();
    943   if (GET_HSR0_DCE (hsr0))
    944     sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
    945   else
    946     sim_queue_mem_xi_write (current_cpu, address, value);
    947 }
    948 
/* Signed 32-bit division with FRV exception semantics.  Overflow
   (0x80000000 / -1) and division by zero queue a division exception;
   for the overflow case a result is still written (0x7fffffff when
   ISR.EDE is set, 0x80000000 otherwise).  The non-excepting form clears
   the target register's NE flag on success.  */
void
frvbf_signed_integer_divide (
  SIM_CPU *current_cpu, SI arg1, SI arg2, int target_index, int non_excepting
)
{
  enum frv_dtt dtt = FRV_DTT_NO_EXCEPTION;
  if (arg1 == 0x80000000 && arg2 == -1)
    {
      /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set
	 otherwise it may result in 0x7fffffff (sparc compatibility) or
	 0x80000000 (C language compatibility). */
      USI isr;
      dtt = FRV_DTT_OVERFLOW;

      isr = GET_ISR ();
      if (GET_ISR_EDE (isr))
	sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
			       0x7fffffff);
      else
	sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
			       0x80000000);
      frvbf_force_update (current_cpu); /* Force update of target register.  */
    }
  else if (arg2 == 0)
    dtt = FRV_DTT_DIVISION_BY_ZERO;
  else
    sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
			   arg1 / arg2);

  /* Check for exceptions.  */
  if (dtt != FRV_DTT_NO_EXCEPTION)
    dtt = frvbf_division_exception (current_cpu, dtt, target_index,
				    non_excepting);
  if (non_excepting && dtt == FRV_DTT_NO_EXCEPTION)
    {
      /* Non excepting instruction. Clear the NE flag for the target
	 register.  */
      SI NE_flags[2];
      GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
      CLEAR_NE_FLAG (NE_flags, target_index);
      SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
    }
}
    993 
    994 void
    995 frvbf_unsigned_integer_divide (
    996   SIM_CPU *current_cpu, USI arg1, USI arg2, int target_index, int non_excepting
    997 )
    998 {
    999   if (arg2 == 0)
   1000     frvbf_division_exception (current_cpu, FRV_DTT_DIVISION_BY_ZERO,
   1001 			      target_index, non_excepting);
   1002   else
   1003     {
   1004       sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
   1005 			     arg1 / arg2);
   1006       if (non_excepting)
   1007 	{
   1008 	  /* Non excepting instruction. Clear the NE flag for the target
   1009 	     register.  */
   1010 	  SI NE_flags[2];
   1011 	  GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
   1012 	  CLEAR_NE_FLAG (NE_flags, target_index);
   1013 	  SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
   1014 	}
   1015     }
   1016 }
   1017 
/* Clear accumulators.  If A is nonzero and ACC_IX is zero, clear all
   implemented accumulators; otherwise clear only accumulator ACC_IX
   (a nop if that accumulator is not implemented on the simulated
   machine).  */
void
frvbf_clear_accumulators (SIM_CPU *current_cpu, SI acc_ix, int A)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  /* Mask of valid accumulator numbers for the simulated machine:
     accumulator I is implemented iff (I & acc_mask) == I.  */
  int acc_mask =
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450) ? 11 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400) ? 3 :
    63;
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);

  /* Record the operands for use by the profiling code.  */
  ps->mclracc_acc = acc_ix;
  ps->mclracc_A   = A;
  if (A == 0 || acc_ix != 0) /* Clear 1 accumulator?  */
    {
      /* This instruction is a nop if the referenced accumulator is not
	 implemented. */
      if ((acc_ix & acc_mask) == acc_ix)
	sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, acc_ix, 0);
    }
  else
    {
      /* Clear all implemented accumulators.  */
      int i;
      for (i = 0; i <= acc_mask; ++i)
	if ((i & acc_mask) == i)
	  sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, i, 0);
    }
}
   1050 
   1051 /* Functions to aid insn semantics.  */
   1053 
   1054 /* Compute the result of the SCAN and SCANI insns after the shift and xor.  */
   1055 SI
   1056 frvbf_scan_result (SIM_CPU *current_cpu, SI value)
   1057 {
   1058   SI i;
   1059   SI mask;
   1060 
   1061   if (value == 0)
   1062     return 63;
   1063 
   1064   /* Find the position of the first non-zero bit.
   1065      The loop will terminate since there is guaranteed to be at least one
   1066      non-zero bit.  */
   1067   mask = 1 << (sizeof (mask) * 8 - 1);
   1068   for (i = 0; (value & mask) == 0; ++i)
   1069     value <<= 1;
   1070 
   1071   return i;
   1072 }
   1073 
   1074 /* Compute the result of the cut insns.  */
   1075 SI
   1076 frvbf_cut (SIM_CPU *current_cpu, SI reg1, SI reg2, SI cut_point)
   1077 {
   1078   SI result;
   1079   cut_point &= 0x3f;
   1080   if (cut_point < 32)
   1081     {
   1082       result = reg1 << cut_point;
   1083       result |= (reg2 >> (32 - cut_point)) & ((1 << cut_point) - 1);
   1084     }
   1085   else
   1086     result = reg2 << (cut_point - 32);
   1087 
   1088   return result;
   1089 }
   1090 
/* Compute the result of the media cut insns: extract 32 bits from the
   64-bit accumulator ACC, with the (signed, 6-bit) CUT_POINT measured
   relative to bit 40.  NOTE(review): this relies on >> of a negative DI
   being an arithmetic shift and on << of negative values wrapping --
   GCC-defined behavior, not guaranteed by ISO C.  */
SI
frvbf_media_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed;
     the shift pair sign-extends that 6-bit field.  */
  cut_point = cut_point << 26 >> 26;

  /* The cut_point is relative to bit 40 of 64 bits.  */
  if (cut_point >= 0)
    return (acc << (cut_point + 24)) >> 32;

  /* Extend the sign bit (bit 40) for negative cuts.  */
  if (cut_point == -32)
    return (acc << 24) >> 63; /* Special case for full shiftout.  */

  return (acc << 24) >> (32 + -cut_point);
}
   1108 
/* Compute the result of the saturating media cut insns: like
   frvbf_media_cut, but a non-negative cut that shifts significant bits
   out saturates to 0x7fffffff / 0x80000000 according to ACC's sign.  */
SI
frvbf_media_cut_ss (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  if (cut_point >= 0)
    {
      /* The cut_point is relative to bit 40 of 64 bits.  Shift out and
	 back; if the round trip does not restore ACC, significant bits
	 were lost.  */
      DI shifted = acc << (cut_point + 24);
      DI unshifted = shifted >> (cut_point + 24);

      /* The result will be saturated if significant bits are shifted out.  */
      if (unshifted != acc)
	{
	  if (acc < 0)
	    return 0x80000000;
	  return 0x7fffffff;
	}
    }

  /* The result will not be saturated, so use the code for the normal cut.  */
  return frvbf_media_cut (current_cpu, acc, cut_point);
}
   1134 
/* Compute the result of int accumulator cut (SCUTSS): extract a 32-bit
   field from the 64-bit accumulator ACC at signed 7-bit CUT_POINT,
   rounding the discarded bits and saturating the result to 32 bits.  */
SI
frvbf_iacc_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  DI lower, upper;

  /* The cut point is the lower 7 bits (signed) of what we are passed.  */
  cut_point = cut_point << 25 >> 25;

  /* Conceptually, the operation is on a 128-bit sign-extension of ACC.
     The top bit of the return value corresponds to bit (63 - CUT_POINT)
     of this 128-bit value.

     Since we can't deal with 128-bit values very easily, convert the
     operation into an equivalent 64-bit one.  */
  if (cut_point < 0)
    {
      /* Avoid an undefined shift operation.  */
      if (cut_point == -64)
	acc >>= 63;
      else
	acc >>= -cut_point;
      cut_point = 0;
    }

  /* Get the shifted but unsaturated result.  Set LOWER to the lowest
     32 bits of the result and UPPER to the result >> 31.  */
  if (cut_point < 32)
    {
      /* The cut loses the (32 - CUT_POINT) least significant bits.
	 Round the result up if the most significant of these lost bits
	 is 1.  */
      lower = acc >> (32 - cut_point);
      if (lower < 0x7fffffff)
	if (acc & LSBIT64 (32 - cut_point - 1))
	  lower++;
      upper = lower >> 31;
    }
  else
    {
      lower = acc << (cut_point - 32);
      upper = acc >> (63 - cut_point);
    }

  /* Saturate the result: UPPER holds the bits above the 31-bit result
     field, so any value other than 0 or -1 means overflow.  */
  if (upper < -1)
    return ~0x7fffffff;
  else if (upper > 0)
    return 0x7fffffff;
  else
    return lower;
}
   1187 
   1188 /* Compute the result of shift-left-arithmetic-with-saturation (SLASS).  */
   1189 SI
   1190 frvbf_shift_left_arith_saturate (SIM_CPU *current_cpu, SI arg1, SI arg2)
   1191 {
   1192   int neg_arg1;
   1193 
   1194   /* FIXME: what to do with negative shift amt?  */
   1195   if (arg2 <= 0)
   1196     return arg1;
   1197 
   1198   if (arg1 == 0)
   1199     return 0;
   1200 
   1201   /* Signed shift by 31 or greater saturates by definition.  */
   1202   if (arg2 >= 31)
   1203     if (arg1 > 0)
   1204       return (SI) 0x7fffffff;
   1205     else
   1206       return (SI) 0x80000000;
   1207 
   1208   /* OK, arg2 is between 1 and 31.  */
   1209   neg_arg1 = (arg1 < 0);
   1210   do {
   1211     arg1 <<= 1;
   1212     /* Check for sign bit change (saturation).  */
   1213     if (neg_arg1 && (arg1 >= 0))
   1214       return (SI) 0x80000000;
   1215     else if (!neg_arg1 && (arg1 < 0))
   1216       return (SI) 0x7fffffff;
   1217   } while (--arg2 > 0);
   1218 
   1219   return arg1;
   1220 }
   1221 
   1222 /* Simulate the media custom insns.  */
   1223 void
   1224 frvbf_media_cop (SIM_CPU *current_cpu, int cop_num)
   1225 {
   1226   /* The semantics of the insn are a nop, since it is implementation defined.
   1227      We do need to check whether it's implemented and set up for MTRAP
   1228      if it's not.  */
   1229   USI msr0 = GET_MSR (0);
   1230   if (GET_MSR_EMCI (msr0) == 0)
   1231     {
   1232       /* no interrupt queued at this time.  */
   1233       frv_set_mp_exception_registers (current_cpu, MTT_UNIMPLEMENTED_MPOP, 0);
   1234     }
   1235 }
   1236 
/* Simulate the media average (MAVEH) insn on one 16-bit element:
   return (ARG1 + ARG2) / 2.  The default is to round toward negative
   infinity (arithmetic shift); on fr4xx and fr550 the rounding of odd
   sums is controlled by MSR0 fields.  */
static HI
do_media_average (SIM_CPU *current_cpu, HI arg1, HI arg2)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  SI sum = (arg1 + arg2);
  HI result = sum >> 1;  /* Arithmetic shift: rounds toward -infinity.  */
  int rounding_value;

  /* On fr4xx and fr550, check the rounding mode.  On other machines
     rounding is always toward negative infinity and the result is
     already correctly rounded.  */
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
      /* Need to check rounding mode. */
    case bfd_mach_fr400:
    case bfd_mach_fr450:
    case bfd_mach_fr550:
      /* Check whether rounding will be required.  Rounding will be required
	 if the sum is an odd number.  */
      rounding_value = sum & 1;
      if (rounding_value)
	{
	  USI msr0 = GET_MSR (0);
	  /* Check MSR0.SRDAV to determine which bits control the rounding.  */
	  if (GET_MSR_SRDAV (msr0))
	    {
	      /* MSR0.RD controls rounding.  */
	      switch (GET_MSR_RD (msr0))
		{
		case 0:
		  /* Round to nearest.  */
		  if (result >= 0)
		    ++result;
		  break;
		case 1:
		  /* Round toward 0. */
		  if (result < 0)
		    ++result;
		  break;
		case 2:
		  /* Round toward positive infinity.  */
		  ++result;
		  break;
		case 3:
		  /* Round toward negative infinity.  The result is already
		     correctly rounded.  */
		  break;
		default:
		  abort ();
		  break;
		}
	    }
	  else
	    {
	      /* MSR0.RDAV controls rounding.  If set, round toward positive
		 infinity.  Otherwise the result is already rounded correctly
		 toward negative infinity.  */
	      if (GET_MSR_RDAV (msr0))
		++result;
	    }
	}
      break;
    default:
      break;
    }

  return result;
}
   1306 
   1307 SI
   1308 frvbf_media_average (SIM_CPU *current_cpu, SI reg1, SI reg2)
   1309 {
   1310   SI result;
   1311   result  = do_media_average (current_cpu, reg1 & 0xffff, reg2 & 0xffff);
   1312   result &= 0xffff;
   1313   result |= do_media_average (current_cpu, (reg1 >> 16) & 0xffff,
   1314 			      (reg2 >> 16) & 0xffff) << 16;
   1315   return result;
   1316 }
   1317 
/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by JMPL. JMPIL, and CALL.  */
void
frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU *current_cpu, int value)
{
  /* The flag is the file-scope frvbf_write_next_vliw_addr_to_LR,
     declared near the top of this file.  */
  frvbf_write_next_vliw_addr_to_LR = value;
}
   1325 
   1326 void
   1327 frvbf_set_ne_index (SIM_CPU *current_cpu, int index)
   1328 {
   1329   USI NE_flags[2];
   1330 
   1331   /* Save the target register so interrupt processing can set its NE flag
   1332      in the event of an exception.  */
   1333   frv_interrupt_state.ne_index = index;
   1334 
   1335   /* Clear the NE flag of the target register. It will be reset if necessary
   1336      in the event of an exception.  */
   1337   GET_NE_FLAGS (NE_flags, H_SPR_FNER0);
   1338   CLEAR_NE_FLAG (NE_flags, index);
   1339   SET_NE_FLAGS (H_SPR_FNER0, NE_flags);
   1340 }
   1341 
   1342 void
   1343 frvbf_force_update (SIM_CPU *current_cpu)
   1344 {
   1345   CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
   1346   int ix = CGEN_WRITE_QUEUE_INDEX (q);
   1347   if (ix > 0)
   1348     {
   1349       CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix - 1);
   1350       item->flags |= FRV_WRITE_QUEUE_FORCE_WRITE;
   1351     }
   1352 }
   1353 
/* Condition code logic.  */

/* The operations implemented by the CR logic insns; used as the first
   index into the cr_logic truth tables below.  */
enum cr_ops {
  andcr, orcr, xorcr, nandcr, norcr, andncr, orncr, nandncr, norncr,
  num_cr_ops
};

/* A CCR field value: 0 and 1 both encode "undefined", 2 is false,
   3 is true.  */
enum cr_result {cr_undefined, cr_undefined1, cr_false, cr_true};
   1362 
/* Truth tables for the CR logic operations, indexed as
   cr_logic[operation][arg1][arg2] with arguments in the cr_result
   encoding (0/1 undefined, 2 false, 3 true).  */
static enum cr_result
cr_logic[num_cr_ops][4][4] = {
  /* andcr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_false,     cr_true     }
  },
  /* orcr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_true     }
  },
  /* xorcr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  },
  /* nandcr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_true,      cr_false    }
  },
  /* norcr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_false    },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_false    }
  },
  /* andncr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* orncr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_true     },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_true     }
  },
  /* nandncr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* norncr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_false    },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  }
};
   1438 
/* Apply CR logic operation OPERATION (a cr_ops value) to ARG1 and ARG2
   (cr_result encodings, 0-3) and return the resulting cr_result value.
   No bounds checking is done on the indices.  */
UQI
frvbf_cr_logic (SIM_CPU *current_cpu, SI operation, UQI arg1, UQI arg2)
{
  return cr_logic[operation][arg1][arg2];
}
   1444 
   1445 /* Cache Manipulation.  */
   1447 void
   1448 frvbf_insn_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
   1449 {
   1450   /* If we need to count cycles, then the cache operation will be
   1451      initiated from the model profiling functions.
   1452      See frvbf_model_....  */
   1453   int hsr0 = GET_HSR0 ();
   1454   if (GET_HSR0_ICE (hsr0))
   1455     {
   1456       if (model_insn)
   1457 	{
   1458 	  CPU_LOAD_ADDRESS (current_cpu) = address;
   1459 	  CPU_LOAD_LENGTH (current_cpu) = length;
   1460 	  CPU_LOAD_LOCK (current_cpu) = lock;
   1461 	}
   1462       else
   1463 	{
   1464 	  FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
   1465 	  frv_cache_preload (cache, address, length, lock);
   1466 	}
   1467     }
   1468 }
   1469 
   1470 void
   1471 frvbf_data_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
   1472 {
   1473   /* If we need to count cycles, then the cache operation will be
   1474      initiated from the model profiling functions.
   1475      See frvbf_model_....  */
   1476   int hsr0 = GET_HSR0 ();
   1477   if (GET_HSR0_DCE (hsr0))
   1478     {
   1479       if (model_insn)
   1480 	{
   1481 	  CPU_LOAD_ADDRESS (current_cpu) = address;
   1482 	  CPU_LOAD_LENGTH (current_cpu) = length;
   1483 	  CPU_LOAD_LOCK (current_cpu) = lock;
   1484 	}
   1485       else
   1486 	{
   1487 	  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
   1488 	  frv_cache_preload (cache, address, length, lock);
   1489 	}
   1490     }
   1491 }
   1492 
   1493 void
   1494 frvbf_insn_cache_unlock (SIM_CPU *current_cpu, SI address)
   1495 {
   1496   /* If we need to count cycles, then the cache operation will be
   1497      initiated from the model profiling functions.
   1498      See frvbf_model_....  */
   1499   int hsr0 = GET_HSR0 ();
   1500   if (GET_HSR0_ICE (hsr0))
   1501     {
   1502       if (model_insn)
   1503 	CPU_LOAD_ADDRESS (current_cpu) = address;
   1504       else
   1505 	{
   1506 	  FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
   1507 	  frv_cache_unlock (cache, address);
   1508 	}
   1509     }
   1510 }
   1511 
   1512 void
   1513 frvbf_data_cache_unlock (SIM_CPU *current_cpu, SI address)
   1514 {
   1515   /* If we need to count cycles, then the cache operation will be
   1516      initiated from the model profiling functions.
   1517      See frvbf_model_....  */
   1518   int hsr0 = GET_HSR0 ();
   1519   if (GET_HSR0_DCE (hsr0))
   1520     {
   1521       if (model_insn)
   1522 	CPU_LOAD_ADDRESS (current_cpu) = address;
   1523       else
   1524 	{
   1525 	  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
   1526 	  frv_cache_unlock (cache, address);
   1527 	}
   1528     }
   1529 }
   1530 
   1531 void
   1532 frvbf_insn_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
   1533 {
   1534   /* Make sure the insn was specified properly.  -1 will be passed for ALL
   1535      for a icei with A=0.  */
   1536   if (all == -1)
   1537     {
   1538       frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
   1539       return;
   1540     }
   1541 
   1542   /* If we need to count cycles, then the cache operation will be
   1543      initiated from the model profiling functions.
   1544      See frvbf_model_....  */
   1545   if (model_insn)
   1546     {
   1547       /* Record the all-entries flag for use in profiling.  */
   1548       FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
   1549       ps->all_cache_entries = all;
   1550       CPU_LOAD_ADDRESS (current_cpu) = address;
   1551     }
   1552   else
   1553     {
   1554       FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
   1555       if (all)
   1556 	frv_cache_invalidate_all (cache, 0/* flush? */);
   1557       else
   1558 	frv_cache_invalidate (cache, address, 0/* flush? */);
   1559     }
   1560 }
   1561 
   1562 void
   1563 frvbf_data_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
   1564 {
   1565   /* Make sure the insn was specified properly.  -1 will be passed for ALL
   1566      for a dcei with A=0.  */
   1567   if (all == -1)
   1568     {
   1569       frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
   1570       return;
   1571     }
   1572 
   1573   /* If we need to count cycles, then the cache operation will be
   1574      initiated from the model profiling functions.
   1575      See frvbf_model_....  */
   1576   if (model_insn)
   1577     {
   1578       /* Record the all-entries flag for use in profiling.  */
   1579       FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
   1580       ps->all_cache_entries = all;
   1581       CPU_LOAD_ADDRESS (current_cpu) = address;
   1582     }
   1583   else
   1584     {
   1585       FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
   1586       if (all)
   1587 	frv_cache_invalidate_all (cache, 0/* flush? */);
   1588       else
   1589 	frv_cache_invalidate (cache, address, 0/* flush? */);
   1590     }
   1591 }
   1592 
   1593 void
   1594 frvbf_data_cache_flush (SIM_CPU *current_cpu, SI address, int all)
   1595 {
   1596   /* Make sure the insn was specified properly.  -1 will be passed for ALL
   1597      for a dcef with A=0.  */
   1598   if (all == -1)
   1599     {
   1600       frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
   1601       return;
   1602     }
   1603 
   1604   /* If we need to count cycles, then the cache operation will be
   1605      initiated from the model profiling functions.
   1606      See frvbf_model_....  */
   1607   if (model_insn)
   1608     {
   1609       /* Record the all-entries flag for use in profiling.  */
   1610       FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
   1611       ps->all_cache_entries = all;
   1612       CPU_LOAD_ADDRESS (current_cpu) = address;
   1613     }
   1614   else
   1615     {
   1616       FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
   1617       if (all)
   1618 	frv_cache_invalidate_all (cache, 1/* flush? */);
   1619       else
   1620 	frv_cache_invalidate (cache, address, 1/* flush? */);
   1621     }
   1622 }
   1623