Home | History | Annotate | Line # | Download | only in frv
      1 # Simulator main loop for frv. -*- C -*-
      2 # Copyright (C) 1998-2024 Free Software Foundation, Inc.
      3 # Contributed by Red Hat.
      4 #
      5 # This file is part of the GNU Simulators.
      6 #
      7 # This program is free software; you can redistribute it and/or modify
      8 # it under the terms of the GNU General Public License as published by
      9 # the Free Software Foundation; either version 3 of the License, or
     10 # (at your option) any later version.
     11 #
     12 # This program is distributed in the hope that it will be useful,
     13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
     14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     15 # GNU General Public License for more details.
     16 #
     17 # You should have received a copy of the GNU General Public License
     18 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
     19 
     20 # Syntax:
     21 # /bin/sh mainloop.in command
     22 #
     23 # Command is one of:
     24 #
     25 # init
     26 # support
     27 # extract-{simple,scache,pbb}
     28 # {full,fast}-exec-{simple,scache,pbb}
     29 #
     30 # A target need only provide a "full" version of one of simple,scache,pbb.
     31 # If the target wants it can also provide a fast version of same.
     32 # It can't provide more than this.
     33 
     34 # ??? After a few more ports are done, revisit.
     35 # Will eventually need to machine generate a lot of this.
     36 
     37 case "x$1" in
     38 
     39 xsupport)
     40 
# Emit support code shared by all engine styles: extract/execute helpers,
# the parallel-write queue used for VLIW semantics, and insn prefetch.
     41 cat <<EOF
     42 #line $LINENO "$0"
     43 
/* Decode the insn word INSN found at PC into ABUF and return its IDESC.
   In full-featured (!FAST_P) mode, additionally record whether PC falls in
   the trace and profile address ranges so the semantic code can test those
   flags cheaply via the ARGBUF.  */
     44 static INLINE const IDESC *
     45 extract (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn, ARGBUF *abuf,
     46          int fast_p)
     47 {
     48   const IDESC *id = @cpu@_decode (current_cpu, pc, insn, insn, abuf);
     49   @cpu@_fill_argbuf (current_cpu, abuf, id, pc, fast_p);
     50   if (! fast_p)
     51     {
     52       int trace_p = PC_IN_TRACE_RANGE_P (current_cpu, pc);
     53       int profile_p = PC_IN_PROFILE_RANGE_P (current_cpu, pc);
     54       @cpu@_fill_argbuf_tp (current_cpu, abuf, trace_p, profile_p);
     55     }
     56   return id;
     57 }
     58 
/* Execute the insn described by SC and return the new virtual PC.
   FAST_P selects the fast semantic routine (no tracing or profiling);
   otherwise, for non-virtual insns (virtual insns are PBB bookkeeping),
   run profiling, the two-pass timing model and trace hooks around the
   full semantic routine.  */
     59 static INLINE SEM_PC
     60 execute (SIM_CPU *current_cpu, SCACHE *sc, int fast_p)
     61 {
     62   SEM_PC vpc;
     63 
     64   /* Force gr0 to zero before every insn.  */
     65   @cpu@_h_gr_set (current_cpu, 0, 0);
     66 
     67   if (fast_p)
     68     {
     69       vpc = (*sc->argbuf.semantic.sem_fast) (current_cpu, sc);
     70     }
     71   else
     72     {
     73       ARGBUF *abuf = &sc->argbuf;
     74       const IDESC *idesc = abuf->idesc;
     75 #if WITH_SCACHE_PBB
     76       int virtual_p = CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_VIRTUAL);
     77 #else
     78       int virtual_p = 0;
     79 #endif
     80 
     81       if (! virtual_p)
	{
	  /* FIXME: call x-before */
	  if (ARGBUF_PROFILE_P (abuf))
	    PROFILE_COUNT_INSN (current_cpu, abuf->addr, idesc->num);
	  /* FIXME: Later make cover macros: PROFILE_INSN_{INIT,FINI}.  */
	  /* Timing model pass 1: run the per-insn model function (if any)
	     before the semantics execute.  */
	  if (FRV_COUNT_CYCLES (current_cpu, ARGBUF_PROFILE_P (abuf)))
	    {
	      @cpu@_model_insn_before (current_cpu, sc->first_insn_p);
	      model_insn = FRV_INSN_MODEL_PASS_1;
	      if (idesc->timing->model_fn != NULL)
		(*idesc->timing->model_fn) (current_cpu, sc);
	    }
	  else
	    model_insn = FRV_INSN_NO_MODELING;
	  CGEN_TRACE_INSN_INIT (current_cpu, abuf, 1);
	  CGEN_TRACE_INSN (current_cpu, idesc->idata,
		      (const struct argbuf *) abuf, abuf->addr);
	}
     99 #if WITH_SCACHE
    100       vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, sc);
    101 #else
    102       vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, abuf);
    103 #endif
    104       if (! virtual_p)
	{
	  /* FIXME: call x-after */
	  /* Timing model pass 2: re-run the model function after the
	     semantics to obtain the cycle count for this insn.  */
	  if (FRV_COUNT_CYCLES (current_cpu, ARGBUF_PROFILE_P (abuf)))
	    {
	      int cycles;
	      if (idesc->timing->model_fn != NULL)
		{
		  model_insn = FRV_INSN_MODEL_PASS_2;
		  cycles = (*idesc->timing->model_fn) (current_cpu, sc);
		}
	      else
		cycles = 1;
	      @cpu@_model_insn_after (current_cpu, sc->last_insn_p, cycles);
	    }
	  CGEN_TRACE_INSN_FINI (current_cpu, abuf, 1);
	}
    121     }
    122 
    123   return vpc;
    124 }
    126 
/* Prepare for executing one VLIW packet: empty the parallel-write queue,
   remember the packet's starting PC, and reset the per-packet interrupt
   state (NE flags and any pending imprecise interrupt).  */
    127 static void
    128 @cpu@_parallel_write_init (SIM_CPU *current_cpu)
    129 {
    130   CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
    131   CGEN_WRITE_QUEUE_CLEAR (q);
    132   previous_vliw_pc = CPU_PC_GET(current_cpu);
    133   frv_interrupt_state.f_ne_flags[0] = 0;
    134   frv_interrupt_state.f_ne_flags[1] = 0;
    135   frv_interrupt_state.imprecise_interrupt = NULL;
    136 }
    137 
/* Flush the parallel-write queue accumulated while executing one VLIW
   packet.  Writes are suppressed for the insn that raised an imprecise
   interrupt (unless forced or an FP overflow/underflow/inexact), only the
   first queued branch takes effect, and the PC is finally set to the
   branch target (or left at the packet's saved PC).  */
    138 static void
    139 @cpu@_parallel_write_queued (SIM_CPU *current_cpu)
    140 {
    141   int i;
    142 
    143   CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
    144 
    145   /* Loop over the queued writes, executing them. Set the pc to the address
    146      of the insn which queued each write for the proper context in case an
    147      interrupt is caused. Restore the proper pc after the writes are
    148      completed.  */
    149   IADDR save_pc = CPU_PC_GET (current_cpu);
    150   IADDR new_pc  = save_pc;
    151   int branch_taken = 0;
    152   int limit = CGEN_WRITE_QUEUE_INDEX (q);
    153   frv_interrupt_state.data_written.length = 0;
    154 
    155   for (i = 0; i < limit; ++i)
    156     {
    157       CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, i);
    158 
    159       /* If an imprecise interrupt was generated, then, check whether the
	 result should still be written.  */
    161       if (frv_interrupt_state.imprecise_interrupt != NULL)
	{
	  /* Only check writes by the insn causing the exception.  */
	  if (CGEN_WRITE_QUEUE_ELEMENT_IADDR (item)
	      == frv_interrupt_state.imprecise_interrupt->vpc)
	    {
	      /* Execute writes of floating point operations resulting in
		 overflow, underflow or inexact.  */
	      if (frv_interrupt_state.imprecise_interrupt->kind
		  == FRV_FP_EXCEPTION)
		{
		  if ((frv_interrupt_state.imprecise_interrupt
		       ->u.fp_info.fsr_mask
		       & ~(FSR_INEXACT | FSR_OVERFLOW | FSR_UNDERFLOW)))
		    continue; /* Don't execute */
		}
	      /* Execute writes marked as 'forced'.  */
	      else if (! (CGEN_WRITE_QUEUE_ELEMENT_FLAGS (item)
			  & FRV_WRITE_QUEUE_FORCE_WRITE))
		continue; /* Don't execute */
	    }
	}
    182 
    183       /* Only execute the first branch on the queue.  */
    184       if (CGEN_WRITE_QUEUE_ELEMENT_KIND (item) == CGEN_PC_WRITE
    185           || CGEN_WRITE_QUEUE_ELEMENT_KIND (item) == CGEN_FN_PC_WRITE)
	{
	  if (branch_taken)
	    continue;
	  branch_taken = 1;
	  if (CGEN_WRITE_QUEUE_ELEMENT_KIND (item) == CGEN_PC_WRITE)
	    new_pc = item->kinds.pc_write.value;
          else
	    new_pc = item->kinds.fn_pc_write.value;
	}
    195 
    196       CPU_PC_SET (current_cpu, CGEN_WRITE_QUEUE_ELEMENT_IADDR (item));
    197       frv_save_data_written_for_interrupts (current_cpu, item);
    198       cgen_write_queue_element_execute (current_cpu, item);
    199     }
    200 
    201   /* Update the LR with the address of the next insn if the flag is set.
    202      This flag gets set in frvbf_set_write_next_vliw_to_LR by the JMPL,
    203      JMPIL and CALL insns.  */
    204   if (frvbf_write_next_vliw_addr_to_LR)
    205     {
    206       frvbf_h_spr_set_handler (current_cpu, H_SPR_LR, save_pc);
    207       frvbf_write_next_vliw_addr_to_LR = 0;
    208     }
    209 
    210   CPU_PC_SET (current_cpu, new_pc);
    211   CGEN_WRITE_QUEUE_CLEAR (q);
    212 }
    214 
/* Public entry point: flush the queued parallel writes for the current
   VLIW packet (used by the interrupt code for early writeback).  */
    215 void
    216 @cpu@_perform_writeback (SIM_CPU *current_cpu)
    217 {
    218   @cpu@_parallel_write_queued (current_cpu);
    219 }
    220 
/* Monotonically increasing id handed to each cache load request.  */
    221 static unsigned cache_reqno = 0x80000000; /* Start value is for debugging.  */
    222 
/* NOTE(review): the following single-prefetch variant is compiled out; the
   fr400/fr450 currently go through simulate_dual_insn_prefetch instead
   (see @cpu@_simulate_insn_prefetch below).  Kept for reference only.  */
    223 #if 0 /* experimental */
    224 /* FR400 has single prefetch.  */
    225 static void
    226 fr400_simulate_insn_prefetch (SIM_CPU *current_cpu, IADDR vpc)
    227 {
    228   int cur_ix;
    229   FRV_CACHE *cache;
    230 
    231 /* The cpu receives 8 bytes worth of insn data for each fetch aligned
    232    on 8 byte boundary.  */
    233 #define FR400_FETCH_SIZE 8
    234 
    235   cur_ix = LS;
    236   vpc &= ~(FR400_FETCH_SIZE - 1);
    237   cache = CPU_INSN_CACHE (current_cpu);
    238 
    239   /* Request a load of the current address buffer, if necessary.  */
    240   if (frv_insn_fetch_buffer[cur_ix].address != vpc)
    241     {
    242       frv_insn_fetch_buffer[cur_ix].address = vpc;
    243       frv_insn_fetch_buffer[cur_ix].reqno = cache_reqno++;
    244       if (FRV_COUNT_CYCLES (current_cpu, 1))
	frv_cache_request_load (cache, frv_insn_fetch_buffer[cur_ix].reqno,
				frv_insn_fetch_buffer[cur_ix].address,
				UNIT_I0 + cur_ix);
    248     }
    249 
    250   /* Wait for the current address buffer to be loaded, if necessary.  */
    251   if (FRV_COUNT_CYCLES (current_cpu, 1))
    252     {
    253       FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
    254       int wait;
    255 
    256       /* Account for any branch penalty.  */
    257       if (ps->branch_penalty > 0 && ! ps->past_first_p)
	{
	  frv_model_advance_cycles (current_cpu, ps->branch_penalty);
	  frv_model_trace_wait_cycles (current_cpu, ps->branch_penalty,
				       "Branch penalty:");
	  ps->branch_penalty = 0;
	}
    264 
    265       /* Account for insn fetch latency.  */
    266       wait = 0;
    267       while (frv_insn_fetch_buffer[cur_ix].reqno != NO_REQNO)
	{
	  frv_model_advance_cycles (current_cpu, 1);
	  ++wait;
	}
    272       frv_model_trace_wait_cycles (current_cpu, wait, "Insn fetch:");
    273       return;
    274     }
    275 
    276   /* Otherwise just load the insns directly from the cache.
    277    */
    278   if (frv_insn_fetch_buffer[cur_ix].reqno != NO_REQNO)
    279     {
    280       frv_cache_read (cache, cur_ix, vpc);
    281       frv_insn_fetch_buffer[cur_ix].reqno = NO_REQNO;
    282     }
    283 }
    284 #endif /* experimental */
    285 
    286 /* FR500 has dual prefetch.  */
/* Simulate dual insn prefetch for VPC: keep the current fetch buffer and
   the next sequential one (VPC + FETCH_SIZE) loading in parallel.  When
   counting cycles, charge any branch penalty and stall until the current
   buffer's load completes; otherwise satisfy the fetches directly from
   the insn cache.  FETCH_SIZE must be a power of two.  */
    287 static void
    288 simulate_dual_insn_prefetch (SIM_CPU *current_cpu, IADDR vpc, int fetch_size)
    289 {
    290   int i;
    291   int cur_ix, pre_ix;
    292   SI pre_address;
    293   FRV_CACHE *cache;
    294 
    295   /* See if the pc is within the addresses specified by either of the
    296      fetch buffers.  If so, that will be the current buffer. Otherwise,
    297      arbitrarily select the LD buffer as the current one since it gets
    298      priority in the case of interfering load requests.  */
    299   cur_ix = LD;
    300   vpc &= ~(fetch_size - 1);
    301   for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    302     {
    303       if (frv_insn_fetch_buffer[i].address == vpc)
	{
	  cur_ix = i;
	  break;
	}
    308     }
    309   cache = CPU_INSN_CACHE (current_cpu);
    310 
    311   /* Request a load of the current address buffer, if necessary.  */
    312   if (frv_insn_fetch_buffer[cur_ix].address != vpc)
    313     {
    314       frv_insn_fetch_buffer[cur_ix].address = vpc;
    315       frv_insn_fetch_buffer[cur_ix].reqno = cache_reqno++;
    316       if (FRV_COUNT_CYCLES (current_cpu, 1))
	frv_cache_request_load (cache, frv_insn_fetch_buffer[cur_ix].reqno,
				frv_insn_fetch_buffer[cur_ix].address,
				UNIT_I0 + cur_ix);
    320     }
    321 
    322   /* If the prefetch buffer does not represent the next sequential address, then
    323      request a load of the next sequential address.  */
    324   pre_ix = (cur_ix + 1) % FRV_CACHE_PIPELINES;
    325   pre_address = vpc + fetch_size;
    326   if (frv_insn_fetch_buffer[pre_ix].address != pre_address)
    327     {
    328       frv_insn_fetch_buffer[pre_ix].address = pre_address;
    329       frv_insn_fetch_buffer[pre_ix].reqno = cache_reqno++;
    330       if (FRV_COUNT_CYCLES (current_cpu, 1))
	frv_cache_request_load (cache, frv_insn_fetch_buffer[pre_ix].reqno,
				frv_insn_fetch_buffer[pre_ix].address,
				UNIT_I0 + pre_ix);
    334     }
    335 
    336   /* If counting cycles, account for any branch penalty and/or insn fetch
    337      latency here.  */
    338   if (FRV_COUNT_CYCLES (current_cpu, 1))
    339     {
    340       FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
    341       int wait;
    342 
    343       /* Account for any branch penalty.  */
    344       if (ps->branch_penalty > 0 && ! ps->past_first_p)
	{
	  frv_model_advance_cycles (current_cpu, ps->branch_penalty);
	  frv_model_trace_wait_cycles (current_cpu, ps->branch_penalty,
				       "Branch penalty:");
	  ps->branch_penalty = 0;
	}
    351 
    352       /* Account for insn fetch latency.  */
    353       wait = 0;
    354       while (frv_insn_fetch_buffer[cur_ix].reqno != NO_REQNO)
	{
	  frv_model_advance_cycles (current_cpu, 1);
	  ++wait;
	}
    359       frv_model_trace_wait_cycles (current_cpu, wait, "Insn fetch:");
    360       return;
    361     }
    362 
    363   /* Otherwise just load the insns directly from the cache.
    364    */
    365   if (frv_insn_fetch_buffer[cur_ix].reqno != NO_REQNO)
    366     {
    367       frv_cache_read (cache, cur_ix, vpc);
    368       frv_insn_fetch_buffer[cur_ix].reqno = NO_REQNO;
    369     }
    370   if (frv_insn_fetch_buffer[pre_ix].reqno != NO_REQNO)
    371     {
    372       frv_cache_read (cache, pre_ix, pre_address);
    373       frv_insn_fetch_buffer[pre_ix].reqno = NO_REQNO;
    374     }
    375 }
    376 
/* Top-level insn prefetch hook called once per insn before the scache
   lookup.  A no-op unless the insn cache is enabled (HSR0.ICE) or cycles
   are being counted.  Fetch width is per-machine: 8 bytes on fr400/fr450,
   16 bytes on the fr500-class machines.  */
    377 static void
    378 @cpu@_simulate_insn_prefetch (SIM_CPU *current_cpu, IADDR vpc)
    379 {
    380   SI hsr0;
    381   SIM_DESC sd;
    382 
    383   /* Nothing to do if not counting cycles and the cache is not enabled.  */
    384   hsr0 = GET_HSR0 ();
    385   if (! GET_HSR0_ICE (hsr0) && ! FRV_COUNT_CYCLES (current_cpu, 1))
    386     return;
    387 
    388   /* Different machines handle prefetch differently.  */
    389   sd = CPU_STATE (current_cpu);
    390   switch (STATE_ARCHITECTURE (sd)->mach)
    391     {
    392     case bfd_mach_fr400:
    393     case bfd_mach_fr450:
    394       simulate_dual_insn_prefetch (current_cpu, vpc, 8);
    395       break;
    396     case bfd_mach_frvtomcat:
    397     case bfd_mach_fr500:
    398     case bfd_mach_fr550:
    399     case bfd_mach_frv:
    400       simulate_dual_insn_prefetch (current_cpu, vpc, 16);
    401       break;
    402     default:
    403       break;
    404     }
    405 }
    406 
/* Original PROFILE_MODEL_P setting, saved at init and consulted by the
   exec loop when the timer temporarily forces model profiling on.  */
    407 int frv_save_profile_model_p;
    408 EOF
    409 
    410 ;;
    411 
# Emit per-run initialization code (pasted into engine init).
    412 xinit)
    413 
    414 cat <<EOF
    415 #line $LINENO "$0"
    416 /*xxxinit*/
    417   /* If the timer is enabled, then we will enable model profiling during
    418      execution.  This is because the timer needs accurate cycles counts to
    419      work properly.  Save the original setting of model profiling.  */
    420   if (frv_interrupt_state.timer.enabled)
    421     frv_save_profile_model_p = PROFILE_MODEL_P (current_cpu);
    422 EOF
    423 
    424 ;;
    425 
    426 xextract-simple | xextract-scache)
    427 
    428 # Inputs:  current_cpu, vpc, sc, FAST_P
    429 # Outputs: sc filled in
    430 # SET_LAST_INSN_P(last_p) called to indicate whether insn is last one
    431 
    432 cat <<EOF
    433 #line $LINENO "$0"
    434 {
    435   CGEN_INSN_INT insn = frvbf_read_imem_USI (current_cpu, vpc);
    436   extract (current_cpu, vpc, insn, SEM_ARGBUF (sc), FAST_P);
  /* Bit 31 of the insn word marks the last insn of a VLIW packet.  */
    437   SET_LAST_INSN_P ((insn & 0x80000000) != 0);
    438 }
    439 EOF
    440 
    441 ;;
    442 
# Emit the main execution step: assemble and execute one VLIW packet.
    443 xfull-exec-* | xfast-exec-*)
    444 
    445 # Inputs: current_cpu, vpc, FAST_P
    446 # Outputs:
    447 #   vpc contains the address of the next insn to execute
    448 #   pc of current_cpu must be up to date (=vpc) upon exit
    449 #   CPU_INSN_COUNT (current_cpu) must be updated by number of insns executed
    450 #
    451 # Unlike the non-parallel case, this version is responsible for doing the
    452 # scache lookup.
    453 
    454 cat <<EOF
    455 #line $LINENO "$0"
    456 {
    457   FRV_VLIW *vliw;
    458   int first_insn_p = 1;
    459   int last_insn_p = 0;
    460   int ninsns;
    461   CGEN_ATTR_VALUE_ENUM_TYPE slot;
    462 
    463   /* If the timer is enabled, then enable model profiling.  This is because
    464      the timer needs accurate cycles counts to work properly.  */
    465   if (frv_interrupt_state.timer.enabled && ! frv_save_profile_model_p)
    466     sim_profile_set_option (current_state, "-model", PROFILE_MODEL_IDX, "1");
    467 
    468   /* Init parallel-write queue and vliw.  */
    469   @cpu@_parallel_write_init (current_cpu);
    470   vliw = CPU_VLIW (current_cpu);
    471   frv_vliw_reset (vliw, STATE_ARCHITECTURE (CPU_STATE (current_cpu))->mach,
    472                   CPU_ELF_FLAGS (current_cpu));
    473   frv_current_fm_slot = UNIT_NIL;
    474 
  /* Execute the insns of one VLIW packet; the last-insn flag recorded at
     extract time terminates the packet.  */
    475   for (ninsns = 0; ! last_insn_p && ninsns < FRV_VLIW_SIZE; ++ninsns)
    476     {
    477       SCACHE *sc;
    478       const CGEN_INSN *insn;
    479       int error;
    480       /* Go through the motions of finding the insns in the cache.  */
    481       @cpu@_simulate_insn_prefetch (current_cpu, vpc);
    482 
    483       sc = @cpu@_scache_lookup (current_cpu, vpc, scache, hash_mask, FAST_P);
    484       sc->first_insn_p = first_insn_p;
    485       last_insn_p = sc->last_insn_p;
    486 
    487       /* Add the insn to the vliw and set up the interrupt state.  */
    488       insn = sc->argbuf.idesc->idata;
    489       error = frv_vliw_add_insn (vliw, insn);
    490       if (! error)
    491         frv_vliw_setup_insn (current_cpu, insn);
    492       frv_detect_insn_access_interrupts (current_cpu, sc);
    493       slot = (*vliw->current_vliw)[vliw->next_slot - 1];
    494       if (slot >= UNIT_FM0 && slot <= UNIT_FM3)
    495         frv_current_fm_slot = slot;
    496 
    497       vpc = execute (current_cpu, sc, FAST_P);
    498 
    499       SET_H_PC (vpc); /* needed for interrupt handling */
    500       first_insn_p = 0;
    501     }
    502 
    503   /* If the timer is enabled, and model profiling was not originally enabled,
    504      then turn it off again.  This is the only place we can currently gain
    505      control to do this.  */
    506   if (frv_interrupt_state.timer.enabled && ! frv_save_profile_model_p)
    507     sim_profile_set_option (current_state, "-model", PROFILE_MODEL_IDX, "0");
    508 
    509   /* Check for interrupts.  Also handles writeback if necessary.  */
    510   frv_process_interrupts (current_cpu);
    511 
    512   CPU_INSN_COUNT (current_cpu) += ninsns;
    513 }
    514 EOF
    515 
    516 ;;
    517 
    518 *)
    519   echo "Invalid argument to mainloop.in: $1" >&2
    520   exit 1
    521   ;;
    522 
    523 esac
    524