/* Speculation tracking and mitigation (e.g. CVE 2017-5753) for AArch64.
2 1.1 mrg Copyright (C) 2018-2019 Free Software Foundation, Inc.
3 1.1 mrg Contributed by ARM Ltd.
4 1.1 mrg
5 1.1 mrg This file is part of GCC.
6 1.1 mrg
7 1.1 mrg GCC is free software; you can redistribute it and/or modify it
8 1.1 mrg under the terms of the GNU General Public License as published by
9 1.1 mrg the Free Software Foundation; either version 3, or (at your option)
10 1.1 mrg any later version.
11 1.1 mrg
12 1.1 mrg GCC is distributed in the hope that it will be useful, but
13 1.1 mrg WITHOUT ANY WARRANTY; without even the implied warranty of
14 1.1 mrg MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 1.1 mrg General Public License for more details.
16 1.1 mrg
17 1.1 mrg You should have received a copy of the GNU General Public License
18 1.1 mrg along with GCC; see the file COPYING3. If not see
19 1.1 mrg <http://www.gnu.org/licenses/>. */
20 1.1 mrg
21 1.1 mrg #include "config.h"
22 1.1 mrg #include "system.h"
23 1.1 mrg #include "coretypes.h"
24 1.1 mrg #include "target.h"
25 1.1 mrg #include "rtl.h"
26 1.1 mrg #include "tree-pass.h"
27 1.1 mrg #include "profile-count.h"
28 1.1 mrg #include "backend.h"
29 1.1 mrg #include "cfgbuild.h"
30 1.1 mrg #include "print-rtl.h"
31 1.1 mrg #include "cfgrtl.h"
32 1.1 mrg #include "function.h"
33 1.1 mrg #include "basic-block.h"
34 1.1 mrg #include "memmodel.h"
35 1.1 mrg #include "emit-rtl.h"
36 1.1 mrg #include "insn-attr.h"
37 1.1 mrg #include "df.h"
38 1.1 mrg #include "tm_p.h"
39 1.1 mrg #include "insn-config.h"
40 1.1 mrg #include "recog.h"
41 1.1 mrg
42 1.1 mrg /* This pass scans the RTL just before the final branch
43 1.1 mrg re-organisation pass. The aim is to identify all places where
44 1.1 mrg there is conditional control flow and to insert code that tracks
45 1.1 mrg any speculative execution of a conditional branch.
46 1.1 mrg
47 1.1 mrg To do this we reserve a call-clobbered register (so that it can be
48 1.1 mrg initialized very early in the function prologue) that can then be
49 1.1 mrg updated each time there is a conditional branch. At each such
50 1.1 mrg branch we then generate a code sequence that uses conditional
51 1.1 mrg select operations that are not subject to speculation themselves
52 1.1 mrg (we ignore for the moment situations where that might not always be
53 1.1 mrg strictly true). For example, a branch sequence such as:
54 1.1 mrg
55 1.1 mrg B.EQ <dst>
56 1.1 mrg ...
57 1.1 mrg <dst>:
58 1.1 mrg
59 1.1 mrg is transformed to:
60 1.1 mrg
61 1.1 mrg B.EQ <dst>
62 1.1 mrg CSEL tracker, tracker, XZr, ne
63 1.1 mrg ...
64 1.1 mrg <dst>:
65 1.1 mrg CSEL tracker, tracker, XZr, eq
66 1.1 mrg
67 1.1 mrg Since we start with the tracker initialized to all bits one, if at any
68 1.1 mrg time the predicted control flow diverges from the architectural program
69 1.1 mrg behavior, then the tracker will become zero (but not otherwise).
70 1.1 mrg
71 1.1 mrg The tracker value can be used at any time at which a value needs
72 1.1 mrg guarding against incorrect speculation. This can be done in
73 1.1 mrg several ways, but they all amount to the same thing. For an
74 1.1 mrg untrusted address, or an untrusted offset to a trusted address, we
75 1.1 mrg can simply mask the address with the tracker with the untrusted
76 1.1 mrg value. If the CPU is not speculating, or speculating correctly,
77 1.1 mrg then the value will remain unchanged, otherwise it will be clamped
78 1.1 mrg to zero. For more complex scenarios we can compare the tracker
79 1.1 mrg against zero and use the flags to form a new selection with an
80 1.1 mrg alternate safe value.
81 1.1 mrg
82 1.1 mrg On implementations where the data processing instructions may
83 1.1 mrg themselves produce speculative values, the architecture requires
84 1.1 mrg that a CSDB instruction will resolve such data speculation, so each
85 1.1 mrg time we use the tracker for protecting a vulnerable value we also
86 1.1 mrg emit a CSDB: we do not need to do that each time the tracker itself
87 1.1 mrg is updated.
88 1.1 mrg
89 1.1 mrg At function boundaries, we need to communicate the speculation
90 1.1 mrg tracking state with the caller or the callee. This is tricky
91 1.1 mrg because there is no register available for such a purpose without
92 1.1 mrg creating a new ABI. We deal with this by relying on the principle
93 1.1 mrg that in all real programs the stack pointer, SP will never be NULL
94 1.1 mrg at a function boundary; we can thus encode the speculation state in
95 1.1 mrg SP by clearing SP if the speculation tracker itself is NULL. After
96 1.1 mrg the call we recover the tracking state back from SP into the
   tracker register.  The result is that a function call sequence is
98 1.1 mrg transformed to
99 1.1 mrg
100 1.1 mrg MOV tmp, SP
101 1.1 mrg AND tmp, tmp, tracker
102 1.1 mrg MOV SP, tmp
103 1.1 mrg BL <callee>
104 1.1 mrg CMP SP, #0
105 1.1 mrg CSETM tracker, ne
106 1.1 mrg
107 1.1 mrg The additional MOV instructions in the pre-call sequence are needed
108 1.1 mrg because SP cannot be used directly with the AND instruction.
109 1.1 mrg
110 1.1 mrg The code inside a function body uses the post-call sequence in the
111 1.1 mrg prologue to establish the tracker and the pre-call sequence in the
112 1.1 mrg epilogue to re-encode the state for the return.
113 1.1 mrg
114 1.1 mrg The code sequences have the nice property that if called from, or
115 1.1 mrg calling a function that does not track speculation then the stack pointer
116 1.1 mrg will always be non-NULL and hence the tracker will be initialized to all
117 1.1 mrg bits one as we need: we lose the ability to fully track speculation in that
118 1.1 mrg case, but we are still architecturally safe.
119 1.1 mrg
120 1.1 mrg Tracking speculation in this way is quite expensive, both in code
121 1.1 mrg size and execution time. We employ a number of tricks to try to
122 1.1 mrg limit this:
123 1.1 mrg
124 1.1 mrg 1) Simple leaf functions with no conditional branches (or use of
125 1.1 mrg the tracker) do not need to establish a new tracker: they simply
126 1.1 mrg carry the tracking state through SP for the duration of the call.
127 1.1 mrg The same is also true for leaf functions that end in a tail-call.
128 1.1 mrg
129 1.1 mrg 2) Back-to-back function calls in a single basic block also do not
130 1.1 mrg need to re-establish the tracker between the calls. Again, we can
131 1.1 mrg carry the tracking state in SP for this period of time unless the
132 1.1 mrg tracker value is needed at that point in time.
133 1.1 mrg
134 1.1 mrg We run the pass just before the final branch reorganization pass so
135 1.1 mrg that we can handle most of the conditional branch cases using the
136 1.1 mrg standard edge insertion code. The reorg pass will hopefully clean
137 1.1 mrg things up for afterwards so that the results aren't too
138 1.1 mrg horrible. */
139 1.1 mrg
140 1.1 mrg /* Generate a code sequence to clobber SP if speculating incorreclty. */
141 1.1 mrg static rtx_insn *
142 1.1 mrg aarch64_speculation_clobber_sp ()
143 1.1 mrg {
144 1.1 mrg rtx sp = gen_rtx_REG (DImode, SP_REGNUM);
145 1.1 mrg rtx tracker = gen_rtx_REG (DImode, SPECULATION_TRACKER_REGNUM);
146 1.1 mrg rtx scratch = gen_rtx_REG (DImode, SPECULATION_SCRATCH_REGNUM);
147 1.1 mrg
148 1.1 mrg start_sequence ();
149 1.1 mrg emit_insn (gen_rtx_SET (scratch, sp));
150 1.1 mrg emit_insn (gen_anddi3 (scratch, scratch, tracker));
151 1.1 mrg emit_insn (gen_rtx_SET (sp, scratch));
152 1.1 mrg rtx_insn *seq = get_insns ();
153 1.1 mrg end_sequence ();
154 1.1 mrg return seq;
155 1.1 mrg }
156 1.1 mrg
157 1.1 mrg /* Generate a code sequence to establish the tracker variable from the
158 1.1 mrg contents of SP. */
159 1.1 mrg static rtx_insn *
160 1.1 mrg aarch64_speculation_establish_tracker ()
161 1.1 mrg {
162 1.1 mrg rtx sp = gen_rtx_REG (DImode, SP_REGNUM);
163 1.1 mrg rtx tracker = gen_rtx_REG (DImode, SPECULATION_TRACKER_REGNUM);
164 1.1 mrg start_sequence ();
165 1.1 mrg rtx cc = aarch64_gen_compare_reg (EQ, sp, const0_rtx);
166 1.1 mrg emit_insn (gen_cstoredi_neg (tracker,
167 1.1 mrg gen_rtx_NE (CCmode, cc, const0_rtx), cc));
168 1.1 mrg rtx_insn *seq = get_insns ();
169 1.1 mrg end_sequence ();
170 1.1 mrg return seq;
171 1.1 mrg }
172 1.1 mrg
/* Main speculation tracking pass.  Works in two phases over the CFG:
   first insert the conditional-select tracker updates on the edges of
   every conditional branch, then insert the SP<->tracker transfers
   around calls, returns and non-local-goto receivers.  Returns 0
   (no additional TODO flags).  */
unsigned int
aarch64_do_track_speculation ()
{
  basic_block bb;
  /* Set once we know this function needs any mitigation code at all.  */
  bool needs_tracking = false;
  /* Set when a return/tail-call fixup was deferred because, at the
     point we saw it, we did not yet know whether tracking would be
     needed; forces the fix-up pass at the end.  */
  bool need_second_pass = false;
  rtx_insn *insn;
  /* Number of deferred fixups; must be driven back to zero by the
     second pass (checked by gcc_assert below).  */
  int fixups_pending = 0;

  /* Phase 1: handle conditional branches, returns and non-local
     gotos, examining only the final real insn of each block.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      insn = BB_END (bb);

      if (dump_file)
	fprintf (dump_file, "Basic block %d:\n", bb->index);

      /* Skip trailing notes to find the last real insn.  */
      while (insn != BB_HEAD (bb)
	     && NOTE_P (insn))
	insn = PREV_INSN (insn);

      if (control_flow_insn_p (insn))
	{
	  if (any_condjump_p (insn))
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "  condjump\n");
		  dump_insn_slim (dump_file, insn);
		}

	      rtx src = SET_SRC (pc_set (insn));

	      /* Check for an inverted jump, where the fall-through edge
		 appears first.  */
	      bool inverted = GET_CODE (XEXP (src, 2)) != PC;
	      /* The other edge must be the PC (we assume that we don't
		 have conditional return instructions).  */
	      gcc_assert (GET_CODE (XEXP (src, 1 + !inverted)) == PC);

	      /* The branch condition must be a CC-register compare
		 against zero; anything else is unexpected here.  */
	      rtx cond = copy_rtx (XEXP (src, 0));
	      gcc_assert (COMPARISON_P (cond)
			  && REG_P (XEXP (cond, 0))
			  && REGNO (XEXP (cond, 0)) == CC_REGNUM
			  && XEXP (cond, 1) == const0_rtx);
	      enum rtx_code inv_cond_code
		= reversed_comparison_code (cond, insn);
	      /* We should be able to reverse all conditions.  */
	      gcc_assert (inv_cond_code != UNKNOWN);
	      rtx inv_cond = gen_rtx_fmt_ee (inv_cond_code, GET_MODE (cond),
					     copy_rtx (XEXP (cond, 0)),
					     copy_rtx (XEXP (cond, 1)));
	      if (inverted)
		std::swap (cond, inv_cond);

	      /* Each successor edge gets a CSEL that zeroes the
		 tracker if that edge was taken speculatively against
		 the architectural condition.  */
	      insert_insn_on_edge (gen_speculation_tracker (cond),
				   BRANCH_EDGE (bb));
	      insert_insn_on_edge (gen_speculation_tracker (inv_cond),
				   FALLTHRU_EDGE (bb));
	      needs_tracking = true;
	    }
	  else if (GET_CODE (PATTERN (insn)) == RETURN)
	    {
	      /* If we already know we'll need a second pass, don't put
		 out the return sequence now, or we might end up with
		 two copies.  Instead, we'll do all return statements
		 during the second pass.  However, if this is the
		 first return insn we've found and we already
		 know that we'll need to emit the code, we can save a
		 second pass by emitting the code now.  */
	      if (needs_tracking && ! need_second_pass)
		{
		  rtx_insn *seq = aarch64_speculation_clobber_sp ();
		  emit_insn_before (seq, insn);
		}
	      else
		{
		  fixups_pending++;
		  need_second_pass = true;
		}
	    }
	  else if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	    {
	      /* A non-local goto unconditionally leaves the function;
	         encode the tracking state into SP before it.  */
	      rtx_insn *seq = aarch64_speculation_clobber_sp ();
	      emit_insn_before (seq, insn);
	      needs_tracking = true;
	    }
	}
      else
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "  other\n");
	      dump_insn_slim (dump_file, insn);
	    }
	}
    }

  /* Phase 2: handle calls and non-local-goto landing pads, which
     transfer the tracking state through SP.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *end = BB_END (bb);
      /* Most recent call in this block still awaiting its post-call
	 tracker-reestablishment sequence (NULL if none).  */
      rtx_insn *call_insn = NULL;

      if (bb->flags & BB_NON_LOCAL_GOTO_TARGET)
	{
	  rtx_insn *label = NULL;
	  /* For non-local goto targets we have to recover the
	     speculation state from SP.  Find the last code label at
	     the head of the block and place the fixup sequence after
	     that.  */
	  for (insn = BB_HEAD (bb); insn != end; insn = NEXT_INSN (insn))
	    {
	      if (LABEL_P (insn))
		label = insn;
	      /* Never put anything before the basic block note.  */
	      if (NOTE_INSN_BASIC_BLOCK_P (insn))
		label = insn;
	      if (INSN_P (insn))
		break;
	    }

	  gcc_assert (label);
	  emit_insn_after (aarch64_speculation_establish_tracker (), label);
	}

      /* Scan the insns looking for calls.  We need to pass the
	 speculation tracking state encoded in to SP.  After a call we
	 restore the speculation tracking into the tracker register.
	 To avoid unnecessary transfers we look for two or more calls
	 within a single basic block and eliminate, where possible,
	 any redundant operations.  */
      for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
	{
	  if (NONDEBUG_INSN_P (insn)
	      && recog_memoized (insn) >= 0
	      && (get_attr_speculation_barrier (insn)
		  == SPECULATION_BARRIER_TRUE))
	    {
	      if (call_insn)
		{
		  /* This instruction requires the speculation
		     tracking to be in the tracker register.  If there
		     was an earlier call in this block, we need to
		     copy the speculation tracking back there.  */
		  emit_insn_after (aarch64_speculation_establish_tracker (),
				   call_insn);
		  call_insn = NULL;
		}

	      needs_tracking = true;
	    }

	  if (CALL_P (insn))
	    {
	      bool tailcall
		= (SIBLING_CALL_P (insn)
		   || find_reg_note (insn, REG_NORETURN, NULL_RTX));

	      /* Tailcalls are like returns, we can eliminate the
		 transfer between the tracker register and SP if we
		 know that this function does not itself need
		 tracking.  */
	      if (tailcall && (need_second_pass || !needs_tracking))
		{
		  /* Don't clear call_insn if it is set - needs_tracking
		     will be true in that case and so we will end
		     up putting out mitigation sequences.  */
		  fixups_pending++;
		  need_second_pass = true;
		  break;
		}

	      needs_tracking = true;

	      /* We always need a transfer before the first call in a BB.  */
	      if (!call_insn)
		emit_insn_before (aarch64_speculation_clobber_sp (), insn);

	      /* Tail-calls and no-return calls don't need any post-call
		 reestablishment of the tracker.  */
	      if (! tailcall)
		call_insn = insn;
	      else
		call_insn = NULL;
	    }

	  if (insn == end)
	    break;
	}

      /* A call at (or near) the end of the block still needs its
	 post-call tracker recovery sequence.  */
      if (call_insn)
	{
	  rtx_insn *seq = aarch64_speculation_establish_tracker ();

	  /* Handle debug insns at the end of the BB.  Put the extra
	     insns after them.  This ensures that we have consistent
	     behaviour for the placement of the extra insns between
	     debug and non-debug builds.  */
	  for (insn = call_insn;
	       insn != end && DEBUG_INSN_P (NEXT_INSN (insn));
	       insn = NEXT_INSN (insn))
	    ;

	  if (insn == end)
	    {
	      edge e = find_fallthru_edge (bb->succs);
	      /* We need to be very careful about some calls that
		 appear at the end of a basic block.  If the call
		 involves exceptions, then the compiler may depend on
		 this being the last instruction in the block.  The
		 easiest way to handle this is to commit the new
		 instructions on the fall-through edge and to let
		 commit_edge_insertions clean things up for us.

		 Sometimes, eg with OMP, there may not even be an
		 outgoing edge after the call.  In that case, there's
		 not much we can do, presumably the compiler has
		 decided that the call can never return in this
		 context.  */
	      if (e)
		{
		  /* We need to set the location lists explicitly in
		     this case.  */
		  if (! INSN_P (seq))
		    {
		      /* Wrap a bare pattern into a real insn list so
			 that INSN_LOCATION can be applied below.  */
		      start_sequence ();
		      emit_insn (seq);
		      seq = get_insns ();
		      end_sequence ();
		    }

		  for (rtx_insn *list = seq; list; list = NEXT_INSN (list))
		    INSN_LOCATION (list) = INSN_LOCATION (call_insn);

		  insert_insn_on_edge (seq, e);
		}
	    }
	  else
	    emit_insn_after (seq, call_insn);
	}
    }

  if (needs_tracking)
    {
      if (need_second_pass)
	{
	  /* We found a return instruction before we found out whether
	     or not we need to emit the tracking code, but we now
	     know we do.  Run quickly over the basic blocks and
	     fix up the return insns.  */
	  FOR_EACH_BB_FN (bb, cfun)
	    {
	      insn = BB_END (bb);

	      /* Skip trailing notes, as in the first phase.  */
	      while (insn != BB_HEAD (bb)
		     && NOTE_P (insn))
		insn = PREV_INSN (insn);

	      if ((control_flow_insn_p (insn)
		   && GET_CODE (PATTERN (insn)) == RETURN)
		  || (CALL_P (insn)
		      && (SIBLING_CALL_P (insn)
			  || find_reg_note (insn, REG_NORETURN, NULL_RTX))))
		{
		  rtx_insn *seq = aarch64_speculation_clobber_sp ();
		  emit_insn_before (seq, insn);
		  fixups_pending--;
		}
	    }
	  /* Every deferred fixup must have been emitted.  */
	  gcc_assert (fixups_pending == 0);
	}

      /* Set up the initial value of the tracker, using the incoming SP.  */
      insert_insn_on_edge (aarch64_speculation_establish_tracker (),
			   single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      commit_edge_insertions ();
    }

  return 0;
}
453 1.1 mrg
namespace {

/* Static metadata for the speculation-tracking pass; field meanings
   are described in tree-pass.h.  */
const pass_data pass_data_aarch64_track_speculation =
{
  RTL_PASS,		/* type.  */
  "speculation",	/* name.  */
  OPTGROUP_NONE,	/* optinfo_flags.  */
  TV_MACH_DEP,		/* tv_id.  */
  0,			/* properties_required.  */
  0,			/* properties_provided.  */
  0,			/* properties_destroyed.  */
  0,			/* todo_flags_start.  */
  0			/* todo_flags_finish.  */
};

/* RTL pass wrapper that runs aarch64_do_track_speculation when the
   tracking mitigation is enabled.  */
class pass_track_speculation : public rtl_opt_pass
{
 public:
  pass_track_speculation(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_aarch64_track_speculation, ctxt)
  {}

  /* opt_pass methods: */
  /* Gate on the -mtrack-speculation option.  */
  virtual bool gate (function *)
    {
      return aarch64_track_speculation;
    }

  virtual unsigned int execute (function *)
    {
      return aarch64_do_track_speculation ();
    }
}; // class pass_track_speculation.
} // anon namespace.
488 1.1 mrg
489 1.1 mrg /* Create a new pass instance. */
490 1.1 mrg rtl_opt_pass *
491 1.1 mrg make_pass_track_speculation (gcc::context *ctxt)
492 1.1 mrg {
493 1.1 mrg return new pass_track_speculation (ctxt);
494 1.1 mrg }
495