;; Machine description for ARM processor synchronization primitives.
;; Copyright (C) 2010-2022 Free Software Foundation, Inc.
;; Written by Marcus Shawcroft (marcus.shawcroft@arm.com)
;; 64-bit atomics by Dave Gilbert (david.gilbert@linaro.org)
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful, but
;; WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;; General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; Map each access width to the condition under which the matching
;; exclusive load/store instructions and a memory barrier are available.
(define_mode_attr sync_predtab
  [(QI "TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER")
   (HI "TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER")
   (SI "TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER")
   (DI "TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN
	&& TARGET_HAVE_MEMORY_BARRIER")])

;; Binary operators handled by the atomic operation patterns below.
(define_code_iterator syncop [plus minus ior xor and])

;; Name fragment used to build the pattern name for each operator.
(define_code_attr sync_optab
  [(ior "or") (xor "xor") (and "and") (plus "add") (minus "sub")])

;; Instruction suffix selecting the access width (as in LDREXB/LDREXH/
;; LDREX/LDREXD).
(define_mode_attr sync_sfx
  [(QI "b") (HI "h") (SI "") (DI "d")])

(define_expand "memory_barrier"
  [(set (match_dup 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))]
  "TARGET_HAVE_MEMORY_BARRIER"
{
  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
  MEM_VOLATILE_P (operands[0]) = 1;
})

(define_insn "*memory_barrier"
  [(set (match_operand:BLK 0 "" "")
	(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))]
  "TARGET_HAVE_MEMORY_BARRIER"
  {
    if (TARGET_HAVE_DMB)
      return "dmb\\tish";

    if (TARGET_HAVE_DMB_MCR)
      return "mcr\\tp15, 0, r0, c7, c10, 5";

    gcc_unreachable ();
  }
  [(set_attr "length" "4")
   (set_attr "conds" "unconditional")
   (set_attr "predicable" "no")])

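;; For example, a C11 fence such as
;;   atomic_thread_fence (memory_order_seq_cst)
;; goes through "memory_barrier" and is expected to emit "dmb ish" on
;; cores with DMB, falling back to the CP15 MCR barrier on older cores
;; (illustrative).
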
(define_insn "atomic_load<mode>"
  [(set (match_operand:QHSI 0 "register_operand" "=r,r,l")
    (unspec_volatile:QHSI
      [(match_operand:QHSI 1 "arm_sync_memory_operand" "Q,Q,Q")
       (match_operand:SI 2 "const_int_operand" "n,Pf,n")]	;; model
      VUNSPEC_LDA))]
  "TARGET_HAVE_LDACQ"
  {
    if (aarch_mm_needs_acquire (operands[2]))
      {
	if (TARGET_THUMB1)
	  return "lda<sync_sfx>\t%0, %1";
	else
	  return "lda<sync_sfx>%?\t%0, %1";
      }
    else
      {
	if (TARGET_THUMB1)
	  return "ldr<sync_sfx>\t%0, %1";
	else
	  return "ldr<sync_sfx>%?\t%0, %1";
      }
  }
  [(set_attr "arch" "32,v8mb,any")
   (set_attr "predicable" "yes")])

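;; Illustrative mapping (hypothetical registers):
;;   __atomic_load_n (p, __ATOMIC_ACQUIRE)  ->  lda  r0, [r1]
;;   __atomic_load_n (p, __ATOMIC_RELAXED)  ->  ldr  r0, [r1]
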
(define_insn "atomic_store<mode>"
  [(set (match_operand:QHSI 0 "memory_operand" "=Q,Q,Q")
    (unspec_volatile:QHSI
      [(match_operand:QHSI 1 "general_operand" "r,r,l")
       (match_operand:SI 2 "const_int_operand" "n,Pf,n")]	;; model
      VUNSPEC_STL))]
  "TARGET_HAVE_LDACQ"
  {
    if (aarch_mm_needs_release (operands[2]))
      {
	if (TARGET_THUMB1)
	  return "stl<sync_sfx>\t%1, %0";
	else
	  return "stl<sync_sfx>%?\t%1, %0";
      }
    else
      {
	if (TARGET_THUMB1)
	  return "str<sync_sfx>\t%1, %0";
	else
	  return "str<sync_sfx>%?\t%1, %0";
      }
  }
  [(set_attr "arch" "32,v8mb,any")
   (set_attr "predicable" "yes")])

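;; Illustrative mapping (hypothetical registers):
;;   __atomic_store_n (p, x, __ATOMIC_RELEASE)  ->  stl  r1, [r0]
;;   __atomic_store_n (p, x, __ATOMIC_RELAXED)  ->  str  r1, [r0]
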
;; An LDRD instruction usable by the atomic_loaddi expander on LPAE targets.

(define_insn "arm_atomic_loaddi2_ldrd"
  [(set (match_operand:DI 0 "register_operand" "=r")
	(unspec_volatile:DI
	  [(match_operand:DI 1 "arm_sync_memory_operand" "Q")]
	    VUNSPEC_LDRD_ATOMIC))]
  "ARM_DOUBLEWORD_ALIGN && TARGET_HAVE_LPAE"
  "ldrd%?\t%0, %H0, %C1"
  [(set_attr "predicable" "yes")])

;; There are three ways to expand this depending on the architecture
;; features available.  As for barriers, a load needs a barrier after it
;; on all non-relaxed memory models, except when the load itself has
;; acquire semantics (on ARMv8-A).

(define_expand "atomic_loaddi"
  [(match_operand:DI 0 "s_register_operand")		;; val out
   (match_operand:DI 1 "mem_noofs_operand")		;; memory
   (match_operand:SI 2 "const_int_operand")]		;; model
  "(TARGET_HAVE_LDREXD || TARGET_HAVE_LPAE || TARGET_HAVE_LDACQEXD)
   && ARM_DOUBLEWORD_ALIGN"
{
  memmodel model = memmodel_from_int (INTVAL (operands[2]));

  /* For ARMv8-A we can use an LDAEXD to atomically load two 32-bit registers
     when acquire or stronger semantics are needed.  When the relaxed model is
     used this can be relaxed to a normal LDRD.  */
  if (TARGET_HAVE_LDACQEXD)
    {
      if (is_mm_relaxed (model))
	emit_insn (gen_arm_atomic_loaddi2_ldrd (operands[0], operands[1]));
      else
	emit_insn (gen_arm_load_acquire_exclusivedi (operands[0], operands[1]));

      DONE;
    }

  /* On LPAE targets LDRD and STRD accesses to 64-bit aligned
     locations are 64-bit single-copy atomic.  We still need barriers in the
     appropriate places to implement the ordering constraints.  */
  if (TARGET_HAVE_LPAE)
    emit_insn (gen_arm_atomic_loaddi2_ldrd (operands[0], operands[1]));
  else
    emit_insn (gen_arm_load_exclusivedi (operands[0], operands[1]));

  /* All non-relaxed models need a barrier after the load when load-acquire
     instructions are not available.  */
  if (!is_mm_relaxed (model))
    expand_mem_thread_fence (model);

  DONE;
})

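;; Illustrative expansions of a 64-bit __atomic_load_n (p, model)
;; (hypothetical registers):
;;   ARMv8-A (LDAEXD):  ldaexd r0, r1, [r2]       (non-relaxed)
;;                      ldrd   r0, r1, [r2]       (relaxed)
;;   LPAE:              ldrd   r0, r1, [r2]   + dmb ish if non-relaxed
;;   LDREXD only:       ldrexd r0, r1, [r2]   + dmb ish if non-relaxed
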
(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:SI 0 "s_register_operand")		;; bool out
   (match_operand:QHSD 1 "s_register_operand")		;; val out
   (match_operand:QHSD 2 "mem_noofs_operand")		;; memory
   (match_operand:QHSD 3 "general_operand")		;; expected
   (match_operand:QHSD 4 "s_register_operand")		;; desired
   (match_operand:SI 5 "const_int_operand")		;; is_weak
   (match_operand:SI 6 "const_int_operand")		;; mod_s
   (match_operand:SI 7 "const_int_operand")]		;; mod_f
  "<sync_predtab>"
{
  arm_expand_compare_and_swap (operands);
  DONE;
})

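;; Illustrative mapping: __atomic_compare_exchange_n (mem, &expected,
;; desired, weak, mod_s, mod_f) funnels through this expander;
;; arm_expand_compare_and_swap emits one of the
;; @atomic_compare_and_swap..._1 patterns below together with the
;; surrounding comparison and branch code.
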
;; Constraints of this pattern must be at least as strict as those of the
;; cbranchsi operations in thumb1.md and aim to be as permissive as
;; possible.
(define_insn_and_split "@atomic_compare_and_swap<CCSI:arch><NARROW:mode>_1"
  [(set (match_operand:CCSI 0 "cc_register_operand" "=&c,&l,&l")	;; bool out
	(unspec_volatile:CCSI [(const_int 0)] VUNSPEC_ATOMIC_CAS))
   (set (match_operand:SI 1 "s_register_operand" "=&r,&l,&l*h")	;; val out
	(zero_extend:SI
	  (match_operand:NARROW 2 "mem_noofs_operand" "+Ua,Ua,Ua")))	;; memory
   (set (match_dup 2)
	(unspec_volatile:NARROW
	  [(match_operand:SI 3 "arm_add_operand" "rIL,lILJ*h,*r")	;; expected
	   (match_operand:NARROW 4 "s_register_operand" "r,r,r")	;; desired
	   (match_operand:SI 5 "const_int_operand")		;; is_weak
	   (match_operand:SI 6 "const_int_operand")		;; mod_s
	   (match_operand:SI 7 "const_int_operand")]		;; mod_f
	  VUNSPEC_ATOMIC_CAS))
   (clobber (match_scratch:SI 8 "=&r,X,X"))]
  "<NARROW:sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_compare_and_swap (operands);
    DONE;
  }
  [(set_attr "arch" "32,v8mb,v8mb")])

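;; After reload this splits into an explicit LL/SC loop, roughly (byte
;; case, relaxed model, hypothetical registers and labels):
;;   .Lretry:
;;     ldrexb  r1, [r2]
;;     cmp     r1, r3          @ compare against expected
;;     bne     .Ldone
;;     strexb  r8, r4, [r2]    @ try to store desired
;;     cmp     r8, #0
;;     bne     .Lretry
;;   .Ldone:
;; Acquire/release models use ldaexb/stlexb or explicit barriers as
;; appropriate.
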
;; Predicate and constraint letters for the "expected" operand of a
;; word or doubleword compare-and-swap.
(define_mode_attr cas_cmp_operand
  [(SI "arm_add_operand") (DI "cmpdi_operand")])
(define_mode_attr cas_cmp_str
  [(SI "rIL") (DI "rDi")])

;; Constraints of this pattern must be at least as strict as those of the
;; cbranchsi operations in thumb1.md and aim to be as permissive as
;; possible.
(define_insn_and_split "@atomic_compare_and_swap<CCSI:arch><SIDI:mode>_1"
  [(set (match_operand:CCSI 0 "cc_register_operand" "=&c,&l,&l")	;; bool out
	(unspec_volatile:CCSI [(const_int 0)] VUNSPEC_ATOMIC_CAS))
   (set (match_operand:SIDI 1 "s_register_operand" "=&r,&l,&l*h")	;; val out
	(match_operand:SIDI 2 "mem_noofs_operand" "+Ua,Ua,Ua"))	;; memory
   (set (match_dup 2)
	(unspec_volatile:SIDI
	  [(match_operand:SIDI 3 "<SIDI:cas_cmp_operand>" "<SIDI:cas_cmp_str>,lILJ*h,*r") ;; expect
	   (match_operand:SIDI 4 "s_register_operand" "r,r,r")	;; desired
	   (match_operand:SI 5 "const_int_operand")		;; is_weak
	   (match_operand:SI 6 "const_int_operand")		;; mod_s
	   (match_operand:SI 7 "const_int_operand")]		;; mod_f
	  VUNSPEC_ATOMIC_CAS))
   (clobber (match_scratch:SI 8 "=&r,X,X"))]
  "<SIDI:sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_compare_and_swap (operands);
    DONE;
  }
  [(set_attr "arch" "32,v8mb,v8mb")])

(define_insn_and_split "atomic_exchange<mode>"
  [(set (match_operand:QHSD 0 "s_register_operand" "=&r,&r")	;; output
	(match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua"))	;; memory
   (set (match_dup 1)
	(unspec_volatile:QHSD
	  [(match_operand:QHSD 2 "s_register_operand" "r,r")	;; input
	   (match_operand:SI 3 "const_int_operand" "")]		;; model
	  VUNSPEC_ATOMIC_XCHG))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (SET, operands[0], NULL, operands[1],
			 operands[2], operands[3], operands[4]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb")])

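;; Illustrative split for __atomic_exchange_n (p, x, model) (SI case,
;; hypothetical registers):
;;   .Lretry:
;;     ldrex   r0, [r2]        @ old value to operand 0
;;     strex   r4, r3, [r2]    @ try to store the new value
;;     cmp     r4, #0
;;     bne     .Lretry
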
;; The following mode and code attributes are defined here because they are
;; specific to atomics and are not needed anywhere else.

(define_mode_attr atomic_op_operand
  [(QI "reg_or_int_operand")
   (HI "reg_or_int_operand")
   (SI "reg_or_int_operand")
   (DI "s_register_operand")])

(define_mode_attr atomic_op_str
  [(QI "rn") (HI "rn") (SI "rn") (DI "r")])

(define_code_attr thumb1_atomic_op_str
  [(ior "l,l") (xor "l,l") (and "l,l") (plus "lIJL,r") (minus "lPd,lPd")])

(define_code_attr thumb1_atomic_newop_str
  [(ior "&l,&l") (xor "&l,&l") (and "&l,&l") (plus "&l,&r") (minus "&l,&l")])

;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic operations in thumb1.md and aim to be as permissive as
;; possible.
(define_insn_and_split "atomic_<sync_optab><mode>"
  [(set (match_operand:QHSD 0 "mem_noofs_operand" "+Ua,Ua,Ua")
	(unspec_volatile:QHSD
	  [(syncop:QHSD (match_dup 0)
	     (match_operand:QHSD 1 "<atomic_op_operand>" "<atomic_op_str>,<thumb1_atomic_op_str>"))
	   (match_operand:SI 2 "const_int_operand")]		;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:QHSD 3 "=&r,<thumb1_atomic_newop_str>"))
   (clobber (match_scratch:SI 4 "=&r,&l,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (<CODE>, NULL, operands[3], operands[0],
			 operands[1], operands[2], operands[4]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb,v8mb")])

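;; Illustrative split for __atomic_fetch_add (p, x, model) when the
;; result is unused (SI case, hypothetical registers):
;;   .Lretry:
;;     ldrex   r3, [r0]
;;     add     r3, r3, r1
;;     strex   r4, r3, [r0]
;;     cmp     r4, #0
;;     bne     .Lretry
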
;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic NANDs in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_nand<mode>"
  [(set (match_operand:QHSD 0 "mem_noofs_operand" "+Ua,Ua")
	(unspec_volatile:QHSD
	  [(not:QHSD
	     (and:QHSD (match_dup 0)
	       (match_operand:QHSD 1 "<atomic_op_operand>" "<atomic_op_str>,l")))
	   (match_operand:SI 2 "const_int_operand")]		;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:QHSD 3 "=&r,&l"))
   (clobber (match_scratch:SI 4 "=&r,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (NOT, NULL, operands[3], operands[0],
			 operands[1], operands[2], operands[4]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb")])

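;; Note that, per the GCC/C11 convention, atomic NAND computes
;;   new = ~(old & value)
;; (not ~old & value); the split inverts the AND result inside the same
;; LL/SC loop shape shown above (illustrative).
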
;; Three alternatives are needed to represent constraints after split from
;; thumb1_addsi3: (i) the case where operand 1 and the destination can be
;; in different registers, (ii) the case where they are in the same low
;; register and (iii) the case where they are in the same register without
;; restriction on the register.  We slightly disparage the alternatives
;; that require copying the old value into the register for the new value
;; (see bind_old_new in arm_split_atomic_op).
(define_code_attr thumb1_atomic_fetch_op_str
  [(ior "l,l,l") (xor "l,l,l") (and "l,l,l") (plus "lL,?IJ,?r") (minus "lPd,lPd,lPd")])

(define_code_attr thumb1_atomic_fetch_newop_str
  [(ior "&l,&l,&l") (xor "&l,&l,&l") (and "&l,&l,&l") (plus "&l,&l,&r") (minus "&l,&l,&l")])

(define_code_attr thumb1_atomic_fetch_oldop_str
  [(ior "&r,&r,&r") (xor "&r,&r,&r") (and "&r,&r,&r") (plus "&l,&r,&r") (minus "&l,&l,&l")])

;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic operations in thumb1.md and aim to be as permissive as
;; possible.
(define_insn_and_split "atomic_fetch_<sync_optab><mode>"
  [(set (match_operand:QHSD 0 "s_register_operand" "=&r,<thumb1_atomic_fetch_oldop_str>")
	(match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua,Ua,Ua"))
   (set (match_dup 1)
	(unspec_volatile:QHSD
	  [(syncop:QHSD (match_dup 1)
	     (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,<thumb1_atomic_fetch_op_str>"))
	   (match_operand:SI 3 "const_int_operand")]		;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:QHSD 4 "=&r,<thumb1_atomic_fetch_newop_str>"))
   (clobber (match_scratch:SI 5 "=&r,&l,&l,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (<CODE>, operands[0], operands[4], operands[1],
			 operands[2], operands[3], operands[5]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb,v8mb,v8mb")])

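;; Illustrative: __atomic_fetch_add (p, x, model) matches this pattern
;; and returns the OLD value in operand 0; contrast with
;; atomic_<sync_optab>_fetch<mode> below, which returns the new value.
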
;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic NANDs in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_fetch_nand<mode>"
  [(set (match_operand:QHSD 0 "s_register_operand" "=&r,&r")
	(match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua"))
   (set (match_dup 1)
	(unspec_volatile:QHSD
	  [(not:QHSD
	     (and:QHSD (match_dup 1)
	       (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,l")))
	   (match_operand:SI 3 "const_int_operand")]		;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:QHSD 4 "=&r,&l"))
   (clobber (match_scratch:SI 5 "=&r,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (NOT, operands[0], operands[4], operands[1],
			 operands[2], operands[3], operands[5]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb")])

;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic operations in thumb1.md and aim to be as permissive as
;; possible.
(define_insn_and_split "atomic_<sync_optab>_fetch<mode>"
  [(set (match_operand:QHSD 0 "s_register_operand" "=&r,<thumb1_atomic_newop_str>")
	(syncop:QHSD
	  (match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua,Ua")
	  (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,<thumb1_atomic_op_str>")))
   (set (match_dup 1)
	(unspec_volatile:QHSD
	  [(match_dup 1) (match_dup 2)
	   (match_operand:SI 3 "const_int_operand")]		;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r,&l,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (<CODE>, NULL, operands[0], operands[1],
			 operands[2], operands[3], operands[4]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb,v8mb")])

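;; Illustrative split for __atomic_add_fetch (p, x, model) (SI case,
;; hypothetical registers); operand 0 receives the NEW value:
;;   .Lretry:
;;     ldrex   r0, [r1]
;;     add     r0, r0, r2
;;     strex   r4, r0, [r1]
;;     cmp     r4, #0
;;     bne     .Lretry
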
;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic NANDs in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_nand_fetch<mode>"
  [(set (match_operand:QHSD 0 "s_register_operand" "=&r,&l")
	(not:QHSD
	  (and:QHSD
	    (match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua")
	    (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,l"))))
   (set (match_dup 1)
	(unspec_volatile:QHSD
	  [(match_dup 1) (match_dup 2)
	   (match_operand:SI 3 "const_int_operand")]		;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (NOT, NULL, operands[0], operands[1],
			 operands[2], operands[3], operands[4]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb")])

(define_insn "arm_load_exclusive<mode>"
  [(set (match_operand:SI 0 "s_register_operand" "=r,r")
        (zero_extend:SI
	  (unspec_volatile:NARROW
	    [(match_operand:NARROW 1 "mem_noofs_operand" "Ua,Ua")]
	    VUNSPEC_LL)))]
  "TARGET_HAVE_LDREXBH"
  "@
   ldrex<sync_sfx>%?\t%0, %C1
   ldrex<sync_sfx>\t%0, %C1"
  [(set_attr "arch" "32,v8mb")
   (set_attr "predicable" "yes")])

(define_insn "arm_load_acquire_exclusive<mode>"
  [(set (match_operand:SI 0 "s_register_operand" "=r,r")
        (zero_extend:SI
	  (unspec_volatile:NARROW
	    [(match_operand:NARROW 1 "mem_noofs_operand" "Ua,Ua")]
	    VUNSPEC_LAX)))]
  "TARGET_HAVE_LDACQ"
  "@
   ldaex<sync_sfx>%?\t%0, %C1
   ldaex<sync_sfx>\t%0, %C1"
  [(set_attr "arch" "32,v8mb")
   (set_attr "predicable" "yes")])

(define_insn "arm_load_exclusivesi"
  [(set (match_operand:SI 0 "s_register_operand" "=r,r")
	(unspec_volatile:SI
	  [(match_operand:SI 1 "mem_noofs_operand" "Ua,Ua")]
	  VUNSPEC_LL))]
  "TARGET_HAVE_LDREX"
  "@
   ldrex%?\t%0, %C1
   ldrex\t%0, %C1"
  [(set_attr "arch" "32,v8mb")
   (set_attr "predicable" "yes")])

(define_insn "arm_load_acquire_exclusivesi"
  [(set (match_operand:SI 0 "s_register_operand" "=r,r")
	(unspec_volatile:SI
	  [(match_operand:SI 1 "mem_noofs_operand" "Ua,Ua")]
	  VUNSPEC_LAX))]
  "TARGET_HAVE_LDACQ"
  "@
   ldaex%?\t%0, %C1
   ldaex\t%0, %C1"
  [(set_attr "arch" "32,v8mb")
   (set_attr "predicable" "yes")])

(define_insn "arm_load_exclusivedi"
  [(set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec_volatile:DI
	  [(match_operand:DI 1 "mem_noofs_operand" "Ua")]
	  VUNSPEC_LL))]
  "TARGET_HAVE_LDREXD"
  "ldrexd%?\t%0, %H0, %C1"
  [(set_attr "predicable" "yes")])

(define_insn "arm_load_acquire_exclusivedi"
  [(set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec_volatile:DI
	  [(match_operand:DI 1 "mem_noofs_operand" "Ua")]
	  VUNSPEC_LAX))]
  "TARGET_HAVE_LDACQEXD && ARM_DOUBLEWORD_ALIGN"
  "ldaexd%?\t%0, %H0, %C1"
  [(set_attr "predicable" "yes")])

(define_insn "arm_store_exclusive<mode>"
  [(set (match_operand:SI 0 "s_register_operand" "=&r")
	(unspec_volatile:SI [(const_int 0)] VUNSPEC_SC))
   (set (match_operand:QHSD 1 "mem_noofs_operand" "=Ua")
	(unspec_volatile:QHSD
	  [(match_operand:QHSD 2 "s_register_operand" "r")]
	  VUNSPEC_SC))]
  "<sync_predtab>"
  {
    if (<MODE>mode == DImode)
      {
	/* The restriction on target registers in ARM mode is that the two
	   registers are consecutive and the first one is even; Thumb is
	   actually more flexible, but DI should give us this anyway.
	   Note that the first register always gets the
	   lowest word in memory.  */
	gcc_assert ((REGNO (operands[2]) & 1) == 0 || TARGET_THUMB2);
	return "strexd%?\t%0, %2, %H2, %C1";
      }
    if (TARGET_THUMB1)
      return "strex<sync_sfx>\t%0, %2, %C1";
    else
      return "strex<sync_sfx>%?\t%0, %2, %C1";
  }
  [(set_attr "predicable" "yes")])

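;; Illustrative: in ARM mode STREXD needs an even/odd register pair,
;; e.g. "strexd r2, r0, r1, [r3]" stores r0 (low word) and r1 (high
;; word); the gcc_assert above enforces the even first register.
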
(define_insn "arm_store_release_exclusivedi"
  [(set (match_operand:SI 0 "s_register_operand" "=&r")
	(unspec_volatile:SI [(const_int 0)] VUNSPEC_SLX))
   (set (match_operand:DI 1 "mem_noofs_operand" "=Ua")
	(unspec_volatile:DI
	  [(match_operand:DI 2 "s_register_operand" "r")]
	  VUNSPEC_SLX))]
  "TARGET_HAVE_LDACQEXD && ARM_DOUBLEWORD_ALIGN"
  {
    /* See comment in arm_store_exclusive<mode> above.  */
    gcc_assert ((REGNO (operands[2]) & 1) == 0 || TARGET_THUMB2);
    return "stlexd%?\t%0, %2, %H2, %C1";
  }
  [(set_attr "predicable" "yes")])

(define_insn "arm_store_release_exclusive<mode>"
  [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
	(unspec_volatile:SI [(const_int 0)] VUNSPEC_SLX))
   (set (match_operand:QHSI 1 "mem_noofs_operand" "=Ua,Ua")
	(unspec_volatile:QHSI
	  [(match_operand:QHSI 2 "s_register_operand" "r,r")]
	  VUNSPEC_SLX))]
  "TARGET_HAVE_LDACQ"
  "@
   stlex<sync_sfx>%?\t%0, %2, %C1
   stlex<sync_sfx>\t%0, %2, %C1"
  [(set_attr "arch" "32,v8mb")
   (set_attr "predicable" "yes")])
    570