// -*- C++ -*- header.
2
3 // Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24
25 /** @file atomic
26 * This is a Standard C++ Library header.
27 */
28
29 // Based on "C++ Atomic Types and Operations" by Hans Boehm and Lawrence Crowl.
30 // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html
31
32 #ifndef _GLIBCXX_ATOMIC
33 #define _GLIBCXX_ATOMIC 1
34
35 #pragma GCC system_header
36
37 #ifndef __GXX_EXPERIMENTAL_CXX0X__
38 # include <bits/c++0x_warning.h>
39 #endif
40
41 #include <bits/atomic_base.h>
42 #include <cstddef>
43
44 _GLIBCXX_BEGIN_NAMESPACE(std)
45
46 /**
47 * @addtogroup atomics
48 * @{
49 */
50
51 /// kill_dependency
52 template<typename _Tp>
53 inline _Tp
54 kill_dependency(_Tp __y)
55 {
56 _Tp ret(__y);
57 return ret;
58 }
59
60 inline memory_order
61 __calculate_memory_order(memory_order __m)
62 {
63 const bool __cond1 = __m == memory_order_release;
64 const bool __cond2 = __m == memory_order_acq_rel;
65 memory_order __mo1(__cond1 ? memory_order_relaxed : __m);
66 memory_order __mo2(__cond2 ? memory_order_acquire : __mo1);
67 return __mo2;
68 }
69
70 //
71 // Three nested namespaces for atomic implementation details.
72 //
73 // The nested namespace inlined into std:: is determined by the value
74 // of the _GLIBCXX_ATOMIC_PROPERTY macro and the resulting
75 // ATOMIC_*_LOCK_FREE macros. See file atomic_base.h.
76 //
77 // 0 == __atomic0 == Never lock-free
78 // 1 == __atomic1 == Best available, sometimes lock-free
79 // 2 == __atomic2 == Always lock-free
80 #include <bits/atomic_0.h>
81 #include <bits/atomic_2.h>
82
83 /// atomic
84 /// 29.4.3, Generic atomic type, primary class template.
  template<typename _Tp>
    struct atomic
    {
    private:
      _Tp _M_i;	// The stored value.

    public:
      atomic() = default;
      ~atomic() = default;
      // Atomics are neither copyable nor copy-assignable.
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct with an initial value.
      atomic(_Tp __i) : _M_i(__i) { }

      // Conversion to the value type; definition provided out of line.
      operator _Tp() const;

      // Assignment stores __i and returns __i itself (not a re-read of
      // the stored value).
      _Tp
      operator=(_Tp __i) { store(__i); return __i; }

      // The operations below are declared here; their definitions live
      // outside this class (see the implementation headers).
      bool
      is_lock_free() const volatile;

      void
      store(_Tp, memory_order = memory_order_seq_cst) volatile;

      _Tp
      load(memory_order = memory_order_seq_cst) const volatile;

      _Tp
      exchange(_Tp __i, memory_order = memory_order_seq_cst) volatile;

      // Compare-and-exchange, with separate success/failure orderings.
      bool
      compare_exchange_weak(_Tp&, _Tp, memory_order, memory_order) volatile;

      bool
      compare_exchange_strong(_Tp&, _Tp, memory_order, memory_order) volatile;

      // Single-order overloads.
      bool
      compare_exchange_weak(_Tp&, _Tp,
			    memory_order = memory_order_seq_cst) volatile;

      bool
      compare_exchange_strong(_Tp&, _Tp,
			      memory_order = memory_order_seq_cst) volatile;
    };
130
131
132 /// Partial specialization for pointer types.
  template<typename _Tp>
    struct atomic<_Tp*> : atomic_address
    {
      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial pointer value.
      atomic(_Tp* __v) : atomic_address(__v) { }

      // store/load/exchange forward to the type-erased atomic_address
      // base, casting results back to _Tp*.
      void
      store(_Tp* __v, memory_order __m = memory_order_seq_cst)
      { atomic_address::store(__v, __m); }

      _Tp*
      load(memory_order __m = memory_order_seq_cst) const
      { return static_cast<_Tp*>(atomic_address::load(__m)); }

      _Tp*
      exchange(_Tp* __v, memory_order __m = memory_order_seq_cst)
      { return static_cast<_Tp*>(atomic_address::exchange(__v, __m)); }

      // Compare-and-exchange; defined out of line after the class.
      bool
      compare_exchange_weak(_Tp*&, _Tp*, memory_order, memory_order);

      bool
      compare_exchange_strong(_Tp*&, _Tp*, memory_order, memory_order);

      bool
      compare_exchange_weak(_Tp*&, _Tp*, memory_order = memory_order_seq_cst);

      bool
      compare_exchange_strong(_Tp*&, _Tp*, memory_order = memory_order_seq_cst);

      // Pointer arithmetic in units of _Tp; the out-of-line definitions
      // scale the count by sizeof(_Tp).
      _Tp*
      fetch_add(ptrdiff_t, memory_order = memory_order_seq_cst);

      _Tp*
      fetch_sub(ptrdiff_t, memory_order = memory_order_seq_cst);

      // Conversion performs a load with the default (seq_cst) ordering.
      operator _Tp*() const
      { return load(); }

      _Tp*
      operator=(_Tp* __v)
      {
	store(__v);
	return __v;
      }

      // Post-forms return the old value; pre-forms return the fetch
      // result adjusted by one element, i.e. the new value.
      _Tp*
      operator++(int) { return fetch_add(1); }

      _Tp*
      operator--(int) { return fetch_sub(1); }

      _Tp*
      operator++() { return fetch_add(1) + 1; }

      _Tp*
      operator--() { return fetch_sub(1) - 1; }

      _Tp*
      operator+=(ptrdiff_t __d)
      { return fetch_add(__d) + __d; }

      _Tp*
      operator-=(ptrdiff_t __d)
      { return fetch_sub(__d) - __d; }
    };
203
204
205 /// Explicit specialization for void*
  template<>
    struct atomic<void*> : public atomic_address
    {
      // Named __integral_type only for uniformity with the integral
      // specializations below; the value type here is void*.
      typedef void* __integral_type;
      typedef atomic_address __base_type;	// Implementing base class.

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial value; forwarded to the base.
      atomic(__integral_type __i) : __base_type(__i) { }

      // Re-export the base's conversion-to-value and assignment operators.
      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
222
223 /// Explicit specialization for bool.
  template<>
    struct atomic<bool> : public atomic_bool
    {
      typedef bool __integral_type;	// Value type.
      typedef atomic_bool __base_type;	// Implementing base class.

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial value; forwarded to the base.
      atomic(__integral_type __i) : __base_type(__i) { }

      // Re-export the base's conversion-to-value and assignment operators.
      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
240
241 /// Explicit specialization for char.
  template<>
    struct atomic<char> : public atomic_char
    {
      typedef char __integral_type;	// Value type.
      typedef atomic_char __base_type;	// Implementing base class.

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial value; forwarded to the base.
      atomic(__integral_type __i) : __base_type(__i) { }

      // Re-export the base's conversion-to-value and assignment operators.
      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
258
259 /// Explicit specialization for signed char.
  template<>
    struct atomic<signed char> : public atomic_schar
    {
      typedef signed char __integral_type;	// Value type.
      typedef atomic_schar __base_type;		// Implementing base class.

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial value; forwarded to the base.
      atomic(__integral_type __i) : __base_type(__i) { }

      // Re-export the base's conversion-to-value and assignment operators.
      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
276
277 /// Explicit specialization for unsigned char.
  template<>
    struct atomic<unsigned char> : public atomic_uchar
    {
      typedef unsigned char __integral_type;	// Value type.
      typedef atomic_uchar __base_type;		// Implementing base class.

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial value; forwarded to the base.
      atomic(__integral_type __i) : __base_type(__i) { }

      // Re-export the base's conversion-to-value and assignment operators.
      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
294
295 /// Explicit specialization for short.
  template<>
    struct atomic<short> : public atomic_short
    {
      typedef short __integral_type;	// Value type.
      typedef atomic_short __base_type;	// Implementing base class.

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial value; forwarded to the base.
      atomic(__integral_type __i) : __base_type(__i) { }

      // Re-export the base's conversion-to-value and assignment operators.
      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
312
313 /// Explicit specialization for unsigned short.
  template<>
    struct atomic<unsigned short> : public atomic_ushort
    {
      typedef unsigned short __integral_type;	// Value type.
      typedef atomic_ushort __base_type;	// Implementing base class.

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial value; forwarded to the base.
      atomic(__integral_type __i) : __base_type(__i) { }

      // Re-export the base's conversion-to-value and assignment operators.
      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
330
331 /// Explicit specialization for int.
332 template<>
333 struct atomic<int> : atomic_int
334 {
335 typedef int __integral_type;
336 typedef atomic_int __base_type;
337
338 atomic() = default;
339 ~atomic() = default;
340 atomic(const atomic&) = delete;
341 atomic& operator=(const atomic&) volatile = delete;
342
343 atomic(__integral_type __i) : __base_type(__i) { }
344
345 using __base_type::operator __integral_type;
346 using __base_type::operator=;
347 };
348
349 /// Explicit specialization for unsigned int.
  template<>
    struct atomic<unsigned int> : public atomic_uint
    {
      typedef unsigned int __integral_type;	// Value type.
      typedef atomic_uint __base_type;		// Implementing base class.

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial value; forwarded to the base.
      atomic(__integral_type __i) : __base_type(__i) { }

      // Re-export the base's conversion-to-value and assignment operators.
      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
366
367 /// Explicit specialization for long.
  template<>
    struct atomic<long> : public atomic_long
    {
      typedef long __integral_type;	// Value type.
      typedef atomic_long __base_type;	// Implementing base class.

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial value; forwarded to the base.
      atomic(__integral_type __i) : __base_type(__i) { }

      // Re-export the base's conversion-to-value and assignment operators.
      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
384
385 /// Explicit specialization for unsigned long.
  template<>
    struct atomic<unsigned long> : public atomic_ulong
    {
      typedef unsigned long __integral_type;	// Value type.
      typedef atomic_ulong __base_type;		// Implementing base class.

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial value; forwarded to the base.
      atomic(__integral_type __i) : __base_type(__i) { }

      // Re-export the base's conversion-to-value and assignment operators.
      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
402
403 /// Explicit specialization for long long.
  template<>
    struct atomic<long long> : public atomic_llong
    {
      typedef long long __integral_type;	// Value type.
      typedef atomic_llong __base_type;		// Implementing base class.

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial value; forwarded to the base.
      atomic(__integral_type __i) : __base_type(__i) { }

      // Re-export the base's conversion-to-value and assignment operators.
      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
420
421 /// Explicit specialization for unsigned long long.
  template<>
    struct atomic<unsigned long long> : public atomic_ullong
    {
      typedef unsigned long long __integral_type;	// Value type.
      typedef atomic_ullong __base_type;		// Implementing base class.

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial value; forwarded to the base.
      atomic(__integral_type __i) : __base_type(__i) { }

      // Re-export the base's conversion-to-value and assignment operators.
      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
438
439 /// Explicit specialization for wchar_t.
  template<>
    struct atomic<wchar_t> : public atomic_wchar_t
    {
      typedef wchar_t __integral_type;		// Value type.
      typedef atomic_wchar_t __base_type;	// Implementing base class.

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial value; forwarded to the base.
      atomic(__integral_type __i) : __base_type(__i) { }

      // Re-export the base's conversion-to-value and assignment operators.
      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
456
457 /// Explicit specialization for char16_t.
  template<>
    struct atomic<char16_t> : public atomic_char16_t
    {
      typedef char16_t __integral_type;		// Value type.
      typedef atomic_char16_t __base_type;	// Implementing base class.

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial value; forwarded to the base.
      atomic(__integral_type __i) : __base_type(__i) { }

      // Re-export the base's conversion-to-value and assignment operators.
      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
474
475 /// Explicit specialization for char32_t.
  template<>
    struct atomic<char32_t> : public atomic_char32_t
    {
      typedef char32_t __integral_type;		// Value type.
      typedef atomic_char32_t __base_type;	// Implementing base class.

      atomic() = default;
      ~atomic() = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      // Construct from an initial value; forwarded to the base.
      atomic(__integral_type __i) : __base_type(__i) { }

      // Re-export the base's conversion-to-value and assignment operators.
      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
492
  template<typename _Tp>
    bool
    atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v, memory_order __m1,
					memory_order __m2)
    {
      // View the expected-value slot as a void* lvalue so the inherited
      // atomic_address overload can be invoked on it directly.
      void** __vr = reinterpret_cast<void**>(&__r);
      void* __vv = static_cast<void*>(__v);
      return atomic_address::compare_exchange_weak(*__vr, __vv, __m1, __m2);
    }
502
  template<typename _Tp>
    bool
    atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v,
					  memory_order __m1,
					  memory_order __m2)
    {
      // View the expected-value slot as a void* lvalue so the inherited
      // atomic_address overload can be invoked on it directly.
      void** __vr = reinterpret_cast<void**>(&__r);
      void* __vv = static_cast<void*>(__v);
      return atomic_address::compare_exchange_strong(*__vr, __vv, __m1, __m2);
    }
513
514 template<typename _Tp>
515 bool
516 atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v,
517 memory_order __m)
518 {
519 return compare_exchange_weak(__r, __v, __m,
520 __calculate_memory_order(__m));
521 }
522
523 template<typename _Tp>
524 bool
525 atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v,
526 memory_order __m)
527 {
528 return compare_exchange_strong(__r, __v, __m,
529 __calculate_memory_order(__m));
530 }
531
  template<typename _Tp>
    _Tp*
    atomic<_Tp*>::fetch_add(ptrdiff_t __d, memory_order __m)
    {
      // Scale the element count __d by sizeof(_Tp) so the byte-based
      // atomic_address arithmetic advances by whole _Tp objects; `this`
      // converts implicitly to the atomic_address* the helper expects.
      void* __p = atomic_fetch_add_explicit(this, sizeof(_Tp) * __d, __m);
      return static_cast<_Tp*>(__p);
    }
539
  template<typename _Tp>
    _Tp*
    atomic<_Tp*>::fetch_sub(ptrdiff_t __d, memory_order __m)
    {
      // Scale the element count __d by sizeof(_Tp) so the byte-based
      // atomic_address arithmetic retreats by whole _Tp objects; `this`
      // converts implicitly to the atomic_address* the helper expects.
      void* __p = atomic_fetch_sub_explicit(this, sizeof(_Tp) * __d, __m);
      return static_cast<_Tp*>(__p);
    }
547
548 // Convenience function definitions, atomic_flag.
  /// Forwards to __a->test_and_set(__m) and returns its result.
  inline bool
  atomic_flag_test_and_set_explicit(atomic_flag* __a, memory_order __m)
  { return __a->test_and_set(__m); }

  /// Forwards to __a->clear(__m).
  inline void
  atomic_flag_clear_explicit(atomic_flag* __a, memory_order __m)
  { return __a->clear(__m); }
556
557
558 // Convenience function definitions, atomic_address.
  // Each function below is a C-style free-function wrapper that forwards
  // to the corresponding atomic_address member.  The non-_explicit forms
  // either rely on the member's default ordering or pass
  // memory_order_seq_cst explicitly.

  inline bool
  atomic_is_lock_free(const atomic_address* __a)
  { return __a->is_lock_free(); }

  inline void
  atomic_store(atomic_address* __a, void* __v)
  { __a->store(__v); }

  inline void
  atomic_store_explicit(atomic_address* __a, void* __v, memory_order __m)
  { __a->store(__v, __m); }

  inline void*
  atomic_load(const atomic_address* __a)
  { return __a->load(); }

  inline void*
  atomic_load_explicit(const atomic_address* __a, memory_order __m)
  { return __a->load(__m); }

  inline void*
  atomic_exchange(atomic_address* __a, void* __v)
  { return __a->exchange(__v); }

  inline void*
  atomic_exchange_explicit(atomic_address* __a, void* __v, memory_order __m)
  { return __a->exchange(__v, __m); }

  // Compare-and-exchange: *__v1 is the expected value, __v2 the desired
  // value; seq_cst is used for both success and failure orderings.
  inline bool
  atomic_compare_exchange_weak(atomic_address* __a, void** __v1, void* __v2)
  {
    return __a->compare_exchange_weak(*__v1, __v2, memory_order_seq_cst,
				      memory_order_seq_cst);
  }

  inline bool
  atomic_compare_exchange_strong(atomic_address* __a,
				 void** __v1, void* __v2)
  {
    return __a->compare_exchange_strong(*__v1, __v2, memory_order_seq_cst,
					memory_order_seq_cst);
  }

  inline bool
  atomic_compare_exchange_weak_explicit(atomic_address* __a,
					void** __v1, void* __v2,
					memory_order __m1, memory_order __m2)
  { return __a->compare_exchange_weak(*__v1, __v2, __m1, __m2); }

  inline bool
  atomic_compare_exchange_strong_explicit(atomic_address* __a,
					  void** __v1, void* __v2,
					  memory_order __m1, memory_order __m2)
  { return __a->compare_exchange_strong(*__v1, __v2, __m1, __m2); }

  // Address arithmetic forwards the raw byte displacement __d; scaling
  // for typed pointers happens in atomic<_Tp*>::fetch_add/fetch_sub.
  inline void*
  atomic_fetch_add_explicit(atomic_address* __a, ptrdiff_t __d,
			    memory_order __m)
  { return __a->fetch_add(__d, __m); }

  inline void*
  atomic_fetch_add(atomic_address* __a, ptrdiff_t __d)
  { return __a->fetch_add(__d); }

  inline void*
  atomic_fetch_sub_explicit(atomic_address* __a, ptrdiff_t __d,
			    memory_order __m)
  { return __a->fetch_sub(__d, __m); }

  inline void*
  atomic_fetch_sub(atomic_address* __a, ptrdiff_t __d)
  { return __a->fetch_sub(__d); }
631
632
633 // Convenience function definitions, atomic_bool.
  // Each function below is a C-style free-function wrapper that forwards
  // to the corresponding atomic_bool member.  The non-_explicit forms
  // either rely on the member's default ordering or pass
  // memory_order_seq_cst explicitly.

  inline bool
  atomic_is_lock_free(const atomic_bool* __a)
  { return __a->is_lock_free(); }

  inline void
  atomic_store(atomic_bool* __a, bool __i)
  { __a->store(__i); }

  inline void
  atomic_store_explicit(atomic_bool* __a, bool __i, memory_order __m)
  { __a->store(__i, __m); }

  inline bool
  atomic_load(const atomic_bool* __a)
  { return __a->load(); }

  inline bool
  atomic_load_explicit(const atomic_bool* __a, memory_order __m)
  { return __a->load(__m); }

  inline bool
  atomic_exchange(atomic_bool* __a, bool __i)
  { return __a->exchange(__i); }

  inline bool
  atomic_exchange_explicit(atomic_bool* __a, bool __i, memory_order __m)
  { return __a->exchange(__i, __m); }

  // Compare-and-exchange: *__i1 is the expected value, __i2 the desired
  // value; seq_cst is used for both success and failure orderings.
  inline bool
  atomic_compare_exchange_weak(atomic_bool* __a, bool* __i1, bool __i2)
  {
    return __a->compare_exchange_weak(*__i1, __i2, memory_order_seq_cst,
				      memory_order_seq_cst);
  }

  inline bool
  atomic_compare_exchange_strong(atomic_bool* __a, bool* __i1, bool __i2)
  {
    return __a->compare_exchange_strong(*__i1, __i2, memory_order_seq_cst,
					memory_order_seq_cst);
  }

  inline bool
  atomic_compare_exchange_weak_explicit(atomic_bool* __a, bool* __i1,
					bool __i2, memory_order __m1,
					memory_order __m2)
  { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }

  inline bool
  atomic_compare_exchange_strong_explicit(atomic_bool* __a,
					  bool* __i1, bool __i2,
					  memory_order __m1, memory_order __m2)
  { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }
687
688
689
  // Free-standing functions.  The template argument should be constrained
  // to integral types as specified in the standard.
  // The _explicit forms forward the given ordering(s) to the member; the
  // plain forms delegate to the _explicit forms with memory_order_seq_cst.

  template<typename _ITp>
    inline void
    atomic_store_explicit(__atomic_base<_ITp>* __a, _ITp __i, memory_order __m)
    { __a->store(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_load_explicit(const __atomic_base<_ITp>* __a, memory_order __m)
    { return __a->load(__m); }

  template<typename _ITp>
    inline _ITp
    atomic_exchange_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			     memory_order __m)
    { return __a->exchange(__i, __m); }

  // Compare-and-exchange: *__i1 is the expected value, __i2 the desired
  // value; __m1/__m2 are the success/failure orderings.
  template<typename _ITp>
    inline bool
    atomic_compare_exchange_weak_explicit(__atomic_base<_ITp>* __a,
					  _ITp* __i1, _ITp __i2,
					  memory_order __m1, memory_order __m2)
    { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_strong_explicit(__atomic_base<_ITp>* __a,
					    _ITp* __i1, _ITp __i2,
					    memory_order __m1,
					    memory_order __m2)
    { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_add_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m)
    { return __a->fetch_add(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_sub_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m)
    { return __a->fetch_sub(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_and_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m)
    { return __a->fetch_and(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_or_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			     memory_order __m)
    { return __a->fetch_or(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m)
    { return __a->fetch_xor(__i, __m); }

  template<typename _ITp>
    inline bool
    atomic_is_lock_free(const __atomic_base<_ITp>* __a)
    { return __a->is_lock_free(); }

  // seq_cst wrappers over the _explicit forms above.
  template<typename _ITp>
    inline void
    atomic_store(__atomic_base<_ITp>* __a, _ITp __i)
    { atomic_store_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_load(const __atomic_base<_ITp>* __a)
    { return atomic_load_explicit(__a, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_exchange(__atomic_base<_ITp>* __a, _ITp __i)
    { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_weak(__atomic_base<_ITp>* __a,
				 _ITp* __i1, _ITp __i2)
    {
      return atomic_compare_exchange_weak_explicit(__a, __i1, __i2,
						   memory_order_seq_cst,
						   memory_order_seq_cst);
    }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_strong(__atomic_base<_ITp>* __a,
				   _ITp* __i1, _ITp __i2)
    {
      return atomic_compare_exchange_strong_explicit(__a, __i1, __i2,
						     memory_order_seq_cst,
						     memory_order_seq_cst);
    }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_add(__atomic_base<_ITp>* __a, _ITp __i)
    { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_sub(__atomic_base<_ITp>* __a, _ITp __i)
    { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_and(__atomic_base<_ITp>* __a, _ITp __i)
    { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_or(__atomic_base<_ITp>* __a, _ITp __i)
    { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_xor(__atomic_base<_ITp>* __a, _ITp __i)
    { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
817
818 // @} group atomics
819
820 _GLIBCXX_END_NAMESPACE
821
822 #endif
823