Home | History | Annotate | Line # | Download | only in libgomp
team.c revision 1.1
      1  1.1  mrg /* Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
      2  1.1  mrg    Contributed by Richard Henderson <rth (at) redhat.com>.
      3  1.1  mrg 
      4  1.1  mrg    This file is part of the GNU OpenMP Library (libgomp).
      5  1.1  mrg 
      6  1.1  mrg    Libgomp is free software; you can redistribute it and/or modify it
      7  1.1  mrg    under the terms of the GNU General Public License as published by
      8  1.1  mrg    the Free Software Foundation; either version 3, or (at your option)
      9  1.1  mrg    any later version.
     10  1.1  mrg 
     11  1.1  mrg    Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
     12  1.1  mrg    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
     13  1.1  mrg    FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
     14  1.1  mrg    more details.
     15  1.1  mrg 
     16  1.1  mrg    Under Section 7 of GPL version 3, you are granted additional
     17  1.1  mrg    permissions described in the GCC Runtime Library Exception, version
     18  1.1  mrg    3.1, as published by the Free Software Foundation.
     19  1.1  mrg 
     20  1.1  mrg    You should have received a copy of the GNU General Public License and
     21  1.1  mrg    a copy of the GCC Runtime Library Exception along with this program;
     22  1.1  mrg    see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     23  1.1  mrg    <http://www.gnu.org/licenses/>.  */
     24  1.1  mrg 
     25  1.1  mrg /* This file handles the maintainence of threads in response to team
     26  1.1  mrg    creation and termination.  */
     27  1.1  mrg 
     28  1.1  mrg #include "libgomp.h"
     29  1.1  mrg #include <stdlib.h>
     30  1.1  mrg #include <string.h>
     31  1.1  mrg 
/* This attribute contains PTHREAD_CREATE_DETACHED.  It is passed to
   pthread_create for every worker thread the library spawns.  */
pthread_attr_t gomp_thread_attr;

/* This key is for the thread destructor.  Its per-thread value is the
   thread's struct gomp_thread; the destructor (gomp_free_thread) tears
   down the thread pool when the owning thread exits.  */
pthread_key_t gomp_thread_destructor;


/* This is the libgomp per-thread data structure.  With compiler TLS
   support each thread gets a __thread instance; otherwise the structure
   is reached through a pthread key (see gomp_thread in libgomp.h).  */
#ifdef HAVE_TLS
__thread struct gomp_thread gomp_tls_data;
#else
pthread_key_t gomp_tls_key;
#endif
     45  1.1  mrg 
     46  1.1  mrg 
/* This structure is used to communicate across pthread_create.  It is
   allocated on the creating thread's stack (alloca in gomp_team_start)
   and is therefore only valid until the new thread crosses its first
   barrier in gomp_thread_start.  */

struct gomp_thread_start_data
{
  void (*fn) (void *);		/* Outlined parallel-region body.  */
  void *fn_data;		/* Argument passed to FN.  */
  struct gomp_team_state ts;	/* Initial team state for the thread.  */
  struct gomp_task *task;	/* The thread's implicit task.  */
  struct gomp_thread_pool *thread_pool;	/* Pool the thread belongs to.  */
  bool nested;			/* True for nested-region threads, which
				   exit after one region instead of
				   returning to the pool dock.  */
};
     58  1.1  mrg 
     59  1.1  mrg 
/* This function is a pthread_create entry point.  This contains the idle
   loop in which a thread waits to be called up to become part of a team.  */

static void *
gomp_thread_start (void *xdata)
{
  struct gomp_thread_start_data *data = xdata;
  struct gomp_thread *thr;
  struct gomp_thread_pool *pool;
  void (*local_fn) (void *);
  void *local_data;

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  /* No TLS: keep the per-thread structure on this thread's own stack
     and publish it through the pthread key so gomp_thread () works.  */
  struct gomp_thread local_thr;
  thr = &local_thr;
  pthread_setspecific (gomp_tls_key, thr);
#endif
  gomp_sem_init (&thr->release, 0);

  /* Extract what we need from data.  DATA lives on the creator's stack
     (alloca in gomp_team_start) and becomes invalid once the team
     rendezvous barrier below is crossed, so copy everything out now.  */
  local_fn = data->fn;
  local_data = data->fn_data;
  thr->thread_pool = data->thread_pool;
  thr->ts = data->ts;
  thr->task = data->task;

  /* Register this thread's release semaphore so ordered sections can
     signal it by team id.  */
  thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;

  /* Make thread pool local. */
  pool = thr->thread_pool;

  if (data->nested)
    {
      /* Threads of a nested team run exactly one parallel region and
	 then exit; they are never parked back in the pool.  */
      struct gomp_team *team = thr->ts.team;
      struct gomp_task *task = thr->task;

      /* Rendezvous with the team master; after this point DATA must
	 not be touched.  */
      gomp_barrier_wait (&team->barrier);

      local_fn (local_data);
      gomp_team_barrier_wait (&team->barrier);
      gomp_finish_task (task);
      /* Tell the master it is now safe to destroy the team.  */
      gomp_barrier_wait_last (&team->barrier);
    }
  else
    {
      /* Top-level threads park on the pool's dock barrier between
	 parallel regions and are re-dispatched with a fresh fn/data
	 pair each time gomp_team_start releases the dock.  */
      pool->threads[thr->ts.team_id] = thr;

      gomp_barrier_wait (&pool->threads_dock);
      do
	{
	  struct gomp_team *team = thr->ts.team;
	  struct gomp_task *task = thr->task;

	  local_fn (local_data);
	  gomp_team_barrier_wait (&team->barrier);
	  gomp_finish_task (task);

	  /* Wait for the next region (thr->fn set by gomp_team_start)
	     or for the exit signal (thr->fn left NULL, or set to
	     gomp_free_pool_helper on pool teardown).  */
	  gomp_barrier_wait (&pool->threads_dock);

	  local_fn = thr->fn;
	  local_data = thr->data;
	  thr->fn = NULL;
	}
      while (local_fn);
    }

  gomp_sem_destroy (&thr->release);
  return NULL;
}
    131  1.1  mrg 
    132  1.1  mrg 
/* Create a new team data structure for NTHREADS threads.  The team, its
   per-thread implicit tasks and the ordered_release pointer array are
   carved out of one allocation; ordered_release is made to point just
   past the implicit_task[] trailing array.  */

struct gomp_team *
gomp_new_team (unsigned nthreads)
{
  struct gomp_team *team;
  size_t size;
  int i;

  size = sizeof (*team) + nthreads * (sizeof (team->ordered_release[0])
				      + sizeof (team->implicit_task[0]));
  team = gomp_malloc (size);

  /* Seed the work-share free list with the 8 entries embedded in the
     team structure: entry 0 is the initial work share, entries 1..7 are
     chained onto the allocation list.  */
  team->work_share_chunk = 8;
#ifdef HAVE_SYNC_BUILTINS
  team->single_count = 0;
#else
  gomp_mutex_init (&team->work_share_list_free_lock);
#endif
  gomp_init_work_share (&team->work_shares[0], false, nthreads);
  team->work_shares[0].next_alloc = NULL;
  team->work_share_list_free = NULL;
  team->work_share_list_alloc = &team->work_shares[1];
  for (i = 1; i < 7; i++)
    team->work_shares[i].next_free = &team->work_shares[i + 1];
  team->work_shares[i].next_free = NULL;

  team->nthreads = nthreads;
  gomp_barrier_init (&team->barrier, nthreads);

  gomp_sem_init (&team->master_release, 0);
  /* ordered_release[] lives immediately after implicit_task[nthreads].  */
  team->ordered_release = (void *) &team->implicit_task[nthreads];
  team->ordered_release[0] = &team->master_release;

  gomp_mutex_init (&team->task_lock);
  team->task_queue = NULL;
  team->task_count = 0;
  team->task_running_count = 0;

  return team;
}
    174  1.1  mrg 
    175  1.1  mrg 
    176  1.1  mrg /* Free a team data structure.  */
    177  1.1  mrg 
    178  1.1  mrg static void
    179  1.1  mrg free_team (struct gomp_team *team)
    180  1.1  mrg {
    181  1.1  mrg   gomp_barrier_destroy (&team->barrier);
    182  1.1  mrg   gomp_mutex_destroy (&team->task_lock);
    183  1.1  mrg   free (team);
    184  1.1  mrg }
    185  1.1  mrg 
    186  1.1  mrg /* Allocate and initialize a thread pool. */
    187  1.1  mrg 
    188  1.1  mrg static struct gomp_thread_pool *gomp_new_thread_pool (void)
    189  1.1  mrg {
    190  1.1  mrg   struct gomp_thread_pool *pool
    191  1.1  mrg     = gomp_malloc (sizeof(struct gomp_thread_pool));
    192  1.1  mrg   pool->threads = NULL;
    193  1.1  mrg   pool->threads_size = 0;
    194  1.1  mrg   pool->threads_used = 0;
    195  1.1  mrg   pool->last_team = NULL;
    196  1.1  mrg   return pool;
    197  1.1  mrg }
    198  1.1  mrg 
/* Exit trampoline installed (via thr->fn) on idle pool threads when the
   pool is being torn down by gomp_free_thread; runs in the context of
   the exiting worker thread.  */
static void
gomp_free_pool_helper (void *thread_pool)
{
  struct gomp_thread_pool *pool
    = (struct gomp_thread_pool *) thread_pool;
  /* Acknowledge the teardown request; gomp_free_thread waits on this
     barrier before destroying threads_dock and freeing the pool.  */
  gomp_barrier_wait_last (&pool->threads_dock);
  gomp_sem_destroy (&gomp_thread ()->release);
  pthread_exit (NULL);
}
    208  1.1  mrg 
/* Free a thread pool and release its threads.  Runs as the
   gomp_thread_destructor pthread-key destructor when a pool-owning
   thread exits; ARG is unused (the per-thread data is fetched via
   gomp_thread ()).  */

static void
gomp_free_thread (void *arg __attribute__((unused)))
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_thread_pool *pool = thr->thread_pool;
  if (pool)
    {
      if (pool->threads_used > 0)
	{
	  int i;
	  /* Point every docked worker (ids 1..used-1; id 0 is this
	     thread) at the exit trampoline.  */
	  for (i = 1; i < pool->threads_used; i++)
	    {
	      struct gomp_thread *nthr = pool->threads[i];
	      nthr->fn = gomp_free_pool_helper;
	      nthr->data = pool;
	    }
	  /* This barrier undocks threads docked on pool->threads_dock.  */
	  gomp_barrier_wait (&pool->threads_dock);
	  /* And this waits till all threads have called gomp_barrier_wait_last
	     in gomp_free_pool_helper.  */
	  gomp_barrier_wait (&pool->threads_dock);
	  /* Now it is safe to destroy the barrier and free the pool.  */
	  gomp_barrier_destroy (&pool->threads_dock);
	}
      free (pool->threads);
      if (pool->last_team)
	free_team (pool->last_team);
      free (pool);
      thr->thread_pool = NULL;
    }
  /* Tear down this thread's own implicit task, if any.  */
  if (thr->task != NULL)
    {
      struct gomp_task *task = thr->task;
      gomp_end_task ();
      free (task);
    }
}
    248  1.1  mrg 
    249  1.1  mrg /* Launch a team.  */
    250  1.1  mrg 
    251  1.1  mrg void
    252  1.1  mrg gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
    253  1.1  mrg 		 struct gomp_team *team)
    254  1.1  mrg {
    255  1.1  mrg   struct gomp_thread_start_data *start_data;
    256  1.1  mrg   struct gomp_thread *thr, *nthr;
    257  1.1  mrg   struct gomp_task *task;
    258  1.1  mrg   struct gomp_task_icv *icv;
    259  1.1  mrg   bool nested;
    260  1.1  mrg   struct gomp_thread_pool *pool;
    261  1.1  mrg   unsigned i, n, old_threads_used = 0;
    262  1.1  mrg   pthread_attr_t thread_attr, *attr;
    263  1.1  mrg 
    264  1.1  mrg   thr = gomp_thread ();
    265  1.1  mrg   nested = thr->ts.team != NULL;
    266  1.1  mrg   if (__builtin_expect (thr->thread_pool == NULL, 0))
    267  1.1  mrg     {
    268  1.1  mrg       thr->thread_pool = gomp_new_thread_pool ();
    269  1.1  mrg       pthread_setspecific (gomp_thread_destructor, thr);
    270  1.1  mrg     }
    271  1.1  mrg   pool = thr->thread_pool;
    272  1.1  mrg   task = thr->task;
    273  1.1  mrg   icv = task ? &task->icv : &gomp_global_icv;
    274  1.1  mrg 
    275  1.1  mrg   /* Always save the previous state, even if this isn't a nested team.
    276  1.1  mrg      In particular, we should save any work share state from an outer
    277  1.1  mrg      orphaned work share construct.  */
    278  1.1  mrg   team->prev_ts = thr->ts;
    279  1.1  mrg 
    280  1.1  mrg   thr->ts.team = team;
    281  1.1  mrg   thr->ts.team_id = 0;
    282  1.1  mrg   ++thr->ts.level;
    283  1.1  mrg   if (nthreads > 1)
    284  1.1  mrg     ++thr->ts.active_level;
    285  1.1  mrg   thr->ts.work_share = &team->work_shares[0];
    286  1.1  mrg   thr->ts.last_work_share = NULL;
    287  1.1  mrg #ifdef HAVE_SYNC_BUILTINS
    288  1.1  mrg   thr->ts.single_count = 0;
    289  1.1  mrg #endif
    290  1.1  mrg   thr->ts.static_trip = 0;
    291  1.1  mrg   thr->task = &team->implicit_task[0];
    292  1.1  mrg   gomp_init_task (thr->task, task, icv);
    293  1.1  mrg 
    294  1.1  mrg   if (nthreads == 1)
    295  1.1  mrg     return;
    296  1.1  mrg 
    297  1.1  mrg   i = 1;
    298  1.1  mrg 
    299  1.1  mrg   /* We only allow the reuse of idle threads for non-nested PARALLEL
    300  1.1  mrg      regions.  This appears to be implied by the semantics of
    301  1.1  mrg      threadprivate variables, but perhaps that's reading too much into
    302  1.1  mrg      things.  Certainly it does prevent any locking problems, since
    303  1.1  mrg      only the initial program thread will modify gomp_threads.  */
    304  1.1  mrg   if (!nested)
    305  1.1  mrg     {
    306  1.1  mrg       old_threads_used = pool->threads_used;
    307  1.1  mrg 
    308  1.1  mrg       if (nthreads <= old_threads_used)
    309  1.1  mrg 	n = nthreads;
    310  1.1  mrg       else if (old_threads_used == 0)
    311  1.1  mrg 	{
    312  1.1  mrg 	  n = 0;
    313  1.1  mrg 	  gomp_barrier_init (&pool->threads_dock, nthreads);
    314  1.1  mrg 	}
    315  1.1  mrg       else
    316  1.1  mrg 	{
    317  1.1  mrg 	  n = old_threads_used;
    318  1.1  mrg 
    319  1.1  mrg 	  /* Increase the barrier threshold to make sure all new
    320  1.1  mrg 	     threads arrive before the team is released.  */
    321  1.1  mrg 	  gomp_barrier_reinit (&pool->threads_dock, nthreads);
    322  1.1  mrg 	}
    323  1.1  mrg 
    324  1.1  mrg       /* Not true yet, but soon will be.  We're going to release all
    325  1.1  mrg 	 threads from the dock, and those that aren't part of the
    326  1.1  mrg 	 team will exit.  */
    327  1.1  mrg       pool->threads_used = nthreads;
    328  1.1  mrg 
    329  1.1  mrg       /* Release existing idle threads.  */
    330  1.1  mrg       for (; i < n; ++i)
    331  1.1  mrg 	{
    332  1.1  mrg 	  nthr = pool->threads[i];
    333  1.1  mrg 	  nthr->ts.team = team;
    334  1.1  mrg 	  nthr->ts.work_share = &team->work_shares[0];
    335  1.1  mrg 	  nthr->ts.last_work_share = NULL;
    336  1.1  mrg 	  nthr->ts.team_id = i;
    337  1.1  mrg 	  nthr->ts.level = team->prev_ts.level + 1;
    338  1.1  mrg 	  nthr->ts.active_level = thr->ts.active_level;
    339  1.1  mrg #ifdef HAVE_SYNC_BUILTINS
    340  1.1  mrg 	  nthr->ts.single_count = 0;
    341  1.1  mrg #endif
    342  1.1  mrg 	  nthr->ts.static_trip = 0;
    343  1.1  mrg 	  nthr->task = &team->implicit_task[i];
    344  1.1  mrg 	  gomp_init_task (nthr->task, task, icv);
    345  1.1  mrg 	  nthr->fn = fn;
    346  1.1  mrg 	  nthr->data = data;
    347  1.1  mrg 	  team->ordered_release[i] = &nthr->release;
    348  1.1  mrg 	}
    349  1.1  mrg 
    350  1.1  mrg       if (i == nthreads)
    351  1.1  mrg 	goto do_release;
    352  1.1  mrg 
    353  1.1  mrg       /* If necessary, expand the size of the gomp_threads array.  It is
    354  1.1  mrg 	 expected that changes in the number of threads are rare, thus we
    355  1.1  mrg 	 make no effort to expand gomp_threads_size geometrically.  */
    356  1.1  mrg       if (nthreads >= pool->threads_size)
    357  1.1  mrg 	{
    358  1.1  mrg 	  pool->threads_size = nthreads + 1;
    359  1.1  mrg 	  pool->threads
    360  1.1  mrg 	    = gomp_realloc (pool->threads,
    361  1.1  mrg 			    pool->threads_size
    362  1.1  mrg 			    * sizeof (struct gomp_thread_data *));
    363  1.1  mrg 	}
    364  1.1  mrg     }
    365  1.1  mrg 
    366  1.1  mrg   if (__builtin_expect (nthreads > old_threads_used, 0))
    367  1.1  mrg     {
    368  1.1  mrg       long diff = (long) nthreads - (long) old_threads_used;
    369  1.1  mrg 
    370  1.1  mrg       if (old_threads_used == 0)
    371  1.1  mrg 	--diff;
    372  1.1  mrg 
    373  1.1  mrg #ifdef HAVE_SYNC_BUILTINS
    374  1.1  mrg       __sync_fetch_and_add (&gomp_managed_threads, diff);
    375  1.1  mrg #else
    376  1.1  mrg       gomp_mutex_lock (&gomp_remaining_threads_lock);
    377  1.1  mrg       gomp_managed_threads += diff;
    378  1.1  mrg       gomp_mutex_unlock (&gomp_remaining_threads_lock);
    379  1.1  mrg #endif
    380  1.1  mrg     }
    381  1.1  mrg 
    382  1.1  mrg   attr = &gomp_thread_attr;
    383  1.1  mrg   if (__builtin_expect (gomp_cpu_affinity != NULL, 0))
    384  1.1  mrg     {
    385  1.1  mrg       size_t stacksize;
    386  1.1  mrg       pthread_attr_init (&thread_attr);
    387  1.1  mrg       pthread_attr_setdetachstate (&thread_attr, PTHREAD_CREATE_DETACHED);
    388  1.1  mrg       if (! pthread_attr_getstacksize (&gomp_thread_attr, &stacksize))
    389  1.1  mrg 	pthread_attr_setstacksize (&thread_attr, stacksize);
    390  1.1  mrg       attr = &thread_attr;
    391  1.1  mrg     }
    392  1.1  mrg 
    393  1.1  mrg   start_data = gomp_alloca (sizeof (struct gomp_thread_start_data)
    394  1.1  mrg 			    * (nthreads-i));
    395  1.1  mrg 
    396  1.1  mrg   /* Launch new threads.  */
    397  1.1  mrg   for (; i < nthreads; ++i, ++start_data)
    398  1.1  mrg     {
    399  1.1  mrg       pthread_t pt;
    400  1.1  mrg       int err;
    401  1.1  mrg 
    402  1.1  mrg       start_data->fn = fn;
    403  1.1  mrg       start_data->fn_data = data;
    404  1.1  mrg       start_data->ts.team = team;
    405  1.1  mrg       start_data->ts.work_share = &team->work_shares[0];
    406  1.1  mrg       start_data->ts.last_work_share = NULL;
    407  1.1  mrg       start_data->ts.team_id = i;
    408  1.1  mrg       start_data->ts.level = team->prev_ts.level + 1;
    409  1.1  mrg       start_data->ts.active_level = thr->ts.active_level;
    410  1.1  mrg #ifdef HAVE_SYNC_BUILTINS
    411  1.1  mrg       start_data->ts.single_count = 0;
    412  1.1  mrg #endif
    413  1.1  mrg       start_data->ts.static_trip = 0;
    414  1.1  mrg       start_data->task = &team->implicit_task[i];
    415  1.1  mrg       gomp_init_task (start_data->task, task, icv);
    416  1.1  mrg       start_data->thread_pool = pool;
    417  1.1  mrg       start_data->nested = nested;
    418  1.1  mrg 
    419  1.1  mrg       if (gomp_cpu_affinity != NULL)
    420  1.1  mrg 	gomp_init_thread_affinity (attr);
    421  1.1  mrg 
    422  1.1  mrg       err = pthread_create (&pt, attr, gomp_thread_start, start_data);
    423  1.1  mrg       if (err != 0)
    424  1.1  mrg 	gomp_fatal ("Thread creation failed: %s", strerror (err));
    425  1.1  mrg     }
    426  1.1  mrg 
    427  1.1  mrg   if (__builtin_expect (gomp_cpu_affinity != NULL, 0))
    428  1.1  mrg     pthread_attr_destroy (&thread_attr);
    429  1.1  mrg 
    430  1.1  mrg  do_release:
    431  1.1  mrg   gomp_barrier_wait (nested ? &team->barrier : &pool->threads_dock);
    432  1.1  mrg 
    433  1.1  mrg   /* Decrease the barrier threshold to match the number of threads
    434  1.1  mrg      that should arrive back at the end of this team.  The extra
    435  1.1  mrg      threads should be exiting.  Note that we arrange for this test
    436  1.1  mrg      to never be true for nested teams.  */
    437  1.1  mrg   if (__builtin_expect (nthreads < old_threads_used, 0))
    438  1.1  mrg     {
    439  1.1  mrg       long diff = (long) nthreads - (long) old_threads_used;
    440  1.1  mrg 
    441  1.1  mrg       gomp_barrier_reinit (&pool->threads_dock, nthreads);
    442  1.1  mrg 
    443  1.1  mrg #ifdef HAVE_SYNC_BUILTINS
    444  1.1  mrg       __sync_fetch_and_add (&gomp_managed_threads, diff);
    445  1.1  mrg #else
    446  1.1  mrg       gomp_mutex_lock (&gomp_remaining_threads_lock);
    447  1.1  mrg       gomp_managed_threads += diff;
    448  1.1  mrg       gomp_mutex_unlock (&gomp_remaining_threads_lock);
    449  1.1  mrg #endif
    450  1.1  mrg     }
    451  1.1  mrg }
    452  1.1  mrg 
    453  1.1  mrg 
/* Terminate the current team.  This is only to be called by the master
   thread.  We assume that we must wait for the other threads.  */

void
gomp_team_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  /* This barrier handles all pending explicit threads.  */
  gomp_team_barrier_wait (&team->barrier);
  gomp_fini_work_share (thr->ts.work_share);

  /* Drop the master's implicit task and restore the pre-region
     team state saved by gomp_team_start.  */
  gomp_end_task ();
  thr->ts = team->prev_ts;

  if (__builtin_expect (thr->ts.team != NULL, 0))
    {
      /* Nested team: its threads exit now rather than returning to a
	 pool, so subtract them from the managed-thread count.  */
#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, 1L - team->nthreads);
#else
      gomp_mutex_lock (&gomp_remaining_threads_lock);
      gomp_managed_threads -= team->nthreads - 1L;
      gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
      /* This barrier has gomp_barrier_wait_last counterparts
	 and ensures the team can be safely destroyed.  */
      gomp_barrier_wait (&team->barrier);
    }

  /* Free any work shares allocated beyond the 8 embedded in the team
     structure; they are chained through next_alloc.  */
  if (__builtin_expect (team->work_shares[0].next_alloc != NULL, 0))
    {
      struct gomp_work_share *ws = team->work_shares[0].next_alloc;
      do
	{
	  struct gomp_work_share *next_ws = ws->next_alloc;
	  free (ws);
	  ws = next_ws;
	}
      while (ws != NULL);
    }
  gomp_sem_destroy (&team->master_release);
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_destroy (&team->work_share_list_free_lock);
#endif

  /* Nested or single-thread teams are freed immediately; otherwise
     cache the team in the pool (replacing any previously cached one)
     for cheap reuse by the next parallel region.  */
  if (__builtin_expect (thr->ts.team != NULL, 0)
      || __builtin_expect (team->nthreads == 1, 0))
    free_team (team);
  else
    {
      struct gomp_thread_pool *pool = thr->thread_pool;
      if (pool->last_team)
	free_team (pool->last_team);
      pool->last_team = team;
    }
}
    511  1.1  mrg 
    512  1.1  mrg 
/* Constructors for this file.  */

/* Runs before main: sets up the TLS (or pthread-key) per-thread data
   for the initial thread, registers the pool destructor key, and
   initializes the initial thread's release semaphore.  */
static void __attribute__((constructor))
initialize_team (void)
{
  struct gomp_thread *thr;

#ifndef HAVE_TLS
  /* No compiler TLS: the initial thread's gomp_thread structure is a
     function-static object published through gomp_tls_key.  */
  static struct gomp_thread initial_thread_tls_data;

  pthread_key_create (&gomp_tls_key, NULL);
  pthread_setspecific (gomp_tls_key, &initial_thread_tls_data);
#endif

  if (pthread_key_create (&gomp_thread_destructor, gomp_free_thread) != 0)
    gomp_fatal ("could not create thread pool destructor.");

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  thr = &initial_thread_tls_data;
#endif
  gomp_sem_init (&thr->release, 0);
}
    537  1.1  mrg 
    538  1.1  mrg static void __attribute__((destructor))
    539  1.1  mrg team_destructor (void)
    540  1.1  mrg {
    541  1.1  mrg   /* Without this dlclose on libgomp could lead to subsequent
    542  1.1  mrg      crashes.  */
    543  1.1  mrg   pthread_key_delete (gomp_thread_destructor);
    544  1.1  mrg }
    545  1.1  mrg 
    546  1.1  mrg struct gomp_task_icv *
    547  1.1  mrg gomp_new_icv (void)
    548  1.1  mrg {
    549  1.1  mrg   struct gomp_thread *thr = gomp_thread ();
    550  1.1  mrg   struct gomp_task *task = gomp_malloc (sizeof (struct gomp_task));
    551  1.1  mrg   gomp_init_task (task, NULL, &gomp_global_icv);
    552  1.1  mrg   thr->task = task;
    553  1.1  mrg   pthread_setspecific (gomp_thread_destructor, thr);
    554  1.1  mrg   return &task->icv;
    555  1.1  mrg }
    556