ARGOBOTS  dce6e727ffc4ca5b3ffc04cb9517c6689be51ec5
thread.c
1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
2 /*
3  * See COPYRIGHT in top-level directory.
4  */
5 
6 #include "abti.h"
7 
8 typedef enum {
9  THREAD_POOL_OP_NONE,
10  THREAD_POOL_OP_INIT,
11  THREAD_POOL_OP_PUSH,
12 } thread_pool_op_kind;
13 ABTU_ret_err static inline int
14 ythread_create(ABTI_global *p_global, ABTI_local *p_local, ABTI_pool *p_pool,
15  void (*thread_func)(void *), void *arg, ABTI_thread_attr *p_attr,
16  ABTI_thread_type thread_type, ABTI_sched *p_sched,
17  thread_pool_op_kind pool_op, ABTI_ythread **pp_newthread);
18 ABTU_ret_err static inline int
19 thread_revive(ABTI_global *p_global, ABTI_local *p_local, ABTI_pool *p_pool,
20  void (*thread_func)(void *), void *arg,
21  thread_pool_op_kind pool_op, ABTI_thread *p_thread);
22 static inline void thread_join(ABTI_local **pp_local, ABTI_thread *p_thread);
23 static inline void thread_free(ABTI_global *p_global, ABTI_local *p_local,
24  ABTI_thread *p_thread, ABT_bool free_unit);
25 static void thread_root_func(void *arg);
26 static void thread_main_sched_func(void *arg);
27 #ifndef ABT_CONFIG_DISABLE_MIGRATION
28 ABTU_ret_err static int thread_migrate_to_pool(ABTI_global *p_global,
29  ABTI_local *p_local,
30  ABTI_thread *p_thread,
31  ABTI_pool *p_pool);
32 #endif
33 static inline ABT_unit_id thread_get_new_id(void);
34 
35 static void thread_key_destructor_stackable_sched(void *p_value);
36 static ABTI_key g_thread_sched_key =
37  ABTI_KEY_STATIC_INITIALIZER(thread_key_destructor_stackable_sched,
38  ABTI_KEY_ID_STACKABLE_SCHED);
39 static void thread_key_destructor_migration(void *p_value);
40 static ABTI_key g_thread_mig_data_key =
41  ABTI_KEY_STATIC_INITIALIZER(thread_key_destructor_migration,
42  ABTI_KEY_ID_MIGRATION);
43 
97 int ABT_thread_create(ABT_pool pool, void (*thread_func)(void *), void *arg,
98  ABT_thread_attr attr, ABT_thread *newthread)
99 {
100  ABTI_UB_ASSERT(ABTI_initialized());
101  ABTI_UB_ASSERT(thread_func);
102 
103 #ifndef ABT_CONFIG_ENABLE_VER_20_API
104  /* Argobots 1.x sets newthread to NULL on error. */
105  if (newthread)
106  *newthread = ABT_THREAD_NULL;
107 #endif
108  ABTI_global *p_global;
109  ABTI_SETUP_GLOBAL(&p_global);
110  ABTI_local *p_local = ABTI_local_get_local();
111  ABTI_ythread *p_newthread;
112 
113  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
114  ABTI_CHECK_NULL_POOL_PTR(p_pool);
115 
116  ABTI_thread_type unit_type =
117  (newthread != NULL)
118  ? (ABTI_THREAD_TYPE_YIELDABLE | ABTI_THREAD_TYPE_NAMED)
119  : ABTI_THREAD_TYPE_YIELDABLE;
120  int abt_errno = ythread_create(p_global, p_local, p_pool, thread_func, arg,
121  ABTI_thread_attr_get_ptr(attr), unit_type,
122  NULL, THREAD_POOL_OP_PUSH, &p_newthread);
123  ABTI_CHECK_ERROR(abt_errno);
124 
125  /* Return value */
126  if (newthread)
127  *newthread = ABTI_ythread_get_handle(p_newthread);
128  return ABT_SUCCESS;
129 }
130 
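A caller-side sketch of ABT_thread_create() (illustrative only, not part of thread.c; it assumes <abt.h> is available, ABT_init() has already been called, and error checking is omitted). The helper names hello and spawn_one_ult are hypothetical.

#include <abt.h>

static void hello(void *arg) { (void)arg; }

static void spawn_one_ult(void)
{
    ABT_xstream xstream;
    ABT_pool pool;
    ABT_thread thread;
    ABT_xstream_self(&xstream);
    ABT_xstream_get_main_pools(xstream, 1, &pool);
    ABT_thread_create(pool, hello, NULL, ABT_THREAD_ATTR_NULL, &thread);
    /* ABT_thread_free() joins the ULT and then releases the handle. */
    ABT_thread_free(&thread);
}
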
179 int ABT_thread_create_to(ABT_pool pool, void (*thread_func)(void *), void *arg,
180  ABT_thread_attr attr, ABT_thread *newthread)
181 {
182  ABTI_UB_ASSERT(ABTI_initialized());
183  ABTI_UB_ASSERT(thread_func);
184 
185  ABTI_global *p_global;
186  ABTI_SETUP_GLOBAL(&p_global);
187  ABTI_xstream *p_local_xstream;
188  ABTI_ythread *p_cur_ythread, *p_newthread;
189  ABTI_SETUP_LOCAL_YTHREAD(&p_local_xstream, &p_cur_ythread);
190  ABTI_CHECK_TRUE(!(p_cur_ythread->thread.type & ABTI_THREAD_TYPE_MAIN_SCHED),
191  ABT_ERR_INV_THREAD);
192 
193  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
194  ABTI_CHECK_NULL_POOL_PTR(p_pool);
195 
196  ABTI_thread_type unit_type =
197  (newthread != NULL)
198  ? (ABTI_THREAD_TYPE_YIELDABLE | ABTI_THREAD_TYPE_NAMED)
199  : ABTI_THREAD_TYPE_YIELDABLE;
200  int abt_errno =
201  ythread_create(p_global, ABTI_xstream_get_local(p_local_xstream),
202  p_pool, thread_func, arg, ABTI_thread_attr_get_ptr(attr),
203  unit_type, NULL, THREAD_POOL_OP_INIT, &p_newthread);
204  ABTI_CHECK_ERROR(abt_errno);
205 
206  /* Set a return value before context switching. */
207  if (newthread)
208  *newthread = ABTI_ythread_get_handle(p_newthread);
209 
210  /* Yield to the target ULT. */
211  ABTI_ythread_yield_to(&p_local_xstream, p_cur_ythread, p_newthread,
212  ABTI_YTHREAD_YIELD_TO_KIND_CREATE_TO,
213  ABT_SYNC_EVENT_TYPE_USER, NULL);
214  return ABT_SUCCESS;
215 }
216 
266 int ABT_thread_create_on_xstream(ABT_xstream xstream,
267  void (*thread_func)(void *), void *arg,
268  ABT_thread_attr attr, ABT_thread *newthread)
269 {
270  ABTI_UB_ASSERT(ABTI_initialized());
271  ABTI_UB_ASSERT(thread_func);
272 
273 #ifndef ABT_CONFIG_ENABLE_VER_20_API
274  /* Argobots 1.x sets newthread to NULL on error. */
275  if (newthread)
276  *newthread = ABT_THREAD_NULL;
277 #endif
278  ABTI_global *p_global;
279  ABTI_SETUP_GLOBAL(&p_global);
280  ABTI_local *p_local = ABTI_local_get_local();
281  ABTI_ythread *p_newthread;
282 
283  ABTI_xstream *p_xstream = ABTI_xstream_get_ptr(xstream);
284  ABTI_CHECK_NULL_XSTREAM_PTR(p_xstream);
285 
286  /* TODO: need to consider the access type of target pool */
287  ABTI_pool *p_pool = ABTI_xstream_get_main_pool(p_xstream);
288  ABTI_thread_type unit_type =
289  (newthread != NULL)
290  ? (ABTI_THREAD_TYPE_YIELDABLE | ABTI_THREAD_TYPE_NAMED)
291  : ABTI_THREAD_TYPE_YIELDABLE;
292  int abt_errno = ythread_create(p_global, p_local, p_pool, thread_func, arg,
293  ABTI_thread_attr_get_ptr(attr), unit_type,
294  NULL, THREAD_POOL_OP_PUSH, &p_newthread);
295  ABTI_CHECK_ERROR(abt_errno);
296 
297  /* Return value */
298  if (newthread)
299  *newthread = ABTI_ythread_get_handle(p_newthread);
300 
301  return ABT_SUCCESS;
302 }
303 
352 int ABT_thread_create_many(int num_threads, ABT_pool *pool_list,
353  void (**thread_func_list)(void *), void **arg_list,
354  ABT_thread_attr attr, ABT_thread *newthread_list)
355 {
356  ABTI_global *p_global;
357  ABTI_SETUP_GLOBAL(&p_global);
358  ABTI_local *p_local = ABTI_local_get_local();
359  int i;
360 
361  if (attr != ABT_THREAD_ATTR_NULL) {
362  /* This implies that the stack is given by a user. Since threads
363  * cannot use the same stack region, this is illegal. */
364  ABTI_CHECK_TRUE(ABTI_thread_attr_get_ptr(attr)->p_stack == NULL,
365  ABT_ERR_INV_THREAD_ATTR);
366  }
367 
368  if (newthread_list == NULL) {
369  for (i = 0; i < num_threads; i++) {
370  ABTI_ythread *p_newthread;
371  ABT_pool pool = pool_list[i];
372  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
373  ABTI_CHECK_NULL_POOL_PTR(p_pool);
374 
375  void (*thread_f)(void *) = thread_func_list[i];
376  void *arg = arg_list ? arg_list[i] : NULL;
377  int abt_errno = ythread_create(p_global, p_local, p_pool, thread_f,
378  arg, ABTI_thread_attr_get_ptr(attr),
379  ABTI_THREAD_TYPE_YIELDABLE, NULL,
380  THREAD_POOL_OP_PUSH, &p_newthread);
381  ABTI_CHECK_ERROR(abt_errno);
382  }
383  } else {
384  for (i = 0; i < num_threads; i++) {
385  ABTI_ythread *p_newthread;
386  ABT_pool pool = pool_list[i];
387  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
388  ABTI_CHECK_NULL_POOL_PTR(p_pool);
389 
390  void (*thread_f)(void *) = thread_func_list[i];
391  void *arg = arg_list ? arg_list[i] : NULL;
392  int abt_errno =
393  ythread_create(p_global, p_local, p_pool, thread_f, arg,
394  ABTI_thread_attr_get_ptr(attr),
395  ABTI_THREAD_TYPE_YIELDABLE |
396  ABTI_THREAD_TYPE_NAMED,
397  NULL, THREAD_POOL_OP_PUSH, &p_newthread);
398  newthread_list[i] = ABTI_ythread_get_handle(p_newthread);
399  /* TODO: Release threads that have been already created. */
400  ABTI_CHECK_ERROR(abt_errno);
401  }
402  }
403 
404  return ABT_SUCCESS;
405 }
406 
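A batch-creation sketch (illustrative, not from this file; pool, work, and NUM_ULTS are assumptions). Because newthread_list is non-NULL, the ULTs are named and must be released with ABT_thread_free_many().

#include <abt.h>

#define NUM_ULTS 4

static void work(void *arg) { (void)arg; }

static void spawn_batch(ABT_pool pool)
{
    ABT_pool pools[NUM_ULTS];
    void (*funcs[NUM_ULTS])(void *);
    void *args[NUM_ULTS];
    ABT_thread threads[NUM_ULTS];
    int i;
    for (i = 0; i < NUM_ULTS; i++) {
        pools[i] = pool;  /* one target pool per ULT */
        funcs[i] = work;
        args[i] = NULL;
    }
    ABT_thread_create_many(NUM_ULTS, pools, funcs, args, ABT_THREAD_ATTR_NULL,
                           threads);
    ABT_thread_free_many(NUM_ULTS, threads);
}
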
456 int ABT_thread_revive(ABT_pool pool, void (*thread_func)(void *), void *arg,
457  ABT_thread *thread)
458 {
459  ABTI_UB_ASSERT(ABTI_initialized());
460  ABTI_UB_ASSERT(thread_func);
461  ABTI_UB_ASSERT(thread);
462 
463  ABTI_global *p_global = ABTI_global_get_global();
464  ABTI_local *p_local = ABTI_local_get_local();
465 
466  ABTI_thread *p_thread = ABTI_thread_get_ptr(*thread);
467  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
468 
469  ABTI_CHECK_TRUE(ABTD_atomic_relaxed_load_int(&p_thread->state) ==
470  ABT_THREAD_STATE_TERMINATED,
471  ABT_ERR_INV_THREAD);
472 
473  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
474  ABTI_CHECK_NULL_POOL_PTR(p_pool);
475 
476  int abt_errno = thread_revive(p_global, p_local, p_pool, thread_func, arg,
477  THREAD_POOL_OP_PUSH, p_thread);
478  ABTI_CHECK_ERROR(abt_errno);
479  return ABT_SUCCESS;
480 }
481 
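An illustrative reuse pattern (assumptions: pool is a valid ABT_pool and the caller may join ULTs): a terminated ULT's handle is recycled with ABT_thread_revive() instead of freeing and re-creating it.

#include <abt.h>

static void step(void *arg) { (void)arg; }

static void run_twice(ABT_pool pool)
{
    ABT_thread thread;
    ABT_thread_create(pool, step, NULL, ABT_THREAD_ATTR_NULL, &thread);
    ABT_thread_join(thread);                      /* now TERMINATED */
    ABT_thread_revive(pool, step, NULL, &thread); /* reuse the same handle */
    ABT_thread_free(&thread);
}
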
533 int ABT_thread_revive_to(ABT_pool pool, void (*thread_func)(void *), void *arg,
534  ABT_thread *thread)
535 {
536  ABTI_UB_ASSERT(ABTI_initialized());
537  ABTI_UB_ASSERT(thread_func);
538  ABTI_UB_ASSERT(thread);
539 
540  ABTI_global *p_global = ABTI_global_get_global();
541  ABTI_xstream *p_local_xstream;
542  ABTI_ythread *p_self, *p_target;
543  ABTI_SETUP_LOCAL_YTHREAD(&p_local_xstream, &p_self);
544  ABTI_CHECK_TRUE(!(p_self->thread.type & ABTI_THREAD_TYPE_MAIN_SCHED),
545  ABT_ERR_INV_THREAD);
546  {
547  ABTI_thread *p_thread = ABTI_thread_get_ptr(*thread);
548  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
549  ABTI_CHECK_TRUE(ABTD_atomic_relaxed_load_int(&p_thread->state) ==
550  ABT_THREAD_STATE_TERMINATED,
551  ABT_ERR_INV_THREAD);
552  ABTI_CHECK_YIELDABLE(p_thread, &p_target, ABT_ERR_INV_THREAD);
553  }
554 
555  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
556  ABTI_CHECK_NULL_POOL_PTR(p_pool);
557 
558  int abt_errno =
559  thread_revive(p_global, ABTI_xstream_get_local(p_local_xstream), p_pool,
560  thread_func, arg, THREAD_POOL_OP_INIT, &p_target->thread);
561  ABTI_CHECK_ERROR(abt_errno);
562 
563  /* Yield to the target ULT. */
564  ABTI_ythread_yield_to(&p_local_xstream, p_self, p_target,
565  ABTI_YTHREAD_YIELD_TO_KIND_REVIVE_TO,
566  ABT_SYNC_EVENT_TYPE_USER, NULL);
567  return ABT_SUCCESS;
568 }
569 
610 int ABT_thread_free(ABT_thread *thread)
611 {
612  ABTI_UB_ASSERT(ABTI_initialized());
613  ABTI_UB_ASSERT(thread);
614 
615  ABTI_global *p_global;
616  ABTI_SETUP_GLOBAL(&p_global);
617  ABTI_local *p_local = ABTI_local_get_local();
618  ABT_thread h_thread = *thread;
619 
620  ABTI_thread *p_thread = ABTI_thread_get_ptr(h_thread);
621  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
622  ABTI_CHECK_TRUE(!ABTI_local_get_xstream_or_null(p_local) ||
623  p_thread != ABTI_local_get_xstream(p_local)->p_thread,
624  ABT_ERR_INV_THREAD);
625  ABTI_CHECK_TRUE(!(p_thread->type &
626  (ABTI_THREAD_TYPE_PRIMARY | ABTI_THREAD_TYPE_MAIN_SCHED)),
627  ABT_ERR_INV_THREAD);
628 
629  /* Wait until the thread terminates */
630  thread_join(&p_local, p_thread);
631  /* Free the ABTI_thread structure */
632  ABTI_thread_free(p_global, p_local, p_thread);
633 
634  /* Return value */
635  *thread = ABT_THREAD_NULL;
636 
637  return ABT_SUCCESS;
638 }
639 
673 int ABT_thread_free_many(int num_threads, ABT_thread *thread_list)
674 {
675  ABTI_global *p_global;
676  ABTI_SETUP_GLOBAL(&p_global);
677  ABTI_local *p_local = ABTI_local_get_local();
678  int i;
679 
680  for (i = 0; i < num_threads; i++) {
681  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread_list[i]);
682  thread_list[i] = ABT_THREAD_NULL;
683  if (!p_thread)
684  continue;
685  /* TODO: check input */
686  thread_join(&p_local, p_thread);
687  ABTI_thread_free(p_global, p_local, p_thread);
688  }
689  return ABT_SUCCESS;
690 }
691 
731 int ABT_thread_join(ABT_thread thread)
732 {
733  ABTI_UB_ASSERT(ABTI_initialized());
734 
735  ABTI_local *p_local = ABTI_local_get_local();
736  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
737  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
738  ABTI_CHECK_TRUE(!ABTI_local_get_xstream_or_null(p_local) ||
739  p_thread != ABTI_local_get_xstream(p_local)->p_thread,
740  ABT_ERR_INV_THREAD);
741  ABTI_CHECK_TRUE(!(p_thread->type &
742  (ABTI_THREAD_TYPE_PRIMARY | ABTI_THREAD_TYPE_MAIN_SCHED)),
743  ABT_ERR_INV_THREAD);
744 
745  thread_join(&p_local, p_thread);
746  return ABT_SUCCESS;
747 }
748 
780 int ABT_thread_join_many(int num_threads, ABT_thread *thread_list)
781 {
782  ABTI_local *p_local = ABTI_local_get_local();
783  int i;
784  for (i = 0; i < num_threads; i++) {
785  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread_list[i]);
786  if (!p_thread)
787  continue;
788  /* TODO: check input */
789  thread_join(&p_local, p_thread);
790  }
791  return ABT_SUCCESS;
792 }
793 
822 int ABT_thread_exit(void)
823 {
824  ABTI_xstream *p_local_xstream;
825  ABTI_ythread *p_ythread;
826 #ifndef ABT_CONFIG_ENABLE_VER_20_API
827  ABTI_SETUP_GLOBAL(NULL);
828 #else
829  ABTI_UB_ASSERT(ABTI_initialized());
830 #endif
831  ABTI_SETUP_LOCAL_YTHREAD(&p_local_xstream, &p_ythread);
832  ABTI_CHECK_TRUE(!(p_ythread->thread.type & ABTI_THREAD_TYPE_PRIMARY),
833  ABT_ERR_INV_THREAD);
834 
835  ABTI_ythread_exit(p_local_xstream, p_ythread);
836  return ABT_SUCCESS;
837 }
838 
871 int ABT_thread_cancel(ABT_thread thread)
872 {
873  ABTI_UB_ASSERT(ABTI_initialized());
874 
875 #ifdef ABT_CONFIG_DISABLE_CANCELLATION
876  ABTI_HANDLE_ERROR(ABT_ERR_FEATURE_NA);
877 #else
878  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
879  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
880  ABTI_CHECK_TRUE(!(p_thread->type & ABTI_THREAD_TYPE_PRIMARY),
881  ABT_ERR_INV_THREAD);
882 
883  /* Set the cancel request */
884  ABTI_thread_set_request(p_thread, ABTI_THREAD_REQ_CANCEL);
885  return ABT_SUCCESS;
886 #endif
887 }
888 
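An illustrative cancellation sketch (assuming cancellation support is compiled in, i.e., ABT_CONFIG_DISABLE_CANCELLATION is not set): ABT_thread_cancel() only posts a request, so the ULT is actually canceled the next time the runtime handles it, and the caller still frees the handle. The helper names are hypothetical.

#include <abt.h>

static void long_task(void *arg)
{
    (void)arg;
    for (;;)
        ABT_thread_yield(); /* gives the runtime a chance to act on requests */
}

static void cancel_ult(ABT_pool pool)
{
    ABT_thread thread;
    ABT_thread_create(pool, long_task, NULL, ABT_THREAD_ATTR_NULL, &thread);
    ABT_thread_cancel(thread); /* sets the cancel request only */
    ABT_thread_free(&thread);  /* waits until the cancellation completes */
}
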
924 int ABT_thread_self(ABT_thread *thread)
925 {
926  ABTI_UB_ASSERT(thread);
927 
928 #ifndef ABT_CONFIG_ENABLE_VER_20_API
929  *thread = ABT_THREAD_NULL;
930  ABTI_SETUP_GLOBAL(NULL);
931  ABTI_ythread *p_self;
932  ABTI_SETUP_LOCAL_YTHREAD(NULL, &p_self);
933  *thread = ABTI_thread_get_handle(&p_self->thread);
934 #else
935  ABTI_UB_ASSERT(ABTI_initialized());
936  ABTI_xstream *p_local_xstream;
937  ABTI_SETUP_LOCAL_XSTREAM(&p_local_xstream);
938  *thread = ABTI_thread_get_handle(p_local_xstream->p_thread);
939 #endif
940  return ABT_SUCCESS;
941 }
942 
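Inside a ULT body, the calling work unit can query its own handle and ID. A minimal sketch (illustrative; the function name who_am_i is hypothetical):

#include <abt.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void who_am_i(void *arg)
{
    ABT_thread self;
    ABT_unit_id id;
    (void)arg;
    ABT_thread_self(&self);  /* handle of the calling ULT */
    ABT_thread_self_id(&id); /* its ID */
    printf("running ULT %" PRIu64 "\n", (uint64_t)id);
}
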
976 int ABT_thread_self_id(ABT_unit_id *id)
977 {
978  ABTI_UB_ASSERT(id);
979 
980 #ifndef ABT_CONFIG_ENABLE_VER_20_API
981  ABTI_SETUP_GLOBAL(NULL);
982  ABTI_ythread *p_self;
983  ABTI_SETUP_LOCAL_YTHREAD(NULL, &p_self);
984  *id = ABTI_thread_get_id(&p_self->thread);
985 #else
986  ABTI_UB_ASSERT(ABTI_initialized());
987  ABTI_xstream *p_local_xstream;
988  ABTI_SETUP_LOCAL_XSTREAM(&p_local_xstream);
989  *id = ABTI_thread_get_id(p_local_xstream->p_thread);
990 #endif
991  return ABT_SUCCESS;
992 }
993 
1022 int ABT_thread_get_last_xstream(ABT_thread thread, ABT_xstream *xstream)
1023 {
1024  ABTI_UB_ASSERT(ABTI_initialized());
1025  ABTI_UB_ASSERT(xstream);
1026 
1027  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1028  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1029 
1030  *xstream = ABTI_xstream_get_handle(p_thread->p_last_xstream);
1031  return ABT_SUCCESS;
1032 }
1033 
1066 int ABT_thread_get_state(ABT_thread thread, ABT_thread_state *state)
1067 {
1068  ABTI_UB_ASSERT(ABTI_initialized());
1069  ABTI_UB_ASSERT(state);
1070 
1071  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1072  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1073 
1074  *state = (ABT_thread_state)ABTD_atomic_acquire_load_int(&p_thread->state);
1075  return ABT_SUCCESS;
1076 }
1077 
1109 int ABT_thread_get_last_pool(ABT_thread thread, ABT_pool *pool)
1110 {
1111  ABTI_UB_ASSERT(ABTI_initialized());
1112  ABTI_UB_ASSERT(pool);
1113 
1114  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1115  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1116 
1117  *pool = ABTI_pool_get_handle(p_thread->p_pool);
1118  return ABT_SUCCESS;
1119 }
1120 
1152 int ABT_thread_get_last_pool_id(ABT_thread thread, int *id)
1153 {
1154  ABTI_UB_ASSERT(ABTI_initialized());
1155  ABTI_UB_ASSERT(id);
1156 
1157  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1158  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1159  *id = (int)p_thread->p_pool->id;
1160  return ABT_SUCCESS;
1161 }
1162 
1185 int ABT_thread_get_unit(ABT_thread thread, ABT_unit *unit)
1186 {
1187  ABTI_UB_ASSERT(ABTI_initialized());
1188  ABTI_UB_ASSERT(unit);
1189 
1190  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1191  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1192  *unit = p_thread->unit;
1193  return ABT_SUCCESS;
1194 }
1195 
1229 int ABT_thread_set_associated_pool(ABT_thread thread, ABT_pool pool)
1230 {
1231  ABTI_UB_ASSERT(ABTI_initialized());
1232 
1233  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1234  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1235  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
1236  ABTI_CHECK_NULL_POOL_PTR(p_pool);
1237  ABTI_global *p_global = ABTI_global_get_global();
1238 
1239  int abt_errno = ABTI_thread_set_associated_pool(p_global, p_thread, p_pool);
1240  ABTI_CHECK_ERROR(abt_errno);
1241  return ABT_SUCCESS;
1242 }
1243 
1279 int ABT_thread_yield_to(ABT_thread thread)
1280 {
1281  ABTI_UB_ASSERT(ABTI_initialized());
1282 
1283  ABTI_xstream *p_local_xstream;
1284  ABTI_ythread *p_cur_ythread;
1285  p_local_xstream = ABTI_local_get_xstream_or_null(ABTI_local_get_local());
1286  if (ABTI_IS_EXT_THREAD_ENABLED && p_local_xstream == NULL) {
1287  return ABT_SUCCESS;
1288  } else {
1289  p_cur_ythread =
1290  ABTI_thread_get_ythread_or_null(p_local_xstream->p_thread);
1291  if (!p_cur_ythread)
1292  return ABT_SUCCESS;
1293  }
1294 
1295  ABTI_thread *p_tar_thread = ABTI_thread_get_ptr(thread);
1296  ABTI_CHECK_NULL_THREAD_PTR(p_tar_thread);
1297  ABTI_ythread *p_tar_ythread = ABTI_thread_get_ythread_or_null(p_tar_thread);
1298  ABTI_CHECK_NULL_YTHREAD_PTR(p_tar_ythread);
1299  ABTI_CHECK_TRUE(p_cur_ythread != p_tar_ythread, ABT_ERR_INV_THREAD);
1300  ABTI_CHECK_TRUE(!(p_cur_ythread->thread.type & ABTI_THREAD_TYPE_MAIN_SCHED),
1301  ABT_ERR_INV_THREAD);
1302  ABTI_CHECK_TRUE(p_tar_ythread->thread.p_pool->deprecated_def.u_is_in_pool,
1303  ABT_ERR_POOL);
1304  ABTI_CHECK_TRUE(p_tar_ythread->thread.p_pool->deprecated_def.p_remove,
1305  ABT_ERR_POOL);
1306 
1307  /* If the target thread is not in READY, we don't yield. Note that a ULT
1308  * can be regarded as 'ready' only if its state is READY and it has been
1309  * pushed into a pool. Since we set a ULT's state to READY and then push it
1310  * into a pool, we check them in the reverse order, i.e., check whether the
1311  * ULT is inside a pool and then check its state. */
1312  if (!(p_tar_ythread->thread.p_pool->deprecated_def.u_is_in_pool(
1313  p_tar_ythread->thread.unit) == ABT_TRUE &&
1314  ABTD_atomic_acquire_load_int(&p_tar_ythread->thread.state) ==
1315  ABT_THREAD_STATE_READY)) {
1316  /* This is undefined behavior. */
1317  return ABT_SUCCESS;
1318  }
1319 
1320  /* Remove the target ULT from the pool */
1321  /* This is necessary to prevent the size of this pool from becoming 0. */
1322  ABTI_pool_inc_num_blocked(p_cur_ythread->thread.p_pool);
1323  int abt_errno = ABTI_pool_remove(p_tar_ythread->thread.p_pool,
1324  p_tar_ythread->thread.unit);
1325  if (ABTI_IS_ERROR_CHECK_ENABLED && abt_errno != ABT_SUCCESS) {
1326  ABTI_pool_dec_num_blocked(p_cur_ythread->thread.p_pool);
1327  ABTI_HANDLE_ERROR(abt_errno);
1328  }
1329 
1330  /* We set the last ES */
1331  p_tar_ythread->thread.p_last_xstream = p_local_xstream;
1332 
1333  /* Switch the context */
1334  ABTI_ythread_thread_yield_to(&p_local_xstream, p_cur_ythread, p_tar_ythread,
1335  ABT_SYNC_EVENT_TYPE_USER, NULL);
1336  return ABT_SUCCESS;
1337 }
1338 
1370 int ABT_thread_yield(void)
1371 {
1372  ABTI_UB_ASSERT(ABTI_initialized());
1373 
1374  ABTI_xstream *p_local_xstream;
1375  ABTI_ythread *p_ythread;
1376 #ifndef ABT_CONFIG_ENABLE_VER_20_API
1377  p_local_xstream = ABTI_local_get_xstream_or_null(ABTI_local_get_local());
1378  if (ABTI_IS_EXT_THREAD_ENABLED && ABTU_unlikely(p_local_xstream == NULL)) {
1379  return ABT_SUCCESS;
1380  } else {
1381  p_ythread = ABTI_thread_get_ythread_or_null(p_local_xstream->p_thread);
1382  if (ABTU_unlikely(!p_ythread)) {
1383  return ABT_SUCCESS;
1384  }
1385  }
1386 #else
1387  ABTI_SETUP_LOCAL_YTHREAD(&p_local_xstream, &p_ythread);
1388 #endif
1389 
1390  ABTI_ythread_yield(&p_local_xstream, p_ythread,
1391  ABTI_YTHREAD_YIELD_KIND_USER, ABT_SYNC_EVENT_TYPE_USER,
1392  NULL);
1393  return ABT_SUCCESS;
1394 }
1395 
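A cooperative-scheduling sketch (illustrative): a long-running ULT yields periodically so that other ULTs mapped to the same execution stream can make progress.

#include <abt.h>

static void cooperative_loop(void *arg)
{
    int i, n = *(int *)arg;
    for (i = 0; i < n; i++) {
        /* ... one slice of work ... */
        ABT_thread_yield(); /* let the scheduler pick another ULT */
    }
}
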
1425 int ABT_thread_resume(ABT_thread thread)
1426 {
1427  ABTI_UB_ASSERT(ABTI_initialized());
1428 
1429  ABTI_local *p_local = ABTI_local_get_local();
1430 
1431  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1432  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1433  ABTI_ythread *p_ythread;
1434  ABTI_CHECK_YIELDABLE(p_thread, &p_ythread, ABT_ERR_INV_THREAD);
1435 
1436 #ifndef ABT_CONFIG_ENABLE_VER_20_API
1437  /* The ULT must be in BLOCKED state. */
1438  ABTI_CHECK_TRUE(ABTD_atomic_acquire_load_int(&p_ythread->thread.state) ==
1439  ABT_THREAD_STATE_BLOCKED,
1440  ABT_ERR_THREAD);
1441 #else
1442  ABTI_UB_ASSERT(ABTD_atomic_acquire_load_int(&p_ythread->thread.state) ==
1443  ABT_THREAD_STATE_BLOCKED);
1444 #endif
1445 
1446  ABTI_ythread_resume_and_push(p_local, p_ythread);
1447  return ABT_SUCCESS;
1448 }
1449 
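A block/unblock sketch (illustrative; it assumes ABT_self_suspend() from the self API and that the handle of the suspended ULT is available to the resumer): the ULT suspends itself into the BLOCKED state and is later pushed back to its pool by ABT_thread_resume().

#include <abt.h>

static void waiter(void *arg)
{
    (void)arg;
    ABT_self_suspend(); /* the calling ULT becomes BLOCKED */
    /* resumed here once another thread calls ABT_thread_resume() */
}

static void wake_up(ABT_thread waiter_thread)
{
    ABT_thread_resume(waiter_thread);
}
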
1501 int ABT_thread_migrate_to_xstream(ABT_thread thread, ABT_xstream xstream)
1502 {
1503  ABTI_UB_ASSERT(ABTI_initialized());
1504 
1505 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1506  ABTI_global *p_global;
1507  ABTI_SETUP_GLOBAL(&p_global);
1508  ABTI_local *p_local = ABTI_local_get_local();
1509 
1510  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1511  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1512  ABTI_xstream *p_xstream = ABTI_xstream_get_ptr(xstream);
1513  ABTI_CHECK_NULL_XSTREAM_PTR(p_xstream);
1514  ABTI_CHECK_TRUE(p_thread->type & ABTI_THREAD_TYPE_MIGRATABLE,
1515  ABT_ERR_INV_THREAD);
1516  ABTI_CHECK_TRUE(!(p_thread->type & ABTI_THREAD_TYPE_MAIN_SCHED),
1517  ABT_ERR_INV_THREAD);
1518  /* Check if a thread is associated with a pool of the main scheduler. */
1519  ABTI_sched *p_sched = p_xstream->p_main_sched;
1520  if (ABTI_IS_ERROR_CHECK_ENABLED) {
1521  size_t p;
1522  for (p = 0; p < p_sched->num_pools; p++)
1523  ABTI_CHECK_TRUE(ABTI_pool_get_ptr(p_sched->pools[p]) !=
1524  p_thread->p_pool,
1525  ABT_ERR_MIGRATION_TARGET);
1526  }
1527  /* Get the target pool. */
1528  ABTI_pool *p_pool = NULL;
1529  int abt_errno;
1530  abt_errno =
1531  ABTI_sched_get_migration_pool(p_sched, p_thread->p_pool, &p_pool);
1532  ABTI_CHECK_ERROR(abt_errno);
1533  /* Request a migration. */
1534  abt_errno = thread_migrate_to_pool(p_global, p_local, p_thread, p_pool);
1535  ABTI_CHECK_ERROR(abt_errno);
1536  return ABT_SUCCESS;
1537 #else
1538  ABTI_HANDLE_ERROR(ABT_ERR_MIGRATION_NA);
1539 #endif
1540 }
1541 
1589 int ABT_thread_migrate_to_sched(ABT_thread thread, ABT_sched sched)
1590 {
1591  ABTI_UB_ASSERT(ABTI_initialized());
1592 
1593 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1594  ABTI_global *p_global;
1595  ABTI_SETUP_GLOBAL(&p_global);
1596  ABTI_local *p_local = ABTI_local_get_local();
1597 
1598  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1599  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1600  ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
1601  ABTI_CHECK_NULL_SCHED_PTR(p_sched);
1602  ABTI_CHECK_TRUE(p_thread->type & ABTI_THREAD_TYPE_MIGRATABLE,
1603  ABT_ERR_INV_THREAD);
1604  ABTI_CHECK_TRUE(!(p_thread->type & ABTI_THREAD_TYPE_MAIN_SCHED),
1605  ABT_ERR_INV_THREAD);
1606  /* Check if a thread is associated with a pool of the main scheduler. */
1607  if (ABTI_IS_ERROR_CHECK_ENABLED) {
1608  size_t p;
1609  for (p = 0; p < p_sched->num_pools; p++)
1610  ABTI_CHECK_TRUE(ABTI_pool_get_ptr(p_sched->pools[p]) !=
1611  p_thread->p_pool,
1612  ABT_ERR_MIGRATION_TARGET);
1613  }
1614  /* Get the target pool. */
1615  ABTI_pool *p_pool;
1616  int abt_errno;
1617  abt_errno =
1618  ABTI_sched_get_migration_pool(p_sched, p_thread->p_pool, &p_pool);
1619  ABTI_CHECK_ERROR(abt_errno);
1620  /* Request a migration. */
1621  abt_errno = thread_migrate_to_pool(p_global, p_local, p_thread, p_pool);
1622  ABTI_CHECK_ERROR(abt_errno);
1623  return ABT_SUCCESS;
1624 #else
1625  ABTI_HANDLE_ERROR(ABT_ERR_MIGRATION_NA);
1626 #endif
1627 }
1628 
1674 int ABT_thread_migrate_to_pool(ABT_thread thread, ABT_pool pool)
1675 {
1676  ABTI_UB_ASSERT(ABTI_initialized());
1677 
1678 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1679  ABTI_global *p_global;
1680  ABTI_SETUP_GLOBAL(&p_global);
1681  ABTI_local *p_local = ABTI_local_get_local();
1682 
1683  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1684  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1685  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
1686  ABTI_CHECK_NULL_POOL_PTR(p_pool);
1687  ABTI_CHECK_TRUE(p_thread->type & ABTI_THREAD_TYPE_MIGRATABLE,
1688  ABT_ERR_INV_THREAD);
1689  ABTI_CHECK_TRUE(!(p_thread->type & ABTI_THREAD_TYPE_MAIN_SCHED),
1690  ABT_ERR_INV_THREAD);
1691  ABTI_CHECK_TRUE(p_thread->p_pool != p_pool, ABT_ERR_MIGRATION_TARGET);
1692  /* Request a migration. */
1693  int abt_errno = thread_migrate_to_pool(p_global, p_local, p_thread, p_pool);
1694  ABTI_CHECK_ERROR(abt_errno);
1695  return ABT_SUCCESS;
1696 #else
1697  ABTI_HANDLE_ERROR(ABT_ERR_MIGRATION_NA);
1698 #endif
1699 }
1700 
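A migration sketch (illustrative; move_ult is a hypothetical helper): mark a ULT migratable and request its move to another pool. The request is asynchronous and is served the next time the target ULT is handled by the runtime.

#include <abt.h>

static void move_ult(ABT_thread thread, ABT_pool target_pool)
{
    ABT_thread_set_migratable(thread, ABT_TRUE);
    ABT_thread_migrate_to_pool(thread, target_pool);
}
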
1753 int ABT_thread_migrate(ABT_thread thread)
1754 {
1755  ABTI_UB_ASSERT(ABTI_initialized());
1756 
1757 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1758  /* TODO: fix the bug(s) */
1759  ABTI_global *p_global;
1760  ABTI_SETUP_GLOBAL(&p_global);
1761 
1762  ABTI_local *p_local = ABTI_local_get_local();
1763  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1764  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1765  ABTI_CHECK_TRUE(p_thread->type & ABTI_THREAD_TYPE_MIGRATABLE,
1766  ABT_ERR_INV_THREAD);
1767  ABTI_CHECK_TRUE(!(p_thread->type & ABTI_THREAD_TYPE_MAIN_SCHED),
1768  ABT_ERR_INV_THREAD);
1769 
1770  /* Copy the target execution streams. */
1771  int i, num_xstreams, abt_errno;
1772  ABTI_xstream **xstreams;
1773  ABTD_spinlock_acquire(&p_global->xstream_list_lock);
1774  num_xstreams = p_global->num_xstreams;
1775  abt_errno =
1776  ABTU_malloc(sizeof(ABTI_xstream *) * num_xstreams, (void **)&xstreams);
1777  if (!(ABTI_IS_ERROR_CHECK_ENABLED && abt_errno != ABT_SUCCESS)) {
1778  ABTI_xstream *p_xstream = p_global->p_xstream_head;
1779  i = 0;
1780  while (p_xstream) {
1781  xstreams[i++] = p_xstream;
1782  p_xstream = p_xstream->p_next;
1783  }
1784  }
1785  ABTD_spinlock_release(&p_global->xstream_list_lock);
1786  ABTI_CHECK_ERROR(abt_errno);
1787 
1788  /* Choose the destination xstream. The user needs to maintain all the pools
1789  * and execution streams alive. */
1790  for (i = 0; i < num_xstreams; i++) {
1791  ABTI_xstream *p_xstream = xstreams[i];
1792  if (p_xstream == p_thread->p_last_xstream)
1793  continue;
1794  if (ABTD_atomic_acquire_load_int(&p_xstream->state) !=
1795  ABT_XSTREAM_STATE_RUNNING)
1796  continue;
1797  /* Check if a thread is associated with a pool of the main scheduler. */
1798  ABTI_sched *p_sched = p_xstream->p_main_sched;
1799  ABT_bool is_valid = ABT_TRUE;
1800  size_t p;
1801  for (p = 0; p < p_sched->num_pools; p++) {
1802  if (ABTI_pool_get_ptr(p_sched->pools[p]) != p_thread->p_pool) {
1803  is_valid = ABT_FALSE;
1804  break;
1805  }
1806  }
1807  if (!is_valid)
1808  continue;
1809  /* Get the target pool. */
1810  ABTI_pool *p_pool = NULL;
1811  abt_errno =
1812  ABTI_sched_get_migration_pool(p_sched, p_thread->p_pool, &p_pool);
1813  if (abt_errno != ABT_SUCCESS)
1814  continue;
1815  /* Request a migration. */
1816  abt_errno = thread_migrate_to_pool(p_global, p_local, p_thread, p_pool);
1817  if (abt_errno != ABT_SUCCESS)
1818  continue;
1819  /* Succeeds. Return. */
1820  ABTU_free(xstreams);
1821  return ABT_SUCCESS;
1822  }
1823  /* All attempts failed. */
1824  ABTU_free(xstreams);
1825  return ABT_ERR_MIGRATION_NA;
1826 #else
1827  ABTI_HANDLE_ERROR(ABT_ERR_MIGRATION_NA);
1828 #endif
1829 }
1830 
1867 int ABT_thread_set_callback(ABT_thread thread,
1868  void (*cb_func)(ABT_thread thread, void *cb_arg),
1869  void *cb_arg)
1869  void *cb_arg)
1870 {
1871  ABTI_UB_ASSERT(ABTI_initialized());
1872 
1873 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1874  ABTI_global *p_global;
1875  ABTI_SETUP_GLOBAL(&p_global);
1876 
1877  ABTI_local *p_local = ABTI_local_get_local();
1878  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1879  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1880 
1881  ABTI_thread_mig_data *p_mig_data;
1882  int abt_errno =
1883  ABTI_thread_get_mig_data(p_global, p_local, p_thread, &p_mig_data);
1884  ABTI_CHECK_ERROR(abt_errno);
1885 
1886  p_mig_data->f_migration_cb = cb_func;
1887  p_mig_data->p_migration_cb_arg = cb_arg;
1888  return ABT_SUCCESS;
1889 #else
1890  ABTI_HANDLE_ERROR(ABT_ERR_FEATURE_NA);
1891 #endif
1892 }
1893 
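A migration-callback sketch (illustrative; on_migration and watch_migration are hypothetical names): the registered function is invoked with the ULT handle and the user argument each time a migration request is served for that ULT.

#include <abt.h>
#include <stdio.h>

static void on_migration(ABT_thread thread, void *cb_arg)
{
    (void)thread;
    printf("migrated: %s\n", (const char *)cb_arg);
}

static void watch_migration(ABT_thread thread)
{
    ABT_thread_set_callback(thread, on_migration, (void *)"worker");
}
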
1931 int ABT_thread_set_migratable(ABT_thread thread, ABT_bool migratable)
1932 {
1933  ABTI_UB_ASSERT(ABTI_initialized());
1934  ABTI_UB_ASSERT_BOOL(migratable);
1935 
1936 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1937  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1938  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1939 
1940 #ifndef ABT_CONFIG_ENABLE_VER_20_API
1941  if (p_thread->type &
1942  (ABTI_THREAD_TYPE_PRIMARY | ABTI_THREAD_TYPE_MAIN_SCHED))
1943  return ABT_SUCCESS;
1944 #else
1945  ABTI_CHECK_TRUE(!(p_thread->type &
1946  (ABTI_THREAD_TYPE_PRIMARY | ABTI_THREAD_TYPE_MAIN_SCHED)),
1947  ABT_ERR_INV_THREAD);
1948 #endif
1949 
1950  if (migratable) {
1951  p_thread->type |= ABTI_THREAD_TYPE_MIGRATABLE;
1952  } else {
1953  p_thread->type &= ~ABTI_THREAD_TYPE_MIGRATABLE;
1954  }
1955  return ABT_SUCCESS;
1956 #else
1957  ABTI_HANDLE_ERROR(ABT_ERR_FEATURE_NA);
1958 #endif
1959 }
1960 
1991 int ABT_thread_is_migratable(ABT_thread thread, ABT_bool *is_migratable)
1992 {
1993  ABTI_UB_ASSERT(ABTI_initialized());
1994  ABTI_UB_ASSERT(is_migratable);
1995 
1996 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1997  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1998  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1999 
2000  *is_migratable =
2001  (p_thread->type & ABTI_THREAD_TYPE_MIGRATABLE) ? ABT_TRUE : ABT_FALSE;
2002  return ABT_SUCCESS;
2003 #else
2004  ABTI_HANDLE_ERROR(ABT_ERR_FEATURE_NA);
2005 #endif
2006 }
2007 
2036 int ABT_thread_is_primary(ABT_thread thread, ABT_bool *is_primary)
2037 {
2038  ABTI_UB_ASSERT(ABTI_initialized());
2039  ABTI_UB_ASSERT(is_primary);
2040 
2041  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
2042  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
2043 
2044  *is_primary =
2045  (p_thread->type & ABTI_THREAD_TYPE_PRIMARY) ? ABT_TRUE : ABT_FALSE;
2046  return ABT_SUCCESS;
2047 }
2048 
2077 int ABT_thread_is_unnamed(ABT_thread thread, ABT_bool *is_unnamed)
2078 {
2079  ABTI_UB_ASSERT(ABTI_initialized());
2080  ABTI_UB_ASSERT(is_unnamed);
2081 
2082  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
2083  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
2084 
2085  *is_unnamed =
2086  (p_thread->type & ABTI_THREAD_TYPE_NAMED) ? ABT_FALSE : ABT_TRUE;
2087  return ABT_SUCCESS;
2088 }
2089 
2127 int ABT_thread_equal(ABT_thread thread1, ABT_thread thread2, ABT_bool *result)
2128 {
2129  ABTI_UB_ASSERT(result);
2130 
2131  ABTI_thread *p_thread1 = ABTI_thread_get_ptr(thread1);
2132  ABTI_thread *p_thread2 = ABTI_thread_get_ptr(thread2);
2133  *result = (p_thread1 == p_thread2) ? ABT_TRUE : ABT_FALSE;
2134  return ABT_SUCCESS;
2135 }
2136 
2165 int ABT_thread_get_stacksize(ABT_thread thread, size_t *stacksize)
2166 {
2167  ABTI_UB_ASSERT(ABTI_initialized());
2168  ABTI_UB_ASSERT(stacksize);
2169 
2170  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
2171  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
2172  ABTI_ythread *p_ythread = ABTI_thread_get_ythread_or_null(p_thread);
2173  if (p_ythread) {
2174  *stacksize = ABTD_ythread_context_get_stacksize(&p_ythread->ctx);
2175  } else {
2176  *stacksize = 0;
2177  }
2178  return ABT_SUCCESS;
2179 }
2180 
2207 int ABT_thread_get_id(ABT_thread thread, ABT_unit_id *thread_id)
2208 {
2209  ABTI_UB_ASSERT(ABTI_initialized());
2210  ABTI_UB_ASSERT(thread_id);
2211 
2212  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
2213  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
2214 
2215  *thread_id = ABTI_thread_get_id(p_thread);
2216  return ABT_SUCCESS;
2217 }
2218 
2245 int ABT_thread_set_arg(ABT_thread thread, void *arg)
2246 {
2247  ABTI_UB_ASSERT(ABTI_initialized());
2248 
2249  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
2250  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
2251 
2252  p_thread->p_arg = arg;
2253  return ABT_SUCCESS;
2254 }
2255 
2282 int ABT_thread_get_arg(ABT_thread thread, void **arg)
2283 {
2284  ABTI_UB_ASSERT(ABTI_initialized());
2285  ABTI_UB_ASSERT(arg);
2286 
2287  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
2288  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
2289 
2290  *arg = p_thread->p_arg;
2291  return ABT_SUCCESS;
2292 }
2293 
2316 int ABT_thread_get_thread_func(ABT_thread thread, void (**thread_func)(void *))
2317 {
2318  ABTI_UB_ASSERT(ABTI_initialized());
2319  ABTI_UB_ASSERT(thread_func);
2320 
2321  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
2322  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
2323 
2324  *thread_func = p_thread->f_thread;
2325  return ABT_SUCCESS;
2326 }
2327 
2354 int ABT_thread_set_specific(ABT_thread thread, ABT_key key, void *value)
2355 {
2356  ABTI_UB_ASSERT(ABTI_initialized());
2357 
2358  ABTI_global *p_global;
2359  ABTI_SETUP_GLOBAL(&p_global);
2360 
2361  ABTI_local *p_local = ABTI_local_get_local();
2362 
2363  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
2364  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
2365 
2366  ABTI_key *p_key = ABTI_key_get_ptr(key);
2367  ABTI_CHECK_NULL_KEY_PTR(p_key);
2368 
2369  /* Set the value. */
2370  int abt_errno =
2371  ABTI_ktable_set(p_global, p_local, &p_thread->p_keytable, p_key, value);
2372  ABTI_CHECK_ERROR(abt_errno);
2373  return ABT_SUCCESS;
2374 }
2375 
2405 int ABT_thread_get_specific(ABT_thread thread, ABT_key key, void **value)
2406 {
2407  ABTI_UB_ASSERT(ABTI_initialized());
2408  ABTI_UB_ASSERT(value);
2409 
2410  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
2411  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
2412 
2413  ABTI_key *p_key = ABTI_key_get_ptr(key);
2414  ABTI_CHECK_NULL_KEY_PTR(p_key);
2415 
2416  /* Get the value. */
2417  *value = ABTI_ktable_get(&p_thread->p_keytable, p_key);
2418  return ABT_SUCCESS;
2419 }
2420 
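A ULT-local storage sketch (illustrative; free_value and attach_value are hypothetical): a key created with ABT_key_create() associates a value with a specific ULT, and the key's destructor runs when the ULT's key-value table is freed.

#include <abt.h>
#include <stdlib.h>

static void free_value(void *value) { free(value); }

static void attach_value(ABT_thread thread)
{
    ABT_key key;
    void *stored = NULL;
    ABT_key_create(free_value, &key);
    ABT_thread_set_specific(thread, key, malloc(16));
    ABT_thread_get_specific(thread, key, &stored);
}
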
2453 int ABT_thread_get_attr(ABT_thread thread, ABT_thread_attr *attr)
2454 {
2455  ABTI_UB_ASSERT(ABTI_initialized());
2456  ABTI_UB_ASSERT(attr);
2457 
2458  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
2459  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
2460 
2461  ABTI_thread_attr thread_attr, *p_attr;
2462  ABTI_ythread *p_ythread = ABTI_thread_get_ythread_or_null(p_thread);
2463 #ifndef ABT_CONFIG_ENABLE_VER_20_API
2464  ABTI_CHECK_TRUE(p_ythread, ABT_ERR_INV_THREAD);
2465 #endif
2466 
2467  if (p_ythread) {
2468  void *p_stacktop = ABTD_ythread_context_get_stacktop(&p_ythread->ctx);
2469  size_t stacksize = ABTD_ythread_context_get_stacksize(&p_ythread->ctx);
2470  if (p_stacktop) {
2471  thread_attr.p_stack = (void *)(((char *)p_stacktop) - stacksize);
2472  } else {
2473  thread_attr.p_stack = NULL;
2474  }
2475  thread_attr.stacksize = stacksize;
2476  } else {
2477  thread_attr.p_stack = NULL;
2478  thread_attr.stacksize = 0;
2479  }
2480 #ifndef ABT_CONFIG_DISABLE_MIGRATION
2481  thread_attr.migratable =
2482  (p_thread->type & ABTI_THREAD_TYPE_MIGRATABLE) ? ABT_TRUE : ABT_FALSE;
2483  ABTI_thread_mig_data *p_mig_data =
2484  (ABTI_thread_mig_data *)ABTI_ktable_get(&p_thread->p_keytable,
2485  &g_thread_mig_data_key);
2486  if (p_mig_data) {
2487  thread_attr.f_cb = p_mig_data->f_migration_cb;
2488  thread_attr.p_cb_arg = p_mig_data->p_migration_cb_arg;
2489  } else {
2490  thread_attr.f_cb = NULL;
2491  thread_attr.p_cb_arg = NULL;
2492  }
2493 #endif
2494  int abt_errno = ABTI_thread_attr_dup(&thread_attr, &p_attr);
2495  ABTI_CHECK_ERROR(abt_errno);
2496 
2497  *attr = ABTI_thread_attr_get_handle(p_attr);
2498  return ABT_SUCCESS;
2499 }
2500 
2501 /*****************************************************************************/
2502 /* Private APIs */
2503 /*****************************************************************************/
2504 
2505 ABTU_ret_err int ABTI_thread_revive(ABTI_global *p_global, ABTI_local *p_local,
2506  ABTI_pool *p_pool,
2507  void (*thread_func)(void *), void *arg,
2508  ABTI_thread *p_thread)
2509 {
2510  ABTI_ASSERT(ABTD_atomic_relaxed_load_int(&p_thread->state) ==
2511  ABT_THREAD_STATE_TERMINATED);
2512  int abt_errno = thread_revive(p_global, p_local, p_pool, thread_func, arg,
2513  THREAD_POOL_OP_PUSH, p_thread);
2514  ABTI_CHECK_ERROR(abt_errno);
2515  return ABT_SUCCESS;
2516 }
2517 
2518 ABTU_ret_err int ABTI_ythread_create_primary(ABTI_global *p_global,
2519  ABTI_local *p_local,
2520  ABTI_xstream *p_xstream,
2521  ABTI_ythread **p_ythread)
2522 {
2523  ABTI_thread_attr attr;
2524  ABTI_pool *p_pool;
2525 
2526  /* Get the first pool of ES */
2527  p_pool = ABTI_pool_get_ptr(p_xstream->p_main_sched->pools[0]);
2528 
2529  /* Allocate a ULT object */
2530 
2531  ABTI_thread_attr_init(&attr, NULL, 0, ABT_FALSE);
2532 
2533  /* Although this primary ULT is running now, we add this primary ULT to the
2534  * pool so that the scheduler can schedule the primary ULT when the primary
2535  * ULT is context switched to the scheduler for the first time. */
2536  int abt_errno =
2537  ythread_create(p_global, p_local, p_pool, NULL, NULL, &attr,
2538  ABTI_THREAD_TYPE_YIELDABLE | ABTI_THREAD_TYPE_PRIMARY,
2539  NULL, THREAD_POOL_OP_PUSH, p_ythread);
2540  ABTI_CHECK_ERROR(abt_errno);
2541  return ABT_SUCCESS;
2542 }
2543 
2544 ABTU_ret_err int ABTI_ythread_create_root(ABTI_global *p_global,
2545  ABTI_local *p_local,
2546  ABTI_xstream *p_xstream,
2547  ABTI_ythread **pp_root_ythread)
2548 {
2549  ABTI_thread_attr attr;
2550  /* Create a ULT context */
2551  if (p_xstream->type == ABTI_XSTREAM_TYPE_PRIMARY) {
2552  /* Create a thread with its stack */
2553  ABTI_thread_attr_init(&attr, NULL, p_global->sched_stacksize,
2554  ABT_FALSE);
2555  } else {
2556  /* For secondary ESs, the stack of an OS thread is used. */
2557  ABTI_thread_attr_init(&attr, NULL, 0, ABT_FALSE);
2558  }
2559  const ABTI_thread_type thread_type = ABTI_THREAD_TYPE_YIELDABLE |
2560  ABTI_THREAD_TYPE_ROOT |
2561  ABTI_THREAD_TYPE_NAMED;
2562  ABTI_ythread *p_root_ythread;
2563  int abt_errno =
2564  ythread_create(p_global, p_local, NULL, thread_root_func, NULL, &attr,
2565  thread_type, NULL, THREAD_POOL_OP_NONE, &p_root_ythread);
2566  ABTI_CHECK_ERROR(abt_errno);
2567  *pp_root_ythread = p_root_ythread;
2568  return ABT_SUCCESS;
2569 }
2570 
2571 ABTU_ret_err int ABTI_ythread_create_main_sched(ABTI_global *p_global,
2572  ABTI_local *p_local,
2573  ABTI_xstream *p_xstream,
2574  ABTI_sched *p_sched)
2575 {
2576  ABTI_thread_attr attr;
2577 
2578  /* Allocate a ULT object and its stack */
2579  ABTI_thread_attr_init(&attr, NULL, p_global->sched_stacksize, ABT_FALSE);
2580  int abt_errno =
2581  ythread_create(p_global, p_local, p_xstream->p_root_pool,
2582  thread_main_sched_func, NULL, &attr,
2583  ABTI_THREAD_TYPE_YIELDABLE |
2584  ABTI_THREAD_TYPE_MAIN_SCHED | ABTI_THREAD_TYPE_NAMED,
2585  p_sched, THREAD_POOL_OP_PUSH, &p_sched->p_ythread);
2586  ABTI_CHECK_ERROR(abt_errno);
2587  return ABT_SUCCESS;
2588 }
2589 
2590 /* This routine is to create a ULT for the scheduler. */
2591 ABTU_ret_err int ABTI_ythread_create_sched(ABTI_global *p_global,
2592  ABTI_local *p_local,
2593  ABTI_pool *p_pool,
2594  ABTI_sched *p_sched)
2595 {
2596  ABTI_thread_attr attr;
2597 
2598  /* Allocate a ULT object and its stack */
2599  ABTI_thread_attr_init(&attr, NULL, p_global->sched_stacksize, ABT_FALSE);
2600  int abt_errno = ythread_create(p_global, p_local, p_pool,
2601  (void (*)(void *))p_sched->run,
2602  (void *)ABTI_sched_get_handle(p_sched),
2603  &attr, ABTI_THREAD_TYPE_YIELDABLE, p_sched,
2604  THREAD_POOL_OP_PUSH, &p_sched->p_ythread);
2605  ABTI_CHECK_ERROR(abt_errno);
2606  return ABT_SUCCESS;
2607 }
2608 
2609 void ABTI_thread_join(ABTI_local **pp_local, ABTI_thread *p_thread)
2610 {
2611  thread_join(pp_local, p_thread);
2612 }
2613 
2614 void ABTI_thread_free(ABTI_global *p_global, ABTI_local *p_local,
2615  ABTI_thread *p_thread)
2616 {
2617  thread_free(p_global, p_local, p_thread, ABT_TRUE);
2618 }
2619 
2620 void ABTI_ythread_free_primary(ABTI_global *p_global, ABTI_local *p_local,
2621  ABTI_ythread *p_ythread)
2622 {
2623  ABTI_thread *p_thread = &p_ythread->thread;
2624  thread_free(p_global, p_local, p_thread, ABT_FALSE);
2625 }
2626 
2627 void ABTI_ythread_free_root(ABTI_global *p_global, ABTI_local *p_local,
2628  ABTI_ythread *p_ythread)
2629 {
2630  thread_free(p_global, p_local, &p_ythread->thread, ABT_FALSE);
2631 }
2632 
2633 ABTU_ret_err int ABTI_thread_get_mig_data(ABTI_global *p_global,
2634  ABTI_local *p_local,
2635  ABTI_thread *p_thread,
2636  ABTI_thread_mig_data **pp_mig_data)
2637 {
2638  ABTI_thread_mig_data *p_mig_data =
2639  (ABTI_thread_mig_data *)ABTI_ktable_get(&p_thread->p_keytable,
2640  &g_thread_mig_data_key);
2641  if (!p_mig_data) {
2642  int abt_errno;
2643  abt_errno =
2644  ABTU_calloc(1, sizeof(ABTI_thread_mig_data), (void **)&p_mig_data);
2645  ABTI_CHECK_ERROR(abt_errno);
2646  abt_errno = ABTI_ktable_set(p_global, p_local, &p_thread->p_keytable,
2647  &g_thread_mig_data_key, (void *)p_mig_data);
2648  if (ABTI_IS_ERROR_CHECK_ENABLED && abt_errno != ABT_SUCCESS) {
2649  /* Failed to add p_mig_data to p_thread's keytable. */
2650  ABTU_free(p_mig_data);
2651  return abt_errno;
2652  }
2653  }
2654  *pp_mig_data = p_mig_data;
2655  return ABT_SUCCESS;
2656 }
2657 
2658 void ABTI_thread_handle_request_cancel(ABTI_global *p_global,
2659  ABTI_xstream *p_local_xstream,
2660  ABTI_thread *p_thread)
2661 {
2662  ABTI_ythread *p_ythread = ABTI_thread_get_ythread_or_null(p_thread);
2663  if (p_ythread) {
2664  /* When we cancel a ULT, if another ULT is blocked waiting to join the
2665  * canceled ULT, we have to wake up the joiner ULT. */
2666  ABTI_ythread_resume_joiner(p_local_xstream, p_ythread);
2667  }
2668  ABTI_event_thread_cancel(p_local_xstream, p_thread);
2669  ABTI_thread_terminate(p_global, p_local_xstream, p_thread);
2670 }
2671 
2672 ABTU_ret_err int ABTI_thread_handle_request_migrate(ABTI_global *p_global,
2673  ABTI_local *p_local,
2674  ABTI_thread *p_thread)
2675 {
2676  int abt_errno;
2677 
2678  ABTI_thread_mig_data *p_mig_data;
2679  abt_errno =
2680  ABTI_thread_get_mig_data(p_global, p_local, p_thread, &p_mig_data);
2681  ABTI_CHECK_ERROR(abt_errno);
2682 
2683  /* Extracting an argument embedded in a migration request. */
2684  ABTI_pool *p_pool =
2685  ABTD_atomic_relaxed_load_ptr(&p_mig_data->p_migration_pool);
2686 
2687  /* Change the associated pool */
2688  abt_errno = ABTI_thread_set_associated_pool(p_global, p_thread, p_pool);
2689  ABTI_CHECK_ERROR(abt_errno);
2690  /* Call a callback function */
2691  if (p_mig_data->f_migration_cb) {
2692  ABT_thread thread = ABTI_thread_get_handle(p_thread);
2693  p_mig_data->f_migration_cb(thread, p_mig_data->p_migration_cb_arg);
2694  }
2695  /* Unset the migration request. */
2696  ABTI_thread_unset_request(p_thread, ABTI_THREAD_REQ_MIGRATE);
2697  return ABT_SUCCESS;
2698 }
2699 
2700 void ABTI_thread_print(ABTI_thread *p_thread, FILE *p_os, int indent)
2701 {
2702  if (p_thread == NULL) {
2703  fprintf(p_os, "%*s== NULL thread ==\n", indent, "");
2704  } else {
2705  ABTI_xstream *p_xstream = p_thread->p_last_xstream;
2706  int xstream_rank = p_xstream ? p_xstream->rank : 0;
2707  const char *type, *yieldable, *state, *named, *migratable;
2708 
2709  if (p_thread->type & ABTI_THREAD_TYPE_PRIMARY) {
2710  type = "PRIMARY";
2711  } else if (p_thread->type & ABTI_THREAD_TYPE_MAIN_SCHED) {
2712  type = "MAIN_SCHED";
2713  } else if (p_thread->type & ABTI_THREAD_TYPE_ROOT) {
2714  type = "ROOT";
2715  } else {
2716  type = "USER";
2717  }
2718  if (p_thread->type & ABTI_THREAD_TYPE_YIELDABLE) {
2719  yieldable = "yes";
2720  } else {
2721  yieldable = "no";
2722  }
2723  if (p_thread->type & ABTI_THREAD_TYPE_NAMED) {
2724  named = "yes";
2725  } else {
2726  named = "no";
2727  }
2728  if (p_thread->type & ABTI_THREAD_TYPE_MIGRATABLE) {
2729  migratable = "yes";
2730  } else {
2731  migratable = "no";
2732  }
2733  switch (ABTD_atomic_acquire_load_int(&p_thread->state)) {
2734  case ABT_THREAD_STATE_READY:
2735  state = "READY";
2736  break;
2737  case ABT_THREAD_STATE_RUNNING:
2738  state = "RUNNING";
2739  break;
2740  case ABT_THREAD_STATE_BLOCKED:
2741  state = "BLOCKED";
2742  break;
2743  case ABT_THREAD_STATE_TERMINATED:
2744  state = "TERMINATED";
2745  break;
2746  default:
2747  state = "UNKNOWN";
2748  break;
2749  }
2750  ABTI_thread_mig_data *p_mig_data =
2751  (ABTI_thread_mig_data *)ABTI_ktable_get(&p_thread->p_keytable,
2752  &g_thread_mig_data_key);
2753  void *p_migration_cb_arg =
2754  p_mig_data ? p_mig_data->p_migration_cb_arg : NULL;
2755 
2756  fprintf(p_os,
2757  "%*s== Thread (%p) ==\n"
2758  "%*sid : %" PRIu64 "\n"
2759  "%*stype : %s\n"
2760  "%*syieldable : %s\n"
2761  "%*sstate : %s\n"
2762  "%*slast_ES : %p (%d)\n"
2763  "%*sparent : %p\n"
2764  "%*sp_arg : %p\n"
2765  "%*spool : %p\n"
2766  "%*snamed : %s\n"
2767  "%*smigratable : %s\n"
2768  "%*srequest : 0x%x\n"
2769  "%*smig_cb_arg : %p\n"
2770  "%*skeytable : %p\n",
2771  indent, "", (void *)p_thread, indent, "",
2772  ABTI_thread_get_id(p_thread), indent, "", type, indent, "",
2773  yieldable, indent, "", state, indent, "", (void *)p_xstream,
2774  xstream_rank, indent, "", (void *)p_thread->p_parent, indent,
2775  "", p_thread->p_arg, indent, "", (void *)p_thread->p_pool,
2776  indent, "", named, indent, "", migratable, indent, "",
2777  ABTD_atomic_acquire_load_uint32(&p_thread->request), indent, "",
2778  p_migration_cb_arg, indent, "",
2779  ABTD_atomic_acquire_load_ptr(&p_thread->p_keytable));
2780 
2781  if (p_thread->type & ABTI_THREAD_TYPE_YIELDABLE) {
2782  ABTI_ythread *p_ythread = ABTI_thread_get_ythread(p_thread);
2783  fprintf(p_os,
2784  "%*sstacktop : %p\n"
2785  "%*sstacksize : %zu\n",
2786  indent, "",
2787  ABTD_ythread_context_get_stacktop(&p_ythread->ctx), indent,
2788  "", ABTD_ythread_context_get_stacksize(&p_ythread->ctx));
2789  }
2790  }
2791  fflush(p_os);
2792 }
2793 
2794 static ABTD_atomic_uint64 g_thread_id =
2795  ABTD_ATOMIC_UINT64_STATIC_INITIALIZER(0);
2796 void ABTI_thread_reset_id(void)
2797 {
2798  ABTD_atomic_release_store_uint64(&g_thread_id, 0);
2799 }
2800 
2801 ABT_unit_id ABTI_thread_get_id(ABTI_thread *p_thread)
2802 {
2803  if (p_thread == NULL)
2804  return ABTI_THREAD_INIT_ID;
2805 
2806  if (p_thread->id == ABTI_THREAD_INIT_ID) {
2807  p_thread->id = thread_get_new_id();
2808  }
2809  return p_thread->id;
2810 }
2811 
2812 /*****************************************************************************/
2813 /* Internal static functions */
2814 /*****************************************************************************/
2815 
2816 ABTU_ret_err static inline int
2817 ythread_create(ABTI_global *p_global, ABTI_local *p_local, ABTI_pool *p_pool,
2818  void (*thread_func)(void *), void *arg, ABTI_thread_attr *p_attr,
2819  ABTI_thread_type thread_type, ABTI_sched *p_sched,
2820  thread_pool_op_kind pool_op, ABTI_ythread **pp_newthread)
2821 {
2822  int abt_errno;
2823  ABTI_ythread *p_newthread;
2824  ABTI_ktable *p_keytable = NULL;
2825 
2826  /* Allocate a ULT object and its stack, then create a thread context. */
2827  if (!p_attr) {
2828  abt_errno =
2829  ABTI_mem_alloc_ythread_default(p_global, p_local, &p_newthread);
2830  ABTI_CHECK_ERROR(abt_errno);
2831 #ifndef ABT_CONFIG_DISABLE_MIGRATION
2832  thread_type |= ABTI_THREAD_TYPE_MIGRATABLE;
2833 #endif
2834  } else {
2835  /*
2836  * There are four memory management types for ULTs.
2837  * 1. A thread that uses a stack of a default size.
2838  * -> size == p_global->thread_stacksize, p_stack == NULL
2839  * 2. A thread that uses a stack of a non-default size.
2840  * -> size != 0, size != p_global->thread_stacksize, p_stack == NULL
2841  * 3. A thread that uses OS-level thread's stack (e.g., a primary ULT).
2842  * -> size == 0, p_stack = NULL
2843  * 4. A thread that uses a user-allocated stack.
2844  * -> p_stack != NULL
2845  * Only 1. is important for the performance.
2846  */
2847  if (ABTU_likely(p_attr->p_stack == NULL)) {
2848  const size_t default_stacksize = p_global->thread_stacksize;
2849  const size_t stacksize = p_attr->stacksize;
2850  if (ABTU_likely(stacksize == default_stacksize)) {
2851  /* 1. A thread that uses a stack of a default size. */
2852  abt_errno =
2853  ABTI_mem_alloc_ythread_mempool_desc_stack(p_global, p_local,
2854  stacksize,
2855  &p_newthread);
2856  } else if (stacksize != 0) {
2857  /* 2. A thread that uses a stack of a non-default size. */
2858  abt_errno =
2859  ABTI_mem_alloc_ythread_malloc_desc_stack(p_global,
2860  stacksize,
2861  &p_newthread);
2862  } else {
2863  /* 3. A thread that uses OS-level thread's stack */
2864  abt_errno =
2865  ABTI_mem_alloc_ythread_mempool_desc(p_global, p_local, 0,
2866  NULL, &p_newthread);
2867  }
2868  ABTI_CHECK_ERROR(abt_errno);
2869  } else {
2870  /* 4. A thread that uses a user-allocated stack. */
2871  void *p_stacktop =
2872  (void *)((char *)(p_attr->p_stack) + p_attr->stacksize);
2873  abt_errno =
2874  ABTI_mem_alloc_ythread_mempool_desc(p_global, p_local,
2875  p_attr->stacksize,
2876  p_stacktop, &p_newthread);
2877  ABTI_CHECK_ERROR(abt_errno);
2878  }
2879 #ifndef ABT_CONFIG_DISABLE_MIGRATION
2880  thread_type |= p_attr->migratable ? ABTI_THREAD_TYPE_MIGRATABLE : 0;
2881  if (ABTU_unlikely(p_attr->f_cb)) {
2882  ABTI_thread_mig_data *p_mig_data;
2883  abt_errno = ABTU_calloc(1, sizeof(ABTI_thread_mig_data),
2884  (void **)&p_mig_data);
2885  if (ABTI_IS_ERROR_CHECK_ENABLED &&
2886  ABTU_unlikely(abt_errno != ABT_SUCCESS)) {
2887  ABTI_mem_free_thread(p_global, p_local, &p_newthread->thread);
2888  return abt_errno;
2889  }
2890  p_mig_data->f_migration_cb = p_attr->f_cb;
2891  p_mig_data->p_migration_cb_arg = p_attr->p_cb_arg;
2892  abt_errno = ABTI_ktable_set_unsafe(p_global, p_local, &p_keytable,
2893  &g_thread_mig_data_key,
2894  (void *)p_mig_data);
2895  if (ABTI_IS_ERROR_CHECK_ENABLED &&
2896  ABTU_unlikely(abt_errno != ABT_SUCCESS)) {
2897  if (p_keytable)
2898  ABTI_ktable_free(p_global, p_local, p_keytable);
2899  ABTU_free(p_mig_data);
2900  ABTI_mem_free_thread(p_global, p_local, &p_newthread->thread);
2901  return abt_errno;
2902  }
2903  }
2904 #endif
2905  }
2906 
2907  p_newthread->thread.f_thread = thread_func;
2908  p_newthread->thread.p_arg = arg;
2909 
2910  ABTD_atomic_release_store_int(&p_newthread->thread.state,
2911  ABT_THREAD_STATE_READY);
2912  ABTD_atomic_release_store_uint32(&p_newthread->thread.request, 0);
2913  p_newthread->thread.p_last_xstream = NULL;
2914  p_newthread->thread.p_parent = NULL;
2915  p_newthread->thread.type |= thread_type;
2916  p_newthread->thread.id = ABTI_THREAD_INIT_ID;
2917  if (p_sched && !(thread_type & (ABTI_THREAD_TYPE_PRIMARY |
2918  ABTI_THREAD_TYPE_MAIN_SCHED))) {
2919  /* Set a destructor for p_sched. */
2920  abt_errno = ABTI_ktable_set_unsafe(p_global, p_local, &p_keytable,
2921  &g_thread_sched_key, p_sched);
2922  if (ABTI_IS_ERROR_CHECK_ENABLED &&
2923  ABTU_unlikely(abt_errno != ABT_SUCCESS)) {
2924  if (p_keytable)
2925  ABTI_ktable_free(p_global, p_local, p_keytable);
2926  ABTI_mem_free_thread(p_global, p_local, &p_newthread->thread);
2927  return abt_errno;
2928  }
2929  }
2930  ABTD_atomic_relaxed_store_ptr(&p_newthread->thread.p_keytable, p_keytable);
2931 
2932  /* Create a wrapper unit */
2933  if (pool_op == THREAD_POOL_OP_PUSH || pool_op == THREAD_POOL_OP_INIT) {
2934  abt_errno =
2935  ABTI_thread_init_pool(p_global, &p_newthread->thread, p_pool);
2936  if (ABTI_IS_ERROR_CHECK_ENABLED &&
2937  ABTU_unlikely(abt_errno != ABT_SUCCESS)) {
2938  if (p_keytable)
2939  ABTI_ktable_free(p_global, p_local, p_keytable);
2940  ABTI_mem_free_thread(p_global, p_local, &p_newthread->thread);
2941  return abt_errno;
2942  }
2943  /* Invoke a thread creation event. */
2944  ABTI_event_thread_create(p_local, &p_newthread->thread,
2945  ABTI_local_get_xstream_or_null(p_local)
2946  ? ABTI_local_get_xstream(p_local)->p_thread
2947  : NULL,
2948  p_pool);
2949  if (pool_op == THREAD_POOL_OP_PUSH) {
2950  /* Add this thread to the pool */
2951  ABTI_pool_push(p_pool, p_newthread->thread.unit,
2952  ABT_POOL_CONTEXT_OP_THREAD_CREATE);
2953  }
2954  } else {
2955  /* pool_op == THREAD_POOL_OP_NONE */
2956  p_newthread->thread.p_pool = p_pool;
2957  p_newthread->thread.unit = ABT_UNIT_NULL;
2958  /* Invoke a thread creation event. */
2959  ABTI_event_thread_create(p_local, &p_newthread->thread,
2960  ABTI_local_get_xstream_or_null(p_local)
2961  ? ABTI_local_get_xstream(p_local)->p_thread
2962  : NULL,
2963  NULL);
2964  }
2965 
2966  /* Return value */
2967  *pp_newthread = p_newthread;
2968  return ABT_SUCCESS;
2969 }
2970 
2971 ABTU_ret_err static inline int
2972 thread_revive(ABTI_global *p_global, ABTI_local *p_local, ABTI_pool *p_pool,
2973  void (*thread_func)(void *), void *arg,
2974  thread_pool_op_kind pool_op, ABTI_thread *p_thread)
2975 {
2976  ABTI_UB_ASSERT(ABTD_atomic_relaxed_load_int(&p_thread->state) ==
2977  ABT_THREAD_STATE_TERMINATED);
2978  /* Set the new pool */
2979  int abt_errno = ABTI_thread_set_associated_pool(p_global, p_thread, p_pool);
2980  ABTI_CHECK_ERROR(abt_errno);
2981 
2982  p_thread->f_thread = thread_func;
2983  p_thread->p_arg = arg;
2984 
2985  ABTD_atomic_relaxed_store_int(&p_thread->state, ABT_THREAD_STATE_READY);
2986  ABTD_atomic_relaxed_store_uint32(&p_thread->request, 0);
2987  p_thread->p_last_xstream = NULL;
2988  p_thread->p_parent = NULL;
2989 
2990  ABTI_ythread *p_ythread = ABTI_thread_get_ythread_or_null(p_thread);
2991  if (p_ythread) {
2992  /* Create a ULT context */
2993  ABTD_ythread_context_reinit(&p_ythread->ctx);
2994  }
2995 
2996  /* Invoke a thread revive event. */
2997  ABTI_event_thread_revive(p_local, p_thread,
2998  ABTI_local_get_xstream_or_null(p_local)
2999  ? ABTI_local_get_xstream(p_local)->p_thread
3000  : NULL,
3001  p_pool);
3002 
3003  if (pool_op == THREAD_POOL_OP_PUSH) {
3004  /* Add this thread to the pool */
3005  ABTI_pool_push(p_pool, p_thread->unit,
3006  ABT_POOL_CONTEXT_OP_THREAD_REVIVE);
3007  }
3008  return ABT_SUCCESS;
3009 }
3010 
3011 #ifndef ABT_CONFIG_DISABLE_MIGRATION
3012 ABTU_ret_err static int thread_migrate_to_pool(ABTI_global *p_global,
3013  ABTI_local *p_local,
3014  ABTI_thread *p_thread,
3015  ABTI_pool *p_pool)
3016 {
3017  /* Add a migration request to the thread. p_migration_pool must be updated
3018  * before setting the request since the target thread reads p_migration_pool
3019  * after seeing ABTI_THREAD_REQ_MIGRATE. The update must be "atomic" (but it
3020  * does not require acq-rel) since two threads can update the pointer value
3021  * simultaneously. */
3022 
3023  ABTI_thread_mig_data *p_mig_data;
3024  int abt_errno =
3025  ABTI_thread_get_mig_data(p_global, p_local, p_thread, &p_mig_data);
3026  ABTI_CHECK_ERROR(abt_errno);
3027 
3028  ABTD_atomic_relaxed_store_ptr(&p_mig_data->p_migration_pool,
3029  (void *)p_pool);
3030  ABTI_thread_set_request(p_thread, ABTI_THREAD_REQ_MIGRATE);
3031  return ABT_SUCCESS;
3032 }
3033 #endif
3034 
3035 static inline void thread_free(ABTI_global *p_global, ABTI_local *p_local,
3036  ABTI_thread *p_thread, ABT_bool free_unit)
3037 {
3038  /* Invoke a thread freeing event. */
3039  ABTI_event_thread_free(p_local, p_thread,
3040  ABTI_local_get_xstream_or_null(p_local)
3041  ? ABTI_local_get_xstream(p_local)->p_thread
3042  : NULL);
3043 
3044  /* Free the unit */
3045  if (free_unit) {
3046  ABTI_thread_unset_associated_pool(p_global, p_thread);
3047  }
3048 
3049  /* Free the key-value table */
3050  ABTI_ktable *p_ktable = ABTD_atomic_acquire_load_ptr(&p_thread->p_keytable);
3051  /* No parallel access to TLS is allowed. */
3052  ABTI_ASSERT(p_ktable != ABTI_KTABLE_LOCKED);
3053  if (p_ktable) {
3054  ABTI_ktable_free(p_global, p_local, p_ktable);
3055  }
3056 
3057  /* Free ABTI_thread (stack will also be freed) */
3058  ABTI_mem_free_thread(p_global, p_local, p_thread);
3059 }
3060 
3061 static void thread_key_destructor_stackable_sched(void *p_value)
3062 {
3063  /* This destructor should be called in ABTI_ythread_free(), so it should not
3064  * free the thread again. */
3065  ABTI_sched *p_sched = (ABTI_sched *)p_value;
3066  p_sched->used = ABTI_SCHED_NOT_USED;
3067  if (p_sched->automatic == ABT_TRUE) {
3068  ABTI_global *p_global = ABTI_global_get_global();
3069  p_sched->p_ythread = NULL;
3070  ABTI_sched_free(p_global, ABTI_local_get_local_uninlined(), p_sched,
3071  ABT_FALSE);
3072  } else {
3073  /* If it is not automatic, p_ythread must be set to NULL to avoid double
3074  * free corruption. */
3075  p_sched->p_ythread = NULL;
3076  }
3077 }
3078 
3079 static void thread_key_destructor_migration(void *p_value)
3080 {
3081  ABTI_thread_mig_data *p_mig_data = (ABTI_thread_mig_data *)p_value;
3082  ABTU_free(p_mig_data);
3083 }
3084 
3085 static void thread_join_busywait(ABTI_thread *p_thread)
3086 {
3087  while (ABTD_atomic_acquire_load_int(&p_thread->state) !=
3088  ABT_THREAD_STATE_TERMINATED) {
3089  ABTD_atomic_pause();
3090  }
3091  ABTI_event_thread_join(NULL, p_thread, NULL);
3092 }
3093 
3094 #ifndef ABT_CONFIG_ACTIVE_WAIT_POLICY
3095 static void thread_join_futexwait(ABTI_thread *p_thread)
3096 {
3097  ABTI_ythread *p_ythread = ABTI_thread_get_ythread_or_null(p_thread);
3098  if (p_ythread) {
3099  /* tell that this thread will join */
3100  uint32_t req = ABTD_atomic_fetch_or_uint32(&p_ythread->thread.request,
3101  ABTI_THREAD_REQ_JOIN);
3102  if (!(req & ABTI_THREAD_REQ_JOIN)) {
3103  ABTD_futex_single futex;
3104  ABTD_futex_single_init(&futex);
3105  ABTI_ythread dummy_ythread;
3106  dummy_ythread.thread.type = ABTI_THREAD_TYPE_EXT;
3107  /* Just arbitrarily choose p_arg to store futex. */
3108  dummy_ythread.thread.p_arg = &futex;
3109  ABTD_atomic_release_store_ythread_context_ptr(&p_ythread->ctx
3110  .p_link,
3111  &dummy_ythread.ctx);
3112  ABTD_futex_suspend(&futex);
3113  /* Resumed. */
3114  } else {
3115  /* If request already has ABTI_THREAD_REQ_JOIN, p_ythread is
3116  * terminating. We can't suspend in this case. */
3117  }
3118  }
3119  /* No matter whether this thread has been resumed or not, we need to busy-
3120  * wait to make sure that the thread's state gets terminated. */
3121  thread_join_busywait(p_thread);
3122 }
3123 #endif
3124 
3125 static void thread_join_yield_thread(ABTI_xstream **pp_local_xstream,
3126  ABTI_ythread *p_self,
3127  ABTI_thread *p_thread)
3128 {
3129  while (ABTD_atomic_acquire_load_int(&p_thread->state) !=
3130  ABT_THREAD_STATE_TERMINATED) {
3131  ABTI_ythread_yield(pp_local_xstream, p_self,
3132  ABTI_YTHREAD_YIELD_KIND_YIELD_LOOP,
3133  ABT_SYNC_EVENT_TYPE_THREAD_JOIN, (void *)p_thread);
3134  }
3135  ABTI_event_thread_join(ABTI_xstream_get_local(*pp_local_xstream), p_thread,
3136  &p_self->thread);
3137 }
3138 
3139 static inline void thread_join(ABTI_local **pp_local, ABTI_thread *p_thread)
3140 {
3141  if (ABTD_atomic_acquire_load_int(&p_thread->state) ==
3142  ABT_THREAD_STATE_TERMINATED) {
3143  ABTI_event_thread_join(*pp_local, p_thread,
3144  ABTI_local_get_xstream_or_null(*pp_local)
3145  ? ABTI_local_get_xstream(*pp_local)->p_thread
3146  : NULL);
3147  return;
3148  }
3149  /* The primary ULT cannot be joined. */
3150  ABTI_ASSERT(!(p_thread->type & ABTI_THREAD_TYPE_PRIMARY));
3151 
3152  ABTI_xstream *p_local_xstream = ABTI_local_get_xstream_or_null(*pp_local);
3153  if (ABTI_IS_EXT_THREAD_ENABLED && !p_local_xstream) {
3154 #ifdef ABT_CONFIG_ACTIVE_WAIT_POLICY
3155  thread_join_busywait(p_thread);
3156 #else
3157  thread_join_futexwait(p_thread);
3158 #endif
3159  return;
3160  }
3161 
3162  ABTI_thread *p_self_thread = p_local_xstream->p_thread;
3163 
3164  ABTI_ythread *p_self = ABTI_thread_get_ythread_or_null(p_self_thread);
3165  if (!p_self) {
3166 #ifdef ABT_CONFIG_ACTIVE_WAIT_POLICY
3167  thread_join_busywait(p_thread);
3168 #else
3169  thread_join_futexwait(p_thread);
3170 #endif
3171  return;
3172  }
3173 
3174  /* The target ULT should be different. */
3175  ABTI_ASSERT(p_thread != p_self_thread);
3176 
3177  ABTI_ythread *p_ythread = ABTI_thread_get_ythread_or_null(p_thread);
3178  if (!p_ythread) {
3179  thread_join_yield_thread(&p_local_xstream, p_self, p_thread);
3180  *pp_local = ABTI_xstream_get_local(p_local_xstream);
3181  return;
3182  }
3183 
3184  /* Tell p_ythread that there has been a join request. */
3185  /* If request already has ABTI_THREAD_REQ_JOIN, p_ythread is
3186  * terminating. We can't block p_self in this case. */
3187  uint32_t req = ABTD_atomic_fetch_or_uint32(&p_ythread->thread.request,
3188  ABTI_THREAD_REQ_JOIN);
3189  if (req & ABTI_THREAD_REQ_JOIN) {
3190  /* Fall-back to the yield-based join. */
3191  thread_join_yield_thread(&p_local_xstream, p_self, &p_ythread->thread);
3192  *pp_local = ABTI_xstream_get_local(p_local_xstream);
3193  } else {
3194  /* Suspend the current ULT */
3195  ABTI_ythread_suspend_join(&p_local_xstream, p_self, p_ythread,
3196  ABT_SYNC_EVENT_TYPE_THREAD_JOIN,
3197  (void *)p_ythread);
3198  /* This thread is resumed by the target thread. Since this ULT is resumed
3199  * before the target thread has fully terminated, wait for its
3200  * completion. */
3201  thread_join_yield_thread(&p_local_xstream, p_self, &p_ythread->thread);
3202  *pp_local = ABTI_xstream_get_local(p_local_xstream);
3203  }
3204 }
3205 
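thread_join() is the backend of ABT_thread_join(). External threads and non-yieldable callers take the busy-wait or futex path; a yieldable caller either yields until the target terminates or suspends and is resumed by the terminating ULT. A minimal usage sketch from application code, not part of thread.c; it assumes the primary execution stream's main pool and a hypothetical work function:

#include <abt.h>
#include <stdint.h>
#include <stdio.h>

static void hello(void *arg)   /* hypothetical work function */
{
    printf("hello from ULT %d\n", (int)(intptr_t)arg);
}

int main(int argc, char *argv[])
{
    ABT_init(argc, argv);

    ABT_xstream xstream;
    ABT_pool pool;
    ABT_xstream_self(&xstream);
    ABT_xstream_get_main_pools(xstream, 1, &pool);

    ABT_thread thread;
    ABT_thread_create(pool, hello, (void *)(intptr_t)7, ABT_THREAD_ATTR_NULL,
                      &thread);
    ABT_thread_join(thread);   /* ends up in thread_join() above */
    ABT_thread_free(&thread);

    ABT_finalize();
    return 0;
}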
3206 static void thread_root_func(void *arg)
3207 {
3208  /* The root thread runs on a special context, so it must not rely on
3209  * functionality that requires yielding. */
3210  ABTI_global *p_global = ABTI_global_get_global();
3211  ABTI_local *p_local = ABTI_local_get_local();
3212  ABTI_xstream *p_local_xstream = ABTI_local_get_xstream(p_local);
3213  ABTI_ASSERT(ABTD_atomic_relaxed_load_int(&p_local_xstream->state) ==
3214  ABT_XSTREAM_STATE_RUNNING);
3215 
3216  ABTI_ythread *p_root_ythread = p_local_xstream->p_root_ythread;
3217  p_local_xstream->p_thread = &p_root_ythread->thread;
3218  ABTI_pool *p_root_pool = p_local_xstream->p_root_pool;
3219 
3220  do {
3221  ABT_thread thread =
3222  ABTI_pool_pop(p_root_pool, ABT_POOL_CONTEXT_OWNER_PRIMARY);
3223  if (thread != ABT_THREAD_NULL) {
3224  ABTI_xstream *p_xstream = p_local_xstream;
3225  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
3226  ABTI_ythread_schedule(p_global, &p_xstream, p_thread);
3227  /* The root thread must be executed on the same execution stream. */
3228  ABTI_ASSERT(p_xstream == p_local_xstream);
3229  }
3230  } while (ABTD_atomic_acquire_load_int(
3231  &p_local_xstream->p_main_sched->p_ythread->thread.state) !=
3232  ABT_THREAD_STATE_TERMINATED);
3233  /* The main scheduler thread finishes. */
3234 
3235  /* Set the ES's state as TERMINATED */
3236  ABTD_atomic_release_store_int(&p_local_xstream->state,
3237  ABT_XSTREAM_STATE_TERMINATED);
3238 
3239  if (p_local_xstream->type == ABTI_XSTREAM_TYPE_PRIMARY) {
3240  /* Let us jump back to the primary thread (then finalize Argobots) */
3241  ABTI_ythread_exit_to_primary(p_global, p_local_xstream, p_root_ythread);
3242  }
3243 }
3244 
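thread_root_func() is never called by users: each execution stream's root ULT keeps popping from the root pool and scheduling units until the main scheduler's ULT terminates, then marks the stream terminated (and, on the primary stream, returns control to the primary native thread). The closest user-visible counterpart is the lifecycle of a secondary execution stream, sketched below with the default scheduler; this helper is hypothetical and not part of thread.c:

#include <abt.h>

/* Sketch: a secondary execution stream whose root ULT and main scheduler are
 * driven internally by thread_root_func()/thread_main_sched_func(). */
static int spawn_and_drain_xstream(void)
{
    ABT_xstream xstream;
    int ret = ABT_xstream_create(ABT_SCHED_NULL, &xstream); /* default sched */
    if (ret != ABT_SUCCESS)
        return ret;
    /* ... create ULTs on the new execution stream's pools here ... */
    ABT_xstream_join(xstream);   /* returns once its schedulers have finished */
    ABT_xstream_free(&xstream);
    return ABT_SUCCESS;
}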
3245 static void thread_main_sched_func(void *arg)
3246 {
3247  ABTI_local *p_local = ABTI_local_get_local();
3248  ABTI_xstream *p_local_xstream = ABTI_local_get_xstream(p_local);
3249 
3250  while (1) {
3251  /* Execute the run function of scheduler */
3252  ABTI_sched *p_sched = p_local_xstream->p_main_sched;
3253  ABTI_ASSERT(p_local_xstream->p_thread == &p_sched->p_ythread->thread);
3254 
3255  p_sched->run(ABTI_sched_get_handle(p_sched));
3256  /* The main scheduler's thread must be executed on the same execution
3257  * stream. */
3258  ABTI_ASSERT(p_local == ABTI_local_get_local_uninlined());
3259 
3260  /* We free the current main scheduler and replace it if requested. */
3261  if (ABTD_atomic_relaxed_load_uint32(&p_sched->request) &
3262  ABTI_SCHED_REQ_REPLACE) {
3263  ABTI_ythread *p_waiter = p_sched->p_replace_waiter;
3264  ABTI_sched *p_new_sched = p_sched->p_replace_sched;
3265  /* Set this scheduler as a main scheduler */
3266  p_new_sched->used = ABTI_SCHED_MAIN;
3267  /* Take the ULT of the current main scheduler and use it for the new
3268  * scheduler. */
3269  p_new_sched->p_ythread = p_sched->p_ythread;
3270  p_local_xstream->p_main_sched = p_new_sched;
3271  /* Now, we free the current main scheduler. p_sched->p_ythread must
3272  * be NULL to avoid freeing it in ABTI_sched_discard_and_free(). */
3273  p_sched->p_ythread = NULL;
3274  ABTI_sched_discard_and_free(ABTI_global_get_global(), p_local,
3275  p_sched, ABT_FALSE);
3276  /* We do not need to unset ABTI_SCHED_REQ_REPLACE since p_sched
3277  * has already been replaced. */
3278  p_sched = p_new_sched;
3279  /* Resume the waiter. */
3280  ABTI_ythread_resume_and_push(p_local, p_waiter);
3281  }
3282  ABTI_ASSERT(p_sched == p_local_xstream->p_main_sched);
3283  uint32_t request = ABTD_atomic_acquire_load_uint32(
3284  &p_sched->p_ythread->thread.request);
3285 
3286  /* If there is an exit or a cancel request, the ES terminates
3287  * regardless of remaining work units. */
3288  if (request & ABTI_THREAD_REQ_CANCEL)
3289  break;
3290 
3291  /* When join is requested, the ES terminates after finishing
3292  * execution of all work units. */
3293  if ((ABTD_atomic_relaxed_load_uint32(&p_sched->request) &
3294  ABTI_SCHED_REQ_FINISH) &&
3295  !ABTI_sched_has_unit(p_sched)) {
3296  break;
3297  }
3298  }
3299  /* Finish this thread and go back to the root thread. */
3300 }
3301 
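The loop above is what a main scheduler's ULT runs: invoke the scheduler's run function, handle a pending replacement, and stop on a cancel request or when a finish request arrives and no units remain. A user-defined scheduler observes the same stop conditions through ABT_sched_has_to_stop(). A rough sketch of such a run function follows; it is not part of thread.c, the function name is hypothetical, and ABT_pool_pop_thread()/ABT_self_schedule() follow the newer pool API, so check them against the Argobots version in use:

#include <abt.h>

static void my_sched_run(ABT_sched sched)   /* hypothetical scheduler */
{
    ABT_pool pool;
    ABT_sched_get_pools(sched, 1, 0, &pool);
    while (1) {
        ABT_thread thread;
        ABT_pool_pop_thread(pool, &thread);
        if (thread != ABT_THREAD_NULL) {
            /* Run the popped work unit on the calling execution stream. */
            ABT_self_schedule(thread, ABT_POOL_NULL);
        }
        ABT_bool stop;
        ABT_sched_has_to_stop(sched, &stop);  /* finish/cancel requested? */
        if (stop == ABT_TRUE)
            break;
    }
}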
3302 static inline ABT_unit_id thread_get_new_id(void)
3303 {
3304  return (ABT_unit_id)ABTD_atomic_fetch_add_uint64(&g_thread_id, 1);
3305 }
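thread_get_new_id() hands out IDs from a global atomic counter; application code observes them via ABT_thread_get_id() or, from inside a work unit, ABT_thread_self_id(). A small sketch, not part of thread.c, with a hypothetical work function:

#include <abt.h>
#include <inttypes.h>
#include <stdio.h>

static void print_my_id(void *arg)
{
    ABT_unit_id id;
    ABT_thread_self_id(&id);   /* ID assigned via thread_get_new_id() */
    printf("running as ULT %" PRIu64 "\n", (uint64_t)id);
}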