/* NOTE(review): this chunk is a partially-extracted copy of Argobots
 * src/thread.c.  The numerals embedded at the start of lines are the
 * original file's line numbers left over from extraction, not code.
 * Many interior lines (braces, error paths, #endif markers) are missing
 * throughout; comments below are hedged accordingly. */

/* Forward declarations of file-local helpers used by the public
 * ABT_thread_* API below.  The migration helper is compiled only when
 * migration support is enabled (ABT_CONFIG_DISABLE_MIGRATION not set). */
8 static inline int ABTI_thread_create_internal(
9 ABTI_local *p_local, ABTI_pool *p_pool,
void (*thread_func)(
void *),
10 void *arg, ABTI_thread_attr *p_attr, ABTI_thread_type thread_type,
11 ABTI_sched *p_sched,
int refcount, ABTI_xstream *p_parent_xstream,
12 ABT_bool push_pool, ABTI_thread **pp_newthread);
13 static int ABTI_thread_revive(ABTI_local *p_local, ABTI_pool *p_pool,
14 void (*thread_func)(
void *),
void *arg,
15 ABTI_thread *p_thread);
16 static inline int ABTI_thread_join(ABTI_local **pp_local,
17 ABTI_thread *p_thread);
18 #ifndef ABT_CONFIG_DISABLE_MIGRATION 19 static int ABTI_thread_migrate_to_xstream(ABTI_local **pp_local,
20 ABTI_thread *p_thread,
21 ABTI_xstream *p_xstream);
23 static inline ABT_bool ABTI_thread_is_ready(ABTI_thread *p_thread);
24 static inline void ABTI_thread_free_internal(ABTI_thread *p_thread);
/* Fragment of ABT_thread_create (signature lost in extraction).
 * Resolves the pool handle, takes a reference only when the caller wants
 * the handle back (newthread != NULL), and delegates to
 * ABTI_thread_create_internal with type USER. */
57 ABTI_local *p_local = ABTI_local_get_local();
58 ABTI_thread *p_newthread;
60 ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
61 ABTI_CHECK_NULL_POOL_PTR(p_pool);
/* refcount == 1 iff caller retains the handle; otherwise the ULT is
 * unnamed and freed automatically on termination. */
63 int refcount = (newthread != NULL) ? 1 : 0;
65 ABTI_thread_create_internal(p_local, p_pool, thread_func, arg,
66 ABTI_thread_attr_get_ptr(attr),
67 ABTI_THREAD_TYPE_USER, NULL, refcount, NULL,
72 *newthread = ABTI_thread_get_handle(p_newthread);
/* Fragment of ABT_thread_create_on_xstream: same as above but the pool
 * is the target ES's main-scheduler pool. */
125 void (*thread_func)(
void *),
void *arg,
129 ABTI_local *p_local = ABTI_local_get_local();
130 ABTI_thread *p_newthread;
132 ABTI_xstream *p_xstream = ABTI_xstream_get_ptr(xstream);
133 ABTI_CHECK_NULL_XSTREAM_PTR(p_xstream);
/* TODO confirm: presumably pushes to the first pool of the main sched. */
136 ABTI_pool *p_pool = ABTI_xstream_get_main_pool(p_xstream);
137 int refcount = (newthread != NULL) ? 1 : 0;
139 ABTI_thread_create_internal(p_local, p_pool, thread_func, arg,
140 ABTI_thread_attr_get_ptr(attr),
141 ABTI_THREAD_TYPE_USER, NULL, refcount, NULL,
143 ABTI_CHECK_ERROR(abt_errno);
147 *newthread = ABTI_thread_get_handle(p_newthread);
/* Fragment of ABT_thread_create_many.  Two loops: refcount 0 when no
 * handle array is supplied, refcount 1 when handles are returned.
 * User-provided stacks are rejected earlier (stacktype check). */
183 void (**thread_func_list)(
void *),
void **arg_list,
187 ABTI_local *p_local = ABTI_local_get_local();
191 if (ABTI_thread_attr_get_ptr(attr)->stacktype == ABTI_STACK_TYPE_USER) {
197 if (newthread_list == NULL) {
198 for (i = 0; i < num; i++) {
200 ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
201 ABTI_CHECK_NULL_POOL_PTR(p_pool);
203 void (*thread_f)(
void *) = thread_func_list[i];
/* arg_list may be NULL: every ULT then starts with a NULL argument. */
204 void *arg = arg_list ? arg_list[i] : NULL;
206 ABTI_thread_create_internal(p_local, p_pool, thread_f, arg,
207 ABTI_thread_attr_get_ptr(attr),
208 ABTI_THREAD_TYPE_USER, NULL, 0,
210 ABTI_CHECK_ERROR(abt_errno);
213 for (i = 0; i < num; i++) {
214 ABTI_thread *p_newthread;
216 ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
217 ABTI_CHECK_NULL_POOL_PTR(p_pool);
219 void (*thread_f)(
void *) = thread_func_list[i];
220 void *arg = arg_list ? arg_list[i] : NULL;
222 ABTI_thread_create_internal(p_local, p_pool, thread_f, arg,
223 ABTI_thread_attr_get_ptr(attr),
224 ABTI_THREAD_TYPE_USER, NULL, 1,
226 newthread_list[i] = ABTI_thread_get_handle(p_newthread);
228 ABTI_CHECK_ERROR(abt_errno);
/* Fragment of ABT_thread_revive: re-arms a terminated ULT with a new
 * function/argument via the static ABTI_thread_revive helper. */
263 ABTI_local *p_local = ABTI_local_get_local();
265 ABTI_thread *p_thread = ABTI_thread_get_ptr(*thread);
266 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
268 ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
269 ABTI_CHECK_NULL_POOL_PTR(p_pool);
271 abt_errno = ABTI_thread_revive(p_local, p_pool, thread_func, arg, p_thread);
272 ABTI_CHECK_ERROR(abt_errno);
/* Fragment of ABT_thread_free.  Disallows freeing the calling ULT and
 * the main/main-sched ULTs; joins the target first if it has not yet
 * terminated, then releases it. */
298 ABTI_local *p_local = ABTI_local_get_local();
301 ABTI_thread *p_thread = ABTI_thread_get_ptr(h_thread);
302 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
/* p_local == NULL means the caller is an external (non-Argobots) thread,
 * which by definition cannot be the ULT being freed. */
306 ABTI_CHECK_TRUE_MSG(p_local == NULL || p_thread != p_local->p_thread,
308 "The current thread cannot be freed.");
310 ABTI_CHECK_TRUE_MSG(p_thread->type != ABTI_THREAD_TYPE_MAIN &&
311 p_thread->type != ABTI_THREAD_TYPE_MAIN_SCHED,
313 "The main thread cannot be freed explicitly.");
/* Join only when not already terminated (state check lost in extraction). */
316 if (ABTD_atomic_acquire_load_int(&p_thread->state) !=
318 ABTI_thread_join(&p_local, p_thread);
322 ABTI_thread_free(p_local, p_thread);
/* Fragment of ABT_thread_free_many: join+free each listed ULT in turn. */
350 ABTI_local *p_local = ABTI_local_get_local();
353 for (i = 0; i < num; i++) {
354 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread_list[i]);
355 ABTI_thread_join(&p_local, p_thread);
356 ABTI_thread_free(p_local, p_thread);
/* Fragment of ABT_thread_join. */
374 ABTI_local *p_local = ABTI_local_get_local();
375 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
376 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
377 abt_errno = ABTI_thread_join(&p_local, p_thread);
378 ABTI_CHECK_ERROR(abt_errno);
/* Fragment of ABT_thread_join_many. */
403 ABTI_local *p_local = ABTI_local_get_local();
405 for (i = 0; i < num_threads; i++) {
407 ABTI_thread_join(&p_local, ABTI_thread_get_ptr(thread_list[i]));
408 ABTI_CHECK_ERROR(abt_errno);
/* Fragment of ABT_thread_exit: marks the calling ULT with REQ_EXIT and
 * switches away for good via ABTD_thread_exit. */
433 ABTI_local *p_local = ABTI_local_get_local();
442 if (p_local == NULL) {
447 ABTI_thread *p_thread = p_local->p_thread;
448 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
451 ABTI_thread_set_request(p_thread, ABTI_THREAD_REQ_EXIT);
454 ABTD_thread_exit(p_local, p_thread);
/* Fragment of ABT_thread_cancel: cooperative cancellation request; the
 * main/main-sched ULTs cannot be canceled. */
474 #ifdef ABT_CONFIG_DISABLE_THREAD_CANCEL 478 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
479 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
481 ABTI_CHECK_TRUE_MSG(p_thread->type != ABTI_THREAD_TYPE_MAIN &&
482 p_thread->type != ABTI_THREAD_TYPE_MAIN_SCHED,
484 "The main thread cannot be canceled.");
487 ABTI_thread_set_request(p_thread, ABTI_THREAD_REQ_CANCEL);
/* Fragment of ABT_thread_self: returns the calling ULT's handle, or an
 * error when called from an external thread (p_local == NULL). */
517 ABTI_local *p_local = ABTI_local_get_local();
519 #ifndef ABT_CONFIG_DISABLE_EXT_THREAD 528 if (p_local == NULL) {
535 ABTI_thread *p_thread = p_local->p_thread;
536 if (p_thread != NULL) {
537 *thread = ABTI_thread_get_handle(p_thread);
/* Fragment of ABT_thread_self_id: same external-thread guard, returns
 * the calling ULT's numeric ID. */
562 ABTI_local *p_local = ABTI_local_get_local();
564 #ifndef ABT_CONFIG_DISABLE_EXT_THREAD 572 if (p_local == NULL) {
578 ABTI_thread *p_thread = p_local->p_thread;
579 if (p_thread != NULL) {
580 *
id = ABTI_thread_get_id(p_thread);
/* Fragment of ABT_thread_get_state (body lost in extraction). */
601 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
602 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
/* Fragment of ABT_thread_get_last_pool: the pool the ULT was last
 * associated with. */
631 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
632 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
635 *pool = ABTI_pool_get_handle(p_thread->p_pool);
/* Fragment of ABT_thread_get_last_pool_id. */
663 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
664 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
666 ABTI_ASSERT(p_thread->p_pool);
667 *
id = (int)(p_thread->p_pool->id);
/* Fragment of ABT_thread_set_associated_pool: re-binds the ULT to
 * another pool without pushing it.  NOTE(review): no unit re-creation
 * is visible here — confirm callers handle the unit/pool mismatch. */
698 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
699 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
700 ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
701 ABTI_CHECK_NULL_POOL_PTR(p_pool);
703 p_thread->p_pool = p_pool;
/* Fragment of ABT_thread_yield_to: direct handoff from the calling ULT
 * to a specific target ULT in the same pool.  The caller is pushed back
 * to the shared pool, the target is removed from it, and the contexts
 * are switched directly (bypassing the scheduler). */
728 ABTI_local *p_local = ABTI_local_get_local();
729 ABTI_thread *p_cur_thread = NULL;
731 #ifdef ABT_CONFIG_DISABLE_EXT_THREAD 732 p_cur_thread = p_local->p_thread;
735 if (p_local != NULL) {
736 p_cur_thread = p_local->p_thread;
738 if (p_cur_thread == NULL)
742 ABTI_xstream *p_xstream = p_local->p_xstream;
743 ABTI_thread *p_tar_thread = ABTI_thread_get_ptr(thread);
744 ABTI_CHECK_NULL_THREAD_PTR(p_tar_thread);
745 LOG_EVENT(
"[U%" PRIu64
":E%d] yield_to -> U%" PRIu64
"\n",
746 ABTI_thread_get_id(p_cur_thread),
747 p_cur_thread->p_last_xstream->rank,
748 ABTI_thread_get_id(p_tar_thread));
752 "The caller and target ULTs are the same.");
754 ABTI_CHECK_TRUE_MSG(ABTD_atomic_relaxed_load_int(&p_tar_thread->state) !=
757 "Cannot yield to the terminated thread");
/* Direct handoff only works within one pool; otherwise the target could
 * not be removed/resumed consistently. */
761 ABTI_CHECK_TRUE_MSG(p_cur_thread->p_pool == p_tar_thread->p_pool,
763 "The target thread's pool is not the same as mine.");
766 if (ABTI_thread_is_ready(p_tar_thread) ==
ABT_FALSE) {
/* Caller re-enters its own pool so a scheduler can pick it up later. */
773 ABTI_POOL_PUSH(p_cur_thread->p_pool, p_cur_thread->unit,
774 ABTI_self_get_native_thread_id(p_local));
776 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED 778 if (p_cur_thread->is_sched != NULL) {
779 ABTI_xstream_pop_sched(p_xstream);
785 ABTI_POOL_REMOVE(p_tar_thread->p_pool, p_tar_thread->unit,
786 ABTI_self_get_native_thread_id(p_local));
788 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED 790 if (p_tar_thread->is_sched != NULL) {
791 p_tar_thread->is_sched->p_ctx = ABTI_xstream_get_sched_ctx(p_xstream);
792 ABTI_xstream_push_sched(p_xstream, p_tar_thread->is_sched);
798 p_tar_thread->p_last_xstream = p_xstream;
801 ABTD_atomic_release_store_int(&p_tar_thread->state,
803 ABTI_thread_context_switch_thread_to_thread(&p_local, p_cur_thread,
/* Fragment of ABT_thread_yield: yields the calling ULT back to its
 * scheduler; must be called on the ES the ULT last ran on. */
828 ABTI_local *p_local = ABTI_local_get_local();
829 ABTI_thread *p_thread = NULL;
831 #ifdef ABT_CONFIG_DISABLE_EXT_THREAD 832 p_thread = p_local->p_thread;
835 if (p_local != NULL) {
836 p_thread = p_local->p_thread;
838 if (p_thread == NULL)
842 ABTI_CHECK_TRUE(p_thread->p_last_xstream == p_local->p_xstream,
845 ABTI_thread_yield(&p_local, p_thread);
/* Fragment of ABT_thread_resume: moves a blocked ULT back to READY. */
874 ABTI_local *p_local = ABTI_local_get_local();
875 ABTI_thread *p_thread;
877 p_thread = ABTI_thread_get_ptr(thread);
878 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
880 abt_errno = ABTI_thread_set_ready(p_local, p_thread);
881 ABTI_CHECK_ERROR(abt_errno);
/* Fragment of ABT_thread_migrate_to_xstream: all public migration entry
 * points are compiled out when ABT_CONFIG_DISABLE_MIGRATION is set. */
911 #ifndef ABT_CONFIG_DISABLE_MIGRATION 913 ABTI_local *p_local = ABTI_local_get_local();
914 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
915 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
916 ABTI_xstream *p_xstream = ABTI_xstream_get_ptr(xstream);
917 ABTI_CHECK_NULL_XSTREAM_PTR(p_xstream);
919 abt_errno = ABTI_thread_migrate_to_xstream(&p_local, p_thread, p_xstream);
920 ABTI_CHECK_ERROR(abt_errno);
/* Fragment of ABT_thread_migrate_to_sched: asks the scheduler for a
 * migration pool, then migrates to it.  Main/main-sched ULTs and
 * terminated ULTs are rejected. */
953 #ifndef ABT_CONFIG_DISABLE_MIGRATION 955 ABTI_local *p_local = ABTI_local_get_local();
956 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
957 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
958 ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
959 ABTI_CHECK_NULL_SCHED_PTR(p_sched);
964 ABTI_CHECK_TRUE(p_thread->type != ABTI_THREAD_TYPE_MAIN &&
965 p_thread->type != ABTI_THREAD_TYPE_MAIN_SCHED,
967 ABTI_CHECK_TRUE(ABTD_atomic_acquire_load_int(&p_thread->state) !=
973 ABTI_sched_get_migration_pool(p_sched, p_thread->p_pool, &p_pool);
974 ABTI_CHECK_NULL_POOL_PTR(p_pool);
976 abt_errno = ABTI_thread_migrate_to_pool(&p_local, p_thread, p_pool);
977 ABTI_CHECK_ERROR(abt_errno);
/* Migration counter is bumped only after a successful request. */
979 ABTI_pool_inc_num_migrations(p_pool);
/* Fragment of ABT_thread_migrate_to_pool. */
1009 #ifndef ABT_CONFIG_DISABLE_MIGRATION 1011 ABTI_local *p_local = ABTI_local_get_local();
1012 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1013 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1014 ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
1015 ABTI_CHECK_NULL_POOL_PTR(p_pool);
1017 abt_errno = ABTI_thread_migrate_to_pool(&p_local, p_thread, p_pool);
1018 ABTI_CHECK_ERROR(abt_errno);
1020 ABTI_pool_inc_num_migrations(p_pool);
/* Fragment of ABT_thread_migrate: picks some other running ES than the
 * one the ULT last ran on (selection logic lost in extraction). */
1051 #ifndef ABT_CONFIG_DISABLE_MIGRATION 1054 ABTI_local *p_local = ABTI_local_get_local();
1055 ABTI_xstream *p_xstream;
1057 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1058 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1075 if (p_xstream && p_xstream != p_thread->p_last_xstream) {
1076 if (ABTD_atomic_acquire_load_int(&p_xstream->state) ==
1078 abt_errno = ABTI_thread_migrate_to_xstream(&p_local, p_thread,
1082 ABTI_CHECK_ERROR(abt_errno);
/* Fragment of ABT_thread_set_callback: registers a migration callback
 * (invoked when the ULT migrates; only built with migration support). */
1114 void (*cb_func)(
ABT_thread thread,
void *cb_arg),
1117 #ifndef ABT_CONFIG_DISABLE_MIGRATION 1119 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1120 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1122 p_thread->attr.f_cb = cb_func;
1123 p_thread->attr.p_cb_arg = cb_arg;
/* Fragment of ABT_thread_set_migratable: only plain USER ULTs can have
 * their migratability toggled. */
1153 #ifndef ABT_CONFIG_DISABLE_MIGRATION 1155 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1156 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1158 if (p_thread->type == ABTI_THREAD_TYPE_USER) {
1159 p_thread->attr.migratable = flag;
/* Fragment of ABT_thread_is_migratable. */
1189 #ifndef ABT_CONFIG_DISABLE_MIGRATION 1191 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1192 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1194 *flag = p_thread->attr.migratable;
/* Fragment of ABT_thread_is_primary (body lost in extraction). */
1226 ABTI_thread *p_thread;
1228 p_thread = ABTI_thread_get_ptr(thread);
1229 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
/* Fragment of ABT_thread_equal: compares the underlying pointers. */
1258 ABTI_thread *p_thread1 = ABTI_thread_get_ptr(thread1);
1259 ABTI_thread *p_thread2 = ABTI_thread_get_ptr(thread2);
/* Fragment of ABT_thread_get_stacksize. */
1279 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1280 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1283 *stacksize = p_thread->attr.stacksize;
/* Fragment of ABT_thread_get_id. */
1308 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1309 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1311 *thread_id = ABTI_thread_get_id(p_thread);
/* Fragment of ABT_thread_set_arg: stores the argument in the context. */
1336 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1337 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1339 ABTD_thread_context_set_arg(&p_thread->ctx, arg);
/* Fragment of ABT_thread_get_arg. */
1366 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1367 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1369 *arg = ABTD_thread_context_get_arg(&p_thread->ctx);
/* Fragment of ABT_thread_get_attr: returns a duplicated attribute
 * object — caller owns and must free the returned attr. */
1398 ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1399 ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1401 ABTI_thread_attr *p_attr;
1402 p_attr = ABTI_thread_attr_dup(&p_thread->attr);
1404 *attr = ABTI_thread_attr_get_handle(p_attr);
/* Central ULT factory shared by all create paths.
 * Allocates the descriptor+stack, builds the machine context (three
 * variants: invalidated for main ULTs created lazily without a stack,
 * plain thread context, or scheduler context when p_sched != NULL),
 * initializes bookkeeping fields, and optionally (push_pool) creates a
 * pool unit and pushes it.  On a failed push the half-built ULT is
 * freed with the free routine matching its type.
 * Returns through *pp_newthread; NULL on failure. */
1418 static inline int ABTI_thread_create_internal(
1419 ABTI_local *p_local, ABTI_pool *p_pool,
void (*thread_func)(
void *),
1420 void *arg, ABTI_thread_attr *p_attr, ABTI_thread_type thread_type,
1421 ABTI_sched *p_sched,
int refcount, ABTI_xstream *p_parent_xstream,
1422 ABT_bool push_pool, ABTI_thread **pp_newthread)
1425 ABTI_thread *p_newthread;
1429 p_newthread = ABTI_mem_alloc_thread(p_local, p_attr);
/* Main / main-sched ULTs created without a stack run on the native
 * OS stack; their context is marked invalid rather than created. */
1430 if ((thread_type == ABTI_THREAD_TYPE_MAIN ||
1431 thread_type == ABTI_THREAD_TYPE_MAIN_SCHED) &&
1432 p_newthread->attr.p_stack == NULL) {
1436 abt_errno = ABTD_thread_context_invalidate(&p_newthread->ctx);
1437 }
else if (p_sched == NULL) {
/* With dynamic promotion the context is only initialized lazily. */
1438 #if ABT_CONFIG_THREAD_TYPE != ABT_THREAD_TYPE_DYNAMIC_PROMOTION 1439 size_t stack_size = p_newthread->attr.stacksize;
1440 void *p_stack = p_newthread->attr.p_stack;
1441 abt_errno = ABTD_thread_context_create_thread(NULL, thread_func, arg,
1442 stack_size, p_stack,
1447 ABTD_thread_context_init(NULL, thread_func, arg, &p_newthread->ctx);
1450 size_t stack_size = p_newthread->attr.stacksize;
1451 void *p_stack = p_newthread->attr.p_stack;
1453 ABTD_thread_context_create_sched(NULL, thread_func, arg, stack_size,
1454 p_stack, &p_newthread->ctx);
1456 ABTI_CHECK_ERROR(abt_errno);
1459 ABTD_atomic_release_store_uint32(&p_newthread->request, 0);
1460 p_newthread->p_last_xstream = NULL;
1461 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED 1462 p_newthread->is_sched = p_sched;
1464 p_newthread->p_pool = p_pool;
1465 p_newthread->refcount = refcount;
1466 p_newthread->type = thread_type;
1467 #ifndef ABT_CONFIG_DISABLE_MIGRATION 1468 p_newthread->p_req_arg = NULL;
1470 p_newthread->p_keytable = NULL;
/* ID is assigned lazily on first ABTI_thread_get_id call. */
1471 p_newthread->id = ABTI_THREAD_INIT_ID;
1473 #ifndef ABT_CONFIG_DISABLE_MIGRATION 1475 ABTI_spinlock_clear(&p_newthread->lock);
1478 #ifdef ABT_CONFIG_USE_DEBUG_LOG 1480 if (thread_type == ABTI_THREAD_TYPE_MAIN) {
1481 LOG_EVENT(
"[U%" PRIu64
":E%d] main ULT created\n", thread_id,
1482 p_parent_xstream ? p_parent_xstream->rank : 0);
1483 }
else if (thread_type == ABTI_THREAD_TYPE_MAIN_SCHED) {
1484 LOG_EVENT(
"[U%" PRIu64
":E%d] main sched ULT created\n", thread_id,
1485 p_parent_xstream ? p_parent_xstream->rank : 0);
1487 LOG_EVENT(
"[U%" PRIu64
"] created\n", thread_id);
1492 h_newthread = ABTI_thread_get_handle(p_newthread);
1494 p_newthread->unit = p_pool->u_create_from_thread(h_newthread);
1496 #ifdef ABT_CONFIG_DISABLE_POOL_PRODUCER_CHECK 1497 ABTI_pool_push(p_pool, p_newthread->unit);
1499 abt_errno = ABTI_pool_push(p_pool, p_newthread->unit,
1500 ABTI_self_get_native_thread_id(p_local));
/* Push failed: tear down with the free routine matching the type. */
1502 if (thread_type == ABTI_THREAD_TYPE_MAIN) {
1503 ABTI_thread_free_main(p_local, p_newthread);
1504 }
else if (thread_type == ABTI_THREAD_TYPE_MAIN_SCHED) {
1505 ABTI_thread_free_main_sched(p_local, p_newthread);
1507 ABTI_thread_free(p_local, p_newthread);
1517 *pp_newthread = p_newthread;
1523 *pp_newthread = NULL;
/* Internal convenience wrapper: create a USER ULT in p_pool with the
 * default push behavior; refcount mirrors whether the caller wants the
 * new descriptor back. */
1528 int ABTI_thread_create(ABTI_local *p_local, ABTI_pool *p_pool,
1529 void (*thread_func)(
void *),
void *arg,
1530 ABTI_thread_attr *p_attr, ABTI_thread **pp_newthread)
1533 int refcount = (pp_newthread != NULL) ? 1 : 0;
1535 ABTI_thread_create_internal(p_local, p_pool, thread_func, arg, p_attr,
1536 ABTI_THREAD_TYPE_USER, NULL, refcount, NULL,
/* Request asynchronous migration of p_thread into p_pool.
 * Validates that the target pool accepts units from the source pool and
 * that the ULT is neither a main ULT nor terminated, then records the
 * target pool as a request argument (under the ULT's lock) and raises
 * REQ_MIGRATE.  If the caller is migrating itself it yields immediately
 * so the request is honored. */
1541 int ABTI_thread_migrate_to_pool(ABTI_local **pp_local, ABTI_thread *p_thread,
1544 #ifndef ABT_CONFIG_DISABLE_MIGRATION 1546 ABTI_local *p_local = *pp_local;
1549 ABTI_CHECK_TRUE(ABTI_pool_accept_migration(p_pool, p_thread->p_pool) ==
1552 ABTI_CHECK_TRUE(p_thread->type != ABTI_THREAD_TYPE_MAIN &&
1553 p_thread->type != ABTI_THREAD_TYPE_MAIN_SCHED,
1555 ABTI_CHECK_TRUE(ABTD_atomic_acquire_load_int(&p_thread->state) !=
/* Lock protects the request-argument list against concurrent access. */
1563 ABTI_spinlock_acquire(&p_thread->lock);
1564 ABTI_thread_add_req_arg(p_thread, ABTI_THREAD_REQ_MIGRATE, p_pool);
1565 ABTI_spinlock_release(&p_thread->lock);
1566 ABTI_thread_set_request(p_thread, ABTI_THREAD_REQ_MIGRATE);
1569 if (p_local != NULL && p_thread == p_local->p_thread) {
1570 ABTI_thread_yield(pp_local, p_thread);
/* Create the "main" ULT that wraps the primary OS thread of an ES.
 * Uses a stack-less MAIN attribute (the native stack is reused) and the
 * first pool of the ES's main scheduler. */
1585 int ABTI_thread_create_main(ABTI_local *p_local, ABTI_xstream *p_xstream,
1586 ABTI_thread **p_thread)
1589 ABTI_thread_attr attr;
1590 ABTI_thread *p_newthread;
1594 p_pool = ABTI_pool_get_ptr(p_xstream->p_main_sched->pools[0]);
1599 ABTI_thread_attr_init(&attr, NULL, 0, ABTI_STACK_TYPE_MAIN,
ABT_FALSE);
1605 abt_errno = ABTI_thread_create_internal(p_local, p_pool, NULL, NULL, &attr,
1606 ABTI_THREAD_TYPE_MAIN, NULL, 0,
1607 p_xstream, push_pool, &p_newthread);
1608 ABTI_CHECK_ERROR(abt_errno);
1611 *p_thread = p_newthread;
/* Create the ULT that runs an ES's main scheduler.  On the primary ES
 * the scheduler gets its own stack and is linked back to the main ULT's
 * context so control returns there; on secondary ESs it reuses the
 * native stack (MAIN stack type). */
1623 int ABTI_thread_create_main_sched(ABTI_local *p_local, ABTI_xstream *p_xstream,
1624 ABTI_sched *p_sched)
1627 ABTI_thread *p_newthread;
1630 if (p_xstream->type == ABTI_XSTREAM_TYPE_PRIMARY) {
1632 ABTI_thread_attr attr;
1633 ABTI_thread_attr_init(&attr, NULL, ABTI_global_get_sched_stacksize(),
1635 ABTI_thread *p_main_thread = ABTI_global_get_main();
1637 ABTI_thread_create_internal(p_local, NULL, ABTI_xstream_schedule,
1638 (
void *)p_xstream, &attr,
1639 ABTI_THREAD_TYPE_MAIN_SCHED, p_sched, 0,
1641 ABTI_CHECK_ERROR(abt_errno);
/* Chain: when the main scheduler finishes it resumes the main ULT. */
1644 ABTD_atomic_relaxed_store_thread_context_ptr(&p_newthread->ctx.p_link,
1645 &p_main_thread->ctx);
1649 ABTI_thread_attr attr;
1650 ABTI_thread_attr_init(&attr, NULL, 0, ABTI_STACK_TYPE_MAIN,
ABT_FALSE);
1652 ABTI_thread_create_internal(p_local, NULL, ABTI_xstream_schedule,
1653 (
void *)p_xstream, &attr,
1654 ABTI_THREAD_TYPE_MAIN_SCHED, p_sched, 0,
1656 ABTI_CHECK_ERROR(abt_errno);
1660 p_sched->p_thread = p_newthread;
1661 p_sched->p_ctx = &p_newthread->ctx;
/* Create (or revive) the ULT backing a stacked scheduler.  If the
 * scheduler already owns a terminated ULT it is revived with the
 * scheduler's run function; otherwise a fresh USER ULT is created with
 * the scheduler stack size. */
1672 int ABTI_thread_create_sched(ABTI_local *p_local, ABTI_pool *p_pool,
1673 ABTI_sched *p_sched)
1676 ABTI_thread *p_newthread;
1677 ABTI_thread_attr attr;
1680 if (p_sched->p_thread) {
1681 ABT_sched h_sched = ABTI_sched_get_handle(p_sched);
1683 ABTI_thread_revive(p_local, p_pool, (
void (*)(
void *))p_sched->run,
1684 (
void *)h_sched, p_sched->p_thread);
1685 ABTI_CHECK_ERROR(abt_errno);
1690 ABTI_thread_attr_init(&attr, NULL, ABTI_global_get_sched_stacksize(),
1693 ABTI_thread_create_internal(p_local, p_pool,
1694 (
void (*)(
void *))p_sched->run,
1695 (
void *)ABTI_sched_get_handle(p_sched),
1696 &attr, ABTI_THREAD_TYPE_USER, p_sched, 1,
1698 ABTI_CHECK_ERROR(abt_errno);
1704 p_sched->p_thread = NULL;
/* Release a ULT's pool unit, machine context, and key table (if any).
 * The descriptor itself is freed by the caller via ABTI_mem_free_thread. */
1709 static inline void ABTI_thread_free_internal(ABTI_thread *p_thread)
1712 p_thread->p_pool->u_free(&p_thread->unit);
1715 ABTD_thread_context_free(&p_thread->ctx);
1718 if (p_thread->p_keytable) {
1719 ABTI_ktable_free(p_thread->p_keytable);
/* Free a regular ULT.  The lock acquire (migration builds) serializes
 * against a concurrent migration request touching p_req_arg.
 * NOTE(review): no matching release is visible in this extraction —
 * confirm against the full source. */
1723 void ABTI_thread_free(ABTI_local *p_local, ABTI_thread *p_thread)
1725 #ifndef ABT_CONFIG_DISABLE_MIGRATION 1728 ABTI_spinlock_acquire(&p_thread->lock);
1731 LOG_EVENT(
"[U%" PRIu64
":E%d] freed\n", ABTI_thread_get_id(p_thread),
1732 p_thread->p_last_xstream->rank);
1734 ABTI_thread_free_internal(p_thread);
1737 ABTI_mem_free_thread(p_local, p_thread);
/* Free the main ULT: it has no created context or pool unit of its own,
 * so only the key table and the descriptor are released. */
1740 void ABTI_thread_free_main(ABTI_local *p_local, ABTI_thread *p_thread)
1742 LOG_EVENT(
"[U%" PRIu64
":E%d] main ULT freed\n",
1743 ABTI_thread_get_id(p_thread), p_thread->p_last_xstream->rank);
1746 if (p_thread->p_keytable) {
1747 ABTI_ktable_free(p_thread->p_keytable);
1750 ABTI_mem_free_thread(p_local, p_thread);
/* Free the main-scheduler ULT: frees its context (it may own a stack),
 * its key table, and the descriptor; it has no pool unit. */
1753 void ABTI_thread_free_main_sched(ABTI_local *p_local, ABTI_thread *p_thread)
1755 LOG_EVENT(
"[U%" PRIu64
":E%d] main sched ULT freed\n",
1756 ABTI_thread_get_id(p_thread), p_thread->p_last_xstream->rank);
1759 ABTD_thread_context_free(&p_thread->ctx);
1762 if (p_thread->p_keytable) {
1763 ABTI_ktable_free(p_thread->p_keytable);
1766 ABTI_mem_free_thread(p_local, p_thread);
/* Mark p_thread as about to block: raises REQ_BLOCK and increments the
 * pool's blocked counter.  Main-scheduler ULTs cannot block this way. */
1769 int ABTI_thread_set_blocked(ABTI_thread *p_thread)
1774 ABTI_CHECK_TRUE(p_thread->type != ABTI_THREAD_TYPE_MAIN_SCHED,
1778 ABTI_thread_set_request(p_thread, ABTI_THREAD_REQ_BLOCK);
1784 ABTI_pool *p_pool = p_thread->p_pool;
1785 ABTI_pool_inc_num_blocked(p_pool);
1787 LOG_EVENT(
"[U%" PRIu64
":E%d] blocked\n", ABTI_thread_get_id(p_thread),
1788 p_thread->p_last_xstream->rank);
/* Switch the calling ULT out to its ES's top scheduler; returns when
 * the ULT is later resumed.  Caller must be the current ULT on the
 * current ES (asserted). */
1799 void ABTI_thread_suspend(ABTI_local **pp_local, ABTI_thread *p_thread)
1801 ABTI_local *p_local = *pp_local;
1802 ABTI_ASSERT(p_thread == p_local->p_thread);
1803 ABTI_ASSERT(p_thread->p_last_xstream == p_local->p_xstream);
1806 ABTI_xstream *p_xstream = p_local->p_xstream;
1807 ABTI_sched *p_sched = ABTI_xstream_get_top_sched(p_xstream);
1808 LOG_EVENT(
"[U%" PRIu64
":E%d] suspended\n", ABTI_thread_get_id(p_thread),
1810 ABTI_thread_context_switch_thread_to_sched(pp_local, p_thread, p_sched);
/* Execution resumes here once a scheduler picks the ULT back up. */
1813 LOG_EVENT(
"[U%" PRIu64
":E%d] resumed\n", ABTI_thread_get_id(p_thread),
1814 p_thread->p_last_xstream->rank);
/* Move a blocked ULT back to READY: waits (spin) until REQ_BLOCK has
 * been acknowledged, re-adds the ULT to its pool, and decrements the
 * blocked counter. */
1817 int ABTI_thread_set_ready(ABTI_local *p_local, ABTI_thread *p_thread)
1822 ABTI_CHECK_TRUE(ABTD_atomic_acquire_load_int(&p_thread->state) ==
/* Spin until the block request has actually taken effect, so the ULT
 * is not pushed while still transitioning. */
1829 while (ABTD_atomic_acquire_load_uint32(&p_thread->request) &
1830 ABTI_THREAD_REQ_BLOCK)
1831 ABTD_atomic_pause();
1833 LOG_EVENT(
"[U%" PRIu64
":E%d] set ready\n", ABTI_thread_get_id(p_thread),
1834 p_thread->p_last_xstream->rank);
1840 ABTI_pool *p_pool = p_thread->p_pool;
1843 ABTI_POOL_ADD_THREAD(p_thread, ABTI_self_get_native_thread_id(p_local));
1846 ABTI_pool_dec_num_blocked(p_pool);
/* A ULT is "ready" when it sits in its pool in the READY state. */
1856 static inline ABT_bool ABTI_thread_is_ready(ABTI_thread *p_thread)
1862 ABTI_pool *p_pool = p_thread->p_pool;
1863 if (p_pool->u_is_in_pool(p_thread->unit) ==
ABT_TRUE &&
1864 ABTD_atomic_acquire_load_int(&p_thread->state) ==
/* Pretty-print a ULT descriptor (type, state, last ES, pool, refcount,
 * request flags, attributes) to p_os with the given indent. */
1872 void ABTI_thread_print(ABTI_thread *p_thread, FILE *p_os,
int indent)
1876 if (p_thread == NULL) {
1877 fprintf(p_os,
"%s== NULL ULT ==\n", prefix);
1881 ABTI_xstream *p_xstream = p_thread->p_last_xstream;
/* rank 0 is also used as the fallback when the ULT never ran. */
1882 int xstream_rank = p_xstream ? p_xstream->rank : 0;
1886 switch (p_thread->type) {
1887 case ABTI_THREAD_TYPE_MAIN:
1890 case ABTI_THREAD_TYPE_MAIN_SCHED:
1891 type =
"MAIN_SCHED";
1893 case ABTI_THREAD_TYPE_USER:
1900 switch (ABTD_atomic_acquire_load_int(&p_thread->state)) {
1911 state =
"TERMINATED";
1917 ABTI_thread_attr_get_str(&p_thread->attr, attr);
1920 "%s== ULT (%p) ==\n" 1921 "%sid : %" PRIu64
"\n" 1924 "%slast_ES : %p (%d)\n" 1925 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
1930 "%srequest : 0x%x\n" 1931 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1936 prefix, (
void *)p_thread, prefix, ABTI_thread_get_id(p_thread),
1937 prefix, type, prefix, state, prefix, (
void *)p_xstream,
1939 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
1940 prefix, (
void *)p_thread->is_sched,
1942 prefix, (
void *)p_thread->p_pool, prefix, p_thread->refcount,
1943 prefix, ABTD_atomic_acquire_load_uint32(&p_thread->request),
1944 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1945 prefix, (
void *)p_thread->p_req_arg,
1947 prefix, (
void *)p_thread->p_keytable, prefix, attr);
/* Hex-dump the ULT's stack, 32 bytes per line, grouped into 8-byte
 * words (value_width).  Address format depends on pointer size. */
1954 int ABTI_thread_print_stack(ABTI_thread *p_thread, FILE *p_os)
1956 void *p_stack = p_thread->attr.p_stack;
1957 size_t i, j, stacksize = p_thread->attr.stacksize;
1958 if (stacksize == 0 || p_stack == NULL) {
1963 const size_t value_width = 8;
1964 const int num_bytes = 32;
1965 char *buffer = (
char *)alloca(num_bytes);
1966 for (i = 0; i < stacksize; i += num_bytes) {
1967 if (stacksize >= i + num_bytes) {
1968 memcpy(buffer, &((uint8_t *)p_stack)[i], num_bytes);
/* Last partial line: zero-pad the tail before printing. */
1970 memset(buffer, 0, num_bytes);
1971 memcpy(buffer, &((uint8_t *)p_stack)[i], stacksize - i);
1974 #if SIZEOF_VOID_P == 8 1975 fprintf(p_os,
"%016" PRIxPTR
":",
1976 (uintptr_t)(&((uint8_t *)p_stack)[i]));
1977 #elif SIZEOF_VOID_P == 4 1978 fprintf(p_os,
"%08" PRIxPTR
":", (uintptr_t)(&((uint8_t *)p_stack)[i]));
1980 #error "unknown pointer size" 1983 for (j = 0; j < num_bytes / value_width; j++) {
1984 if (value_width == 8) {
1985 uint64_t val = ((uint64_t *)buffer)[j];
1986 fprintf(p_os,
" %016" PRIx64, val);
1987 }
else if (value_width == 4) {
1988 uint32_t val = ((uint32_t *)buffer)[j];
1989 fprintf(p_os,
" %08" PRIx32, val);
1990 }
else if (value_width == 2) {
1991 uint16_t val = ((uint16_t *)buffer)[j];
1992 fprintf(p_os,
" %04" PRIx16, val);
1994 uint8_t val = ((uint8_t *)buffer)[j];
1995 fprintf(p_os,
" %02" PRIx8, val);
1997 if (j == (num_bytes / value_width) - 1)
1998 fprintf(p_os,
"\n");
/* Request-argument list management (migration builds only).  The list
 * maps a request flag (e.g. REQ_MIGRATE) to an opaque argument (e.g.
 * the target pool).  Callers of add/extract are expected to hold
 * p_thread->lock; put/get take it themselves. */

/* Add or overwrite the argument for `req`.  If the request is already
 * queued its argument is replaced in place; otherwise a new node is
 * appended at the tail. */
2004 #ifndef ABT_CONFIG_DISABLE_MIGRATION 2005 void ABTI_thread_add_req_arg(ABTI_thread *p_thread, uint32_t req,
void *arg)
2007 ABTI_thread_req_arg *
new;
2008 ABTI_thread_req_arg *p_head = p_thread->p_req_arg;
2011 while (p_head != NULL) {
2012 if (p_head->request == req) {
2013 p_head->p_arg = arg;
2018 new = (ABTI_thread_req_arg *)
ABTU_malloc(
sizeof(ABTI_thread_req_arg));
2025 if (p_head == NULL) {
2026 p_thread->p_req_arg =
new;
2028 while (p_head->next != NULL)
2029 p_head = p_head->next;
/* Remove and return the argument stored for `req`, or NULL if absent.
 * Unlinks the matching node (freeing lost in extraction — TODO confirm
 * the node is freed in the full source). */
2034 void *ABTI_thread_extract_req_arg(ABTI_thread *p_thread, uint32_t req)
2036 void *result = NULL;
2037 ABTI_thread_req_arg *p_last = NULL, *p_head = p_thread->p_req_arg;
2039 while (p_head != NULL) {
2040 if (p_head->request == req) {
2041 result = p_head->p_arg;
2043 p_thread->p_req_arg = p_head->next;
2045 p_last->next = p_head->next;
2050 p_head = p_head->next;
/* Append a caller-allocated node to the list, under the ULT's lock. */
2056 void ABTI_thread_put_req_arg(ABTI_thread *p_thread,
2057 ABTI_thread_req_arg *p_req_arg)
2059 ABTI_spinlock_acquire(&p_thread->lock);
2060 ABTI_thread_req_arg *p_head = p_thread->p_req_arg;
2062 if (p_head == NULL) {
2063 p_thread->p_req_arg = p_req_arg;
2065 while (p_head->next != NULL) {
2066 p_head = p_head->next;
2068 p_head->next = p_req_arg;
2070 ABTI_spinlock_release(&p_thread->lock);
/* Unlink and return the node for `req` (NULL if absent), under lock.
 * Ownership of the node transfers to the caller. */
2073 ABTI_thread_req_arg *ABTI_thread_get_req_arg(ABTI_thread *p_thread,
2076 ABTI_thread_req_arg *p_result = NULL;
2077 ABTI_thread_req_arg *p_last = NULL;
2079 ABTI_spinlock_acquire(&p_thread->lock);
2080 ABTI_thread_req_arg *p_head = p_thread->p_req_arg;
2081 while (p_head != NULL) {
2082 if (p_head->request == req) {
2085 p_thread->p_req_arg = p_head->next;
2087 p_last->next = p_head->next;
2091 p_head = p_head->next;
2093 ABTI_spinlock_release(&p_thread->lock);
/* Global monotonically-increasing ULT ID counter (atomic). */
2100 ABTD_ATOMIC_UINT64_STATIC_INITIALIZER(0);
/* Reset the ID counter to zero (used at reinitialization). */
2101 void ABTI_thread_reset_id(
void)
2103 ABTD_atomic_release_store_uint64(&g_thread_id, 0);
/* Return the ULT's ID, assigning a fresh one lazily on first use.
 * NULL maps to the sentinel ABTI_THREAD_INIT_ID. */
2108 if (p_thread == NULL)
2109 return ABTI_THREAD_INIT_ID;
2111 if (p_thread->id == ABTI_THREAD_INIT_ID) {
2112 p_thread->id = ABTI_thread_get_new_id();
2114 return p_thread->id;
/* ID of the calling ULT (fragment of ABTI_thread_self_id). */
2119 ABTI_thread *p_self = NULL;
2121 p_self = p_local->p_thread;
2122 return ABTI_thread_get_id(p_self);
/* Rank of the ES the ULT last ran on; fallback path lost in extraction. */
2125 int ABTI_thread_get_xstream_rank(ABTI_thread *p_thread)
2127 if (p_thread == NULL)
2130 if (p_thread->p_last_xstream) {
2131 return p_thread->p_last_xstream->rank;
/* Rank of the calling ULT's last ES. */
2137 int ABTI_thread_self_xstream_rank(ABTI_local *p_local)
2139 ABTI_thread *p_self = NULL;
2141 p_self = p_local->p_thread;
2142 return ABTI_thread_get_xstream_rank(p_self);
/* Re-arm a TERMINATED ULT with a new function/argument and push it back
 * into p_pool, reusing its existing stack.  If the target pool differs
 * from the ULT's old pool, the old pool unit is freed and a new one is
 * created for the target pool. */
2149 static int ABTI_thread_revive(ABTI_local *p_local, ABTI_pool *p_pool,
2150 void (*thread_func)(
void *),
void *arg,
2151 ABTI_thread *p_thread)
/* Only a fully terminated ULT can be revived. */
2156 ABTI_CHECK_TRUE(ABTD_atomic_relaxed_load_int(&p_thread->state) ==
2161 stacksize = p_thread->attr.stacksize;
/* Scheduler-backed ULTs need a scheduler context; plain ULTs a thread
 * context — both recreated over the retained stack. */
2162 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED 2163 if (p_thread->is_sched) {
2165 ABTD_thread_context_create_sched(NULL, thread_func, arg, stacksize,
2166 p_thread->attr.p_stack,
2171 ABTD_thread_context_create_thread(NULL, thread_func, arg, stacksize,
2172 p_thread->attr.p_stack,
2174 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED 2177 ABTI_CHECK_ERROR(abt_errno);
2180 ABTD_atomic_relaxed_store_uint32(&p_thread->request, 0);
2181 p_thread->p_last_xstream = NULL;
2182 p_thread->refcount = 1;
2183 p_thread->type = ABTI_THREAD_TYPE_USER;
2185 if (p_thread->p_pool != p_pool) {
/* Rebind the unit to the new pool. */
2187 p_thread->p_pool->u_free(&p_thread->unit);
2190 p_thread->p_pool = p_pool;
2193 ABT_thread h_thread = ABTI_thread_get_handle(p_thread);
2194 p_thread->unit = p_pool->u_create_from_thread(h_thread);
2197 LOG_EVENT(
"[U%" PRIu64
"] revived\n", ABTI_thread_get_id(p_thread));
2200 #ifdef ABT_CONFIG_DISABLE_POOL_PRODUCER_CHECK 2201 ABTI_pool_push(p_pool, p_thread->unit);
2203 abt_errno = ABTI_pool_push(p_pool, p_thread->unit,
2204 ABTI_self_get_native_thread_id(p_local));
2205 ABTI_CHECK_ERROR(abt_errno);
/* Wait until p_thread terminates.  Three strategies, chosen by caller
 * context (several branch conditions are lost in this extraction):
 *  1. Same pool + target still READY in pool: pull the target out of
 *    the pool and context-switch to it directly, linking the target's
 *    ctx back to the caller so the caller resumes on termination.
 *  2. Different pools: set a JOIN request, block the caller, and link
 *    the contexts so termination wakes the caller.
 *  3. External (non-Argobots) caller: busy-wait on the state. */
2216 static inline int ABTI_thread_join(ABTI_local **pp_local, ABTI_thread *p_thread)
/* Already terminated: nothing to wait for. */
2220 if (ABTD_atomic_acquire_load_int(&p_thread->state) ==
2224 ABTI_CHECK_TRUE_MSG(p_thread->type != ABTI_THREAD_TYPE_MAIN &&
2225 p_thread->type != ABTI_THREAD_TYPE_MAIN_SCHED,
2228 ABTI_local *p_local = *pp_local;
2229 #ifndef ABT_CONFIG_DISABLE_EXT_THREAD 2232 goto busywait_based;
2236 "The target ULT should be different.");
2238 ABTI_thread *p_self = p_local->p_thread;
2241 if ((p_self->p_pool == p_thread->p_pool) &&
2244 (ABTD_atomic_acquire_load_int(&p_thread->state) ==
2247 ABTI_xstream *p_xstream = p_self->p_last_xstream;
/* Wait until the target is actually visible in the pool before
 * removing it (it may be mid-push). */
2254 while (p_thread->p_pool->u_is_in_pool(p_thread->unit) !=
ABT_TRUE) {
2260 ABTI_pool_inc_num_blocked(p_self->p_pool);
2262 ABTI_POOL_REMOVE(p_thread->p_pool, p_thread->unit,
2263 ABTI_self_get_native_thread_id(p_local));
/* Link target -> caller so the caller resumes on target exit. */
2267 ABTD_atomic_relaxed_store_thread_context_ptr(&p_thread->ctx.p_link,
2270 p_thread->p_last_xstream = p_xstream;
2271 ABTD_atomic_release_store_int(&p_thread->state,
2277 LOG_EVENT(
"[U%" PRIu64
":E%d] blocked to join U%" PRIu64
"\n",
2278 ABTI_thread_get_id(p_self), p_self->p_last_xstream->rank,
2279 ABTI_thread_get_id(p_thread));
2280 LOG_EVENT(
"[U%" PRIu64
":E%d] start running\n",
2281 ABTI_thread_get_id(p_thread), p_thread->p_last_xstream->rank);
2284 ABTI_thread_context_switch_thread_to_thread(pp_local, p_self, p_thread);
2285 p_local = *pp_local;
2287 }
else if ((p_self->p_pool != p_thread->p_pool) &&
/* Cross-pool join: raise REQ_JOIN once; if it was already set,
 * another joiner exists and we fall through to yield-waiting. */
2299 uint32_t req = ABTD_atomic_fetch_or_uint32(&p_thread->request,
2300 ABTI_THREAD_REQ_JOIN);
2301 if (req & ABTI_THREAD_REQ_JOIN)
2304 ABTI_thread_set_blocked(p_self);
2305 LOG_EVENT(
"[U%" PRIu64
":E%d] blocked to join U%" PRIu64
"\n",
2306 ABTI_thread_get_id(p_self), p_self->p_last_xstream->rank,
2307 ABTI_thread_get_id(p_thread));
2312 ABTD_atomic_release_store_thread_context_ptr(&p_thread->ctx.p_link,
2316 ABTI_thread_suspend(pp_local, p_self);
2317 p_local = *pp_local;
2326 if (ABTD_atomic_relaxed_load_int(&p_self->state) ==
2329 ABTI_pool_dec_num_blocked(p_self->p_pool);
2330 LOG_EVENT(
"[U%" PRIu64
":E%d] resume after join\n",
2331 ABTI_thread_get_id(p_self), p_self->p_last_xstream->rank);
/* Yield-based fallback: keep yielding until the target terminates. */
2336 while (ABTD_atomic_acquire_load_int(&p_thread->state) !=
2338 ABTI_thread_yield(pp_local, p_local->p_thread);
2339 p_local = *pp_local;
/* External-thread fallback: spin until termination. */
2343 #ifndef ABT_CONFIG_DISABLE_EXT_THREAD 2346 while (ABTD_atomic_acquire_load_int(&p_thread->state) !=
2348 ABTD_atomic_pause();
/* Migrate p_thread to a pool on p_xstream.  Loops (do/while visible at
 * the bottom) under the ES's sched_lock until a usable migration pool
 * is obtained from either the ES's top scheduler (if running) or its
 * main scheduler; a terminating ES aborts the attempt.  The migration
 * counter incremented here is rolled back if the pool-level migration
 * request fails. */
2359 #ifndef ABT_CONFIG_DISABLE_MIGRATION 2360 static int ABTI_thread_migrate_to_xstream(ABTI_local **pp_local,
2361 ABTI_thread *p_thread,
2362 ABTI_xstream *p_xstream)
2367 ABTI_CHECK_TRUE(ABTD_atomic_acquire_load_int(&p_xstream->state) !=
2370 ABTI_CHECK_TRUE(p_thread->type != ABTI_THREAD_TYPE_MAIN &&
2371 p_thread->type != ABTI_THREAD_TYPE_MAIN_SCHED,
2373 ABTI_CHECK_TRUE(ABTD_atomic_acquire_load_int(&p_thread->state) !=
2378 ABTI_pool *p_pool = NULL;
2379 ABTI_sched *p_sched = NULL;
/* sched_lock keeps the scheduler stack stable while we pick a pool. */
2381 ABTI_spinlock_acquire(&p_xstream->sched_lock);
2384 if (ABTD_atomic_acquire_load_int(&p_xstream->state) ==
2387 ABTI_spinlock_release(&p_xstream->sched_lock);
2390 }
else if (ABTD_atomic_acquire_load_int(&p_xstream->state) ==
2392 p_sched = ABTI_xstream_get_top_sched(p_xstream);
2395 p_sched = p_xstream->p_main_sched;
2401 ABTI_spinlock_release(&p_xstream->sched_lock);
2405 ABTI_sched_get_migration_pool(p_sched, p_thread->p_pool, &p_pool);
2406 if (p_pool == NULL) {
2408 ABTI_spinlock_release(&p_xstream->sched_lock);
2413 ABTI_pool_inc_num_migrations(p_pool);
2415 ABTI_spinlock_release(&p_xstream->sched_lock);
2416 }
while (p_pool == NULL);
2418 abt_errno = ABTI_thread_migrate_to_pool(pp_local, p_thread, p_pool);
/* Roll back the optimistic migration count on failure. */
2420 ABTI_pool_dec_num_migrations(p_pool);
/* Tail of ABTI_thread_get_new_id: atomically allocate the next ID. */
2435 return (
ABT_thread_id)ABTD_atomic_fetch_add_uint64(&g_thread_id, 1);
struct ABT_thread_attr_opaque * ABT_thread_attr
int ABT_thread_exit(void)
The calling ULT terminates its execution.
int ABT_thread_get_last_pool_id(ABT_thread thread, int *id)
Get the last pool's ID of the ULT.
int ABT_thread_get_state(ABT_thread thread, ABT_thread_state *state)
Return the state of the target thread.
struct ABT_xstream_opaque * ABT_xstream
struct ABT_sched_opaque * ABT_sched
int ABT_thread_join_many(int num_threads, ABT_thread *thread_list)
Wait for a number of ULTs to terminate.
char * ABTU_get_indent_str(int indent)
int ABT_thread_cancel(ABT_thread thread)
Request the cancellation of the target thread.
static ABTD_atomic_uint64 g_thread_id
#define ABT_ERR_INV_THREAD
int ABT_thread_free(ABT_thread *thread)
Release the thread object associated with thread handle.
int ABT_thread_get_attr(ABT_thread thread, ABT_thread_attr *attr)
Get attributes of the target ULT.
int ABT_thread_resume(ABT_thread thread)
Resume the target ULT.
static void * ABTU_malloc(size_t size)
int ABT_thread_migrate(ABT_thread thread)
Request migration of the thread to any available ES.
int ABT_thread_migrate_to_xstream(ABT_thread thread, ABT_xstream xstream)
Migrate a thread to a specific ES.
struct ABT_pool_opaque * ABT_pool
int ABT_thread_create(ABT_pool pool, void(*thread_func)(void *), void *arg, ABT_thread_attr attr, ABT_thread *newthread)
Create a new thread and return its handle through newthread.
int ABT_thread_set_callback(ABT_thread thread, void(*cb_func)(ABT_thread thread, void *cb_arg), void *cb_arg)
Set the callback function.
int ABT_thread_yield(void)
Yield the processor from the current running ULT back to the scheduler.
int ABT_thread_get_last_pool(ABT_thread thread, ABT_pool *pool)
Return the last pool of the ULT.
struct ABT_thread_opaque * ABT_thread
int ABT_thread_self_id(ABT_thread_id *id)
Return the calling ULT's ID.
int ABT_thread_free_many(int num, ABT_thread *thread_list)
Release a set of ULT objects.
#define HANDLE_ERROR_FUNC_WITH_CODE(n)
int ABT_thread_get_stacksize(ABT_thread thread, size_t *stacksize)
Get the ULT's stack size.
int ABT_thread_is_migratable(ABT_thread thread, ABT_bool *flag)
Get the ULT's migratability.
ABTI_global * gp_ABTI_global
#define LOG_EVENT(fmt,...)
int ABT_thread_is_primary(ABT_thread thread, ABT_bool *flag)
Check if the target ULT is the primary ULT.
int ABT_thread_create_many(int num, ABT_pool *pool_list, void(**thread_func_list)(void *), void **arg_list, ABT_thread_attr attr, ABT_thread *newthread_list)
Create a set of ULTs.
int ABT_thread_set_migratable(ABT_thread thread, ABT_bool flag)
Set the ULT's migratability.
#define ABT_THREAD_ATTR_NULL
int ABT_thread_join(ABT_thread thread)
Wait for thread to terminate.
#define ABT_ERR_UNINITIALIZED
int ABT_thread_migrate_to_pool(ABT_thread thread, ABT_pool pool)
Migrate a thread to a specific pool.
int ABT_thread_set_associated_pool(ABT_thread thread, ABT_pool pool)
Set the associated pool for the target ULT.
int ABT_thread_get_id(ABT_thread thread, ABT_thread_id *thread_id)
Get the ULT's id.
int ABT_thread_set_arg(ABT_thread thread, void *arg)
Set the argument for the ULT function.
#define ABT_ERR_FEATURE_NA
#define ABT_ERR_MIGRATION_NA
int ABT_thread_get_arg(ABT_thread thread, void **arg)
Retrieve the argument for the ULT function.
#define ABT_ERR_MIGRATION_TARGET
int ABT_thread_create_on_xstream(ABT_xstream xstream, void(*thread_func)(void *), void *arg, ABT_thread_attr attr, ABT_thread *newthread)
Create a new ULT associated with the target ES (xstream).
int ABT_thread_yield_to(ABT_thread thread)
Yield the processor from the currently running thread to the specified thread.
#define ABT_ERR_INV_XSTREAM
int ABT_thread_revive(ABT_pool pool, void(*thread_func)(void *), void *arg, ABT_thread *thread)
Revive the ULT.
#define ABT_ERR_INV_THREAD_ATTR
int ABT_thread_equal(ABT_thread thread1, ABT_thread thread2, ABT_bool *result)
Compare two ULT handles for equality.
static void ABTU_free(void *ptr)
int ABT_thread_self(ABT_thread *thread)
Return the handle of the calling ULT.
int ABT_thread_migrate_to_sched(ABT_thread thread, ABT_sched sched)
Migrate a thread to a specific scheduler.