#ifdef ABT_CONFIG_USE_DEBUG_LOG
static inline uint64_t ABTI_sched_get_new_id(void);
#endif
    /* ABT_sched_create(): create a user-defined scheduler. */
    abt_errno = ABTI_sched_create(def, num_pools, pools, config, def_automatic,
                                  &p_sched);
    ABTI_CHECK_ERROR(abt_errno);

    /* Return value */
    *newsched = ABTI_sched_get_handle(p_sched);
    /* ABT_sched_create_basic(): create a predefined scheduler. */
    ABTI_sched *p_newsched;
    abt_errno =
        ABTI_sched_create_basic(predef, num_pools, pools, config, &p_newsched);
    ABTI_CHECK_ERROR(abt_errno);
    *newsched = ABTI_sched_get_handle(p_newsched);
    /* ABT_sched_free(): release the scheduler and, if automatic, its pools. */
    ABTI_local *p_local = ABTI_local_get_local();
    ABTI_sched *p_sched = ABTI_sched_get_ptr(*sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    abt_errno = ABTI_sched_free(p_local, p_sched);
    ABTI_CHECK_ERROR(abt_errno);
    /* ABT_sched_get_num_pools(): number of pools associated with sched. */
    ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    *num_pools = p_sched->num_pools;
    /* ABT_sched_get_pools(): copy max_pools pool handles, starting at idx. */
    ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    ABTI_CHECK_TRUE(idx + max_pools <= p_sched->num_pools, ABT_ERR_SCHED);

    for (p = idx; p < idx + max_pools; p++) {
        pools[p - idx] = p_sched->pools[p];
    }
    /* ABT_sched_finish(): ask the scheduler to stop once its pools are empty. */
    ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    ABTI_sched_finish(p_sched);
    /* ABT_sched_exit(): ask the scheduler to stop as soon as possible. */
    ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    ABTI_sched_exit(p_sched);
    /* ABT_sched_has_to_stop(): check whether the calling scheduler must stop. */
    ABTI_local *p_local = ABTI_local_get_local();

    /* This routine must be called by a scheduler running on an execution stream. */
    if (p_local == NULL) {
        /* ... */
    }
    ABTI_xstream *p_xstream = p_local->p_xstream;

    ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    *stop = ABTI_sched_has_to_stop(&p_local, p_sched, p_xstream);
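ABT_sched_has_to_stop() is what a user-defined run() callback is expected to poll between scheduling passes; the internal helper below turns the pending FINISH/EXIT requests and the remaining work into the answer. The following is a minimal sketch of such a loop, not code from this file: the name example_sched_run and the single-pool layout are assumptions, only the ABT_* calls are real API.

#include <abt.h>

/* Sketch only: a run() callback that polls ABT_sched_has_to_stop(). */
void example_sched_run(ABT_sched sched)
{
    ABT_pool pool;
    ABT_sched_get_pools(sched, 1, 0, &pool); /* first pool of this scheduler */

    while (1) {
        ABT_unit unit;
        /* Pop one work unit and run it if the pool is not empty. */
        ABT_pool_pop(pool, &unit);
        if (unit != ABT_UNIT_NULL)
            ABT_xstream_run_unit(unit, pool);

        /* Honor requests posted by ABT_sched_finish()/ABT_sched_exit(). */
        ABT_bool stop;
        ABT_xstream_check_events(sched);
        ABT_sched_has_to_stop(sched, &stop);
        if (stop == ABT_TRUE)
            break;
    }
}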
ABT_bool ABTI_sched_has_to_stop(ABTI_local **pp_local, ABTI_sched *p_sched,
                                ABTI_xstream *p_xstream)
{
    size_t size;

    /* An exit request stops the scheduler regardless of remaining work. */
    if (ABTD_atomic_acquire_load_uint32(&p_sched->request) &
        ABTI_SCHED_REQ_EXIT) {
        ABTI_spinlock_acquire(&p_xstream->sched_lock);
        /* ... */
    }

    size = ABTI_sched_get_effective_size(*pp_local, p_sched);
    if (size == 0) {
        if (ABTD_atomic_acquire_load_uint32(&p_sched->request) &
            ABTI_SCHED_REQ_FINISH) {
            /* A finish request stops the scheduler only when no work is left;
             * the size is re-read under the lock to close the race with
             * concurrently pushed units. */
            ABTI_spinlock_acquire(&p_xstream->sched_lock);
            size = ABTI_sched_get_effective_size(*pp_local, p_sched);
            /* ... */
            ABTI_spinlock_release(&p_xstream->sched_lock);
        } else if (p_sched->used == ABTI_SCHED_IN_POOL) {
            /* A stacked scheduler with no work yields to its parent. */
            ABTI_sched *p_par_sched;
            p_par_sched = ABTI_xstream_get_parent_sched(p_xstream);
            ABTI_thread_context_switch_sched_to_sched(pp_local, p_sched,
                                                      p_par_sched);
        }
    }
    /* ... */
}
    /* ABT_sched_set_data() */
    ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    p_sched->data = data;
    /* ABT_sched_get_data() */
    ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    *data = p_sched->data;
    /* ABT_sched_get_size(): sum of the sizes of the scheduler's pools. */
    size_t pool_size = 0;
    ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    pool_size = ABTI_sched_get_size(p_sched);
    *size = pool_size;
size_t ABTI_sched_get_size(ABTI_sched *p_sched)
{
    size_t pool_size = 0;
    int p;
    for (p = 0; p < p_sched->num_pools; p++) {
        ABTI_pool *p_pool = ABTI_pool_get_ptr(p_sched->pools[p]);
        pool_size += ABTI_pool_get_size(p_pool);
    }
    return pool_size;
}
    /* ABT_sched_get_total_size(): sum of the total sizes of the scheduler's pools. */
    size_t pool_size = 0;
    ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    pool_size = ABTI_sched_get_total_size(p_sched);
    *size = pool_size;
size_t ABTI_sched_get_total_size(ABTI_sched *p_sched)
{
    size_t pool_size = 0;
    int p;
    for (p = 0; p < p_sched->num_pools; p++) {
        ABTI_pool *p_pool = ABTI_pool_get_ptr(p_sched->pools[p]);
        pool_size += ABTI_pool_get_total_size(p_pool);
    }
    return pool_size;
}
size_t ABTI_sched_get_effective_size(ABTI_local *p_local, ABTI_sched *p_sched)
{
    size_t pool_size = 0;
    int p;
#ifndef ABT_CONFIG_DISABLE_POOL_CONSUMER_CHECK
    ABTI_native_thread_id self_id = ABTI_self_get_native_thread_id(p_local);
#endif
    for (p = 0; p < p_sched->num_pools; p++) {
        ABT_pool pool = p_sched->pools[p];
        ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
        pool_size += ABTI_pool_get_size(p_pool);
        pool_size += ABTD_atomic_acquire_load_int32(&p_pool->num_migrations);
        switch (p_pool->access) {
        case ABT_POOL_ACCESS_PRIV:
            pool_size += ABTD_atomic_acquire_load_int32(&p_pool->num_blocked);
            break;
        default: /* shared pool: count blocked units only when this scheduler
                  * is the pool's single consumer */
#ifdef ABT_CONFIG_DISABLE_POOL_CONSUMER_CHECK
            if (ABTD_atomic_acquire_load_int32(&p_pool->num_scheds) == 1)
                pool_size += ABTD_atomic_acquire_load_int32(&p_pool->num_blocked);
#else
            if (ABTD_atomic_acquire_load_int32(&p_pool->num_scheds) == 1 &&
                p_pool->consumer_id == self_id)
                pool_size += ABTD_atomic_acquire_load_int32(&p_pool->num_blocked);
#endif
            break;
        }
    }
    return pool_size;
}
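The three size functions answer slightly different questions: ABTI_sched_get_size counts only the units currently queued, ABTI_sched_get_total_size sums the pools' total sizes (which also account for units that are blocked or being migrated), and the effective size above is the consumer-aware variant used by ABTI_sched_has_to_stop. From the public API this reduces to the sketch below; sched_is_drained is an illustrative helper, not part of Argobots.

#include <abt.h>

/* Sketch only: query how much work a scheduler still has. */
static int sched_is_drained(ABT_sched sched)
{
    size_t queued, total;
    ABT_sched_get_size(sched, &queued);      /* units currently in the pools */
    ABT_sched_get_total_size(sched, &total); /* plus blocked/migrating units */
    return (queued == 0 && total == 0);
}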
void ABTI_sched_finish(ABTI_sched *p_sched)
{
    ABTI_sched_set_request(p_sched, ABTI_SCHED_REQ_FINISH);
}

void ABTI_sched_exit(ABTI_sched *p_sched)
{
    ABTI_sched_set_request(p_sched, ABTI_SCHED_REQ_EXIT);
}
int ABTI_sched_create(ABT_sched_def *def, int num_pools, ABT_pool *pools,
                      ABT_sched_config config, ABT_bool automatic,
                      ABTI_sched **pp_newsched)
{
    int abt_errno = ABT_SUCCESS;
    int p;

    ABTI_sched *p_sched = (ABTI_sched *)ABTU_malloc(sizeof(ABTI_sched));

    /* Copy the pool array; entries given as ABT_POOL_NULL are created here. */
    ABT_pool *pool_list = (ABT_pool *)ABTU_malloc(num_pools * sizeof(ABT_pool));
    for (p = 0; p < num_pools; p++) {
        if (pools[p] == ABT_POOL_NULL) {
            ABTI_pool *p_newpool;
            /* ... create a default pool into p_newpool ... */
            ABTI_CHECK_ERROR(abt_errno);
            pool_list[p] = ABTI_pool_get_handle(p_newpool);
        } else {
            pool_list[p] = pools[p];
        }
    }

    /* Register this scheduler as a user of each pool. */
    for (p = 0; p < num_pools; p++) {
        ABTI_pool_retain(ABTI_pool_get_ptr(pool_list[p]));
    }

    p_sched->used = ABTI_SCHED_NOT_USED;
    p_sched->automatic = automatic;
    p_sched->kind = ABTI_sched_get_kind(def);
    ABTD_atomic_relaxed_store_uint32(&p_sched->request, 0);
    p_sched->pools = pool_list;
    p_sched->num_pools = num_pools;
    p_sched->type = def->type;
    p_sched->p_thread = NULL;
    p_sched->p_task = NULL;
    p_sched->p_ctx = NULL;

    /* The scheduler callbacks are taken verbatim from the definition. */
    p_sched->init = def->init;
    p_sched->run = def->run;
    p_sched->free = def->free;
    p_sched->get_migr_pool = def->get_migr_pool;

#ifdef ABT_CONFIG_USE_DEBUG_LOG
    p_sched->id = ABTI_sched_get_new_id();
#endif
    LOG_EVENT("[S%" PRIu64 "] created\n", p_sched->id);

    /* Return value */
    ABT_sched newsched = ABTI_sched_get_handle(p_sched);

    /* Scheduler-specific initialization */
    p_sched->init(newsched, config);

    *pp_newsched = p_sched;
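ABTI_sched_create() copies the callbacks out of the ABT_sched_def verbatim and calls init() once the public handle exists; a user-defined scheduler only needs to supply that definition. The sketch below shows the public-API side under stated assumptions: the example_sched_* names are illustrative, example_sched_run is the run loop sketched earlier, and only the ABT_* symbols are real API.

#include <abt.h>

void example_sched_run(ABT_sched sched); /* run loop sketched earlier */

/* Sketch only: trivial init/free callbacks for a user-defined scheduler. */
static int example_sched_init(ABT_sched sched, ABT_sched_config config)
{
    return ABT_SUCCESS; /* per-scheduler setup would go here */
}

static int example_sched_free(ABT_sched sched)
{
    return ABT_SUCCESS; /* release whatever init() allocated */
}

static ABT_sched_def example_sched_def = {
    .type = ABT_SCHED_TYPE_ULT, /* run() executes as a ULT */
    .init = example_sched_init,
    .run = example_sched_run,
    .free = example_sched_free,
    .get_migr_pool = NULL /* NULL: migration falls back to pools[0] */
};

/* Sketch only: create the scheduler over caller-provided pools. */
static int create_example_sched(int num_pools, ABT_pool *pools,
                                ABT_sched *newsched)
{
    return ABT_sched_create(&example_sched_def, num_pools, pools,
                            ABT_SCHED_CONFIG_NULL, newsched);
}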
int ABTI_sched_create_basic(ABT_sched_predef predef, int num_pools,
                            ABT_pool *pools, ABT_sched_config config,
                            ABTI_sched **pp_newsched)
{
    /* Read the pool access mode and the automatic flag from the config. */
    abt_errno = ABTI_sched_config_read_global(config, &access, &automatic);
    ABTI_CHECK_ERROR(abt_errno);

    /* A pool array is provided: entries given as ABT_POOL_NULL are created
     * here, the others are used as they are. */
    for (p = 0; p < num_pools; p++) {
        if (pools[p] == ABT_POOL_NULL) {
            ABTI_pool *p_newpool;
            /* ... create a default pool into p_newpool ... */
            ABTI_CHECK_ERROR(abt_errno);
            pool_list[p] = ABTI_pool_get_handle(p_newpool);
        } else {
            pool_list[p] = pools[p];
        }
    }

    /* The predefined kind selects the internal scheduler definition. */
    switch (predef) {
        case ABT_SCHED_DEFAULT:
        case ABT_SCHED_BASIC:
            abt_errno = ABTI_sched_create(ABTI_sched_get_basic_def(),
                                          num_pools, pool_list, config,
                                          automatic, pp_newsched);
            break;
        case ABT_SCHED_BASIC_WAIT:
            abt_errno = ABTI_sched_create(ABTI_sched_get_basic_wait_def(),
                                          num_pools, pool_list, config,
                                          automatic, pp_newsched);
            break;
        case ABT_SCHED_PRIO:
            abt_errno = ABTI_sched_create(ABTI_sched_get_prio_def(),
                                          num_pools, pool_list, config,
                                          automatic, pp_newsched);
            break;
        case ABT_SCHED_RANDWS:
            abt_errno = ABTI_sched_create(ABTI_sched_get_randws_def(),
                                          num_pools, pool_list, config,
                                          automatic, pp_newsched);
            break;
    }
    ABTI_CHECK_ERROR(abt_errno);
    /* No pool array is provided: the number of pools is fixed by the
     * predefined kind (the ABT_SCHED_PRIO case shown here uses
     * ABTI_SCHED_NUM_PRIO pools). */
    num_pools = ABTI_SCHED_NUM_PRIO;
    /* ... */
    ABTI_CHECK_ERROR(abt_errno);

    /* The pools are created on a stack array to avoid a heap allocation. */
    ABT_pool pool_list[ABTI_SCHED_NUM_PRIO];
    for (p = 0; p < num_pools; p++) {
        ABTI_pool *p_newpool;
        abt_errno =
            ABTI_pool_create_basic(kind, access, ABT_TRUE, &p_newpool);
        ABTI_CHECK_ERROR(abt_errno);
        pool_list[p] = ABTI_pool_get_handle(p_newpool);
    }

    /* Same dispatch on predef, over the freshly created pools. */
    switch (predef) {
        case ABT_SCHED_DEFAULT:
        case ABT_SCHED_BASIC:
            abt_errno = ABTI_sched_create(ABTI_sched_get_basic_def(),
                                          num_pools, pool_list, config,
                                          automatic, pp_newsched);
            break;
        case ABT_SCHED_BASIC_WAIT:
            abt_errno = ABTI_sched_create(ABTI_sched_get_basic_wait_def(),
                                          num_pools, pool_list, config,
                                          automatic, pp_newsched);
            break;
        case ABT_SCHED_PRIO:
            abt_errno = ABTI_sched_create(ABTI_sched_get_prio_def(),
                                          num_pools, pool_list, config,
                                          automatic, pp_newsched);
            break;
        case ABT_SCHED_RANDWS:
            abt_errno = ABTI_sched_create(ABTI_sched_get_randws_def(),
                                          num_pools, pool_list, config,
                                          automatic, pp_newsched);
            break;
    }
    ABTI_CHECK_ERROR(abt_errno);

    /* ... */
    ABTI_CHECK_ERROR(abt_errno);
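From the application's point of view the predefined path above collapses to a few calls: create (or let Argobots create) the pools, pick a predefined kind, and hand the scheduler to an execution stream. A minimal sketch follows, with error checks omitted; the function and variable names are illustrative, only the ABT_* symbols are real API.

#include <abt.h>

/* Sketch only: one pool, a predefined BASIC scheduler, one execution stream. */
static int spawn_xstream_with_basic_sched(ABT_xstream *newxstream)
{
    ABT_pool pool;
    ABT_sched sched;

    /* An automatic pool is released together with its last scheduler. */
    ABT_pool_create_basic(ABT_POOL_FIFO, ABT_POOL_ACCESS_MPSC, ABT_TRUE, &pool);

    /* BASIC scheduler over that single pool; a NULL config keeps the defaults. */
    ABT_sched_create_basic(ABT_SCHED_BASIC, 1, &pool, ABT_SCHED_CONFIG_NULL,
                           &sched);

    /* The new execution stream is driven by the scheduler; work pushed to the
     * pool (e.g. with ABT_thread_create(pool, ...)) runs there. */
    return ABT_xstream_create(sched, newxstream);
}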
int ABTI_sched_free(ABTI_local *p_local, ABTI_sched *p_sched)
{
    int abt_errno = ABT_SUCCESS;
    int p;

    /* A scheduler that is still in use cannot be freed. */
    if (p_sched->used != ABTI_SCHED_NOT_USED) {
        /* ... */
    }

    /* Release the pools; automatic pools with no remaining scheduler are
     * freed here. */
    for (p = 0; p < p_sched->num_pools; p++) {
        ABTI_pool *p_pool = ABTI_pool_get_ptr(p_sched->pools[p]);
        int32_t num_scheds = ABTI_pool_release(p_pool);
        if (p_pool->automatic == ABT_TRUE && num_scheds == 0) {
            ABTI_CHECK_NULL_POOL_PTR(p_pool);
            ABTI_pool_free(p_pool);
        }
    }

    /* Free the work unit (ULT or tasklet) associated with this scheduler. */
    if (p_sched->p_thread) {
        if (p_sched->p_thread->type == ABTI_THREAD_TYPE_MAIN_SCHED) {
            ABTI_thread_free_main_sched(p_local, p_sched->p_thread);
        } else {
            ABTI_thread_free(p_local, p_sched->p_thread);
        }
    }
    if (p_sched->p_task) {
        ABTI_task_free(p_local, p_sched->p_task);
    }

    LOG_EVENT("[S%" PRIu64 "] freed\n", p_sched->id);

    /* Scheduler-specific cleanup, then drop the user data pointer. */
    p_sched->free(ABTI_sched_get_handle(p_sched));
    p_sched->data = NULL;
int ABTI_sched_get_migration_pool(ABTI_sched *p_sched, ABTI_pool *source_pool,
                                  ABTI_pool **pp_pool)
{
    ABT_sched sched = ABTI_sched_get_handle(p_sched);
    ABTI_pool *p_pool;

    /* Without a get_migr_pool() callback, fall back to the first pool. */
    if (p_sched->get_migr_pool == NULL) {
        if (p_sched->num_pools == 0)
            p_pool = NULL;
        else
            p_pool = ABTI_pool_get_ptr(p_sched->pools[0]);
    } else {
        p_pool = ABTI_pool_get_ptr(p_sched->get_migr_pool(sched));
    }

    /* The chosen pool must accept migrations from the source pool. */
    if (ABTI_pool_accept_migration(p_pool, source_pool) == ABT_TRUE) {
        *pp_pool = p_pool;
    }
ABTI_sched_kind ABTI_sched_get_kind(ABT_sched_def *def)
{
    /* The address of the definition identifies the scheduler kind. */
    return (ABTI_sched_kind)def;
}
void ABTI_sched_print(ABTI_sched *p_sched, FILE *p_os, int indent,
                      ABT_bool print_sub)
{
    char *prefix = ABTU_get_indent_str(indent);

    if (p_sched == NULL) {
        fprintf(p_os, "%s== NULL SCHED ==\n", prefix);
        /* ... */
    }

    ABTI_sched_kind kind;
    char *kind_str, *type, *state, *used;
    char *pools_str;
    size_t size, pos;
    int i;

    /* Map the scheduler kind to a printable name. */
    kind = p_sched->kind;
    if (kind == ABTI_sched_get_kind(ABTI_sched_get_basic_def())) {
        kind_str = "BASIC";
    } else if (kind == ABTI_sched_get_kind(ABTI_sched_get_basic_wait_def())) {
        kind_str = "BASIC_WAIT";
    } else if (kind == ABTI_sched_get_kind(ABTI_sched_get_prio_def())) {
        kind_str = "PRIO";
    } else {
        /* ... */
    }

    switch (p_sched->type) {
        /* ... sets type ("ULT" or "TASKLET") ... */
    }
    switch (p_sched->state) {
        /* ... */
        case ABT_SCHED_STATE_TERMINATED:
            state = "TERMINATED";
            break;
        /* ... */
    }
    switch (p_sched->used) {
        case ABTI_SCHED_NOT_USED:
            used = "NOT_USED";
            break;
        case ABTI_SCHED_MAIN:
            used = "MAIN";
            break;
        case ABTI_SCHED_IN_POOL:
            used = "IN_POOL";
            break;
    }

    /* Build the list of pool addresses. */
    size = sizeof(char) * (p_sched->num_pools * 20 + 4);
    pools_str = (char *)ABTU_calloc(size, 1);
    /* ... */
    for (i = 0; i < p_sched->num_pools; i++) {
        ABTI_pool *p_pool = ABTI_pool_get_ptr(p_sched->pools[i]);
        sprintf(&pools_str[pos], "%p ", (void *)p_pool);
        pos = strlen(pools_str);
    }
    pools_str[pos] = ']';

    fprintf(p_os,
            "%s== SCHED (%p) ==\n"
#ifdef ABT_CONFIG_USE_DEBUG_LOG
            "%sid       : %" PRIu64 "\n"
#endif
            "%skind     : %" PRIxPTR " (%s)\n"
            "%stype     : %s\n"
            "%sstate    : %s\n"
            "%sused     : %s\n"
            "%sautomatic: %s\n"
            "%srequest  : %x\n"
            "%snum_pools: %d\n"
            "%spools    : %s\n"
            "%ssize     : %zu\n"
            "%stot_size : %zu\n"
            "%sdata     : %p\n",
            prefix, (void *)p_sched,
#ifdef ABT_CONFIG_USE_DEBUG_LOG
            prefix, p_sched->id,
#endif
            prefix, p_sched->kind, kind_str, prefix, type, prefix, state,
            prefix, used, prefix,
            (p_sched->automatic == ABT_TRUE) ? "TRUE" : "FALSE", prefix,
            ABTD_atomic_acquire_load_uint32(&p_sched->request), prefix,
            p_sched->num_pools, prefix, pools_str, prefix,
            ABTI_sched_get_size(p_sched), prefix,
            ABTI_sched_get_total_size(p_sched), prefix, p_sched->data);
    ABTU_free(pools_str);

    if (print_sub == ABT_TRUE) {
        for (i = 0; i < p_sched->num_pools; i++) {
            ABTI_pool *p_pool = ABTI_pool_get_ptr(p_sched->pools[i]);
            ABTI_pool_print(p_pool, p_os, indent + 2);
        }
    }
    /* ... */
}
static ABTD_atomic_uint64 g_sched_id = ABTD_ATOMIC_UINT64_STATIC_INITIALIZER(0);

void ABTI_sched_reset_id(void)
{
    ABTD_atomic_relaxed_store_uint64(&g_sched_id, 0);
}

#ifdef ABT_CONFIG_USE_DEBUG_LOG
static inline uint64_t ABTI_sched_get_new_id(void)
{
    return ABTD_atomic_fetch_add_uint64(&g_sched_id, 1);
}
#endif
Public ABT_sched API referenced in this listing:

int ABT_sched_create(ABT_sched_def *def, int num_pools, ABT_pool *pools, ABT_sched_config config, ABT_sched *newsched)
    Create a new user-defined scheduler and return its handle through newsched.
int ABT_sched_create_basic(ABT_sched_predef predef, int num_pools, ABT_pool *pools, ABT_sched_config config, ABT_sched *newsched)
    Create a predefined scheduler.
int ABT_sched_free(ABT_sched *sched)
    Release the scheduler object associated with the sched handle.
int ABT_sched_get_num_pools(ABT_sched sched, int *num_pools)
    Get the number of pools associated with the scheduler.
int ABT_sched_get_pools(ABT_sched sched, int max_pools, int idx, ABT_pool *pools)
    Get the pools of the scheduler sched.
int ABT_sched_finish(ABT_sched sched)
    Ask a scheduler to finish once its pools are empty.
int ABT_sched_exit(ABT_sched sched)
    Ask a scheduler to stop as soon as possible.
int ABT_sched_has_to_stop(ABT_sched sched, ABT_bool *stop)
    Check if the scheduler needs to stop.
int ABT_sched_set_data(ABT_sched sched, void *data)
    Set the specific data of the target user-defined scheduler.
int ABT_sched_get_data(ABT_sched sched, void **data)
    Retrieve the specific data of the target user-defined scheduler.
int ABT_sched_get_size(ABT_sched sched, size_t *size)
    Get the sum of the sizes of the pools of sched.
int ABT_sched_get_total_size(ABT_sched sched, size_t *size)
    Get the sum of the total sizes of the pools of sched.
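The data pointer behind ABT_sched_set_data()/ABT_sched_get_data() is simply stored on the scheduler object, so the usual pattern is to allocate scheduler-specific state in the init() callback and release it in free(). The following is a hedged sketch of that pattern; the type stateful_sched_data and the callback names are illustrative, only the ABT_* symbols are real API.

#include <stdlib.h>
#include <abt.h>

/* Sketch only: per-scheduler state kept behind ABT_sched_set_data/get_data. */
typedef struct {
    int event_freq; /* e.g. how often run() checks for events */
} stateful_sched_data;

static int stateful_sched_init(ABT_sched sched, ABT_sched_config config)
{
    stateful_sched_data *p_data = calloc(1, sizeof(*p_data));
    if (p_data == NULL)
        return ABT_ERR_MEM;
    p_data->event_freq = 50;
    return ABT_sched_set_data(sched, p_data);
}

static int stateful_sched_free(ABT_sched sched)
{
    void *data;
    ABT_sched_get_data(sched, &data);
    free(data);
    return ABT_SUCCESS;
}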