int ABT_mutex_create(ABT_mutex *newmutex)
{
    int abt_errno = ABT_SUCCESS;
    ABTI_mutex *p_newmutex;

    p_newmutex = (ABTI_mutex *)ABTU_calloc(1, sizeof(ABTI_mutex));
    ABTI_mutex_init(p_newmutex);

    /* Return value */
    *newmutex = ABTI_mutex_get_handle(p_newmutex);

    return abt_errno;
}
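/* Usage sketch (not part of the listing): the basic lifecycle of a default
 * mutex through the public API above. Error-code checks are elided. */
#include <abt.h>

void mutex_lifecycle_example(void)
{
    ABT_mutex mutex;
    ABT_mutex_create(&mutex);   /* allocates and zero-initializes an ABTI_mutex */
    ABT_mutex_lock(mutex);
    /* ... critical section ... */
    ABT_mutex_unlock(mutex);
    ABT_mutex_free(&mutex);     /* finalizes the mutex and nullifies the handle */
}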
int ABT_mutex_create_with_attr(ABT_mutex_attr attr, ABT_mutex *newmutex)
{
    int abt_errno = ABT_SUCCESS;
    ABTI_mutex_attr *p_attr = ABTI_mutex_attr_get_ptr(attr);
    ABTI_CHECK_NULL_MUTEX_ATTR_PTR(p_attr);
    ABTI_mutex *p_newmutex;

    p_newmutex = (ABTI_mutex *)ABTU_malloc(sizeof(ABTI_mutex));
    ABTI_mutex_init(p_newmutex);
    ABTI_mutex_attr_copy(&p_newmutex->attr, p_attr);

    /* Return value */
    *newmutex = ABTI_mutex_get_handle(p_newmutex);

    return abt_errno;
}
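/* Usage sketch (assumes the standard Argobots attribute API:
 * ABT_mutex_attr_create, ABT_mutex_attr_set_recursive, and
 * ABT_mutex_attr_free): creating a recursive mutex. */
void recursive_mutex_example(void)
{
    ABT_mutex_attr attr;
    ABT_mutex mutex;

    ABT_mutex_attr_create(&attr);
    ABT_mutex_attr_set_recursive(attr, ABT_TRUE);
    ABT_mutex_create_with_attr(attr, &mutex);
    ABT_mutex_attr_free(&attr);  /* the attribute was copied into the mutex */

    /* ... use the mutex; see the nesting sketch after ABT_mutex_lock ... */
    ABT_mutex_free(&mutex);
}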
int ABT_mutex_free(ABT_mutex *mutex)
{
    int abt_errno = ABT_SUCCESS;
    ABT_mutex h_mutex = *mutex;
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(h_mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    ABTI_mutex_fini(p_mutex);
    ABTU_free(p_mutex);

    /* Return value */
    *mutex = ABT_MUTEX_NULL;

    return abt_errno;
}
int ABT_mutex_lock(ABT_mutex mutex)
{
    int abt_errno = ABT_SUCCESS;
    ABTI_local *p_local = ABTI_local_get_local();
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
        /* default attributes */
        ABTI_mutex_lock(&p_local, p_mutex);

    } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
        /* recursive mutex: only the first acquisition takes the lock;
         * re-acquisitions by the owner just bump the nesting count */
        ABTI_unit_id self_id = ABTI_self_get_unit_id(p_local);
        if (self_id != p_mutex->attr.owner_id) {
            ABTI_mutex_lock(&p_local, p_mutex);
            p_mutex->attr.owner_id = self_id;
            ABTI_ASSERT(p_mutex->attr.nesting_cnt == 0);
        } else {
            p_mutex->attr.nesting_cnt++;
        }

    } else {
        /* unknown attributes */
        ABTI_mutex_lock(&p_local, p_mutex);
    }

    return abt_errno;
}
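/* Nesting sketch (assumes a mutex created with the recursive attribute as
 * shown earlier): every lock by the owner must be matched by an unlock;
 * only the last unlock releases the underlying lock. */
void nested_lock_example(ABT_mutex rec_mutex)
{
    ABT_mutex_lock(rec_mutex);    /* takes the lock, nesting_cnt == 0 */
    ABT_mutex_lock(rec_mutex);    /* owner re-enters, nesting_cnt == 1 */
    ABT_mutex_unlock(rec_mutex);  /* nesting_cnt back to 0, still held */
    ABT_mutex_unlock(rec_mutex);  /* releases the lock */
}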
static inline void ABTI_mutex_lock_low(ABTI_local **pp_local,
                                       ABTI_mutex *p_mutex)
{
#ifdef ABT_CONFIG_USE_SIMPLE_MUTEX
    ABTI_local *p_local = *pp_local;
    ABT_unit_type type = ABTI_self_get_type(p_local);
    if (type == ABT_UNIT_TYPE_THREAD) {
        /* A ULT spins with yield; val is 0 when unlocked, 1 when held. */
        LOG_EVENT("%p: lock_low - try\n", p_mutex);
        while (!ABTD_atomic_bool_cas_weak_uint32(&p_mutex->val, 0, 1)) {
            ABTI_thread_yield(pp_local, p_local->p_thread);
        }
        LOG_EVENT("%p: lock_low - acquired\n", p_mutex);
    } else {
        /* An external thread cannot yield, so it spins. */
        ABTI_mutex_spinlock(p_mutex);
    }
#else
    ABTI_local *p_local = *pp_local;
    ABT_unit_type type = ABTI_self_get_type(p_local);
    if (type == ABT_UNIT_TYPE_THREAD) {
        int c;
        LOG_EVENT("%p: lock_low - try\n", p_mutex);

        /* If other ULTs on this ES wait on the low-priority queue,
         * context-switch to the queue head so it can try first. */
        ABTI_thread_htable *p_htable = p_mutex->p_htable;
        ABTI_thread *p_self = p_local->p_thread;
        ABTI_xstream *p_xstream = p_self->p_last_xstream;
        int rank = (int)p_xstream->rank;
        ABTI_thread_queue *p_queue = &p_htable->queue[rank];
        if (p_queue->low_num_threads > 0) {
            ABT_bool ret = ABTI_thread_htable_switch_low(pp_local, p_queue,
                                                         p_self, p_htable);
            if (ret == ABT_TRUE) {
                /* This ULT was enqueued and switched out; on resume,
                 * check whether the mutex was handed over to it. */
                goto check_handover;
            }
        }

        /* val: 0 = unlocked, 1 = locked, 2 = locked with waiters. */
        if ((c = ABTD_atomic_val_cas_strong_uint32(&p_mutex->val, 0, 1)) != 0) {
            if (c != 2)
                c = ABTD_atomic_exchange_uint32(&p_mutex->val, 2);
            while (c != 0) {
                ABTI_mutex_wait_low(pp_local, p_mutex, 2);

            check_handover:
                /* A handover from a ULT on the same ES means the lock
                 * does not need to be re-acquired. */
                if (p_mutex->p_handover) {
                    if (p_self == p_mutex->p_handover) {
                        p_mutex->p_handover = NULL;
                        ABTD_atomic_release_store_uint32(&p_mutex->val, 2);

                        /* Push the giver ULT back to its pool. */
                        ABTI_thread *p_giver = p_mutex->p_giver;
                        ABTD_atomic_release_store_int(&p_giver->state,
                                                      ABT_THREAD_STATE_READY);
                        ABTI_POOL_PUSH(p_giver->p_pool, p_giver->unit,
                                       ABTI_self_get_native_thread_id(p_local));
                        break;
                    }
                }
                c = ABTD_atomic_exchange_uint32(&p_mutex->val, 2);
            }
        }
        LOG_EVENT("%p: lock_low - acquired\n", p_mutex);
    } else {
        ABTI_mutex_spinlock(p_mutex);
    }
#endif
}
int ABT_mutex_lock_low(ABT_mutex mutex)
{
    int abt_errno = ABT_SUCCESS;
    ABTI_local *p_local = ABTI_local_get_local();
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
        /* default attributes */
        ABTI_mutex_lock_low(&p_local, p_mutex);

    } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
        /* recursive mutex */
        ABTI_unit_id self_id = ABTI_self_get_unit_id(p_local);
        if (self_id != p_mutex->attr.owner_id) {
            ABTI_mutex_lock_low(&p_local, p_mutex);
            p_mutex->attr.owner_id = self_id;
            ABTI_ASSERT(p_mutex->attr.nesting_cnt == 0);
        } else {
            p_mutex->attr.nesting_cnt++;
        }

    } else {
        /* unknown attributes */
        ABTI_mutex_lock_low(&p_local, p_mutex);
    }

    return abt_errno;
}
int ABT_mutex_trylock(ABT_mutex mutex)
{
    int abt_errno;
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
        /* default attributes */
        abt_errno = ABTI_mutex_trylock(p_mutex);

    } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
        /* recursive mutex */
        ABTI_local *p_local = ABTI_local_get_local();
        ABTI_unit_id self_id = ABTI_self_get_unit_id(p_local);
        if (self_id != p_mutex->attr.owner_id) {
            abt_errno = ABTI_mutex_trylock(p_mutex);
            if (abt_errno == ABT_SUCCESS) {
                p_mutex->attr.owner_id = self_id;
                ABTI_ASSERT(p_mutex->attr.nesting_cnt == 0);
            }
        } else {
            p_mutex->attr.nesting_cnt++;
            abt_errno = ABT_SUCCESS;
        }

    } else {
        /* unknown attributes */
        abt_errno = ABTI_mutex_trylock(p_mutex);
    }

    return abt_errno;
}
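/* Usage sketch: trylock returns ABT_SUCCESS on acquisition and reports
 * contention through the error code (ABT_ERR_MUTEX_LOCKED in the public
 * API) instead of blocking the caller. */
void trylock_example(ABT_mutex mutex)
{
    int ret = ABT_mutex_trylock(mutex);
    if (ret == ABT_SUCCESS) {
        /* ... critical section ... */
        ABT_mutex_unlock(mutex);
    } else if (ret == ABT_ERR_MUTEX_LOCKED) {
        /* someone else holds the mutex; do other work instead of blocking */
    }
}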
int ABT_mutex_spinlock(ABT_mutex mutex)
{
    int abt_errno = ABT_SUCCESS;
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
        /* default attributes */
        ABTI_mutex_spinlock(p_mutex);

    } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
        /* recursive mutex */
        ABTI_local *p_local = ABTI_local_get_local();
        ABTI_unit_id self_id = ABTI_self_get_unit_id(p_local);
        if (self_id != p_mutex->attr.owner_id) {
            ABTI_mutex_spinlock(p_mutex);
            p_mutex->attr.owner_id = self_id;
            ABTI_ASSERT(p_mutex->attr.nesting_cnt == 0);
        } else {
            p_mutex->attr.nesting_cnt++;
        }

    } else {
        /* unknown attributes */
        ABTI_mutex_spinlock(p_mutex);
    }

    return abt_errno;
}
int ABT_mutex_unlock(ABT_mutex mutex)
{
    int abt_errno = ABT_SUCCESS;
    ABTI_local *p_local = ABTI_local_get_local();
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
        /* default attributes */
        ABTI_mutex_unlock(p_local, p_mutex);

    } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
        /* recursive mutex: only the owner may unlock */
        ABTI_CHECK_TRUE(ABTI_self_get_unit_id(p_local) ==
                            p_mutex->attr.owner_id,
                        ABT_ERR_INV_THREAD);
        if (p_mutex->attr.nesting_cnt == 0) {
            p_mutex->attr.owner_id = 0;
            ABTI_mutex_unlock(p_local, p_mutex);
        } else {
            p_mutex->attr.nesting_cnt--;
        }

    } else {
        /* unknown attributes */
        ABTI_mutex_unlock(p_local, p_mutex);
    }

    return abt_errno;
}
static inline int ABTI_mutex_unlock_se(ABTI_local **pp_local,
                                       ABTI_mutex *p_mutex)
{
#ifdef ABT_CONFIG_USE_SIMPLE_MUTEX
    ABTD_atomic_release_store_uint32(&p_mutex->val, 0);
    ABTI_local *p_local = *pp_local;
#ifndef ABT_CONFIG_DISABLE_EXT_THREAD
    /* An external thread cannot yield. */
    if (ABTI_self_get_type(p_local) == ABT_UNIT_TYPE_THREAD)
        ABTI_thread_yield(pp_local, p_local->p_thread);
#else
    ABTI_thread_yield(pp_local, p_local->p_thread);
#endif
#else
    int i;
    ABTI_xstream *p_xstream;
    ABTI_thread *p_next = NULL;
    ABTI_thread *p_thread;
    ABTI_thread_queue *p_queue;

    /* If the previous value of val is 1, there is no waiter and we are
     * done after yielding once. */
    if (ABTD_atomic_fetch_sub_uint32(&p_mutex->val, 1) == 1) {
#ifndef ABT_CONFIG_DISABLE_EXT_THREAD
        if (ABTI_self_get_type(*pp_local) == ABT_UNIT_TYPE_THREAD)
            ABTI_thread_yield(pp_local, (*pp_local)->p_thread);
#else
        ABTI_thread_yield(pp_local, (*pp_local)->p_thread);
#endif
        return ABT_SUCCESS;
    }

    /* There are waiters: try to hand the mutex over to a waiter on the
     * same ES. */
    ABTI_thread_htable *p_htable = p_mutex->p_htable;

    p_thread = (*pp_local)->p_thread;
    p_xstream = p_thread->p_last_xstream;
    ABTI_ASSERT(p_xstream == (*pp_local)->p_xstream);
    i = (int)p_xstream->rank;
    p_queue = &p_htable->queue[i];

check_cond:
    /* Do not keep handing the mutex over within one ES forever. */
    if (p_queue->num_handovers >= p_mutex->attr.max_handovers) {
        ABTD_atomic_release_store_uint32(&p_mutex->val, 0); /* Unlock */
        ABTI_mutex_wake_de(*pp_local, p_mutex);
        p_queue->num_handovers = 0;
        ABTI_thread_yield(pp_local, p_thread);
        return ABT_SUCCESS;
    }

    /* If no other ULT on this ES waits on the high-priority queue, wake
     * up a waiter on a different ES instead. */
    if (p_queue->num_threads <= 1) {
        if (p_htable->h_list != NULL) {
            ABTD_atomic_release_store_uint32(&p_mutex->val, 0); /* Unlock */
            ABTI_mutex_wake_de(*pp_local, p_mutex);
            ABTI_thread_yield(pp_local, p_thread);
            return ABT_SUCCESS;
        }
    } else {
        p_next = ABTI_thread_htable_pop(p_htable, p_queue);
        if (p_next == NULL)
            goto check_cond;
        else
            goto handover;
    }

    /* Check the low-priority queue of this ES. */
    if (p_queue->low_num_threads <= 1) {
        ABTD_atomic_release_store_uint32(&p_mutex->val, 0); /* Unlock */
        ABTI_mutex_wake_de(*pp_local, p_mutex);
        ABTI_thread_yield(pp_local, p_thread);
        return ABT_SUCCESS;
    } else {
        p_next = ABTI_thread_htable_pop_low(p_htable, p_queue);
        if (p_next == NULL)
            goto check_cond;
    }

handover:
    /* Hand over the mutex to p_next without releasing it. */
    p_queue->num_handovers++;

    p_mutex->p_handover = p_next;
    p_mutex->p_giver = p_thread;

    LOG_EVENT("%p: handover -> U%" PRIu64 "\n", p_mutex,
              ABTI_thread_get_id(p_next));

    /* Switch to the next ULT once it is no longer marked blocked. */
    while (ABTD_atomic_acquire_load_uint32(&p_next->request) &
           ABTI_THREAD_REQ_BLOCK)
        ;
    ABTI_pool_dec_num_blocked(p_next->p_pool);
    ABTI_thread_context_switch_thread_to_thread(pp_local, p_thread, p_next);
#endif
    return ABT_SUCCESS;
}
int ABT_mutex_unlock_se(ABT_mutex mutex)
{
    int abt_errno = ABT_SUCCESS;
    ABTI_local *p_local = ABTI_local_get_local();
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
        /* default attributes */
        ABTI_mutex_unlock_se(&p_local, p_mutex);

    } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
        /* recursive mutex: only the owner may unlock */
        ABTI_CHECK_TRUE(ABTI_self_get_unit_id(p_local) ==
                            p_mutex->attr.owner_id,
                        ABT_ERR_INV_THREAD);
        if (p_mutex->attr.nesting_cnt == 0) {
            p_mutex->attr.owner_id = 0;
            ABTI_mutex_unlock_se(&p_local, p_mutex);
        } else {
            p_mutex->attr.nesting_cnt--;
        }

    } else {
        /* unknown attributes */
        ABTI_mutex_unlock_se(&p_local, p_mutex);
    }

    return abt_errno;
}
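/* Pattern sketch: pairing lock_low with unlock_se. A ULT that queued with
 * low priority releases with a same-ES handover, which tends to keep the
 * critical section running on one ES. Illustrative only; any lock/unlock
 * pairing is legal. */
void pipeline_stage(ABT_mutex mutex)
{
    ABT_mutex_lock_low(mutex);   /* queue behind high-priority waiters */
    /* ... critical section ... */
    ABT_mutex_unlock_se(mutex);  /* prefer handover within this ES */
}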
int ABT_mutex_unlock_de(ABT_mutex mutex)
{
    int abt_errno = ABT_SUCCESS;
    ABTI_local *p_local = ABTI_local_get_local();
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    ABTI_mutex_unlock(p_local, p_mutex);

    return abt_errno;
}
int ABT_mutex_equal(ABT_mutex mutex1, ABT_mutex mutex2, ABT_bool *result)
{
    ABTI_mutex *p_mutex1 = ABTI_mutex_get_ptr(mutex1);
    ABTI_mutex *p_mutex2 = ABTI_mutex_get_ptr(mutex2);
    *result = ABTI_mutex_equal(p_mutex1, p_mutex2);
    return ABT_SUCCESS;
}
void ABTI_mutex_wait(ABTI_local **pp_local, ABTI_mutex *p_mutex, int val)
{
    ABTI_local *p_local = *pp_local;
    ABTI_thread_htable *p_htable = p_mutex->p_htable;
    ABTI_thread *p_self = p_local->p_thread;
    ABTI_xstream *p_xstream = p_self->p_last_xstream;

    int rank = (int)p_xstream->rank;
    ABTI_ASSERT(rank < p_htable->num_rows);
    ABTI_thread_queue *p_queue = &p_htable->queue[rank];

    ABTI_THREAD_HTABLE_LOCK(p_htable->mutex);

    /* The mutex state may have changed before the table lock was taken;
     * in that case do not block. */
    if (ABTD_atomic_acquire_load_uint32(&p_mutex->val) != val) {
        ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);
        return;
    }

    if (p_queue->p_h_next == NULL) {
        ABTI_thread_htable_add_h_node(p_htable, p_queue);
    }

    /* Change the ULT's state to BLOCKED. */
    ABTI_thread_set_blocked(p_self);

    /* Push the current ULT to the high-priority queue. */
    ABTI_thread_htable_push(p_htable, rank, p_self);

    ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);

    /* Suspend the current ULT. */
    ABTI_thread_suspend(pp_local, p_self);
}
void ABTI_mutex_wait_low(ABTI_local **pp_local, ABTI_mutex *p_mutex, int val)
{
    ABTI_local *p_local = *pp_local;
    ABTI_thread_htable *p_htable = p_mutex->p_htable;
    ABTI_thread *p_self = p_local->p_thread;
    ABTI_xstream *p_xstream = p_self->p_last_xstream;

    int rank = (int)p_xstream->rank;
    ABTI_ASSERT(rank < p_htable->num_rows);
    ABTI_thread_queue *p_queue = &p_htable->queue[rank];

    ABTI_THREAD_HTABLE_LOCK(p_htable->mutex);

    /* The mutex state may have changed before the table lock was taken;
     * in that case do not block. */
    if (ABTD_atomic_acquire_load_uint32(&p_mutex->val) != val) {
        ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);
        return;
    }

    if (p_queue->p_l_next == NULL) {
        ABTI_thread_htable_add_l_node(p_htable, p_queue);
    }

    /* Change the ULT's state to BLOCKED. */
    ABTI_thread_set_blocked(p_self);

    /* Push the current ULT to the low-priority queue. */
    ABTI_thread_htable_push_low(p_htable, rank, p_self);

    ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);

    /* Suspend the current ULT. */
    ABTI_thread_suspend(pp_local, p_self);
}
void ABTI_mutex_wake_de(ABTI_local *p_local, ABTI_mutex *p_mutex)
{
    int n;
    ABTI_thread *p_thread;
    ABTI_thread_htable *p_htable = p_mutex->p_htable;
    int num = p_mutex->attr.max_wakeups;
    ABTI_thread_queue *p_start, *p_curr;

    /* Wake up at most num ULTs in a round-robin manner. */
    for (n = 0; n < num; n++) {
        p_thread = NULL;
        ABTI_THREAD_HTABLE_LOCK(p_htable->mutex);

        if (ABTD_atomic_acquire_load_uint32(&p_htable->num_elems) == 0) {
            ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);
            break;
        }

        /* Wake up high-priority waiters first. */
        p_start = p_htable->h_list;
        for (p_curr = p_start; p_curr;) {
            p_thread = ABTI_thread_htable_pop(p_htable, p_curr);
            if (p_curr->num_threads == 0) {
                ABTI_thread_htable_del_h_head(p_htable);
            } else {
                p_htable->h_list = p_curr->p_h_next;
            }
            if (p_thread != NULL)
                goto done;
            p_curr = p_htable->h_list;
            if (p_curr == p_start)
                break;
        }

        /* Then the low-priority waiters. */
        p_start = p_htable->l_list;
        for (p_curr = p_start; p_curr;) {
            p_thread = ABTI_thread_htable_pop_low(p_htable, p_curr);
            if (p_curr->low_num_threads == 0) {
                ABTI_thread_htable_del_l_head(p_htable);
            } else {
                p_htable->l_list = p_curr->p_l_next;
            }
            if (p_thread != NULL)
                goto done;
            p_curr = p_htable->l_list;
            if (p_curr == p_start)
                break;
        }

        /* Nothing to wake up. */
        ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);
        LOG_EVENT("%p: nothing to wake up\n", p_mutex);
        break;

    done:
        ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);

        /* Make the popped ULT ready again. */
        LOG_EVENT("%p: wake up U%" PRIu64 ":E%d\n", p_mutex,
                  ABTI_thread_get_id(p_thread),
                  ABTI_thread_get_xstream_rank(p_thread));
        ABTI_thread_set_ready(p_local, p_thread);
    }
}
Referenced API functions:

int ABT_mutex_create(ABT_mutex *newmutex)
    Create a new mutex.
int ABT_mutex_create_with_attr(ABT_mutex_attr attr, ABT_mutex *newmutex)
    Create a new mutex with attributes.
int ABT_mutex_free(ABT_mutex *mutex)
    Free the mutex object.
int ABT_mutex_lock(ABT_mutex mutex)
    Lock the mutex.
int ABT_mutex_lock_low(ABT_mutex mutex)
    Lock the mutex with low priority.
int ABT_mutex_lock_high(ABT_mutex mutex)
int ABT_mutex_trylock(ABT_mutex mutex)
    Attempt to lock a mutex without blocking.
int ABT_mutex_spinlock(ABT_mutex mutex)
    Lock the mutex without context switch.
int ABT_mutex_unlock(ABT_mutex mutex)
    Unlock the mutex.
int ABT_mutex_unlock_se(ABT_mutex mutex)
    Hand over the mutex within the ES.
int ABT_mutex_unlock_de(ABT_mutex mutex)
int ABT_mutex_equal(ABT_mutex mutex1, ABT_mutex mutex2, ABT_bool *result)
    Compare two mutex handles for equality.

Referenced types, macros, and helpers:

struct ABT_mutex_opaque * ABT_mutex
struct ABT_mutex_attr_opaque * ABT_mutex_attr
#define ABT_ERR_INV_THREAD
#define HANDLE_ERROR_FUNC_WITH_CODE(n)
#define LOG_EVENT(fmt,...)
static void * ABTU_malloc(size_t size)
static void * ABTU_calloc(size_t num, size_t size)
static void ABTU_free(void *ptr)
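For reference, a minimal, self-contained usage sketch of the API above: four
ULTs on the primary ES increment a shared counter under the mutex. Pool setup
assumes the primary ES's default main pool; error checks are elided.

#include <abt.h>
#include <stdio.h>

typedef struct {
    ABT_mutex mutex;
    int counter;
} shared_t;

static void work(void *arg)
{
    shared_t *s = (shared_t *)arg;
    ABT_mutex_lock(s->mutex);
    s->counter++;               /* protected by the mutex */
    ABT_mutex_unlock(s->mutex);
}

int main(int argc, char **argv)
{
    ABT_xstream xstream;
    ABT_pool pool;
    ABT_thread threads[4];
    shared_t s = { ABT_MUTEX_NULL, 0 };
    int i;

    ABT_init(argc, argv);
    ABT_xstream_self(&xstream);
    ABT_xstream_get_main_pools(xstream, 1, &pool);

    ABT_mutex_create(&s.mutex);
    for (i = 0; i < 4; i++) {
        ABT_thread_create(pool, work, &s, ABT_THREAD_ATTR_NULL, &threads[i]);
    }
    for (i = 0; i < 4; i++) {
        ABT_thread_join(threads[i]);
        ABT_thread_free(&threads[i]);
    }

    printf("counter = %d\n", s.counter);  /* prints 4 */
    ABT_mutex_free(&s.mutex);
    ABT_finalize();
    return 0;
}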