/* ABTI_ythread_htable_create(): zero the freshly allocated row array, then
 * initialize the table lock with whichever lock implementation was selected
 * at configure time. */
    memset(p_htable->queue, 0, q_size);

#if defined(HAVE_LH_LOCK_H)
    lh_lock_init(&p_htable->mutex);
#elif defined(HAVE_CLH_H)
    clh_init(&p_htable->mutex);
#elif defined(USE_PTHREAD_MUTEX)
    int ret = pthread_mutex_init(&p_htable->mutex, NULL);
    /* ... fallback branch and error check elided in this excerpt ... */
#endif

    /* ... remaining field initialization elided ... */
    *pp_htable = p_htable;
/* ABTI_ythread_htable_free(): destroy the table lock with the matching call
 * for the lock implementation chosen at configure time. */
#if defined(HAVE_LH_LOCK_H)
    lh_lock_destroy(&p_htable->mutex);
#elif defined(HAVE_CLH_H)
    clh_destroy(&p_htable->mutex);
#elif defined(USE_PTHREAD_MUTEX)
    int ret = pthread_mutex_destroy(&p_htable->mutex);
    /* ... fallback branch and remaining cleanup elided in this excerpt ... */
#endif
/* ABTI_ythread_htable_push(): append p_ythread to the tail of row idx. */
    p_queue = &p_htable->queue[idx];
    /* ... row lock handling elided in this excerpt ... */
    if (p_queue->head == NULL) {
        /* Empty row: the new thread becomes both head and tail. */
        p_queue->head = p_ythread;
        p_queue->tail = p_ythread;
    } else {
        /* Non-empty row: link after the current tail (pointer update elided). */
        p_queue->tail = p_ythread;
    }
/* The same row lookup, repeated in the low-priority push path
 * (ABTI_ythread_htable_push_low()). */
    p_queue = &p_htable->queue[idx];
/* Pop path (ABTI_ythread_htable_pop()): detach the current head of the row;
 * if head and tail coincide, the row held a single thread, so reset both. */
    p_ythread = p_queue->head;
    if (p_queue->head == p_queue->tail) {
        p_queue->head = NULL;
        p_queue->tail = NULL;
    }
    /* ... else branch advancing the head elided in this excerpt ... */
/* ABTI_ythread_htable_switch_low(): tail of a call forwarding the caller's sync event (call head elided in this excerpt). */
    /* ... */ sync_event_type, p_sync);
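/*
 * The push/pop fragments above reduce to a head/tail singly linked queue per
 * hash-table row.  The following stand-alone sketch models that pattern in
 * plain C so it can be compiled and run on its own; the node/row_queue/htable
 * types and the htable_* helpers are illustrative stand-ins, not Argobots'
 * internal types or API.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct node {
    int id;
    struct node *next;
} node;

typedef struct {
    node *head;
    node *tail;
} row_queue;

typedef struct {
    row_queue *queue;   /* array of rows, like p_htable->queue */
    unsigned num_rows;
} htable;

static int htable_create(unsigned num_rows, htable **pp_ht)
{
    htable *p_ht = malloc(sizeof(*p_ht));
    if (!p_ht)
        return -1;
    p_ht->queue = malloc(num_rows * sizeof(row_queue));
    if (!p_ht->queue) {
        free(p_ht);
        return -1;
    }
    /* Zero the row array, like memset(p_htable->queue, 0, q_size). */
    memset(p_ht->queue, 0, num_rows * sizeof(row_queue));
    p_ht->num_rows = num_rows;
    *pp_ht = p_ht;   /* out-parameter, like *pp_htable = p_htable */
    return 0;
}

static void htable_push(htable *p_ht, int idx, node *p_node)
{
    row_queue *p_queue = &p_ht->queue[idx];
    p_node->next = NULL;
    if (p_queue->head == NULL) {
        /* Empty row: head and tail both point at the new element. */
        p_queue->head = p_node;
        p_queue->tail = p_node;
    } else {
        /* Non-empty row: append after the current tail. */
        p_queue->tail->next = p_node;
        p_queue->tail = p_node;
    }
}

static node *htable_pop(htable *p_ht, row_queue *p_queue)
{
    (void)p_ht;
    node *p_node = p_queue->head;
    if (p_node == NULL)
        return NULL;
    if (p_queue->head == p_queue->tail) {
        /* Last element: reset both pointers, as in the pop excerpt. */
        p_queue->head = NULL;
        p_queue->tail = NULL;
    } else {
        p_queue->head = p_node->next;
    }
    return p_node;
}

int main(void)
{
    htable *p_ht;
    if (htable_create(4, &p_ht) != 0)
        return 1;
    node a = { 1, NULL }, b = { 2, NULL };
    htable_push(p_ht, 3, &a);
    htable_push(p_ht, 3, &b);
    node *p = htable_pop(p_ht, &p_ht->queue[3]);
    printf("popped %d\n", p ? p->id : -1);   /* prints "popped 1" */
    free(p_ht->queue);
    free(p_ht);
    return 0;
}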
/* Declarations of the symbols referenced by the ABTI_ythread_htable excerpts. */

/* Hash-table interface */
ABTU_ret_err int ABTI_ythread_htable_create(uint32_t num_rows, ABTI_ythread_htable **pp_htable);
void ABTI_ythread_htable_free(ABTI_ythread_htable *p_htable);
void ABTI_ythread_htable_push(ABTI_ythread_htable *p_htable, int idx, ABTI_ythread *p_ythread);
void ABTI_ythread_htable_push_low(ABTI_ythread_htable *p_htable, int idx, ABTI_ythread *p_ythread);
ABTI_ythread *ABTI_ythread_htable_pop(ABTI_ythread_htable *p_htable, ABTI_ythread_queue *p_queue);
ABTI_ythread *ABTI_ythread_htable_pop_low(ABTI_ythread_htable *p_htable, ABTI_ythread_queue *p_queue);
ABT_bool ABTI_ythread_htable_switch_low(ABTI_xstream **pp_local_xstream, ABTI_ythread_queue *p_queue,
                                        ABTI_ythread *p_ythread, ABTI_ythread_htable *p_htable,
                                        ABT_sync_event_type sync_event_type, void *p_sync);

/* Per-row queue locking */
static void ABTI_ythread_queue_acquire_mutex(ABTI_ythread_queue *p_queue);
static void ABTI_ythread_queue_release_mutex(ABTI_ythread_queue *p_queue);
static void ABTI_ythread_queue_acquire_low_mutex(ABTI_ythread_queue *p_queue);
static void ABTI_ythread_queue_release_low_mutex(ABTI_ythread_queue *p_queue);
static void ABTI_spinlock_clear(ABTI_spinlock *p_lock);

/* Atomics */
static void ABTD_atomic_release_store_int(ABTD_atomic_int *ptr, int val);
static void ABTD_atomic_relaxed_store_uint32(ABTD_atomic_uint32 *ptr, uint32_t val);
static uint32_t ABTD_atomic_relaxed_load_uint32(const ABTD_atomic_uint32 *ptr);
static uint32_t ABTD_atomic_fetch_add_uint32(ABTD_atomic_uint32 *ptr, uint32_t v);
static uint32_t ABTD_atomic_fetch_sub_uint32(ABTD_atomic_uint32 *ptr, uint32_t v);

/* Memory and thread helpers */
static ABTU_ret_err int ABTU_malloc(size_t size, void **p_ptr);
static ABTU_ret_err int ABTU_memalign(size_t alignment, size_t size, void **p_ptr);
static void ABTU_free(void *ptr);
ABT_unit_id ABTI_thread_get_id(ABTI_thread *p_thread);
static ABTI_ythread *ABTI_thread_get_ythread(ABTI_thread *p_thread);
static ABTI_local *ABTI_xstream_get_local(ABTI_xstream *p_xstream);
static ABTI_ythread *ABTI_ythread_context_switch_to_sibling(ABTI_xstream **pp_local_xstream,
                                                            ABTI_ythread *p_old, ABTI_ythread *p_new);

/* Macros */
#define ABTI_ASSERT(cond)
#define ABTI_STATIC_ASSERT(cond)
#define ABTI_CHECK_ERROR(abt_errno)
#define ABTI_IS_ERROR_CHECK_ENABLED
#define ABTU_unreachable()
#define LOG_DEBUG(fmt, ...)

/* Referenced types and struct members */
typedef struct ABTI_ythread_queue ABTI_ythread_queue;   /* per-row queue type */
ABTI_ythread_queue *queue;      /* ABTI_ythread_htable: array of rows */
ABTD_atomic_uint32 num_elems;   /* ABTI_ythread_htable: element count */
ABTI_ythread_queue *h_list;     /* ABTI_ythread_htable: queue list head */
ABTI_ythread_queue *l_list;     /* ABTI_ythread_htable: queue list head */
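/*
 * The create/free excerpts select the table lock at compile time via an
 * #if/#elif chain (lh_lock, CLH lock, or pthread mutex).  Below is a minimal
 * sketch of that selection pattern, reduced to a pthread branch and a C11
 * atomic_flag fallback; the table_lock_* wrappers, the USE_PTHREAD_MUTEX
 * default, and the atomic_flag fallback are assumptions for this sketch,
 * not Argobots code.
 */
#if defined(USE_PTHREAD_MUTEX)
#include <assert.h>
#include <pthread.h>
typedef pthread_mutex_t table_lock_t;

/* Mirrors: int ret = pthread_mutex_init(&p_htable->mutex, NULL); */
static void table_lock_init(table_lock_t *p_lock)
{
    int ret = pthread_mutex_init(p_lock, NULL);
    assert(!ret);
}

/* Mirrors: int ret = pthread_mutex_destroy(&p_htable->mutex); */
static void table_lock_destroy(table_lock_t *p_lock)
{
    int ret = pthread_mutex_destroy(p_lock);
    assert(!ret);
}
#else
#include <stdatomic.h>
/* Fallback: a trivial C11 flag, standing in for an internal spinlock. */
typedef atomic_flag table_lock_t;

static void table_lock_init(table_lock_t *p_lock)
{
    atomic_flag_clear(p_lock);
}

static void table_lock_destroy(table_lock_t *p_lock)
{
    (void)p_lock;   /* nothing to release for the flag-based fallback */
}
#endif

int main(void)
{
    table_lock_t lock;
    table_lock_init(&lock);
    table_lock_destroy(&lock);
    return 0;
}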