#ifndef ABTI_THREAD_H_INCLUDED
#define ABTI_THREAD_H_INCLUDED

/* Inlined functions for User-level Thread (ULT) */

static inline ABTI_thread *ABTI_thread_get_ptr(ABT_thread thread)
{
#ifndef ABT_CONFIG_DISABLE_ERROR_CHECK
    ABTI_thread *p_thread;
    if (thread == ABT_THREAD_NULL) {
        p_thread = NULL;
    } else {
        p_thread = (ABTI_thread *)thread;
    }
    return p_thread;
#else
    return (ABTI_thread *)thread;
#endif
}
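
/* ABT_thread is an opaque handle that aliases the internal ABTI_thread
 * pointer; the converters above and below merely add a NULL <->
 * ABT_THREAD_NULL translation when error checking is enabled. */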
static inline ABT_thread ABTI_thread_get_handle(ABTI_thread *p_thread)
{
#ifndef ABT_CONFIG_DISABLE_ERROR_CHECK
    ABT_thread h_thread;
    if (p_thread == NULL) {
        h_thread = ABT_THREAD_NULL;
    } else {
        h_thread = (ABT_thread)p_thread;
    }
    return h_thread;
#else
    return (ABT_thread)p_thread;
#endif
}
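
/* Dynamic promotion (a sketch of the scheme as reflected in the code below):
 * a newly created ULT is first run by calling its function directly on its
 * stack, without fully initializing its own context.  Only when it suspends
 * for the first time is it "promoted" to a regular ULT whose context can be
 * saved and restored; a ULT that finishes without ever suspending skips that
 * cost entirely. */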
#if ABT_CONFIG_THREAD_TYPE == ABT_THREAD_TYPE_DYNAMIC_PROMOTION
static inline ABT_bool ABTI_thread_is_dynamic_promoted(ABTI_thread *p_thread)
{
    return ABTD_thread_context_is_dynamic_promoted(&p_thread->ctx);
}
static inline void ABTI_thread_dynamic_promote_thread(ABTI_thread *p_thread)
{
    LOG_EVENT("[U%" PRIu64 "] dynamic-promote ULT\n",
              ABTI_thread_get_id(p_thread));
    void *p_stack = p_thread->attr.p_stack;
    size_t stacksize = p_thread->attr.stacksize;
    void *p_stacktop = (void *)(((char *)p_stack) + stacksize);
    ABTD_thread_context_dynamic_promote_thread(p_stacktop);
}
#endif
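
/* The *_internal context-switch helpers below come in four flavors
 * ({thread,sched} x {thread,sched}).  Each takes an is_finish flag:
 * ABT_FALSE requests an ordinary switch that eventually returns to p_old,
 * while ABT_TRUE requests a one-way "finish" switch used when p_old's
 * context is terminating. */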
static inline void ABTI_thread_context_switch_thread_to_thread_internal(
    ABTI_local *p_local, ABTI_thread *p_old, ABTI_thread *p_new,
    ABT_bool is_finish)
{
#ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
    ABTI_ASSERT(!p_old->is_sched && !p_new->is_sched);
#endif
    p_local->p_thread = p_new;
#if ABT_CONFIG_THREAD_TYPE == ABT_THREAD_TYPE_DYNAMIC_PROMOTION
    /* Promotion happens only on a yield-style switch, not on a finish. */
    if (!is_finish && !ABTI_thread_is_dynamic_promoted(p_old)) {
        ABTI_thread_dynamic_promote_thread(p_old);
    }
    if (!ABTI_thread_is_dynamic_promoted(p_new)) {
        /* p_new does not have a context yet, so create one first. */
        ABTD_thread_context_arm_thread(p_new->attr.stacksize,
                                       p_new->attr.p_stack, &p_new->ctx);
    }
#endif
    if (is_finish) {
        ABTD_thread_finish_context(&p_old->ctx, &p_new->ctx);
    } else {
        ABTD_thread_context_switch(&p_old->ctx, &p_new->ctx);
    }
}
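
/* ULT-to-scheduler switch: the path taken when, e.g., a ULT yields control
 * back to the scheduler of its execution stream (see ABTI_thread_yield
 * below). */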
static inline void ABTI_thread_context_switch_thread_to_sched_internal(
    ABTI_thread *p_old, ABTI_sched *p_new, ABT_bool is_finish)
{
#ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
    ABTI_ASSERT(!p_old->is_sched);
#endif
    ABTI_LOG_SET_SCHED(p_new);
#if ABT_CONFIG_THREAD_TYPE == ABT_THREAD_TYPE_DYNAMIC_PROMOTION
    /* Promotion happens only on a yield-style switch, not on a finish. */
    if (!is_finish && !ABTI_thread_is_dynamic_promoted(p_old))
        ABTI_thread_dynamic_promote_thread(p_old);
    /* A scheduler's ULT must have been promoted eagerly. */
    ABTI_ASSERT(!p_new->p_thread ||
                ABTI_thread_is_dynamic_promoted(p_new->p_thread));
#endif
    if (is_finish) {
        ABTD_thread_finish_context(&p_old->ctx, p_new->p_ctx);
    } else {
        ABTD_thread_context_switch(&p_old->ctx, p_new->p_ctx);
    }
}
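
/* Scheduler-to-ULT switch.  Under dynamic promotion an unpromoted ULT is
 * not switched to but *called* on its stack via
 * ABTD_thread_context_make_and_call; if it finishes without ever being
 * promoted, control returns here on the fast path and the scheduler must
 * handle the ULT's join/terminate requests inline. */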
static inline void ABTI_thread_context_switch_sched_to_thread_internal(
    ABTI_local *p_local, ABTI_sched *p_old, ABTI_thread *p_new,
    ABT_bool is_finish)
{
#ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
    ABTI_ASSERT(!p_new->is_sched);
#endif
    ABTI_LOG_SET_SCHED(NULL);
    p_local->p_thread = p_new;
    p_local->p_task = NULL;
#if ABT_CONFIG_THREAD_TYPE == ABT_THREAD_TYPE_DYNAMIC_PROMOTION
    /* The scheduler's own ULT must have been promoted eagerly. */
    ABTI_ASSERT(!p_old->p_thread ||
                ABTI_thread_is_dynamic_promoted(p_old->p_thread));
    if (!ABTI_thread_is_dynamic_promoted(p_new)) {
        /* Fast path: call the unpromoted ULT directly on its stack. */
        void *p_stacktop =
            ((char *)p_new->attr.p_stack) + p_new->attr.stacksize;
        LOG_EVENT("[U%" PRIu64 "] run ULT (dynamic promotion)\n",
                  ABTI_thread_get_id(p_new));
        ABTD_thread_context_make_and_call(p_old->p_ctx, p_new->ctx.f_thread,
                                          p_new->ctx.p_arg, p_stacktop);
        /* The scheduler resumes here.  If the ULT finished without ever
         * being promoted, its termination has not been processed yet, so
         * handle its join/terminate requests inline. */
        ABTI_thread *p_prev = p_local->p_thread;
        if (!ABTI_thread_is_dynamic_promoted(p_prev)) {
            ABTI_ASSERT(p_prev == p_new);
            ABTD_thread_context *p_ctx = &p_prev->ctx;
            ABTD_thread_context *p_link =
                ABTD_atomic_acquire_load_thread_context_ptr(&p_ctx->p_link);
            if (p_link) {
                /* p_link is set: another ULT has already called join, so
                 * make the joiner ready again. */
                ABTI_thread *p_joiner = (ABTI_thread *)p_link;
                ABTI_thread_set_ready(p_local, p_joiner);
                /* No atomic OR is needed here: the ULT terminates
                 * regardless of any other pending request. */
                ABTD_atomic_release_store_uint32(&p_prev->request,
                                                 ABTI_THREAD_REQ_TERMINATE);
            } else {
                uint32_t req = ABTD_atomic_fetch_or_uint32(
                    &p_prev->request,
                    ABTI_THREAD_REQ_JOIN | ABTI_THREAD_REQ_TERMINATE);
                if (req & ABTI_THREAD_REQ_JOIN) {
                    /* A joiner raced with us and is about to block; wait
                     * until it publishes its context, then wake it. */
                    do {
                        p_link = ABTD_atomic_acquire_load_thread_context_ptr(
                            &p_ctx->p_link);
                    } while (!p_link);
                    ABTI_thread_set_ready(p_local, (ABTI_thread *)p_link);
                }
            }
            ABTI_LOG_SET_SCHED(p_old);
        }
        return;
    }
#endif
    if (is_finish) {
        ABTD_thread_finish_context(p_old->p_ctx, &p_new->ctx);
    } else {
        ABTD_thread_context_switch(p_old->p_ctx, &p_new->ctx);
    }
}
static inline void ABTI_thread_context_switch_sched_to_sched_internal(
    ABTI_sched *p_old, ABTI_sched *p_new, ABT_bool is_finish)
{
    ABTI_LOG_SET_SCHED(p_new);
#if ABT_CONFIG_THREAD_TYPE == ABT_THREAD_TYPE_DYNAMIC_PROMOTION
    /* Schedulers' ULTs must have been promoted eagerly. */
    ABTI_ASSERT(!p_old->p_thread ||
                ABTI_thread_is_dynamic_promoted(p_old->p_thread));
    ABTI_ASSERT(!p_new->p_thread ||
                ABTI_thread_is_dynamic_promoted(p_new->p_thread));
#endif
    if (is_finish) {
        ABTD_thread_finish_context(p_old->p_ctx, p_new->p_ctx);
    } else {
        ABTD_thread_context_switch(p_old->p_ctx, p_new->p_ctx);
    }
}
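
/* Public wrappers.  Each performs a non-finishing switch (ABT_FALSE) and
 * then reloads *pp_local: after resuming, the caller may be running on a
 * different execution stream, so the cached ABTI_local pointer must be
 * refreshed through ABTI_local_get_local_uninlined().  Callers therefore
 * pass the local pointer by reference, e.g.:
 *
 *   ABTI_thread_context_switch_thread_to_sched(&p_local, p_self, p_sched);
 *   // p_local is now valid for the stream on which p_self resumed
 */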
static inline void ABTI_thread_context_switch_thread_to_thread(
    ABTI_local **pp_local, ABTI_thread *p_old, ABTI_thread *p_new)
{
    ABTI_thread_context_switch_thread_to_thread_internal(*pp_local, p_old,
                                                         p_new, ABT_FALSE);
    *pp_local = ABTI_local_get_local_uninlined();
}
static inline void ABTI_thread_context_switch_thread_to_sched(
    ABTI_local **pp_local, ABTI_thread *p_old, ABTI_sched *p_new)
{
    ABTI_thread_context_switch_thread_to_sched_internal(p_old, p_new,
                                                        ABT_FALSE);
    *pp_local = ABTI_local_get_local_uninlined();
}
static inline void ABTI_thread_context_switch_sched_to_thread(
    ABTI_local **pp_local, ABTI_sched *p_old, ABTI_thread *p_new)
{
    ABTI_thread_context_switch_sched_to_thread_internal(*pp_local, p_old,
                                                        p_new, ABT_FALSE);
    *pp_local = ABTI_local_get_local_uninlined();
}
static inline void
ABTI_thread_context_switch_sched_to_sched(ABTI_local **pp_local,
                                          ABTI_sched *p_old, ABTI_sched *p_new)
{
    ABTI_thread_context_switch_sched_to_sched_internal(p_old, p_new,
                                                       ABT_FALSE);
    *pp_local = ABTI_local_get_local_uninlined();
}
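
/* The finish_context_* variants are the one-way counterparts: they pass
 * ABT_TRUE so that p_old's context is discarded and control never returns
 * to it, which is why none of them needs to refresh an ABTI_local pointer. */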
static inline void ABTI_thread_finish_context_thread_to_thread(
    ABTI_local *p_local, ABTI_thread *p_old, ABTI_thread *p_new)
{
    ABTI_thread_context_switch_thread_to_thread_internal(p_local, p_old, p_new,
                                                         ABT_TRUE);
}
static inline void
ABTI_thread_finish_context_thread_to_sched(ABTI_thread *p_old,
                                           ABTI_sched *p_new)
{
    ABTI_thread_context_switch_thread_to_sched_internal(p_old, p_new,
                                                        ABT_TRUE);
}
static inline void ABTI_thread_finish_context_sched_to_thread(
    ABTI_local *p_local, ABTI_sched *p_old, ABTI_thread *p_new)
{
    ABTI_thread_context_switch_sched_to_thread_internal(p_local, p_old, p_new,
                                                        ABT_TRUE);
}
static inline void ABTI_thread_finish_context_sched_to_sched(ABTI_sched *p_old,
                                                             ABTI_sched *p_new)
{
    ABTI_thread_context_switch_sched_to_sched_internal(p_old, p_new,
                                                       ABT_TRUE);
}
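
/* Request flags (e.g., ABTI_THREAD_REQ_JOIN / ABTI_THREAD_REQ_TERMINATE)
 * are set and cleared with atomic read-modify-write operations so that any
 * thread may signal a ULT concurrently. */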
static inline void ABTI_thread_set_request(ABTI_thread *p_thread, uint32_t req)
{
    ABTD_atomic_fetch_or_uint32(&p_thread->request, req);
}
static inline void ABTI_thread_unset_request(ABTI_thread *p_thread,
                                             uint32_t req)
{
    ABTD_atomic_fetch_and_uint32(&p_thread->request, ~req);
}
static inline void ABTI_thread_yield(ABTI_local **pp_local,
                                     ABTI_thread *p_thread)
{
    ABTI_sched *p_sched;

    LOG_EVENT("[U%" PRIu64 ":E%d] yield\n", ABTI_thread_get_id(p_thread),
              p_thread->p_last_xstream->rank);

    /* Switch to the top scheduler of this execution stream. */
    p_sched = ABTI_xstream_get_top_sched(p_thread->p_last_xstream);
    ABTI_thread_context_switch_thread_to_sched(pp_local, p_thread, p_sched);

    /* Back to this ULT. */
    LOG_EVENT("[U%" PRIu64 ":E%d] resume after yield\n",
              ABTI_thread_get_id(p_thread), p_thread->p_last_xstream->rank);
}
#endif /* ABTI_THREAD_H_INCLUDED */