#ifndef __SCHED_CONTEXT_H__
#define __SCHED_CONTEXT_H__

#include <starpu_sched_ctx.h>
#include <starpu_sched_ctx_hypervisor.h>
#include <starpu_scheduler.h>
#include <common/config.h>
#include <semaphore.h>
#include <string.h> /* for memset() in the unlock helper below */

#ifdef STARPU_HAVE_HWLOC
#include <hwloc.h>
#endif

/* Context 0 is the implicit global context holding every worker. */
#define STARPU_GLOBAL_SCHED_CTX 0
#define STARPU_NMAXSMS 13
/* Internal descriptor of a scheduling context (fields not kept in this
 * excerpt are elided). */
struct _starpu_sched_ctx
{
	/* Workers attached to this context. */
	struct starpu_worker_collection *workers;
	/* Tasks that could not be pushed because the context had no workers. */
	struct starpu_task_list empty_ctx_tasks;
	/* Tasks held back until the context is unblocked. */
	struct starpu_task_list waiting_tasks;
	/* Whether minimum/maximum task priorities were explicitly set. */
	int min_priority_is_set;
	int max_priority_is_set;
#ifdef STARPU_HAVE_HWLOC
	/* hwloc-related fields (elided). */
#endif
#ifdef STARPU_USE_SC_HYPERVISOR
	/* Hypervisor performance counters (elided). */
#endif //STARPU_USE_SC_HYPERVISOR
	/* Identifiers of the sub-contexts nested below this one. */
	int sub_ctxs[STARPU_NMAXWORKERS];
	/* Lock protecting the context, and the thread currently holding it
	 * for writing (used to catch recursive locking). */
	starpu_pthread_rwlock_t rwlock;
	starpu_pthread_t lock_write_owner;
	/* Workers to notify of / affected by a pending change of the
	 * context's worker set. */
	int nworkers_to_notify;
	int *workerids_to_notify;
	int nworkers_to_change;
	int *workerids_to_change;
};
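/* Illustrative sketch (not part of the original header): iterating over a
 * context's worker collection through the public iterator API.  The function
 * name below is hypothetical. */
#if 0
static void example_print_ctx_workers(unsigned sched_ctx_id)
{
	struct starpu_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sched_ctx_id);
	struct starpu_sched_ctx_iterator it;

	workers->init_iterator(workers, &it);
	while (workers->has_next(workers, &it))
	{
		int workerid = workers->get_next(workers, &it);
		fprintf(stderr, "worker %d belongs to ctx %u\n", workerid, sched_ctx_id);
	}
}
#endif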
/* Create a new context internally.  Only the tail of this declaration
 * survived in this excerpt; the return type and leading parameter names are
 * restored to match the StarPU source and should be treated as assumptions. */
struct _starpu_sched_ctx *_starpu_create_sched_ctx(struct starpu_sched_policy *policy,
						   int *workerids, int nworkerids,
						   unsigned is_initial_sched,
						   const char *sched_name,
						   int min_prio_set, int min_prio,
						   int max_prio_set, int max_prio,
						   unsigned awake_workers,
						   void (*sched_policy_init)(unsigned),
						   void *user_data,
						   int nsub_ctxs, int *sub_ctxs, int nsms);
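/* Illustrative sketch (not part of the original header): applications reach
 * the internal constructor above through the public starpu_sched_ctx_create()
 * entry point.  Worker ids, context name and policy name are examples. */
#if 0
static unsigned example_create_ctx(void)
{
	int workerids[2] = {0, 1};
	/* Hand workers 0 and 1 to a new context driven by the "dmda" policy;
	 * the option list is terminated by 0. */
	return starpu_sched_ctx_create(workerids, 2, "my_ctx",
				       STARPU_SCHED_CTX_POLICY_NAME, "dmda",
				       0);
}
#endif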
/* Book-keeping for the number of tasks submitted to a context. */
void _starpu_increment_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id);
int _starpu_get_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id);
int _starpu_check_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id);

/* Book-keeping for the number of ready tasks of a context, weighted by the
 * amount of work (flops) they carry. */
void _starpu_decrement_nready_tasks_of_sched_ctx(unsigned sched_ctx_id, double ready_flops);
unsigned _starpu_increment_nready_tasks_of_sched_ctx(unsigned sched_ctx_id, double ready_flops, struct starpu_task *task);
int _starpu_wait_for_no_ready_of_sched_ctx(unsigned sched_ctx_id);
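/* Illustrative sketch (not part of the original header): how the counters
 * above pair up over a task's life cycle.  The call sites shown are an
 * assumption based on the declarations, not verbatim StarPU code. */
#if 0
static void example_task_counters(unsigned ctx, struct starpu_task *task)
{
	_starpu_increment_nsubmitted_tasks_of_sched_ctx(ctx);                /* at submission */
	_starpu_increment_nready_tasks_of_sched_ctx(ctx, task->flops, task); /* dependencies released */
	_starpu_decrement_nready_tasks_of_sched_ctx(ctx, task->flops);       /* picked by a worker */
	_starpu_wait_for_no_ready_of_sched_ctx(ctx);                         /* drain the context */
}
#endif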
/* Re-queue the tasks that accumulated while the context had no workers. */
void _starpu_fetch_tasks_from_empty_ctx_list(struct _starpu_sched_ctx *sched_ctx);

/* Whether hypervisor interventions are currently allowed for this context. */
unsigned _starpu_sched_ctx_allow_hypervisor(unsigned sched_ctx_id);

/* Performance-model architecture descriptor associated with a context. */
struct starpu_perfmodel_arch *_starpu_sched_ctx_get_perf_archtype(unsigned sched_ctx);
#ifdef STARPU_USE_SC_HYPERVISOR
/* Hypervisor-only helpers (elided in this excerpt). */
#endif //STARPU_USE_SC_HYPERVISOR
/* Attach a set of combined (parallel) workers to a context. */
void starpu_sched_ctx_add_combined_workers(int *combined_workers_to_add, unsigned n_combined_workers_to_add, unsigned sched_ctx_id);
/* Fast path: when at most one context exists, every worker/job pair maps to
 * the global context 0; otherwise defer to the full lookup. */
#define _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(w,j) \
	(_starpu_get_nsched_ctxs() <= 1 ? _starpu_get_sched_ctx_struct(0) : __starpu_sched_ctx_get_sched_ctx_for_worker_and_job((w),(j)))

/* Map a context identifier to its internal descriptor (definition elided in
 * this excerpt). */
static inline struct _starpu_sched_ctx *_starpu_get_sched_ctx_struct(unsigned id);
/* Return whether the calling thread currently holds this context's write
 * lock. */
static inline int _starpu_sched_ctx_check_write_locked(unsigned sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	return starpu_pthread_equal(sched_ctx->lock_write_owner, starpu_pthread_self());
}

#define STARPU_SCHED_CTX_CHECK_LOCK(sched_ctx_id) STARPU_ASSERT(_starpu_sched_ctx_check_write_locked((sched_ctx_id)))
/* Take the context's write lock.  The owner field is read without
 * synchronization to assert against recursive write locking, hence the
 * Helgrind annotations around it. */
static inline void _starpu_sched_ctx_lock_write(unsigned sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	STARPU_HG_DISABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_ASSERT(!starpu_pthread_equal(sched_ctx->lock_write_owner, starpu_pthread_self()));
	STARPU_HG_ENABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_PTHREAD_RWLOCK_WRLOCK(&sched_ctx->rwlock);
	sched_ctx->lock_write_owner = starpu_pthread_self();
}
/* Release the context's write lock, clearing the recorded owner first. */
static inline void _starpu_sched_ctx_unlock_write(unsigned sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	STARPU_HG_DISABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_ASSERT(starpu_pthread_equal(sched_ctx->lock_write_owner, starpu_pthread_self()));
	memset(&sched_ctx->lock_write_owner, 0, sizeof(sched_ctx->lock_write_owner));
	STARPU_HG_ENABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_PTHREAD_RWLOCK_UNLOCK(&sched_ctx->rwlock);
}
/* Take the context's read lock.  A thread already holding the write lock
 * must not also request the read lock. */
static inline void _starpu_sched_ctx_lock_read(unsigned sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	STARPU_HG_DISABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_ASSERT(!starpu_pthread_equal(sched_ctx->lock_write_owner, starpu_pthread_self()));
	STARPU_HG_ENABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_PTHREAD_RWLOCK_RDLOCK(&sched_ctx->rwlock);
}
/* Release the context's read lock. */
static inline void _starpu_sched_ctx_unlock_read(unsigned sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	STARPU_HG_DISABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_ASSERT(!starpu_pthread_equal(sched_ctx->lock_write_owner, starpu_pthread_self()));
	STARPU_HG_ENABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_PTHREAD_RWLOCK_UNLOCK(&sched_ctx->rwlock);
}
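/* Illustrative sketch (not part of the original header): the expected
 * locking discipline around a structural change to a context.  The function
 * name and body below are hypothetical. */
#if 0
static void example_change_ctx(unsigned sched_ctx_id)
{
	_starpu_sched_ctx_lock_write(sched_ctx_id);
	STARPU_SCHED_CTX_CHECK_LOCK(sched_ctx_id); /* we do own the write lock */
	/* ... add/remove workers, move tasks, etc. ... */
	_starpu_sched_ctx_unlock_write(sched_ctx_id);
}
#endif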
/* If 'workerid' is the master of a child context of 'sched_ctx_id', move
 * 'task' into that child context and revert the parent's task counters.
 * Returns 1 when the task was moved, 0 otherwise. */
static inline unsigned _starpu_sched_ctx_worker_is_master_for_child_ctx(unsigned sched_ctx_id, unsigned workerid, struct starpu_task *task)
{
	unsigned child_sched_ctx = starpu_sched_ctx_worker_is_master_for_child_ctx(workerid, sched_ctx_id);
	if(child_sched_ctx != STARPU_NMAX_SCHED_CTXS)
	{
		starpu_sched_ctx_move_task_to_ctx_locked(task, child_sched_ctx, 1);
		starpu_sched_ctx_revert_task_counters_ctx_locked(sched_ctx_id, task->flops);
		return 1;
	}
	return 0;
}
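/* Illustrative sketch (not part of the original header): how a scheduling
 * policy's pop hook might use the helper above.  This call site is an
 * assumption, not verbatim StarPU code. */
#if 0
static struct starpu_task *example_pop_task(unsigned sched_ctx_id, unsigned workerid, struct starpu_task *picked)
{
	if (picked && _starpu_sched_ctx_worker_is_master_for_child_ctx(sched_ctx_id, workerid, picked))
		return NULL; /* the task now belongs to the child context */
	return picked;
}
#endif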
#endif // __SCHED_CONTEXT_H__