2 * Copyright (C) 2006-2014 Andre Noll <maan@systemlinux.org>
4 * Licensed under the GPL v2. For licensing details see COPYING.
7 /** \file sched.c Paraslash's scheduling functions. */
/* Backing storage for the global current-time pointer below. */
21 static struct timeval now_struct;
/*
 * Global "now" pointer; schedule() refreshes it via clock_get_realtime()
 * before the pre_select hooks and again after select() returns.
 */
22 struct timeval *now = &now_struct;
24 static inline bool timeout_is_zero(struct sched *s)
26 struct timeval *tv = &s->select_timeout;
27 return tv->tv_sec == 0 && tv->tv_usec == 0;
/* Run the pre_select hook of each task, once per scheduler iteration. */
30 static void sched_preselect(struct sched *s)
34 	list_for_each_entry_safe(t, tmp, &s->task_list, node) {
		/*
		 * A pending notification terminates the pre_select loop so the
		 * post_select methods run again promptly (see task_notify()).
		 */
37 		if (t->notification != 0)
/* Remove a task from the scheduler's task list and release its memory. */
44 static void unlink_and_free_task(struct task *t)
46 	PARA_INFO_LOG("freeing task %s\n", t->status);
51 //#define SCHED_DEBUG 1
/*
 * Invoke the task's post_select hook and store the result in t->error.
 *
 * NOTE(review): the two post_select call sites below are presumably the
 * two branches of an #ifdef SCHED_DEBUG conditional (preprocessor lines
 * elided in this view) -- with SCHED_DEBUG the call is timed with
 * clock_get_realtime()/tv_diff() and slow hooks are logged. Confirm
 * against the full source.
 */
52 static inline void call_post_select(struct sched *s, struct task *t)
55 	t->error = t->post_select(s, t);
57 	struct timeval t1, t2, diff;
60 	clock_get_realtime(&t1);
61 	t->error = t->post_select(s, t);
62 	clock_get_realtime(&t2);
63 	tv_diff(&t1, &t2, &diff);
66 	PARA_WARNING_LOG("%s: post_select time: %lums\n",
/*
 * Run post_select of every task and reap dead ones.
 *
 * Tasks marked dead by task_reap() are unlinked and freed here; for the
 * others call_post_select() is invoked. Returns the number of tasks that
 * are still running, which schedule() uses as its termination condition.
 */
71 static unsigned sched_post_select(struct sched *s)
74 	unsigned num_running_tasks = 0;
76 	list_for_each_entry_safe(t, tmp, &s->task_list, node) {
78 		if (t->dead) /* task has been reaped */
79 			unlink_and_free_task(t);
82 		call_post_select(s, t);
87 	return num_running_tasks;
91 * The core function of all paraslash programs.
93 * \param s Pointer to the scheduler struct.
95 * This function updates the global \a now pointer, calls all registered
96 * pre_select hooks which may set the timeout and add any file descriptors to
97 * the fd sets of \a s. Next, it calls para_select() and makes the result available
98 * to the registered tasks by calling their post_select hook.
100 * \return Zero if no more tasks are left in the task list, negative if the
101 * select function returned an error.
103 * \sa \ref task, \ref now.
105 int schedule(struct sched *s)
108 	unsigned num_running_tasks;
	/* Fall back to para_select() unless the caller installed a select hook. */
110 	if (!s->select_function)
111 		s->select_function = para_select;
	/*
	 * Each iteration starts from the default timeout; pre_select hooks may
	 * shorten it via sched_request_timeout() and friends.
	 */
115 	s->select_timeout = s->default_timeout;
	/* Refresh the global "now" before the pre_select hooks run. */
117 	clock_get_realtime(now);
119 	ret = s->select_function(s->max_fileno + 1, &s->rfds, &s->wfds,
125 	 * APUE: Be careful not to check the descriptor sets on return
126 	 * unless the return value is greater than zero. The return
127 	 * state of the descriptor sets is implementation dependent if
128 	 * either a signal is caught or the timer expires.
	/* Update "now" again: select() may have slept for a while. */
133 	clock_get_realtime(now);
134 	num_running_tasks = sched_post_select(s);
	/* All tasks terminated: the scheduler's work is done. */
135 	if (num_running_tasks == 0)
141 * Obtain the error status of a task and deallocate its resources.
143 * \param tptr Identifies the task to reap.
145 * This function is similar to wait(2) in that it returns information about a
146 * terminated task and allows to release the resources associated with the
147 * task. Until this function is called, the terminated task remains in a zombie
150 * \return If \a tptr is \p NULL, or \a *tptr is \p NULL, the function does
151 * nothing and returns zero. Otherwise, it is checked whether the task
152 * identified by \a tptr is still running. If it is, the function returns zero
153 * and again, no action is taken. Otherwise the (negative) error code of the
154 * terminated task is returned and \a *tptr is set to \p NULL. The task will
155 * then be removed from the scheduler task list.
157 * \sa \ref sched_shutdown(), wait(2).
159 int task_reap(struct task **tptr)
170 	if (t->dead) /* will be freed in sched_post_select() */
173 	 * With list_for_each_entry_safe() it is only safe to remove the
174 	 * _current_ list item. Since we are being called from the loop in
175 	 * schedule() via some task's ->post_select() function, freeing the
176 	 * given task here would result in use-after-free bugs in schedule().
177 	 * So we only set t->dead which tells schedule() to free the task in
178 	 * the next iteration of its loop.
186 * Deallocate all resources of all tasks of a scheduler instance.
188 * \param s The scheduler instance.
190 * This should only be called after \ref schedule() has returned.
192 void sched_shutdown(struct sched *s)
194 	struct task *t, *tmp;
196 	list_for_each_entry_safe(t, tmp, &s->task_list, node) {
198 		/* The task list should contain only terminated tasks. */
		/*
		 * A still-running task here indicates a caller bug: warn about
		 * it, but free the task anyway so no memory is leaked.
		 */
199 		PARA_WARNING_LOG("shutting down running task %s\n",
201 		unlink_and_free_task(t);
206 * Add a task to the scheduler task list.
208 * \param info Task information supplied by the caller.
209 * \param s The scheduler instance.
211 * \return A pointer to a newly allocated task structure. It will be
212 * freed by sched_shutdown().
214 struct task *task_register(struct task_info *info, struct sched *s)
216 struct task *t = para_malloc(sizeof(*t));
218 assert(info->post_select);
220 if (!s->task_list.next)
221 INIT_LIST_HEAD(&s->task_list);
223 snprintf(t->status, sizeof(t->status) - 1, "%s", info->name);
224 t->status[sizeof(t->status) - 1] = '\0';
228 t->pre_select = info->pre_select;
229 t->post_select = info->post_select;
230 t->context = info->context;
231 list_add_tail(&t->node, &s->task_list);
236 * Obtain the context pointer of a task.
238 * \param t Return this task's context pointer.
240 * \return A pointer to the memory location specified previously as \a
241 * task_info->context when the task was registered with \ref task_register().
/* Simple accessor; presumably just returns t->context (body elided here). */
243 void *task_context(struct task *t)
249 * Get the list of all registered tasks.
251 * \param s The scheduler instance to get the task list from.
253 * \return The task list.
255 * Each entry of the list contains an identifier which is simply a hex number.
256 * The result is dynamically allocated and must be freed by the caller.
258 char *get_task_list(struct sched *s)
260 	struct task *t, *tmp;
263 	list_for_each_entry_safe(t, tmp, &s->task_list, node) {
		/* Append one line per task: "<pointer>\t<state>\t<status>". */
265 		tmp_msg = make_message("%s%p\t%s\t%s\n", msg? msg : "", t,
		/*
		 * State column: a negative error code means the task has
		 * terminated; it is "dead" once reaped, "zombie" until then.
		 */
266 		t->error < 0? (t->dead? "dead" : "zombie") : "running",
275 * Set the notification value of a task.
277 * \param t The task to notify.
278 * \param err A positive error code.
280 * Tasks which honor notifications are supposed to call \ref
281 * task_get_notification() in their post_select function and act on the
282 * returned notification value.
284 * If the scheduler detects during its pre_select loop that at least one task
285 * has been notified, the loop terminates, and the post_select methods of all
286 * taks are immediately called again.
288 * The notification for a task is reset after the call to its post_select
291 * \sa \ref task_get_notification().
293 void task_notify(struct task *t, int err)
296 if (t->notification == -err) /* ignore subsequent notifications */
298 PARA_INFO_LOG("notifying task %s: %s\n", t->status, para_strerror(err));
299 t->notification = -err;
303 * Return the notification value of a task.
305 * \param t The task to get the notification value from.
307 * \return The notification value. If this is negative, the task has been
308 * notified by another task. Tasks are supposed to check for notifications by
309 * calling this function from their post_select method.
311 * \sa \ref task_notify().
/* Read-only accessor; the value is reset after the task's post_select runs. */
313 int task_get_notification(const struct task *t)
315 	return t->notification;
319 * Set the notification value of all tasks of a scheduler instance.
321 * \param s The scheduler instance whose tasks should be notified.
322 * \param err A positive error code.
324 * This simply iterates over all existing tasks of \a s and sets each
325 * task's notification value to \p -err.
327 void task_notify_all(struct sched *s, int err)
	/* Per the doc comment above, each task's notification is set to -err. */
331 	list_for_each_entry(t, &s->task_list, node)
336 * Set the select timeout to the minimal possible value.
338 * \param s Pointer to the scheduler struct.
340 * This causes the next select() call to return immediately.
342 void sched_min_delay(struct sched *s)
344 s->select_timeout.tv_sec = s->select_timeout.tv_usec = 0;
348 * Impose an upper bound for the timeout of the next select() call.
350 * \param to Maximal allowed timeout.
351 * \param s Pointer to the scheduler struct.
353 * If the current scheduler timeout is already smaller than \a to, this
354 * function does nothing. Otherwise the timeout for the next select() call is
355 * set to the given value.
357 * \sa sched_request_timeout_ms().
359 void sched_request_timeout(struct timeval *to, struct sched *s)
361 if (tv_diff(&s->select_timeout, to, NULL) > 0)
362 s->select_timeout = *to;
366 * Force the next select() call to return before the given amount of milliseconds.
368 * \param ms The maximal allowed timeout in milliseconds.
369 * \param s Pointer to the scheduler struct.
371 * Like sched_request_timeout() this imposes an upper bound on the timeout
372 * value for the next select() call.
374 void sched_request_timeout_ms(long unsigned ms, struct sched *s)
	/*
	 * NOTE(review): tv is presumably a local struct timeval filled from
	 * ms on elided lines above -- confirm against the full source.
	 */
378 	sched_request_timeout(&tv, s);
382 * Force the next select() call to return before the given future time.
384 * \param barrier Absolute time before select() should return.
385 * \param s Pointer to the scheduler struct.
387 * \return If \a barrier is in the past, this function does nothing and returns
388 * zero. Otherwise it returns one.
390 * \sa sched_request_barrier_or_min_delay().
392 int sched_request_barrier(struct timeval *barrier, struct sched *s)
	/* diff := barrier - now; positive iff the barrier lies in the future. */
396 	if (tv_diff(now, barrier, &diff) > 0)
398 		sched_request_timeout(&diff, s);
403 * Force the next select() call to return before the given time.
405 * \param barrier Absolute time before select() should return.
406 * \param s Pointer to the scheduler struct.
408 * \return If \a barrier is in the past, this function requests a minimal
409 * timeout and returns zero. Otherwise it returns one.
411 * \sa sched_min_delay(), sched_request_barrier().
413 int sched_request_barrier_or_min_delay(struct timeval *barrier, struct sched *s)
	/*
	 * Barrier still ahead of "now": bound the timeout by the remaining
	 * interval. The elided else branch presumably calls sched_min_delay().
	 */
417 	if (tv_diff(now, barrier, &diff) > 0) {
421 		sched_request_timeout(&diff, s);