/* Copyright (C) 2006 Andre Noll <maan@tuebingen.mpg.de>, see file COPYING. */

/** \file sched.c Paraslash's scheduling functions. */
/**
 * The possible states of a task.
 *
 * In addition to the states listed here, a task may also enter zombie state.
 * This happens when its ->post_monitor function returns a negative value; the
 * ->status field is then set to this return value. Such tasks are not
 * scheduled any more (i.e. ->pre_monitor() and ->post_monitor() are no longer
 * called), but they stay on the scheduler task list until \ref task_reap() or
 * \ref sched_shutdown() is called.
 */
enum task_status {
	/** Task has been reaped and may be removed from the task list. */
	TS_DEAD,
	/** Task is active. */
	TS_RUNNING,
};

struct task {
	/** A copy of the task name supplied when the task was registered. */
	char *name;
	/** Copied during task_register(). */
	struct task_info info;
	/* TS_RUNNING, TS_DEAD, or zombie (negative value). */
	int status;
	/** Position of the task in the task list of the scheduler. */
	struct list_head node;
	/** If less than zero, the task was notified by another task. */
	int notification;
};
static struct timeval now_struct;
const struct timeval *now = &now_struct;
static void sched_pre_monitor(struct sched *s)
{
	struct task *t, *tmp;

	list_for_each_entry_safe(t, tmp, &s->task_list, node) {
		if (t->status < 0) /* zombie, skip */
			continue;
		if (t->notification != 0) {
			sched_min_delay(s);
			return;
		}
		if (t->info.pre_monitor)
			t->info.pre_monitor(s, t->info.context);
	}
}
static void unlink_and_free_task(struct task *t)
{
	PARA_INFO_LOG("freeing task %s (%s)\n", t->name, t->status < 0?
		para_strerror(-t->status) :
		(t->status == TS_DEAD? "[dead]" : "[running]"));
	list_del(&t->node);
	free(t->name);
	free(t);
}
//#define SCHED_DEBUG 1
static inline void call_post_monitor(struct sched *s, struct task *t)
{
	int ret;

#ifndef SCHED_DEBUG
	ret = t->info.post_monitor(s, t->info.context);
#else
	struct timeval t1, t2, diff;
	unsigned long pst;

	clock_get_realtime(&t1);
	ret = t->info.post_monitor(s, t->info.context);
	clock_get_realtime(&t2);
	tv_diff(&t1, &t2, &diff);
	pst = tv2ms(&diff);
	if (pst > 50)
		PARA_WARNING_LOG("%s: post_monitor time: %lums\n",
			t->name, pst);
#endif
	t->status = ret < 0? ret : TS_RUNNING;
}
static unsigned sched_post_monitor(struct sched *s)
{
	struct task *t, *tmp;
	unsigned num_running_tasks = 0;

	list_for_each_entry_safe(t, tmp, &s->task_list, node) {
		if (t->status == TS_DEAD) /* task has been reaped */
			unlink_and_free_task(t);
		else if (t->status == TS_RUNNING) {
			call_post_monitor(s, t); /* sets t->status */
			t->notification = 0;
			if (t->status == TS_RUNNING)
				num_running_tasks++;
		}
	}
	return num_running_tasks;
}
/**
 * The core function of all paraslash programs.
 *
 * \param s Pointer to the scheduler struct.
 *
 * This function updates the global now pointer, calls all registered
 * pre_monitor hooks which may set the timeout and add any file descriptors to
 * the pollfd array. Next, it calls the poll function and makes the result
 * available to the registered tasks by calling their post_monitor hook.
 *
 * \return Zero if no more tasks are left in the task list, negative if the
 * poll function returned an error.
 */
int schedule(struct sched *s)
{
	int ret;
	unsigned num_running_tasks;

	if (!s->poll_function)
		s->poll_function = xpoll;
again:
	s->num_pfds = 0;
	memset(s->pidx, 0xff, s->pidx_array_len * sizeof(unsigned));
	s->timeout = s->default_timeout;
	clock_get_realtime(&now_struct);
	sched_pre_monitor(s);
	ret = s->poll_function(s->pfd, s->num_pfds, s->timeout);
	if (ret < 0)
		return ret;
	clock_get_realtime(&now_struct);
	num_running_tasks = sched_post_monitor(s);
	if (num_running_tasks == 0)
		return 0;
	goto again;
}
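
/*
 * Usage sketch (not part of the original file): a minimal program which
 * registers a single task and runs the scheduler until the task terminates.
 * The task name, the hello_post_monitor() helper and the error code
 * E_HELLO_DONE are hypothetical, and the timeout is assumed to be in
 * milliseconds.
 *
 *	static int hello_post_monitor(struct sched *s, void *context)
 *	{
 *		PARA_NOTICE_LOG("hello\n");
 *		return -E_HELLO_DONE; // negative: task becomes a zombie
 *	}
 *
 *	int main(void)
 *	{
 *		struct sched s = {.default_timeout = 1000};
 *		struct task_info ti = {
 *			.name = "hello",
 *			.post_monitor = hello_post_monitor,
 *			.context = NULL,
 *		};
 *		int ret;
 *
 *		task_register(&ti, &s);
 *		ret = schedule(&s); // returns once no task is running
 *		sched_shutdown(&s); // frees the zombie task
 *		return ret < 0? EXIT_FAILURE : EXIT_SUCCESS;
 *	}
 */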
/**
 * Obtain the error status of a task and deallocate its resources.
 *
 * \param tptr Identifies the task to reap.
 *
 * This function is similar to wait(2) in that it returns information about a
 * terminated task which allows releasing the resources associated with the
 * task. Until this function is called, the terminated task remains in a
 * zombie state.
 *
 * \return If \a tptr is \p NULL, or \a *tptr is \p NULL, the function does
 * nothing and returns zero. Otherwise, it is checked whether the task
 * identified by \a tptr is still running. If it is, the function returns zero
 * and, again, no action is taken. Otherwise the (negative) error code of the
 * terminated task is returned and \a *tptr is set to \p NULL. The task will
 * then be removed from the scheduler task list.
 *
 * \sa \ref sched_shutdown(), wait(2).
 */
int task_reap(struct task **tptr)
{
	struct task *t;
	int ret;

	if (!tptr || !*tptr)
		return 0;
	t = *tptr;
	if (t->status >= 0) /* still running */
		return 0;
	ret = t->status;
	/*
	 * With list_for_each_entry_safe() it is only safe to remove the
	 * _current_ list item. Since we are being called from the loop in
	 * schedule() via some task's ->post_monitor() function, freeing the
	 * given task here would result in use-after-free bugs in schedule().
	 * So we only set the task status to TS_DEAD which tells schedule() to
	 * free the task in the next iteration of its loop.
	 */
	t->status = TS_DEAD;
	*tptr = NULL;
	return ret;
}
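
/*
 * Usage sketch: reaping a terminated task from the post_monitor hook of a
 * supervising task. The child_task pointer is hypothetical and assumed to
 * hold the return value of an earlier task_register() call.
 *
 *	static int supervisor_post_monitor(struct sched *s, void *context)
 *	{
 *		int ret = task_reap(&child_task);
 *
 *		if (ret < 0) // reaped; child_task is now NULL
 *			PARA_INFO_LOG("child: %s\n", para_strerror(-ret));
 *		return 0; // keep the supervisor running
 *	}
 */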
/**
 * Deallocate all resources of all tasks of a scheduler instance.
 *
 * \param s The scheduler instance.
 *
 * This should only be called after \ref schedule() has returned.
 */
void sched_shutdown(struct sched *s)
{
	struct task *t, *tmp;

	list_for_each_entry_safe(t, tmp, &s->task_list, node) {
		if (t->status == TS_RUNNING)
			/* The task list should contain only terminated tasks. */
			PARA_WARNING_LOG("shutting down running task %s\n",
				t->name);
		unlink_and_free_task(t);
	}
}
/**
 * Add a task to the scheduler task list.
 *
 * \param info Task information supplied by the caller.
 * \param s The scheduler instance.
 *
 * \return A pointer to a newly allocated task structure. It will be
 * freed by sched_shutdown().
 */
struct task *task_register(struct task_info *info, struct sched *s)
{
	struct task *t = alloc(sizeof(*t));

	assert(info->post_monitor);

	if (!s->task_list.next)
		init_list_head(&s->task_list);

	t->info = *info;
	t->name = para_strdup(info->name);
	t->notification = 0;
	t->status = TS_RUNNING;
	list_add_tail(&t->node, &s->task_list);
	return t;
}
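
/*
 * Usage sketch: registering a task whose context points at caller-owned
 * state. Since task_register() copies the task_info structure, it may live
 * on the stack. All names below (struct ticker, E_TICKER_DONE, ...) are
 * hypothetical.
 *
 *	struct ticker { int remaining; };
 *
 *	static int ticker_post_monitor(struct sched *s, void *context)
 *	{
 *		struct ticker *tck = context;
 *		return --tck->remaining > 0? 0 : -E_TICKER_DONE;
 *	}
 *
 *	static struct ticker the_ticker = {.remaining = 10};
 *	struct task_info ti = {
 *		.name = "ticker",
 *		.post_monitor = ticker_post_monitor,
 *		.context = &the_ticker,
 *	};
 *	struct task *ticker_task = task_register(&ti, &s);
 */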
/**
 * Get the list of all registered tasks.
 *
 * \param s The scheduler instance to get the task list from.
 *
 * \return The task list.
 *
 * Each entry of the list contains an identifier which is simply a hex number.
 * The result is dynamically allocated and must be freed by the caller.
 */
char *get_task_list(struct sched *s)
{
	struct task *t, *tmp;
	char *msg = NULL;

	list_for_each_entry_safe(t, tmp, &s->task_list, node) {
		char *tmp_msg;

		tmp_msg = make_message("%s%p\t%s\t%s\n", msg? msg : "", t,
			t->status == TS_DEAD? "dead" :
			(t->status == TS_RUNNING? "running" : "zombie"),
			t->name);
		free(msg);
		msg = tmp_msg;
	}
	return msg;
}
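
/*
 * A possible result (illustrative values only): one line per task, giving
 * the task identifier, its state and its name, separated by tabs.
 *
 *	0x55a1c2f012a0	running	sched_example
 *	0x55a1c2f01340	zombie	signal
 */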
/**
 * Set the notification value of a task.
 *
 * \param t The task to notify.
 * \param err A positive error code.
 *
 * Tasks which honor notifications are supposed to call \ref
 * task_get_notification() in their post_monitor function and act on the
 * returned notification value.
 *
 * If the scheduler detects during its pre_monitor loop that at least one task
 * has been notified, the loop terminates, and the post_monitor methods of all
 * tasks are immediately called again.
 *
 * The notification for a task is reset after the call to its post_monitor
 * method.
 *
 * \sa \ref task_get_notification().
 */
void task_notify(struct task *t, int err)
{
	assert(err > 0);
	if (t->notification == -err) /* ignore subsequent notifications */
		return;
	PARA_INFO_LOG("notifying task %s: %s\n", t->name, para_strerror(err));
	t->notification = -err;
}
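
/*
 * Usage sketch: asking another task to terminate. The writer_task pointer
 * and the (positive) error code E_SHUTDOWN_REQUESTED are hypothetical.
 *
 *	task_notify(writer_task, E_SHUTDOWN_REQUESTED);
 *
 * The notified task is expected to pick up the notification in its
 * post_monitor hook, see task_get_notification() below.
 */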
/**
 * Return the notification value of a task.
 *
 * \param t The task to get the notification value from.
 *
 * \return The notification value. If this is negative, the task has been
 * notified by another task. Tasks are supposed to check for notifications by
 * calling this function from their post_monitor method.
 *
 * \sa \ref task_notify().
 */
int task_get_notification(const struct task *t)
{
	return t->notification;
}
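
/*
 * Usage sketch: honoring notifications in a post_monitor hook. Since the
 * hook only receives the context pointer, the task is assumed to keep the
 * struct task pointer returned by task_register() in its (hypothetical)
 * context structure.
 *
 *	static int writer_post_monitor(struct sched *s, void *context)
 *	{
 *		struct writer_data *wd = context;
 *		int ret = task_get_notification(wd->task);
 *
 *		if (ret < 0)
 *			return ret; // terminate with the given error code
 *		// ... normal processing ...
 *		return 0;
 *	}
 */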
/**
 * Return the status value of a task.
 *
 * \param t The task to get the status value from.
 *
 * \return Zero if the task does not exist, one if it is running, negative
 * error code if it has terminated.
 */
int task_status(const struct task *t)
{
	if (!t)
		return 0;
	if (t->status == TS_DEAD) /* pretend dead tasks don't exist */
		return 0;
	if (t->status == TS_RUNNING)
		return 1;
	return t->status;
}
/**
 * Set the notification value of all tasks of a scheduler instance.
 *
 * \param s The scheduler instance whose tasks should be notified.
 * \param err A positive error code.
 *
 * This simply iterates over all existing tasks of \a s and sets each
 * task's notification value to \p -err.
 */
void task_notify_all(struct sched *s, int err)
{
	struct task *t;

	list_for_each_entry(t, &s->task_list, node)
		task_notify(t, err);
}
/**
 * Set the I/O timeout to the minimal possible value.
 *
 * \param s Pointer to the scheduler struct.
 *
 * This causes the next poll() call to return immediately.
 */
void sched_min_delay(struct sched *s)
{
	s->timeout = 0;
}
/**
 * Impose an upper bound for the I/O timeout.
 *
 * \param to Maximal allowed timeout.
 * \param s Pointer to the scheduler struct.
 *
 * If the current I/O timeout is already smaller than \a to, this function
 * does nothing. Otherwise the timeout is set to the given value.
 *
 * \sa \ref sched_request_timeout_ms().
 */
void sched_request_timeout(struct timeval *to, struct sched *s)
{
	long unsigned ms = tv2ms(to);

	if (s->timeout > ms)
		s->timeout = ms;
}
/**
 * Bound the I/O timeout to at most the given amount of milliseconds.
 *
 * \param ms The maximal allowed timeout in milliseconds.
 * \param s Pointer to the scheduler struct.
 *
 * Like \ref sched_request_timeout() this imposes an upper bound on the I/O
 * timeout.
 */
void sched_request_timeout_ms(long unsigned ms, struct sched *s)
{
	struct timeval tv;

	ms2tv(ms, &tv);
	sched_request_timeout(&tv, s);
}
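
/*
 * Usage sketch: a task which needs to run at least every 100ms bounds the
 * timeout from its pre_monitor hook. The hook name is hypothetical.
 *
 *	static void heartbeat_pre_monitor(struct sched *s, void *context)
 *	{
 *		sched_request_timeout_ms(100, s);
 *	}
 */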
/**
 * Bound the I/O timeout by an absolute time in the future.
 *
 * \param barrier Defines the upper bound for the timeout.
 * \param s Pointer to the scheduler struct.
 *
 * \return If the barrier is in the past, this function does nothing and
 * returns zero. Otherwise it returns one.
 *
 * \sa \ref sched_request_barrier_or_min_delay().
 */
int sched_request_barrier(struct timeval *barrier, struct sched *s)
{
	struct timeval diff;

	if (tv_diff(now, barrier, &diff) > 0)
		return 0;
	sched_request_timeout(&diff, s);
	return 1;
}
/**
 * Bound the I/O timeout or request a minimal delay.
 *
 * \param barrier Absolute time as in \ref sched_request_barrier().
 * \param s Pointer to the scheduler struct.
 *
 * \return If the barrier is in the past, this function requests a minimal
 * timeout and returns zero. Otherwise it returns one.
 *
 * \sa \ref sched_min_delay(), \ref sched_request_barrier().
 */
int sched_request_barrier_or_min_delay(struct timeval *barrier, struct sched *s)
{
	struct timeval diff;

	if (tv_diff(now, barrier, &diff) > 0) {
		sched_min_delay(s);
		return 0;
	}
	sched_request_timeout(&diff, s);
	return 1;
}
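
/*
 * Usage sketch: a periodic task stores the absolute time of its next
 * activation and uses it as a barrier. The context structure and its
 * next_tick member are hypothetical.
 *
 *	static void periodic_pre_monitor(struct sched *s, void *context)
 *	{
 *		struct periodic_data *pd = context;
 *
 *		sched_request_barrier_or_min_delay(&pd->next_tick, s);
 *	}
 */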
static void add_pollfd(int fd, struct sched *s, short events)
{
	assert(fd >= 0);
#ifdef SCHED_DEBUG
	{
		int flags = fcntl(fd, F_GETFL);
		if (!(flags & O_NONBLOCK)) {
			PARA_EMERG_LOG("fd %d is a blocking file descriptor\n", fd);
			exit(EXIT_FAILURE);
		}
	}
#endif
	if (s->pidx_array_len > fd) { /* is fd already registered? */
		if (s->pidx[fd] < s->pfd_array_len) { /* yes, it is */
			assert(s->pfd[s->pidx[fd]].fd == fd);
			s->pfd[s->pidx[fd]].events |= events;
			return;
		}
	} else { /* need to extend the index array */
		unsigned old_len = s->pidx_array_len;
		while (s->pidx_array_len <= fd)
			s->pidx_array_len = s->pidx_array_len * 2 + 1;
		PARA_INFO_LOG("pidx array len: %u\n", s->pidx_array_len);
		s->pidx = para_realloc(s->pidx,
			s->pidx_array_len * sizeof(unsigned));
		memset(s->pidx + old_len, 0xff,
			(s->pidx_array_len - old_len) * sizeof(unsigned));
	}
	/*
	 * The given fd is not part of the pfd array yet. Initialize pidx[fd]
	 * to point at the next unused slot of this array and initialize the
	 * slot.
	 */
	s->pidx[fd] = s->num_pfds;
	if (s->pfd_array_len <= s->num_pfds) {
		unsigned old_len = s->pfd_array_len;
		s->pfd_array_len = old_len * 2 + 1;
		PARA_INFO_LOG("pfd array len: %u\n", s->pfd_array_len);
		s->pfd = para_realloc(s->pfd,
			s->pfd_array_len * sizeof(struct pollfd));
		memset(s->pfd + old_len, 0,
			(s->pfd_array_len - old_len) * sizeof(struct pollfd));
	}
	s->pfd[s->num_pfds].fd = fd;
	s->pfd[s->num_pfds].events = events;
	s->pfd[s->num_pfds].revents = 0;
	s->num_pfds++;
}
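
/*
 * Illustration of the growth policy: both arrays grow geometrically via
 * len = len * 2 + 1. Starting from an empty scheduler (pidx_array_len == 0),
 * monitoring fd 5 grows the index array through 0 -> 1 -> 3 -> 7, so pidx
 * ends up with seven slots, of which slot 5 points into the pfd array.
 */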
/**
 * Instruct the scheduler to monitor an fd for readiness for reading.
 *
 * \param fd The file descriptor.
 * \param s The scheduler.
 *
 * \sa \ref sched_monitor_writefd().
 */
void sched_monitor_readfd(int fd, struct sched *s)
{
	add_pollfd(fd, s, POLLIN);
}
/**
 * Instruct the scheduler to monitor an fd for readiness for writing.
 *
 * \param fd The file descriptor.
 * \param s The scheduler.
 *
 * \sa \ref sched_monitor_readfd().
 */
void sched_monitor_writefd(int fd, struct sched *s)
{
	add_pollfd(fd, s, POLLOUT);
}
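
/*
 * Usage sketch: a filter-like task monitors its input fd for reading and its
 * output fd for writing. The context structure is hypothetical. Note that
 * fds must be registered anew in each iteration of the scheduler's main
 * loop, since schedule() resets the pollfd array before calling the
 * pre_monitor hooks.
 *
 *	static void filter_pre_monitor(struct sched *s, void *context)
 *	{
 *		struct filter_data *fl = context;
 *
 *		sched_monitor_readfd(fl->in_fd, s);
 *		sched_monitor_writefd(fl->out_fd, s);
 *	}
 */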
static int get_revents(int fd, const struct sched *s)
{
	if (fd < 0)
		return 0;
	if (fd >= s->pidx_array_len)
		return 0;
	if (s->pidx[fd] >= s->num_pfds)
		return 0;
	if (s->pfd[s->pidx[fd]].fd != fd)
		return 0;
	assert((s->pfd[s->pidx[fd]].revents & POLLNVAL) == 0);
	return s->pfd[s->pidx[fd]].revents;
}
/**
 * Check whether there is data to read on the given fd.
 *
 * To be called from the ->post_monitor() method of a task.
 *
 * \param fd Should have been monitored with \ref sched_monitor_readfd().
 * \param s The scheduler instance.
 *
 * \return True if the file descriptor is ready for reading, false otherwise.
 * If fd is negative, or has not been monitored in the current iteration of
 * the scheduler's main loop, the function also returns false.
 *
 * \sa \ref sched_write_ok().
 */
bool sched_read_ok(int fd, const struct sched *s)
{
	return get_revents(fd, s) & (POLLIN | POLLERR | POLLHUP);
}
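
/*
 * Usage sketch: a nonblocking read in a post_monitor hook, guarded by
 * sched_read_ok(). The context members and the error constants are
 * hypothetical.
 *
 *	static int reader_post_monitor(struct sched *s, void *context)
 *	{
 *		struct reader_data *rd = context;
 *		ssize_t nbytes;
 *
 *		if (!sched_read_ok(rd->fd, s))
 *			return 0; // nothing to do, stay running
 *		nbytes = read(rd->fd, rd->buf, sizeof(rd->buf));
 *		if (nbytes < 0)
 *			return -E_READ_FAILED;
 *		if (nbytes == 0)
 *			return -E_EOF; // terminate on end of file
 *		rd->loaded = nbytes;
 *		return 0;
 *	}
 */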
/**
 * Check whether writing is possible (i.e., does not block).
 *
 * \param fd Should have been monitored with \ref sched_monitor_writefd().
 * \param s The scheduler instance.
 *
 * \return True if the file descriptor is ready for writing, false otherwise.
 * The comment in \ref sched_read_ok() about invalid file descriptors applies
 * to this function as well.
 */
bool sched_write_ok(int fd, const struct sched *s)
{
	return get_revents(fd, s) & (POLLOUT | POLLERR | POLLHUP);
}