task_register() conversion: receivers
[paraslash.git] / sched.c
1 /*
2 * Copyright (C) 2006-2014 Andre Noll <maan@systemlinux.org>
3 *
4 * Licensed under the GPL v2. For licencing details see COPYING.
5 */
6
7 /** \file sched.c Paraslash's scheduling functions. */
8
9 #include <regex.h>
10 #include <assert.h>
11
12 #include "para.h"
13 #include "ipc.h"
14 #include "fd.h"
15 #include "list.h"
16 #include "sched.h"
17 #include "string.h"
18 #include "time.h"
19 #include "error.h"
20
21 static struct timeval now_struct;
22 struct timeval *now = &now_struct;
23
24 static inline bool timeout_is_zero(struct sched *s)
25 {
26 struct timeval *tv = &s->select_timeout;
27 return tv->tv_sec == 0 && tv->tv_usec == 0;
28 }
29
/*
 * Call the pre_select method of each task which is not in error state.
 *
 * If at least one task has a pending notification, the select timeout is
 * set to zero via sched_min_delay() so that the subsequent select() call
 * returns immediately and the notification is handled without delay.
 */
static void sched_preselect(struct sched *s)
{
	struct task *t, *tmp;

	list_for_each_entry_safe(t, tmp, &s->task_list, node) {
		if (t->error < 0) /* failed tasks get no pre_select */
			continue;
		if (t->notification != 0) /* force a zero select timeout */
			sched_min_delay(s);
		if (t->pre_select) /* the pre_select method is optional */
			t->pre_select(s, t);
	}
}
43
44 static void unlink_and_free_task(struct task *t)
45 {
46 PARA_INFO_LOG("freeing task %s\n", t->status);
47 list_del(&t->node);
48 if (t->owned_by_sched)
49 free(t);
50 }
51
52 //#define SCHED_DEBUG 1
/*
 * Run the post_select method of the given task and store the result in
 * t->error.
 *
 * If SCHED_DEBUG is defined (see the commented-out define above), the
 * wall-clock time spent in the post_select call is measured, and a
 * warning is logged whenever a single call takes longer than 50ms.
 */
static inline void call_post_select(struct sched *s, struct task *t)
{
#ifndef SCHED_DEBUG
	t->error = t->post_select(s, t);
#else
	struct timeval t1, t2, diff;
	unsigned long pst;

	clock_get_realtime(&t1);
	t->error = t->post_select(s, t);
	clock_get_realtime(&t2);
	tv_diff(&t1, &t2, &diff);
	pst = tv2ms(&diff); /* post_select time in milliseconds */
	if (pst > 50)
		PARA_WARNING_LOG("%s: post_select time: %lums\n",
			t->status, pst);
#endif
}
71
/*
 * Call the post_select method of each task and deal with failed tasks.
 *
 * Tasks which are already in error state are skipped; if such a task has
 * additionally been reaped via task_reap() (t->dead), it is unlinked and
 * freed here, i.e. one scheduler iteration after the reap (see the
 * comment in task_reap() for why the free must be deferred).
 *
 * When a post_select call fails, scheduler-owned tasks remain in the
 * list as zombies until they are reaped, while caller-owned tasks
 * (registered with the deprecated register_task()) are unlinked at once.
 *
 * Returns the number of tasks which are still running.
 */
static unsigned sched_post_select(struct sched *s)
{
	struct task *t, *tmp;
	unsigned num_running_tasks = 0;

	list_for_each_entry_safe(t, tmp, &s->task_list, node) {
		if (t->error < 0) {
			if (t->dead) /* task has been reaped */
				unlink_and_free_task(t);
			continue;
		}
		call_post_select(s, t);
		t->notification = 0; /* reset after every post_select run */
		if (t->error < 0) {
			if (!t->owned_by_sched)
				list_del(&t->node);
		} else
			num_running_tasks++;
	}
	return num_running_tasks;
}
93
94 /**
95 * The core function of all paraslash programs.
96 *
97 * \param s Pointer to the scheduler struct.
98 *
99 * This function updates the global \a now pointer, calls all registered
100 * pre_select hooks which may set the timeout and add any file descriptors to
101 * the fd sets of \a s. Next, it calls para_select() and makes the result available
102 * to the registered tasks by calling their post_select hook.
103 *
104 * \return Zero if no more tasks are left in the task list, negative if the
105 * select function returned an error.
106 *
107 * \sa \ref task, \ref now.
108 */
109 int schedule(struct sched *s)
110 {
111 int ret;
112 unsigned num_running_tasks;
113
114 if (!s->select_function)
115 s->select_function = para_select;
116 again:
117 FD_ZERO(&s->rfds);
118 FD_ZERO(&s->wfds);
119 s->select_timeout = s->default_timeout;
120 s->max_fileno = -1;
121 clock_get_realtime(now);
122 sched_preselect(s);
123 ret = s->select_function(s->max_fileno + 1, &s->rfds, &s->wfds,
124 &s->select_timeout);
125 if (ret < 0)
126 return ret;
127 if (ret == 0) {
128 /*
129 * APUE: Be careful not to check the descriptor sets on return
130 * unless the return value is greater than zero. The return
131 * state of the descriptor sets is implementation dependent if
132 * either a signal is caught or the timer expires.
133 */
134 FD_ZERO(&s->rfds);
135 FD_ZERO(&s->wfds);
136 }
137 clock_get_realtime(now);
138 num_running_tasks = sched_post_select(s);
139 if (num_running_tasks == 0)
140 return 0;
141 goto again;
142 }
143
144 /**
145 * Obtain the error status of a task and deallocate its resources.
146 *
147 * \param tptr Identifies the task to reap.
148 *
149 * This function is similar to wait(2) in that it returns information about a
150 * terminated task and allows to release the resources associated with the
151 * task. Until this function is called, the terminated task remains in a zombie
152 * state.
153 *
154 * \return If \a tptr is \p NULL, or \a *tptr is \p NULL, the function does
155 * nothing and returns zero. Otherwise, it is checked whether the task
156 * identified by \a tptr is still running. If it is, the function returns zero
157 * and again, no action is taken. Otherwise the (negative) error code of the
158 * terminated task is returned and \a *tptr is set to \p NULL. The task will
159 * then be removed removed from the scheduler task list.
160 *
161 * \sa \ref sched_shutdown(), wait(2).
162 */
163 int task_reap(struct task **tptr)
164 {
165 struct task *t;
166
167 if (!tptr)
168 return 0;
169 t = *tptr;
170 if (!t)
171 return 0;
172 if (!t->owned_by_sched)
173 return 0;
174 if (t->error >= 0)
175 return 0;
176 if (t->dead) /* will be freed in sched_post_select() */
177 return 0;
178 /*
179 * With list_for_each_entry_safe() it is only safe to remove the
180 * _current_ list item. Since we are being called from the loop in
181 * schedule() via some task's ->post_select() function, freeing the
182 * given task here would result in use-after-free bugs in schedule().
183 * So we only set t->dead which tells schedule() to free the task in
184 * the next iteration of its loop.
185 */
186 t->dead = true;
187 *tptr = NULL;
188 return t->error;
189 }
190
191 /**
192 * Deallocate all resources of all tasks of a scheduler instance.
193 *
194 * \param s The scheduler instance.
195 *
196 * This should only be called after \ref schedule() has returned.
197 */
void sched_shutdown(struct sched *s)
{
	struct task *t, *tmp;

	list_for_each_entry_safe(t, tmp, &s->task_list, node) {
		if (t->error >= 0)
			/* The task list should contain only terminated tasks. */
			PARA_WARNING_LOG("shutting down running task %s\n",
				t->status);
		/* Frees only scheduler-owned tasks, see unlink_and_free_task(). */
		unlink_and_free_task(t);
	}
}
210
211 /**
212 * Add a task to the scheduler. Deprecated.
213 *
214 * \param t The task to add.
215 * \param s The scheduler instance to add the task to.
216 *
217 * \sa task::pre_select, task::post_select
218 */
219 void register_task(struct sched *s, struct task *t)
220 {
221 PARA_INFO_LOG("registering %s (%p)\n", t->status, t);
222 assert(t->post_select);
223 t->notification = 0;
224 t->owned_by_sched = false;
225 if (!s->task_list.next)
226 INIT_LIST_HEAD(&s->task_list);
227 list_add_tail(&t->node, &s->task_list);
228 }
229
230 /**
231 * Add a task to the scheduler task list.
232 *
233 * \param info Task information supplied by the caller.
234 * \param s The scheduler instance.
235 *
236 * \return A pointer to a newly allocated task structure. It will be
237 * freed by sched_shutdown().
238 */
239 struct task *task_register(struct task_info *info, struct sched *s)
240 {
241 struct task *t = para_malloc(sizeof(*t));
242
243 assert(info->post_select);
244
245 if (!s->task_list.next)
246 INIT_LIST_HEAD(&s->task_list);
247
248 snprintf(t->status, sizeof(t->status) - 1, "%s", info->name);
249 t->status[sizeof(t->status) - 1] = '\0';
250 t->notification = 0;
251 t->error = 0;
252 t->dead = false;
253 t->pre_select = info->pre_select;
254 t->post_select = info->post_select;
255 t->context = info->context;
256 t->owned_by_sched = true;
257 list_add_tail(&t->node, &s->task_list);
258 return t;
259 }
260
261 /**
262 * Obtain the context pointer of a task.
263 *
264 * \param t Return this task's context pointer.
265 *
266 * \return A pointer to the memory location specified previously as \a
267 * task_info->context when the task was registered with \ref task_register().
268 */
void *task_context(struct task *t)
{
	/* Only tasks created via task_register() carry a context pointer. */
	assert(t->owned_by_sched);
	return t->context;
}
274
275 /**
276 * Get the list of all registered tasks.
277 *
278 * \param s The scheduler instance to get the task list from.
279 *
280 * \return The task list.
281 *
282 * Each entry of the list contains an identifier which is simply a hex number.
283 * The result is dynamically allocated and must be freed by the caller.
284 */
285 char *get_task_list(struct sched *s)
286 {
287 struct task *t, *tmp;
288 char *msg = NULL;
289
290 list_for_each_entry_safe(t, tmp, &s->task_list, node) {
291 char *tmp_msg;
292 tmp_msg = make_message("%s%p\t%s\t%s\n", msg? msg : "", t,
293 t->error < 0? (t->dead? "dead" : "zombie") : "running",
294 t->status);
295 free(msg);
296 msg = tmp_msg;
297 }
298 return msg;
299 }
300
301 /**
302 * Set the notification value of a task.
303 *
304 * \param t The task to notify.
305 * \param err A positive error code.
306 *
307 * Tasks which honor notifications are supposed to call \ref
308 * task_get_notification() in their post_select function and act on the
309 * returned notification value.
310 *
311 * If the scheduler detects during its pre_select loop that at least one task
312 * has been notified, the loop terminates, and the post_select methods of all
313 * taks are immediately called again.
314 *
315 * The notification for a task is reset after the call to its post_select
316 * method.
317 *
318 * \sa \ref task_get_notification().
319 */
320 void task_notify(struct task *t, int err)
321 {
322 assert(err > 0);
323 if (t->notification == -err) /* ignore subsequent notifications */
324 return;
325 PARA_INFO_LOG("notifying task %s: %s\n", t->status, para_strerror(err));
326 t->notification = -err;
327 }
328
329 /**
330 * Return the notification value of a task.
331 *
332 * \param t The task to get the notification value from.
333 *
334 * \return The notification value. If this is negative, the task has been
335 * notified by another task. Tasks are supposed to check for notifications by
336 * calling this function from their post_select method.
337 *
338 * \sa \ref task_notify().
339 */
int task_get_notification(const struct task *t)
{
	/* Negative iff the task has been notified, see task_notify(). */
	return t->notification;
}
344
345 /**
346 * Set the notification value of all tasks of a scheduler instance.
347 *
348 * \param s The scheduler instance whose tasks should be notified.
349 * \param err A positive error code.
350 *
351 * This simply iterates over all existing tasks of \a s and sets each
352 * task's notification value to \p -err.
353 */
void task_notify_all(struct sched *s, int err)
{
	struct task *t;

	/* task_notify() itself ignores duplicate notifications per task. */
	list_for_each_entry(t, &s->task_list, node)
		task_notify(t, err);
}
361
362 /**
363 * Set the select timeout to the minimal possible value.
364 *
365 * \param s Pointer to the scheduler struct.
366 *
367 * This causes the next select() call to return immediately.
368 */
369 void sched_min_delay(struct sched *s)
370 {
371 s->select_timeout.tv_sec = s->select_timeout.tv_usec = 0;
372 }
373
374 /**
375 * Impose an upper bound for the timeout of the next select() call.
376 *
377 * \param to Maximal allowed timeout.
378 * \param s Pointer to the scheduler struct.
379 *
380 * If the current scheduler timeout is already smaller than \a to, this
381 * function does nothing. Otherwise the timeout for the next select() call is
382 * set to the given value.
383 *
384 * \sa sched_request_timeout_ms().
385 */
386 void sched_request_timeout(struct timeval *to, struct sched *s)
387 {
388 if (tv_diff(&s->select_timeout, to, NULL) > 0)
389 s->select_timeout = *to;
390 }
391
392 /**
393 * Force the next select() call to return before the given amount of milliseconds.
394 *
395 * \param ms The maximal allowed timeout in milliseconds.
396 * \param s Pointer to the scheduler struct.
397 *
398 * Like sched_request_timeout() this imposes an upper bound on the timeout
399 * value for the next select() call.
400 */
void sched_request_timeout_ms(long unsigned ms, struct sched *s)
{
	struct timeval timeout;

	/* Convert to a timeval and delegate to sched_request_timeout(). */
	ms2tv(ms, &timeout);
	sched_request_timeout(&timeout, s);
}
407
408 /**
409 * Force the next select() call to return before the given future time.
410 *
411 * \param barrier Absolute time before select() should return.
412 * \param s Pointer to the scheduler struct.
413 *
414 * \return If \a barrier is in the past, this function does nothing and returns
415 * zero. Otherwise it returns one.
416 *
417 * \sa sched_request_barrier_or_min_delay().
418 */
419 int sched_request_barrier(struct timeval *barrier, struct sched *s)
420 {
421 struct timeval diff;
422
423 if (tv_diff(now, barrier, &diff) > 0)
424 return 0;
425 sched_request_timeout(&diff, s);
426 return 1;
427 }
428
429 /**
430 * Force the next select() call to return before the given time.
431 *
432 * \param barrier Absolute time before select() should return.
433 * \param s Pointer to the scheduler struct.
434 *
435 * \return If \a barrier is in the past, this function requests a minimal
436 * timeout and returns zero. Otherwise it returns one.
437 *
438 * \sa sched_min_delay(), sched_request_barrier().
439 */
440 int sched_request_barrier_or_min_delay(struct timeval *barrier, struct sched *s)
441 {
442 struct timeval diff;
443
444 if (tv_diff(now, barrier, &diff) > 0) {
445 sched_min_delay(s);
446 return 0;
447 }
448 sched_request_timeout(&diff, s);
449 return 1;
450 }