sched: Introduce alternative task API.
/*
 * Copyright (C) 2006-2014 Andre Noll <maan@systemlinux.org>
 *
 * Licensed under the GPL v2. For licensing details see COPYING.
 */

/** \file sched.c Paraslash's scheduling functions. */

#include <regex.h>
#include <assert.h>

#include "para.h"
#include "ipc.h"
#include "fd.h"
#include "list.h"
#include "sched.h"
#include "string.h"
#include "time.h"
#include "error.h"

static struct timeval now_struct;
struct timeval *now = &now_struct;

static inline bool timeout_is_zero(struct sched *s)
{
	struct timeval *tv = &s->select_timeout;
	return tv->tv_sec == 0 && tv->tv_usec == 0;
}

static void sched_preselect(struct sched *s)
{
	struct task *t, *tmp;

	list_for_each_entry_safe(t, tmp, &s->task_list, node) {
		if (t->error < 0)
			continue;
		if (t->notification != 0)
			sched_min_delay(s);
		if (t->pre_select)
			t->pre_select(s, t);
	}
}

static void unlink_and_free_task(struct task *t)
{
	PARA_INFO_LOG("freeing task %s\n", t->status);
	list_del(&t->node);
	if (t->owned_by_sched)
		free(t);
}

//#define SCHED_DEBUG 1
static inline void call_post_select(struct sched *s, struct task *t)
{
#ifndef SCHED_DEBUG
	t->error = t->post_select(s, t);
#else
	struct timeval t1, t2, diff;
	unsigned long pst;

	clock_get_realtime(&t1);
	t->error = t->post_select(s, t);
	clock_get_realtime(&t2);
	tv_diff(&t1, &t2, &diff);
	pst = tv2ms(&diff);
	if (pst > 50)
		PARA_WARNING_LOG("%s: post_select time: %lums\n",
			t->status, pst);
#endif
}

static unsigned sched_post_select(struct sched *s)
{
	struct task *t, *tmp;
	unsigned num_running_tasks = 0;

	list_for_each_entry_safe(t, tmp, &s->task_list, node) {
		if (t->error < 0)
			continue;
		call_post_select(s, t);
		t->notification = 0;
		if (t->error < 0) {
			if (!t->owned_by_sched)
				list_del(&t->node);
		} else
			num_running_tasks++;
	}
	return num_running_tasks;
}

/**
 * The core function of all paraslash programs.
 *
 * \param s Pointer to the scheduler struct.
 *
 * This function updates the time referenced by the global \a now pointer and
 * calls the pre_select hook of each registered task, which may lower the
 * timeout and add file descriptors to the fd sets of \a s. Next it calls the
 * select function (para_select() by default) and makes the result available
 * to the registered tasks by calling their post_select hooks.
 *
 * \return Zero if no running task remains, negative if the select function
 * returned an error.
 *
 * \sa \ref task, \ref now.
 */
int schedule(struct sched *s)
{
	int ret;
	unsigned num_running_tasks;

	if (!s->select_function)
		s->select_function = para_select;
again:
	FD_ZERO(&s->rfds);
	FD_ZERO(&s->wfds);
	s->select_timeout = s->default_timeout;
	s->max_fileno = -1;
	clock_get_realtime(now);
	sched_preselect(s);
	ret = s->select_function(s->max_fileno + 1, &s->rfds, &s->wfds,
		&s->select_timeout);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		/*
		 * APUE: Be careful not to check the descriptor sets on return
		 * unless the return value is greater than zero. The return
		 * state of the descriptor sets is implementation dependent if
		 * either a signal is caught or the timer expires.
		 */
		FD_ZERO(&s->rfds);
		FD_ZERO(&s->wfds);
	}
	clock_get_realtime(now);
	num_running_tasks = sched_post_select(s);
	if (num_running_tasks == 0)
		return 0;
	goto again;
}
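
/*
 * Illustrative sketch, not part of the original file: a minimal driver built
 * on the API above. It registers one task and runs the scheduler until no
 * running task is left. The example_* names and the one second default
 * timeout are made up; only task_register(), schedule() and sched_shutdown()
 * are taken from this file. Guarded by SCHED_EXAMPLES so it is normally
 * compiled out.
 */
#ifdef SCHED_EXAMPLES
static int example_post_select(struct sched *s, struct task *t)
{
	/* Returning a negative paraslash error code terminates the task. */
	return 0;
}

static int example_main_loop(void)
{
	struct sched s = {.default_timeout = {.tv_sec = 1}};
	struct task_info ti = {
		.name = "example",
		.pre_select = NULL, /* optional, may be omitted */
		.post_select = example_post_select,
		.context = NULL,
	};
	int ret;

	task_register(&ti, &s);
	ret = schedule(&s); /* returns zero when no running task remains */
	sched_shutdown(&s); /* deallocate all (terminated) tasks */
	return ret;
}
#endif /* SCHED_EXAMPLES */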

/**
 * Deallocate all resources of all tasks of a scheduler instance.
 *
 * \param s The scheduler instance.
 *
 * This should only be called after \ref schedule() has returned.
 */
void sched_shutdown(struct sched *s)
{
	struct task *t, *tmp;

	list_for_each_entry_safe(t, tmp, &s->task_list, node) {
		if (t->error >= 0)
			/* The task list should contain only terminated tasks. */
			PARA_WARNING_LOG("shutting down running task %s\n",
				t->status);
		unlink_and_free_task(t);
	}
}

/**
 * Add a task to the scheduler. Deprecated.
 *
 * \param t The task to add.
 * \param s The scheduler instance to add the task to.
 *
 * \sa task::pre_select, task::post_select
 */
void register_task(struct sched *s, struct task *t)
{
	PARA_INFO_LOG("registering %s (%p)\n", t->status, t);
	assert(t->post_select);
	t->notification = 0;
	t->owned_by_sched = false;
	if (!s->task_list.next)
		INIT_LIST_HEAD(&s->task_list);
	list_add_tail(&t->node, &s->task_list);
}

/**
 * Add a task to the scheduler task list.
 *
 * \param info Task information supplied by the caller.
 * \param s The scheduler instance.
 *
 * \return A pointer to a newly allocated task structure. It will be
 * freed by sched_shutdown().
 */
struct task *task_register(struct task_info *info, struct sched *s)
{
	struct task *t = para_malloc(sizeof(*t));

	assert(info->post_select);

	if (!s->task_list.next)
		INIT_LIST_HEAD(&s->task_list);

	snprintf(t->status, sizeof(t->status) - 1, "%s", info->name);
	t->status[sizeof(t->status) - 1] = '\0';
	t->notification = 0;
	t->error = 0;
	t->pre_select = info->pre_select;
	t->post_select = info->post_select;
	t->context = info->context;
	t->owned_by_sched = true;
	list_add_tail(&t->node, &s->task_list);
	return t;
}

/**
 * Obtain the context pointer of a task.
 *
 * \param t Return this task's context pointer.
 *
 * \return A pointer to the memory location specified previously as \a
 * task_info->context when the task was registered with \ref task_register().
 */
void *task_context(struct task *t)
{
	assert(t->owned_by_sched);
	return t->context;
}
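
/*
 * Illustrative sketch, not part of the original file: passing per-task state
 * through the context pointer. The example_counter_* names are hypothetical;
 * the pattern of storing the pointer in struct task_info and reading it back
 * with task_context() follows the API above. Normally compiled out.
 */
#ifdef SCHED_EXAMPLES
struct example_counter {
	unsigned num_iterations;
};

static int example_counter_post_select(struct sched *s, struct task *t)
{
	struct example_counter *ctx = task_context(t);

	ctx->num_iterations++;
	return 0;
}

static struct task *example_register_counter(struct sched *s,
		struct example_counter *ctx)
{
	struct task_info ti = {
		.name = "counter",
		.post_select = example_counter_post_select,
		.context = ctx,
	};
	return task_register(&ti, s);
}
#endif /* SCHED_EXAMPLES */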

/**
 * Get the list of all registered tasks.
 *
 * \param s The scheduler instance to get the task list from.
 *
 * \return The task list.
 *
 * Each line of the returned string contains the task identifier (a hex
 * number), the task state (running or zombie), and the status string. The
 * result is dynamically allocated and must be freed by the caller.
 */
char *get_task_list(struct sched *s)
{
	struct task *t, *tmp;
	char *msg = NULL;

	list_for_each_entry_safe(t, tmp, &s->task_list, node) {
		char *tmp_msg;
		tmp_msg = make_message("%s%p\t%s\t%s\n", msg? msg : "", t,
			t->error < 0? "zombie" : "running",
			t->status);
		free(msg);
		msg = tmp_msg;
	}
	return msg;
}

/**
 * Set the notification value of a task.
 *
 * \param t The task to notify.
 * \param err A positive error code.
 *
 * Tasks which honor notifications are supposed to call \ref
 * task_get_notification() in their post_select function and act on the
 * returned notification value.
 *
 * If the scheduler detects during its pre_select loop that at least one task
 * has been notified, it sets the select timeout to zero so that the
 * post_select methods of all tasks are called with minimal delay.
 *
 * The notification for a task is reset after the call to its post_select
 * method.
 *
 * \sa \ref task_get_notification().
 */
void task_notify(struct task *t, int err)
{
	assert(err > 0);
	if (t->notification == -err) /* ignore subsequent notifications */
		return;
	PARA_INFO_LOG("notifying task %s: %s\n", t->status, para_strerror(err));
	t->notification = -err;
}

/**
 * Return the notification value of a task.
 *
 * \param t The task to get the notification value from.
 *
 * \return The notification value. If this is negative, the task has been
 * notified by another task. Tasks are supposed to check for notifications by
 * calling this function from their post_select method.
 *
 * \sa \ref task_notify().
 */
int task_get_notification(const struct task *t)
{
	return t->notification;
}
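
/*
 * Illustrative sketch, not part of the original file: a post_select method
 * which honors notifications. A negative notification value is treated as a
 * request to terminate, so the task returns it and becomes a zombie. The
 * callback name is hypothetical. Normally compiled out.
 */
#ifdef SCHED_EXAMPLES
static int example_notify_aware_post_select(struct sched *s, struct task *t)
{
	int ret = task_get_notification(t);

	if (ret < 0) /* some other task called task_notify() on us */
		return ret;
	/* ... normal processing ... */
	return 0;
}
#endif /* SCHED_EXAMPLES */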

/**
 * Set the notification value of all tasks of a scheduler instance.
 *
 * \param s The scheduler instance whose tasks should be notified.
 * \param err A positive error code.
 *
 * This simply iterates over all existing tasks of \a s and sets each
 * task's notification value to \p -err.
 */
void task_notify_all(struct sched *s, int err)
{
	struct task *t;

	list_for_each_entry(t, &s->task_list, node)
		task_notify(t, err);
}

/**
 * Set the select timeout to the minimal possible value.
 *
 * \param s Pointer to the scheduler struct.
 *
 * This causes the next select() call to return immediately.
 */
void sched_min_delay(struct sched *s)
{
	s->select_timeout.tv_sec = s->select_timeout.tv_usec = 0;
}

/**
 * Impose an upper bound for the timeout of the next select() call.
 *
 * \param to Maximal allowed timeout.
 * \param s Pointer to the scheduler struct.
 *
 * If the current scheduler timeout is already smaller than \a to, this
 * function does nothing. Otherwise the timeout for the next select() call is
 * set to the given value.
 *
 * \sa sched_request_timeout_ms().
 */
void sched_request_timeout(struct timeval *to, struct sched *s)
{
	if (tv_diff(&s->select_timeout, to, NULL) > 0)
		s->select_timeout = *to;
}

/**
 * Force the next select() call to return within the given number of milliseconds.
 *
 * \param ms The maximal allowed timeout in milliseconds.
 * \param s Pointer to the scheduler struct.
 *
 * Like sched_request_timeout(), this imposes an upper bound on the timeout
 * value for the next select() call.
 */
void sched_request_timeout_ms(long unsigned ms, struct sched *s)
{
	struct timeval tv;
	ms2tv(ms, &tv);
	sched_request_timeout(&tv, s);
}
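
/*
 * Illustrative sketch, not part of the original file: a pre_select method
 * which monitors a file descriptor and bounds the select timeout. The
 * example_fd_* names are made up; para_fd_set() is assumed to be the helper
 * from fd.h which adds an fd to a set and updates max_fileno. Normally
 * compiled out.
 */
#ifdef SCHED_EXAMPLES
struct example_fd_watcher {
	int fd;
};

static void example_fd_pre_select(struct sched *s, struct task *t)
{
	struct example_fd_watcher *ctx = task_context(t);

	/* Watch the descriptor for readability. */
	para_fd_set(ctx->fd, &s->rfds, &s->max_fileno);
	/* Wake up after at most 500ms even if the fd stays silent. */
	sched_request_timeout_ms(500, s);
}
#endif /* SCHED_EXAMPLES */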

/**
 * Force the next select() call to return before the given future time.
 *
 * \param barrier Absolute time before select() should return.
 * \param s Pointer to the scheduler struct.
 *
 * \return If \a barrier is in the past, this function does nothing and returns
 * zero. Otherwise it returns one.
 *
 * \sa sched_request_barrier_or_min_delay().
 */
int sched_request_barrier(struct timeval *barrier, struct sched *s)
{
	struct timeval diff;

	if (tv_diff(now, barrier, &diff) > 0)
		return 0;
	sched_request_timeout(&diff, s);
	return 1;
}

/**
 * Force the next select() call to return before the given time.
 *
 * \param barrier Absolute time before select() should return.
 * \param s Pointer to the scheduler struct.
 *
 * \return If \a barrier is in the past, this function requests a minimal
 * timeout and returns zero. Otherwise it returns one.
 *
 * \sa sched_min_delay(), sched_request_barrier().
 */
int sched_request_barrier_or_min_delay(struct timeval *barrier, struct sched *s)
{
	struct timeval diff;

	if (tv_diff(now, barrier, &diff) > 0) {
		sched_min_delay(s);
		return 0;
	}
	sched_request_timeout(&diff, s);
	return 1;
}
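
/*
 * Illustrative sketch, not part of the original file: rate-limiting a task
 * with a barrier. next_time is a hypothetical per-task timestamp; tv_add()
 * is assumed to be the timeval addition helper from time.c. If the barrier
 * lies in the past, post_select runs with minimal delay, otherwise select()
 * returns no later than the barrier. Normally compiled out.
 */
#ifdef SCHED_EXAMPLES
struct example_timed {
	struct timeval next_time;
};

static void example_timed_pre_select(struct sched *s, struct task *t)
{
	struct example_timed *ctx = task_context(t);

	sched_request_barrier_or_min_delay(&ctx->next_time, s);
}

static int example_timed_post_select(struct sched *s, struct task *t)
{
	struct example_timed *ctx = task_context(t);
	struct timeval delay = {.tv_sec = 0, .tv_usec = 200 * 1000};

	if (tv_diff(now, &ctx->next_time, NULL) < 0)
		return 0; /* barrier not reached yet, nothing to do */
	/* ... do the periodic work here ... */
	tv_add(now, &delay, &ctx->next_time); /* schedule the next run */
	return 0;
}
#endif /* SCHED_EXAMPLES */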