/*
 * Copyright (C) 2006-2010 Andre Noll <maan@systemlinux.org>
 *
 * Licensed under the GPL v2. For licencing details see COPYING.
 */

/** \file sched.c Paraslash's scheduling functions. */

#include <regex.h>
#include <dirent.h> /* readdir() */
#include <assert.h>
#include <sys/time.h>

#include "para.h"
#include "ipc.h"
#include "fd.h"
#include "list.h"
#include "sched.h"
#include "string.h"
#include "error.h"

static struct list_head pre_select_list, post_select_list;
static int initialized;

static struct timeval now_struct;

/** Updated by schedule() before the pre_select and post_select hooks run. */
struct timeval *now = &now_struct;

/*
 * Remove a task from the scheduler.
 *
 * \param t The task to remove.
 *
 * If the pre_select pointer of \a t is not \p NULL, it is removed from
 * the pre_select list of the scheduler. Same goes for \a post_select.
 */
static void unregister_task(struct task *t)
{
        if (!initialized)
                return;
        PARA_INFO_LOG("unregistering %s (%s)\n", t->status,
                t->error < 0? para_strerror(-t->error) : "shutdown");
        if (t->pre_select)
                list_del(&t->pre_select_node);
        if (t->post_select)
                list_del(&t->post_select_node);
        t->error = -E_TASK_UNREGISTERED;
}

/*
 * Call the pre_select hook of each task on the pre_select list.
 *
 * Tasks whose error value became negative are unregistered, unless the
 * hook already shut the scheduler down.
 */
static void sched_preselect(struct sched *s)
{
        struct task *t, *tmp;

        list_for_each_entry_safe(t, tmp, &pre_select_list, pre_select_node) {
                if (t->error >= 0 && t->pre_select)
                        t->pre_select(s, t);
//              PARA_INFO_LOG("%s \n", t->status);
                if (t->error >= 0)
                        continue;
                /*
                 * We have to check whether the list is empty because the call
                 * to ->pre_select() might have called sched_shutdown(). In
                 * this case t has been unregistered already, so we must not
                 * unregister it again.
                 */
                if (list_empty(&pre_select_list))
                        return;
                unregister_task(t);
        }
}

/*
 * Call the post_select hook of each task on the post_select list.
 *
 * Tasks whose error value became negative are unregistered, unless the
 * hook already shut the scheduler down.
 */
static void sched_post_select(struct sched *s)
{
        struct task *t, *tmp;

        list_for_each_entry_safe(t, tmp, &post_select_list, post_select_node) {
                if (t->error >= 0)
                        t->post_select(s, t);
//              PARA_INFO_LOG("%s: %d\n", t->status, t->ret);
                if (t->error >= 0)
                        continue;
                /* necessary, see sched_preselect() */
                if (list_empty(&post_select_list))
                        return;
                unregister_task(t);
        }
}

/**
 * The core function for all paraslash programs.
 *
 * \param s Pointer to the scheduler struct.
 *
 * This function updates the global \a now pointer, calls all registered
 * pre_select hooks which may set the timeout and add any file descriptors to
 * the fd sets of \a s. Next, it calls para_select() and makes the result
 * available to the registered tasks by calling their post_select hook.
 *
 * \return Zero if no more tasks are left in either of the two lists,
 * negative if para_select returned an error.
 *
 * \sa task, now.
 */
int schedule(struct sched *s)
{
        int ret;

        if (!initialized)
                return -E_NOT_INITIALIZED;
        if (!s->select_function)
                s->select_function = para_select;
again:
        FD_ZERO(&s->rfds);
        FD_ZERO(&s->wfds);
        s->timeout = s->default_timeout;
        s->max_fileno = -1;
        gettimeofday(now, NULL);
        sched_preselect(s);
        if (list_empty(&pre_select_list) && list_empty(&post_select_list))
                return 0;
        ret = s->select_function(s->max_fileno + 1, &s->rfds, &s->wfds,
                &s->timeout);
        if (ret < 0)
                return ret;
        if (ret == 0) {
                /*
                 * APUE: Be careful not to check the descriptor sets on return
                 * unless the return value is greater than zero. The return
                 * state of the descriptor sets is implementation dependent if
                 * either a signal is caught or the timer expires.
                 */
                FD_ZERO(&s->rfds);
                FD_ZERO(&s->wfds);
        }
        gettimeofday(now, NULL);
        sched_post_select(s);
        if (list_empty(&pre_select_list) && list_empty(&post_select_list))
                return 0;
        goto again;
}

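/*
 * Usage sketch (illustration only, not part of the paraslash API): the task,
 * the file descriptor example_fd and the helper example_handle_input() are
 * hypothetical, and the hook signatures are the ones implied by the calls in
 * sched_preselect() and sched_post_select(). The helper is assumed to return
 * a negative paraslash error code on failure, which makes the scheduler
 * unregister the task. A program registers its tasks and enters the loop:
 *
 *      static void example_pre_select(struct sched *s, struct task *t)
 *      {
 *              FD_SET(example_fd, &s->rfds);
 *              if (example_fd > s->max_fileno)
 *                      s->max_fileno = example_fd;
 *      }
 *
 *      static void example_post_select(struct sched *s, struct task *t)
 *      {
 *              if (FD_ISSET(example_fd, &s->rfds))
 *                      t->error = example_handle_input(example_fd);
 *      }
 *
 *      static struct task example_task = {
 *              .pre_select = example_pre_select,
 *              .post_select = example_post_select,
 *              .status = "example task",
 *      };
 *
 *      static int example_main_loop(void)
 *      {
 *              struct sched s = {.default_timeout = {.tv_sec = 1}};
 *
 *              register_task(&example_task);
 *              return schedule(&s);
 *      }
 */
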
/*
 * Initialize the paraslash scheduler.
 */
static void init_sched(void)
{
        PARA_INFO_LOG("initializing scheduler\n");
        INIT_LIST_HEAD(&pre_select_list);
        INIT_LIST_HEAD(&post_select_list);
        initialized = 1;
}

/**
 * Add a task to the scheduler.
 *
 * \param t The task to add.
 *
 * If the pre_select pointer of \a t is not \p NULL, it is added to
 * the pre_select list of the scheduler. Same goes for post_select.
 *
 * \sa task::pre_select, task::post_select
 */
void register_task(struct task *t)
{
        if (!initialized)
                init_sched();
        PARA_INFO_LOG("registering %s (%p)\n", t->status, t);
        if (t->pre_select) {
                PARA_DEBUG_LOG("pre_select: %p\n", &t->pre_select);
                list_add_tail(&t->pre_select_node, &pre_select_list);
        }
        if (t->post_select) {
                PARA_DEBUG_LOG("post_select: %p\n", &t->post_select);
                list_add_tail(&t->post_select_node, &post_select_list);
        }
}

/**
 * Unregister all tasks.
 *
 * This will cause \a schedule() to return immediately because both the
 * \a pre_select_list and the \a post_select_list are empty.
 */
void sched_shutdown(void)
{
        struct task *t, *tmp;

        if (!initialized)
                return;
        list_for_each_entry_safe(t, tmp, &pre_select_list, pre_select_node)
                unregister_task(t);
        list_for_each_entry_safe(t, tmp, &post_select_list, post_select_node)
                unregister_task(t);
        initialized = 0;
}

/**
 * Get the list of all registered tasks.
 *
 * \return The task list.
 *
 * Each entry of the list contains an identifier which is simply a hex number
 * that may be used in \a kill_task() to terminate the task.
 * The result is dynamically allocated and must be freed by the caller.
 */
char *get_task_list(void)
{
        struct task *t, *tmp;
        char *msg = NULL;

        if (!initialized)
                return NULL;
        list_for_each_entry_safe(t, tmp, &pre_select_list, pre_select_node) {
                char *tmp_msg;
                tmp_msg = make_message("%s%p\tpre\t%s\n", msg? msg : "", t, t->status);
                free(msg);
                msg = tmp_msg;
        }
        list_for_each_entry_safe(t, tmp, &post_select_list, post_select_node) {
                char *tmp_msg;
//              if (t->pre_select)
//                      continue;
                tmp_msg = make_message("%s%p\tpost\t%s\n", msg? msg : "", t, t->status);
                free(msg);
                msg = tmp_msg;
        }
        //PARA_DEBUG_LOG("task list:\n%s", msg);
        return msg;
}

/**
 * Simulate an error for the given task.
 *
 * \param id The task identifier.
 *
 * Find the task identified by \a id and set the task's error value to
 * \p -E_TASK_KILLED, so that it gets unregistered during the next iteration
 * of the scheduler's main loop.
 *
 * \return Positive on success, negative on errors (e.g. if \a id does not
 * correspond to a registered task).
 */
int kill_task(char *id)
{
        struct task *t, *tmp;
        char buf[20];

        if (!initialized)
                return -E_NOT_INITIALIZED;
        list_for_each_entry_safe(t, tmp, &pre_select_list, pre_select_node) {
                sprintf(buf, "%p", t);
                if (strcmp(id, buf))
                        continue;
                t->error = -E_TASK_KILLED;
                return 1;
        }
        list_for_each_entry_safe(t, tmp, &post_select_list, post_select_node) {
                sprintf(buf, "%p", t);
                if (strcmp(id, buf))
                        continue;
                t->error = -E_TASK_KILLED;
                return 1;
        }
        return -E_NO_SUCH_TASK;
}

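/*
 * Usage sketch (illustration only): each line returned by get_task_list() has
 * the form "<id>\tpre\t<status>" or "<id>\tpost\t<status>", where <id> is the
 * task pointer formatted with %p, i.e. exactly the string kill_task() expects.
 * A hypothetical command handler could combine the two functions like this:
 *
 *      char *tasks = get_task_list();
 *
 *      if (tasks) {
 *              PARA_INFO_LOG("registered tasks:\n%s", tasks);
 *              free(tasks);
 *      }
 *      ret = kill_task(id); // id: one of the hex identifiers listed above
 *      if (ret < 0)
 *              PARA_INFO_LOG("could not kill task: %s\n", para_strerror(-ret));
 */
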
/**
 * Set the select timeout to the minimal possible value.
 *
 * \param s Pointer to the scheduler struct.
 *
 * This causes the next select() call to return immediately.
 */
void sched_min_delay(struct sched *s)
{
        s->timeout.tv_sec = 0;
        s->timeout.tv_usec = 1;
}

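/*
 * Example (hypothetical task; output_queue_nonempty is a made-up condition):
 * a pre_select hook that already has work queued and does not want the
 * scheduler to sleep in select() can request the minimal delay, so that its
 * post_select hook runs again right away:
 *
 *      static void writer_pre_select(struct sched *s, struct task *t)
 *      {
 *              if (output_queue_nonempty)
 *                      sched_min_delay(s);
 *      }
 */
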
/**
 * Impose an upper bound for the timeout of the next select() call.
 *
 * \param timeout Maximal allowed timeout.
 * \param s Pointer to the scheduler struct.
 *
 * If the current scheduler timeout is already smaller than \a timeout, this
 * function does nothing. Otherwise the timeout for the next select() call is
 * set to the given value.
 *
 * \sa sched_request_timeout_ms().
 */
void sched_request_timeout(struct timeval *timeout, struct sched *s)
{
        if (tv_diff(&s->timeout, timeout, NULL) > 0)
                s->timeout = *timeout;
}

/**
 * Force the next select() call to return within the given number of milliseconds.
 *
 * \param ms The maximal allowed timeout in milliseconds.
 * \param s Pointer to the scheduler struct.
 *
 * Like sched_request_timeout(), this imposes an upper bound on the timeout
 * value for the next select() call.
 */
void sched_request_timeout_ms(long unsigned ms, struct sched *s)
{
        struct timeval tv;

        ms2tv(ms, &tv);
        sched_request_timeout(&tv, s);
}

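/*
 * Example (hypothetical task): a periodic task that must run at least every
 * 100 milliseconds, regardless of file descriptor activity, can bound the
 * select() timeout from its pre_select hook:
 *
 *      static void status_pre_select(struct sched *s, struct task *t)
 *      {
 *              sched_request_timeout_ms(100, s);
 *      }
 */
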
/**
 * Force the next select() call to return before the given future time.
 *
 * \param barrier Absolute time before which select() should return.
 * \param s Pointer to the scheduler struct.
 *
 * If \a barrier is in the past, this function does nothing.
 *
 * \sa sched_request_barrier_or_min_delay().
 */
void sched_request_barrier(struct timeval *barrier, struct sched *s)
{
        struct timeval diff;

        if (tv_diff(now, barrier, &diff) > 0)
                return;
        sched_request_timeout(&diff, s);
}

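/*
 * Example (hypothetical task): a task which must not act before a precomputed
 * point in time, stored in the timeval "start_barrier", can turn that
 * absolute time into a select() timeout from its pre_select hook:
 *
 *      static void timer_pre_select(struct sched *s, struct task *t)
 *      {
 *              sched_request_barrier(&start_barrier, s);
 *      }
 */
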
/**
 * Force the next select() call to return before the given time.
 *
 * \param barrier Absolute time before which select() should return.
 * \param s Pointer to the scheduler struct.
 *
 * If \a barrier is in the past, this function requests a minimal timeout.
 *
 * \sa sched_min_delay(), sched_request_barrier().
 */
void sched_request_barrier_or_min_delay(struct timeval *barrier, struct sched *s)
{
        struct timeval diff;

        if (tv_diff(now, barrier, &diff) > 0)
                return sched_min_delay(s);
        sched_request_timeout(&diff, s);