#include "net.h"
#include "server.h"
#include "list.h"
-#include "send.h"
#include "sched.h"
+#include "send.h"
#include "vss.h"
#include "config.h"
#include "close_on_fork.h"
kill(afs_pid, SIGHUP);
}
-static int signal_post_select(struct sched *s, __a_unused void *context)
+static int signal_post_monitor(struct sched *s, __a_unused void *context)
{
int ret, signum;
ret = task_get_notification(signal_task->task);
if (ret < 0)
return ret;
- signum = para_next_signal(&s->rfds);
+ signum = para_next_signal();
switch (signum) {
case 0:
return 0;
add_close_on_fork_list(signal_task->fd);
signal_task->task = task_register(&(struct task_info) {
.name = "signal",
- .pre_select = signal_pre_select,
- .post_select = signal_post_select,
+ .pre_monitor = signal_pre_monitor,
+ .post_monitor = signal_post_monitor,
.context = signal_task,
}, &sched);
}
-static void command_pre_select(struct sched *s, void *context)
+static void command_pre_monitor(struct sched *s, void *context)
{
unsigned n;
struct server_command_task *sct = context;
for (n = 0; n < sct->num_listen_fds; n++)
- para_fd_set(sct->listen_fds[n], &s->rfds, &s->max_fileno);
+ sched_monitor_readfd(sct->listen_fds[n], s);
}
static int command_task_accept(unsigned listen_idx, struct sched *s,
pid_t child_pid;
uint32_t *chunk_table;
- ret = para_accept(sct->listen_fds[listen_idx], &s->rfds, NULL, 0, &new_fd);
+ ret = para_accept(sct->listen_fds[listen_idx], NULL, 0, &new_fd);
if (ret <= 0)
goto out;
mmd->num_connects++;
/*
- * After we return, the scheduler calls server_select() with a minimal
+ * After we return, the scheduler calls server_poll() with a minimal
* timeout value, because the remaining tasks have a notification
- * pending. Next it calls the ->post_select method of these tasks,
+ * pending. Next it calls the ->post_monitor method of these tasks,
* which will return negative in view of the notification. This causes
* schedule() to return as there are no more runnable tasks.
*
* Note that semaphores are not inherited across a fork(), so we don't
- * hold the lock at this point. Since server_select() drops the lock
- * prior to calling para_select(), we need to acquire it here.
+ * hold the lock at this point. Since server_poll() drops the lock
+ * prior to calling poll(), we need to acquire it here.
*/
mutex_lock(mmd_mutex);
return -E_CHILD_CONTEXT;
return 0;
}
-static int command_post_select(struct sched *s, void *context)
+static int command_post_monitor(struct sched *s, void *context)
{
struct server_command_task *sct = context;
unsigned n;
sct->task = task_register(&(struct task_info) {
.name = "server command",
- .pre_select = command_pre_select,
- .post_select = command_post_select,
+ .pre_monitor = command_pre_monitor,
+ .post_monitor = command_post_monitor,
.context = sct,
}, &sched);
/*
killpg(0, SIGUSR1);
}
-static int server_select(int max_fileno, fd_set *readfds, fd_set *writefds,
- int timeout)
+static int server_poll(struct pollfd *fds, nfds_t nfds, int timeout)
{
int ret;
status_refresh();
mutex_unlock(mmd_mutex);
- ret = para_select(max_fileno + 1, readfds, writefds, timeout);
+ ret = xpoll(fds, nfds, timeout);
mutex_lock(mmd_mutex);
return ret;
}
*sct = &server_command_task_struct;
sched.default_timeout = 1000;
- sched.select_function = server_select;
+ sched.poll_function = server_poll;
server_init(argc, argv, sct);
mutex_lock(mmd_mutex);
ret = schedule(&sched);
/*
- * We hold the mmd lock: it was re-acquired in server_select()
- * after the select call.
+ * We hold the mmd lock: it was re-acquired in server_poll()
+ * after the poll(2) call.
*/
mutex_unlock(mmd_mutex);
sched_shutdown(&sched);