X-Git-Url: http://git.tuebingen.mpg.de/?p=paraslash.git;a=blobdiff_plain;f=sched.c;h=eb8d03c2bd99fdd1990d307aec977040c2611909;hp=261809c1d64aa735e8d3324a783806477d5148b0;hb=HEAD;hpb=e3a7e12639c34fd86d48a072beb48add8c498d09

diff --git a/sched.c b/sched.c
index 261809c1..20822038 100644
--- a/sched.c
+++ b/sched.c
@@ -62,10 +62,6 @@ static void sched_pre_monitor(struct sched *s)
 
 static void unlink_and_free_task(struct task *t)
 {
-	PARA_INFO_LOG("freeing task %s (%s)\n", t->name, t->status < 0?
-		para_strerror(-t->status) :
-		(t->status == TS_DEAD? "[dead]" : "[running]"));
-
 	list_del(&t->node);
 	free(t->name);
 	free(t);
@@ -117,13 +113,13 @@ static unsigned sched_post_monitor(struct sched *s)
  *
  * \param s Pointer to the scheduler struct.
  *
- * This function updates the global \a now pointer, calls all registered
+ * This function updates the global now pointer, calls all registered
  * pre_monitor hooks which may set the timeout and add any file descriptors to
- * the fd sets of \a s. Next, it calls para_select() and makes the result
+ * the pollfd array. Next, it calls the poll function and makes the result
  * available to the registered tasks by calling their post_monitor hook.
  *
  * \return Zero if no more tasks are left in the task list, negative if the
- * select function returned an error.
+ * poll function returned an error.
  *
  * \sa \ref now.
  */
@@ -132,29 +128,18 @@ int schedule(struct sched *s)
 	int ret;
 	unsigned num_running_tasks;
 
-	if (!s->select_function)
-		s->select_function = para_select;
+	if (!s->poll_function)
+		s->poll_function = xpoll;
 again:
-	FD_ZERO(&s->rfds);
-	FD_ZERO(&s->wfds);
+	s->num_pfds = 0;
+	if (s->pidx)
+		memset(s->pidx, 0xff, s->pidx_array_len * sizeof(unsigned));
 	s->timeout = s->default_timeout;
-	s->max_fileno = -1;
 	clock_get_realtime(&now_struct);
 	sched_pre_monitor(s);
-	ret = s->select_function(s->max_fileno + 1, &s->rfds, &s->wfds,
-		s->timeout);
+	ret = s->poll_function(s->pfd, s->num_pfds, s->timeout);
 	if (ret < 0)
 		return ret;
-	if (ret == 0) {
-		/*
-		 * APUE: Be careful not to check the descriptor sets on return
-		 * unless the return value is greater than zero. The return
-		 * state of the descriptor sets is implementation dependent if
-		 * either a signal is caught or the timer expires.
-		 */
-		FD_ZERO(&s->rfds);
-		FD_ZERO(&s->wfds);
-	}
 	clock_get_realtime(&now_struct);
 	num_running_tasks = sched_post_monitor(s);
 	if (num_running_tasks == 0)
@@ -194,6 +179,7 @@ int task_reap(struct task **tptr)
 	if (t->status >= 0)
 		return 0;
 	ret = t->status;
+	PARA_INFO_LOG("reaping %s: %s\n", t->name, para_strerror(-ret));
 	/*
 	 * With list_for_each_entry_safe() it is only safe to remove the
 	 * _current_ list item. Since we are being called from the loop in
@@ -226,6 +212,8 @@ void sched_shutdown(struct sched *s)
 				t->name);
 		unlink_and_free_task(t);
 	}
+	free(s->pfd);
+	free(s->pidx);
 }
 
 /**
@@ -239,7 +227,7 @@ void sched_shutdown(struct sched *s)
  */
struct task *task_register(struct task_info *info, struct sched *s)
 {
-	struct task *t = para_malloc(sizeof(*t));
+	struct task *t = alloc(sizeof(*t));
 
 	assert(info->post_monitor);
 
@@ -362,11 +350,11 @@ void task_notify_all(struct sched *s, int err)
 }
 
 /**
- * Set the select timeout to the minimal possible value.
+ * Set the I/O timeout to the minimal possible value.
  *
  * \param s Pointer to the scheduler struct.
  *
- * This causes the next select() call to return immediately.
+ * This causes the next poll() call to return immediately.
  */
 void sched_min_delay(struct sched *s)
 {
@@ -374,14 +362,13 @@ void sched_min_delay(struct sched *s)
 }
 
 /**
- * Impose an upper bound for the timeout of the next select() call.
+ * Impose an upper bound for the I/O timeout.
  *
  * \param to Maximal allowed timeout.
  * \param s Pointer to the scheduler struct.
  *
- * If the current scheduler timeout is already smaller than \a to, this
- * function does nothing. Otherwise the timeout for the next select() call is
- * set to the given value.
+ * If the current I/O timeout is already smaller than to, this function does
+ * nothing. Otherwise the timeout is set to the given value.
  *
  * \sa \ref sched_request_timeout_ms().
  */
@@ -393,13 +380,13 @@ void sched_request_timeout(struct timeval *to, struct sched *s)
 }
 
 /**
- * Force the next select() call to return before the given amount of milliseconds.
+ * Bound the I/O timeout to at most the given amount of milliseconds.
  *
  * \param ms The maximal allowed timeout in milliseconds.
  * \param s Pointer to the scheduler struct.
  *
- * Like sched_request_timeout() this imposes an upper bound on the timeout
- * value for the next select() call.
+ * Like \ref sched_request_timeout() this imposes an upper bound on the I/O
+ * timeout.
  */
 void sched_request_timeout_ms(long unsigned ms, struct sched *s)
 {
@@ -409,13 +396,13 @@ void sched_request_timeout_ms(long unsigned ms, struct sched *s)
 }
 
 /**
- * Force the next select() call to return before the given future time.
+ * Bound the I/O timeout by an absolute time in the future.
  *
- * \param barrier Absolute time before select() should return.
+ * \param barrier Defines the upper bound for the timeout.
  * \param s Pointer to the scheduler struct.
  *
- * \return If \a barrier is in the past, this function does nothing and returns
- * zero. Otherwise it returns one.
+ * \return If the barrier is in the past, this function does nothing and
+ * returns zero. Otherwise it returns one.
  *
  * \sa \ref sched_request_barrier_or_min_delay().
  */
@@ -430,12 +417,12 @@ int sched_request_barrier(struct timeval *barrier, struct sched *s)
 }
 
 /**
- * Force the next select() call to return before the given time.
+ * Bound the I/O timeout or request a minimal delay.
  *
- * \param barrier Absolute time before select() should return.
+ * \param barrier Absolute time as in \ref sched_request_barrier().
  * \param s Pointer to the scheduler struct.
  *
- * \return If \a barrier is in the past, this function requests a minimal
+ * \return If the barrier is in the past, this function requests a minimal
  * timeout and returns zero. Otherwise it returns one.
  *
  * \sa \ref sched_min_delay(), \ref sched_request_barrier().
@@ -452,9 +439,9 @@ int sched_request_barrier_or_min_delay(struct timeval *barrier, struct sched *s)
 	return 1;
 }
 
-static void sched_fd_set(int fd, fd_set *fds, int *max_fileno)
+static void add_pollfd(int fd, struct sched *s, short events)
 {
-	assert(fd >= 0 && fd < FD_SETSIZE);
+	assert(fd >= 0);
 #if 0
 	{
 		int flags = fcntl(fd, F_GETFL);
@@ -464,8 +451,41 @@ static void sched_fd_set(int fd, fd_set *fds, int *max_fileno)
 		}
 	}
 #endif
-	FD_SET(fd, fds);
-	*max_fileno = PARA_MAX(*max_fileno, fd);
+	if (s->pidx_array_len > fd) { /* is fd already registered? */
+		if (s->pidx[fd] < s->pfd_array_len) { /* yes, it is */
+			assert(s->pfd[s->pidx[fd]].fd == fd);
+			s->pfd[s->pidx[fd]].events |= events;
+			return;
+		}
+	} else { /* need to extend the index array */
+		unsigned old_len = s->pidx_array_len;
+		while (s->pidx_array_len <= fd)
+			s->pidx_array_len = s->pidx_array_len * 2 + 1;
+		PARA_INFO_LOG("pidx array len: %u\n", s->pidx_array_len);
+		s->pidx = para_realloc(s->pidx,
+			s->pidx_array_len * sizeof(unsigned));
+		memset(s->pidx + old_len, 0xff,
+			(s->pidx_array_len - old_len) * sizeof(unsigned));
+	}
+	/*
+	 * The given fd is not part of the pfd array yet. Initialize pidx[fd]
+	 * to point at the next unused slot of this array and initialize the
+	 * slot.
+	 */
+	s->pidx[fd] = s->num_pfds;
+	if (s->pfd_array_len <= s->num_pfds) {
+		unsigned old_len = s->pfd_array_len;
+		s->pfd_array_len = old_len * 2 + 1;
+		PARA_INFO_LOG("pfd array len: %u\n", s->pfd_array_len);
+		s->pfd = para_realloc(s->pfd,
+			s->pfd_array_len * sizeof(struct pollfd));
+		memset(s->pfd + old_len, 0,
+			(s->pfd_array_len - old_len) * sizeof(struct pollfd));
+	}
+	s->pfd[s->num_pfds].fd = fd;
+	s->pfd[s->num_pfds].events = events;
+	s->pfd[s->num_pfds].revents = 0;
+	s->num_pfds++;
 }
 
 /**
@@ -478,7 +498,7 @@ static void sched_fd_set(int fd, fd_set *fds, int *max_fileno)
  */
 void sched_monitor_readfd(int fd, struct sched *s)
 {
-	sched_fd_set(fd, &s->rfds, &s->max_fileno);
+	add_pollfd(fd, s, POLLIN);
 }
 
 /**
@@ -491,5 +511,53 @@ void sched_monitor_readfd(int fd, struct sched *s)
  */
 void sched_monitor_writefd(int fd, struct sched *s)
 {
-	sched_fd_set(fd, &s->wfds, &s->max_fileno);
+	add_pollfd(fd, s, POLLOUT);
+}
+
+static int get_revents(int fd, const struct sched *s)
+{
+	if (fd < 0)
+		return 0;
+	if (fd >= s->pidx_array_len)
+		return 0;
+	if (s->pidx[fd] >= s->num_pfds)
+		return 0;
+	if (s->pfd[s->pidx[fd]].fd != fd)
+		return 0;
+	assert((s->pfd[s->pidx[fd]].revents & POLLNVAL) == 0);
+	return s->pfd[s->pidx[fd]].revents;
+}
+
+/**
+ * Check whether there is data to read on the given fd.
+ *
+ * To be called from the ->post_monitor() method of a task.
+ *
+ * \param fd Should have been monitored with \ref sched_monitor_readfd().
+ * \param s The scheduler instance.
+ *
+ * \return True if the file descriptor is ready for reading, false otherwise.
+ * If fd is negative, or has not been monitored in the current iteration of
+ * the scheduler's main loop, the function also returns false.
+ *
+ * \sa \ref sched_write_ok().
+ */
+bool sched_read_ok(int fd, const struct sched *s)
+{
+	return get_revents(fd, s) & (POLLIN | POLLERR | POLLHUP);
+}
+
+/**
+ * Check whether writing is possible (i.e., does not block).
+ *
+ * \param fd Should have been monitored with \ref sched_monitor_writefd().
+ * \param s The scheduler instance.
+ *
+ * \return True if the file descriptor is ready for writing, false otherwise.
+ * The comment in \ref sched_read_ok() about invalid file descriptors applies
+ * to this function as well.
+ */
+bool sched_write_ok(int fd, const struct sched *s)
+{
+	return get_revents(fd, s) & (POLLOUT | POLLERR | POLLHUP);
 }
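
Usage note (not part of the patch): a minimal sketch of how a task could consume the new
monitoring API, registering its file descriptor via sched_monitor_readfd() before the poll
and checking readiness with sched_read_ok() afterwards. The hook signatures and the name,
pre_monitor and context members of struct task_info are assumptions inferred from the diff
above (only post_monitor is visible here), so treat the details as illustrative only.

	/* Hypothetical task that echoes whatever becomes readable on its fd. */
	static void echo_pre_monitor(struct sched *s, void *context)
	{
		int *fdp = context; /* fd supplied at task_register() time */

		/* request POLLIN monitoring for this scheduler iteration */
		sched_monitor_readfd(*fdp, s);
	}

	static int echo_post_monitor(struct sched *s, void *context)
	{
		int *fdp = context;
		char buf[128];
		ssize_t nbytes;

		if (!sched_read_ok(*fdp, s))
			return 0; /* fd not ready, task keeps running */
		nbytes = read(*fdp, buf, sizeof(buf));
		if (nbytes <= 0)
			return -1; /* negative status: task terminates, see task_reap() */
		(void)write(STDOUT_FILENO, buf, nbytes);
		return 0;
	}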