uint16_t slice_bytes;
};
+/** A FEC client is always in one of these states. */
enum fec_client_state {
FEC_STATE_NONE = 0, /**< not initialized and not enabled */
FEC_STATE_DISABLED, /**< temporarily disabled */
return ret < 0? 1 : 0;
}
-static void compute_slice_timeout(struct timeval *timeout)
-{
- struct fec_client *fc;
-
- list_for_each_entry(fc, &fec_client_list, node) {
- struct timeval diff;
-
- if (fc->state != FEC_STATE_READY_TO_RUN)
- continue;
- if (next_slice_is_due(fc, &diff)) {
- timeout->tv_sec = 0;
- timeout->tv_usec = 0;
- return;
- }
- /* timeout = min(timeout, diff) */
- if (tv_diff(&diff, timeout, NULL) < 0)
- *timeout = diff;
- }
-}
-
static void set_eof_barrier(struct vss_task *vsst)
{
struct fec_client *fc;
return -1;
}
-/*
- * != NULL: timeout for next chunk
- * NULL: nothing to do
- */
-static struct timeval *vss_compute_timeout(struct vss_task *vsst)
+static void vss_compute_timeout(struct sched *s, struct vss_task *vsst)
{
- static struct timeval the_timeout;
- struct timeval next_chunk;
-
- if (vss_next() && vsst->map) {
- /* only sleep a bit, nec*/
- the_timeout.tv_sec = 0;
- the_timeout.tv_usec = 100;
- return &the_timeout;
- }
- if (chk_barrier("autoplay_delay", &vsst->autoplay_barrier,
- &the_timeout, 1) < 0)
- return &the_timeout;
- if (chk_barrier("eof", &vsst->eof_barrier, &the_timeout, 1) < 0)
- return &the_timeout;
- if (chk_barrier("data send", &vsst->data_send_barrier,
- &the_timeout, 1) < 0)
- return &the_timeout;
+ struct timeval tv;
+ struct fec_client *fc;
+
if (!vss_playing() || !vsst->map)
- return NULL;
+ return;
+	if (vss_next() && vsst->map) /* only sleep a bit */
+ return sched_request_timeout_ms(100, s);
+
+	/* Each of these barriers must have passed before we may proceed */
+ if (sched_request_barrier(&vsst->autoplay_barrier, s) == 1)
+ return;
+ if (sched_request_barrier(&vsst->eof_barrier, s) == 1)
+ return;
+ if (sched_request_barrier(&vsst->data_send_barrier, s) == 1)
+ return;
+ /*
+ * Compute the select timeout as the minimal time until the next
+ * chunk/slice is due for any client.
+ */
compute_chunk_time(mmd->chunks_sent, &mmd->afd.afhi.chunk_tv,
- &mmd->stream_start, &next_chunk);
- if (chk_barrier("chunk", &next_chunk, &the_timeout, 0) >= 0) {
- /* chunk is due or bof */
- the_timeout.tv_sec = 0;
- the_timeout.tv_usec = 0;
- return &the_timeout;
+ &mmd->stream_start, &tv);
+ if (sched_request_barrier_or_min_delay(&tv, s) == 0)
+ return;
+ list_for_each_entry(fc, &fec_client_list, node) {
+ if (fc->state != FEC_STATE_READY_TO_RUN)
+ continue;
+ if (next_slice_is_due(fc, &tv))
+ return sched_min_delay(s);
+ sched_request_timeout(&tv, s);
}
- /* compute min of current timeout and next slice time */
- compute_slice_timeout(&the_timeout);
- return &the_timeout;
}
static void vss_eof(struct vss_task *vsst)
static void vss_pre_select(struct sched *s, struct task *t)
{
int i;
- struct timeval *tv;
struct vss_task *vsst = container_of(t, struct vss_task, task);
if (!vsst->map || vss_next() || vss_paused() || vss_repos()) {
continue;
senders[i].pre_select(&s->max_fileno, &s->rfds, &s->wfds);
}
- tv = vss_compute_timeout(vsst);
- if (tv)
- sched_request_timeout(tv, s);
+ vss_compute_timeout(s, vsst);
}
static int recv_afs_msg(int afs_socket, int *fd, uint32_t *code, uint32_t *data)
return 1;
}
+#ifndef MAP_POPULATE
+#define MAP_POPULATE 0
+#endif
+
static void recv_afs_result(struct vss_task *vsst, fd_set *rfds)
{
int ret, passed_fd, shmid;
}
mmd->size = statbuf.st_size;
mmd->mtime = statbuf.st_mtime;
- ret = para_mmap(mmd->size, PROT_READ, MAP_PRIVATE, passed_fd,
- 0, &vsst->map);
+ ret = para_mmap(mmd->size, PROT_READ, MAP_PRIVATE | MAP_POPULATE,
+ passed_fd, 0, &vsst->map);
if (ret < 0)
goto err;
close(passed_fd);
}
mmd->chunks_sent++;
mmd->current_chunk++;
+ /*
+ * Prefault next chunk(s)
+ *
+ * If the backing device of the memory-mapped audio file is
+ * slow and read-ahead is turned off or prevented for some
+ * reason, e.g. due to memory pressure, it may take much longer
+ * than the chunk interval to get the next chunk on the wire,
+ * causing buffer underruns on the client side. Mapping the
+ * file with MAP_POPULATE seems to help a bit, but it does not
+ * eliminate the delays completely. Moreover, it is supported
+ * only on Linux. So we do our own read-ahead here.
+ */
+ buf += len;
+ for (i = 0; i < 5 && buf < vsst->map + mmd->size; i++) {
+ __a_unused volatile char x = *buf;
+ buf += 4096;
+ }
}
}