From: Andre Noll Date: Mon, 26 Sep 2011 06:27:18 +0000 (+0200) Subject: vss: Mmap audio files using MAP_POPULATE. X-Git-Tag: v0.4.9~15^2 X-Git-Url: http://git.tuebingen.mpg.de/?p=paraslash.git;a=commitdiff_plain;h=7bba6232 vss: Mmap audio files using MAP_POPULATE. This fixes buffer underruns on an old laptop containing a slow IDE disk. The problem was that getting the next chunk from the map sometimes hit the disk and took more than 300ms. This patch adds MAP_POPULATE to the flags for mmap() to turn on read-ahead for the mapping. This almost fixed the problem, but some buffer underruns remained. Moreover, MAP_POPULATE is only available on Linux. To also fix the remaining cases, we now read one byte from each of the next few pages in the map after a chunk has been sent. This way the next chunk should already be cached when it is needed. --- diff --git a/vss.c b/vss.c index b9afc8ed..e833543e 100644 --- a/vss.c +++ b/vss.c @@ -952,6 +952,10 @@ static int recv_afs_msg(int afs_socket, int *fd, uint32_t *code, uint32_t *data) return 1; } +#ifndef MAP_POPULATE +#define MAP_POPULATE 0 +#endif + static void recv_afs_result(struct vss_task *vsst, fd_set *rfds) { int ret, passed_fd, shmid; @@ -986,8 +990,8 @@ static void recv_afs_result(struct vss_task *vsst, fd_set *rfds) } mmd->size = statbuf.st_size; mmd->mtime = statbuf.st_mtime; - ret = para_mmap(mmd->size, PROT_READ, MAP_PRIVATE, passed_fd, - 0, &vsst->map); + ret = para_mmap(mmd->size, PROT_READ, MAP_PRIVATE | MAP_POPULATE, + passed_fd, 0, &vsst->map); if (ret < 0) goto err; close(passed_fd); @@ -1076,6 +1080,23 @@ static void vss_send(struct vss_task *vsst) } mmd->chunks_sent++; mmd->current_chunk++; + /* + * Prefault next chunk(s) + * + * If the backing device of the memory-mapped audio file is + * slow and read-ahead is turned off or prevented for some + * reason, e.g. 
due to memory pressure, it may take much longer + * than the chunk interval to get the next chunk on the wire, + * causing buffer underruns on the client side. Mapping the + * file with MAP_POPULATE seems to help a bit, but it does not + * eliminate the delays completely. Moreover, it is supported + * only on Linux. So we do our own read-ahead here. + */ + buf += len; + for (i = 0; i < 5 && buf < vsst->map + mmd->size; i++) { + __a_unused volatile char x = *buf; + buf += 4096; + } } }