X-Git-Url: http://git.tuebingen.mpg.de/?p=paraslash.git;a=blobdiff_plain;f=buffer_tree.c;h=8a3175133e69aa57eff7da5155341cbf05532d03;hp=d12dcde7b5055b426f40ec116f0d5533213ef522;hb=d6b25bf854c164021550dfa8ff9fb4cfb104582e;hpb=172e2ef5bc06aee5a68c37e10b1a8918895c23f9 diff --git a/buffer_tree.c b/buffer_tree.c index d12dcde7..8a317513 100644 --- a/buffer_tree.c +++ b/buffer_tree.c @@ -1,5 +1,7 @@ +/* Copyright (C) 2009 Andre Noll , see file COPYING. */ + +/** \file buffer_tree.c Buffer tree and buffer pool implementations. */ #include -#include #include "para.h" #include "list.h" @@ -8,6 +10,7 @@ #include "error.h" #include "sched.h" +/* whead = NULL means area full */ struct btr_pool { char *name; char *area_start; @@ -16,17 +19,15 @@ struct btr_pool { char *whead; }; -enum btr_buffer_flags { - /* changes the way the buffer is deallocated */ - BTR_BF_BTR_POOL = 1, -}; - struct btr_buffer { char *buf; size_t size; /** The number of references to this buffer. */ int refcount; + /* NULL means no buffer pool but a malloced buffer. */ struct btr_pool *pool; + /* Only relevant if pool is NULL. */ + bool dont_free; }; struct btr_buffer_reference { @@ -56,6 +57,16 @@ struct btr_node { void *context; }; +/** + * Create a new buffer pool. + * + * \param name The name of the new buffer pool. + * \param area_size The size in bytes of the pool area. + * + * \return An opaque pointer to the newly created buffer pool. It must be + * passed to btr_pool_free() after it is no longer used to deallocate all + * resources. + */ struct btr_pool *btr_pool_new(const char *name, size_t area_size) { struct btr_pool *btrp; @@ -70,8 +81,11 @@ struct btr_pool *btr_pool_new(const char *name, size_t area_size) return btrp; } -/* whead = NULL means area full */ - +/** + * Deallocate resources used by a buffer pool. + * + * \param btrp A pointer obtained via btr_pool_new(). + */ void btr_pool_free(struct btr_pool *btrp) { if (!btrp) @@ -81,12 +95,20 @@ void btr_pool_free(struct btr_pool *btrp) free(btrp); } +/** + * Return the size of the buffer pool area. + * + * \param btrp The buffer pool. + * + * \return The same value which was passed during creation time to + * btr_pool_new(). + */ size_t btr_pool_size(struct btr_pool *btrp) { return btrp->area_end - btrp->area_start; } -size_t btr_pool_filled(struct btr_pool *btrp) +static size_t btr_pool_filled(struct btr_pool *btrp) { if (!btrp->whead) return btr_pool_size(btrp); @@ -95,12 +117,28 @@ size_t btr_pool_filled(struct btr_pool *btrp) return btr_pool_size(btrp) - (btrp->rhead - btrp->whead); } +/** + * Get the number of unused bytes in the buffer pool. + * + * \param btrp The pool. + * + * \return The number of bytes that can currently be allocated. + * + * Note that in general the returned number of bytes is not available as a + * single contiguous buffer. Use btr_pool_available() to obtain the length of + * the largest contiguous buffer that can currently be allocated from the + * buffer pool. + */ size_t btr_pool_unused(struct btr_pool *btrp) { return btr_pool_size(btrp) - btr_pool_filled(btrp); } -size_t btr_pool_available(struct btr_pool *btrp) +/* + * Return maximal size available for one read. This is + * smaller than the value returned by btr_pool_unused(). + */ +static size_t btr_pool_available(struct btr_pool *btrp) { if (!btrp->whead) return 0; @@ -109,6 +147,15 @@ size_t btr_pool_available(struct btr_pool *btrp) return btrp->rhead - btrp->whead; } +/** + * Obtain the current write head. + * + * \param btrp The buffer pool. 
+ * \param result The write head is returned here. + * + * \return The maximal amount of bytes that may be written to the returned + * buffer. + */ size_t btr_pool_get_buffer(struct btr_pool *btrp, char **result) { if (result) @@ -116,13 +163,50 @@ size_t btr_pool_get_buffer(struct btr_pool *btrp, char **result) return btr_pool_available(btrp); } -void btr_pool_allocate(struct btr_pool *btrp, size_t size) +/** + * Get references to buffers pointing to free space of the buffer pool area. + * + * \param btrp The buffer pool. + * \param iov The scatter array. + * + * \return Zero if the buffer pool is full, one if the free space of the buffer + * pool area is available as a single contiguous buffer, two if the free space + * consists of two buffers. If this function returns the value n, then n + * elements of \a iov are initialized. + */ +int btr_pool_get_buffers(struct btr_pool *btrp, struct iovec iov[2]) +{ + size_t sz, unused; + char *buf; + + sz = btr_pool_get_buffer(btrp, &buf); + if (sz == 0) + return 0; + iov[0].iov_len = sz; + iov[0].iov_base = buf; + unused = btr_pool_unused(btrp); + if (sz == unused) + return 1; + iov[1].iov_len = unused - sz; + iov[1].iov_base = btrp->area_start; + return 2; +} + +/** + * Mark a part of the buffer pool area as allocated. + * + * \param btrp The buffer pool. + * \param size The amount of bytes to be allocated. + * + * This is usually called after the caller wrote to the buffer obtained by + * btr_pool_get_buffer(). + */ +static void btr_pool_allocate(struct btr_pool *btrp, size_t size) { char *end; if (size == 0) return; - //PARA_CRIT_LOG("filled: %zu, alloc %zu\n", btr_pool_filled(btrp), size); assert(size <= btr_pool_available(btrp)); end = btrp->whead + size; assert(end <= btrp->area_end); @@ -132,18 +216,16 @@ void btr_pool_allocate(struct btr_pool *btrp, size_t size) end = btrp->area_start; } if (end == btrp->rhead) { - PARA_DEBUG_LOG("btrp buffer full\n"); + PARA_DEBUG_LOG("%s btrp buffer full\n", btrp->name); end = NULL; /* buffer full */ } btrp->whead = end; - //PARA_CRIT_LOG("filled: %zu\n", btr_pool_filled(btrp)); } static void btr_pool_deallocate(struct btr_pool *btrp, size_t size) { char *end = btrp->rhead + size; - //PARA_CRIT_LOG("filled: %zu, dealloc %zu\n", btr_pool_filled(btrp), size); if (size == 0) return; assert(end <= btrp->area_end); @@ -155,7 +237,6 @@ static void btr_pool_deallocate(struct btr_pool *btrp, size_t size) btrp->rhead = end; if (btrp->rhead == btrp->whead) btrp->rhead = btrp->whead = btrp->area_start; - //PARA_CRIT_LOG("filled: %zu\n", btr_pool_filled(btrp)); } #define FOR_EACH_CHILD(_tn, _btrn) list_for_each_entry((_tn), \ @@ -168,25 +249,53 @@ static void btr_pool_deallocate(struct btr_pool *btrp, size_t size) #define FOR_EACH_BUFFER_REF_SAFE(_br, _tmp, _btrn) \ list_for_each_entry_safe((_br), (_tmp), &(_btrn)->input_queue, node) -struct btr_node *btr_new_node(const char *name, struct btr_node *parent, - btr_command_handler handler, void *context) +/** + * Create a new buffer tree node. + * + * \param bnd Specifies how to create the new node. + * + * \return A pointer to the newly allocated node. + * + * This function always succeeds (or calls exit()). The returned pointer must + * be freed using btr_free_node() after the node has been removed from the + * buffer tree via btr_remove_node(). 
+ */ +struct btr_node *btr_new_node(struct btr_node_description *bnd) { struct btr_node *btrn = para_malloc(sizeof(*btrn)); - btrn->name = para_strdup(name); - btrn->parent = parent; - btrn->execute = handler; - btrn->context = context; + btrn->name = para_strdup(bnd->name); + btrn->parent = bnd->parent; + btrn->execute = bnd->handler; + btrn->context = bnd->context; btrn->start.tv_sec = 0; btrn->start.tv_usec = 0; - if (parent) - list_add_tail(&btrn->node, &parent->children); INIT_LIST_HEAD(&btrn->children); INIT_LIST_HEAD(&btrn->input_queue); - if (parent) - PARA_INFO_LOG("added %s as child of %s\n", name, parent->name); - else - PARA_INFO_LOG("added %s as btr root\n", name); + if (!bnd->child) { + if (bnd->parent) { + list_add_tail(&btrn->node, &bnd->parent->children); + PARA_INFO_LOG("new leaf node: %s (child of %s)\n", + bnd->name, bnd->parent->name); + } else + PARA_INFO_LOG("added %s as btr root\n", bnd->name); + goto out; + } + if (!bnd->parent) { + assert(!bnd->child->parent); + PARA_INFO_LOG("new root: %s (was %s)\n", + bnd->name, bnd->child->name); + btrn->parent = NULL; + list_add_tail(&bnd->child->node, &btrn->children); + /* link it in */ + bnd->child->parent = btrn; + goto out; + } + list_add_tail(&btrn->node, &bnd->parent->children); + list_move(&bnd->child->node, &btrn->children); + bnd->child->parent = btrn; + PARA_INFO_LOG("added %s as internal node\n", bnd->name); +out: return btrn; } @@ -209,7 +318,7 @@ static void dealloc_buffer(struct btr_buffer *btrb) { if (btrb->pool) btr_pool_deallocate(btrb->pool, btrb->size); - else + else if (!btrb->dont_free) free(btrb->buf); } @@ -228,7 +337,6 @@ static void btr_drop_buffer_reference(struct btr_buffer_reference *br) { struct btr_buffer *btrb = br->btrb; - //PARA_CRIT_LOG("dropping buffer reference %p\n", br); list_del(&br->node); free(br); btrb->refcount--; @@ -256,11 +364,26 @@ static void add_btrb_to_children(struct btr_buffer *btrb, } } +/** + * Insert a malloced buffer into the buffer tree. + * + * \param buf The buffer to insert. + * \param size The size of \a buf in bytes. + * \param btrn Position in the buffer tree to create the output. + * + * This creates references to \a buf and adds these references to each child of + * \a btrn. The buffer will be freed using standard free() once no buffer tree + * node is referencing it any more. + * + * Note that this function must not be used if \a buf was obtained from a + * buffer pool. Use btr_add_output_pool() in this case. + */ void btr_add_output(char *buf, size_t size, struct btr_node *btrn) { struct btr_buffer *btrb; - assert(size != 0); + if (size == 0) + return; if (list_empty(&btrn->children)) { free(buf); return; @@ -269,6 +392,47 @@ void btr_add_output(char *buf, size_t size, struct btr_node *btrn) add_btrb_to_children(btrb, btrn, 0); } +/** + * Insert a buffer into the buffer tree, non-freeing variant. + * + * \param buf See \ref btr_add_output(). + * \param size See \ref btr_add_output(). + * \param btrn See \ref btr_add_output(). + * + * This is similar to btr_add_output() but additionally sets the \p dont_free + * flag on \a buf. If the refcount for the buffer drops to zero, \a buf will + * not be deallocated if this flag is set. + * + * The \p dont_free bit also prevents the children of \a btrn from modifying + * the buffer contents inplace. Specifically, \ref btr_inplace_ok() returns + * false if there is any buffer in the input queue with the \p dont_free bit + * set. 
+ */ +void btr_add_output_dont_free(const char *buf, size_t size, struct btr_node *btrn) +{ + struct btr_buffer *btrb; + + if (size == 0) + return; + if (list_empty(&btrn->children)) + return; + btrb = new_btrb((char *)buf, size); + btrb->dont_free = true; + add_btrb_to_children(btrb, btrn, 0); +} + +/** + * Feed data to child nodes of a buffer tree node. + * + * \param btrp The buffer pool. + * \param size The number of bytes to be allocated and fed to each child. + * \param btrn The node whose children are to be fed. + * + * This function allocates the amount of bytes from the buffer pool area, + * starting at the current value of the write head, and creates buffer + * references to the resulting part of the buffer pool area, one for each child + * of \a btrn. The references are then fed into the input queue of each child. + */ void btr_add_output_pool(struct btr_pool *btrp, size_t size, struct btr_node *btrn) { @@ -276,7 +440,8 @@ void btr_add_output_pool(struct btr_pool *btrp, size_t size, char *buf; size_t avail; - assert(size != 0); + if (size == 0) + return; if (list_empty(&btrn->children)) return; avail = btr_pool_get_buffer(btrp, &buf); @@ -287,6 +452,17 @@ void btr_add_output_pool(struct btr_pool *btrp, size_t size, add_btrb_to_children(btrb, btrn, 0); } +/** + * Copy data to write head of a buffer pool and feed it to all children nodes. + * + * \param src The source buffer. + * \param n The size of the source buffer in bytes. + * \param btrp The destination buffer pool. + * \param btrn Add the data as output of this node. + * + * This is expensive. The caller must make sure the data fits into the buffer + * pool area. + */ void btr_copy(const void *src, size_t n, struct btr_pool *btrp, struct btr_node *btrn) { @@ -314,6 +490,17 @@ static void btr_pushdown_br(struct btr_buffer_reference *br, struct btr_node *bt btr_drop_buffer_reference(br); } +/** + * Feed all buffer references of the input queue through the output channel. + * + * \param btrn The node whose buffer references should be pushed down. + * + * This function is useful for filters that do not change the contents of the + * buffers at all, like the wav filter or the amp filter if no amplification + * was specified. This function is rather cheap. + * + * \sa \ref btr_pushdown_one(). + */ void btr_pushdown(struct btr_node *btrn) { struct btr_buffer_reference *br, *tmp; @@ -322,33 +509,82 @@ void btr_pushdown(struct btr_node *btrn) btr_pushdown_br(br, btrn); } -int btr_pushdown_one(struct btr_node *btrn) +/** + * Feed the next buffer of the input queue through the output channel. + * + * \param btrn The node whose first input queue buffer should be pushed down. + * + * This works like \ref btr_pushdown() but pushes down only one buffer + * reference. + */ +void btr_pushdown_one(struct btr_node *btrn) { struct btr_buffer_reference *br; if (list_empty(&btrn->input_queue)) - return 0; + return; br = list_first_entry(&btrn->input_queue, struct btr_buffer_reference, node); btr_pushdown_br(br, btrn); - return 1; } -/* Return true if this node has no children. */ -bool btr_no_children(struct btr_node *btrn) +/* + * Find out whether a node is a leaf node. + * + * \param btrn The node to check. + * + * \return True if this node has no children. False otherwise. + */ +static bool btr_no_children(struct btr_node *btrn) { return list_empty(&btrn->children); } +/** + * Find out whether a node is an orphan. + * + * \param btrn The buffer tree node. + * + * \return True if \a btrn has no parent. 
+ * + * This function returns true for the root node and false for any other node. + * + * After a (non-leaf) node was removed removed from the tree, the function + * returns true for all child nodes. + */ bool btr_no_parent(struct btr_node *btrn) { return !btrn->parent; } +/** + * Find out whether it is OK to change an input buffer. + * + * \param btrn The buffer tree node to check. + * + * This is used by filters that produce exactly the same amount of output as + * there is input. The amp filter which multiplies each sample by some number + * is an example of such a filter. If there are no other nodes in the buffer + * tree that read the same input stream (i.e. if \a btrn has no siblings), a + * node may modify its input buffer directly and push down the modified buffer + * to its children, thereby avoiding to allocate a possibly large additional + * buffer. + * + * Since the buffer tree may change at any time, this function should be called + * during each post_select call. + * + * \return True if \a btrn has no siblings. + */ bool btr_inplace_ok(struct btr_node *btrn) { - if (!btrn->parent) - return true; - return list_is_singular(&btrn->parent->children); + struct btr_buffer_reference *br; + FOR_EACH_BUFFER_REF(br, btrn) { + struct btr_buffer *btrb = br->btrb; + if (btrb->refcount > 1) + return false; + if (btrb->dont_free == true) + return false; + } + return true; } static inline size_t br_available_bytes(struct btr_buffer_reference *br) @@ -356,7 +592,7 @@ static inline size_t br_available_bytes(struct btr_buffer_reference *br) return br->btrb->size - br->consumed; } -size_t btr_get_buffer_by_reference(struct btr_buffer_reference *br, char **buf) +static size_t btr_get_buffer_by_reference(struct btr_buffer_reference *br, char **buf) { if (buf) *buf = br->btrb->buf + br->consumed; @@ -364,41 +600,110 @@ size_t btr_get_buffer_by_reference(struct btr_buffer_reference *br, char **buf) } /** - * \return zero if the input buffer queue is empty. + * Obtain the next buffer of the input queue, omitting data. + * + * \param btrn The node whose input queue is to be queried. + * \param omit Number of bytes to be omitted. + * \param bufp Result pointer. It is OK to pass \p NULL here. + * + * If a buffer tree node needs more input data but can not consume the data it + * already has (because it might be needed again later) this function can be + * used instead of btr_next_buffer() to get a reference to the buffer obtained + * by skipping the given number of bytes. Skipped input bytes are not consumed. + * + * With a zero \a omit argument, this function is equivalent to \ref + * btr_next_buffer(). + * + * \return Number of bytes in \a bufp. If there are less than or equal to \a + * omit many bytes available in the input queue of the buffer tree node pointed + * to by \a btrn, the function returns zero and the value of \a bufp is + * undefined. 
*/ -size_t btr_next_buffer(struct btr_node *btrn, char **bufp) +size_t btr_next_buffer_omit(struct btr_node *btrn, size_t omit, char **bufp) { struct btr_buffer_reference *br; + size_t wrap_count, sz, rv = 0; char *buf, *result = NULL; - size_t sz, rv = 0; - FOR_EACH_BUFFER_REF(br, btrn) { + br = get_first_input_br(btrn); + if (!br) + return 0; + wrap_count = br->wrap_count; + if (wrap_count > 0) { /* we have a wrap buffer */ sz = btr_get_buffer_by_reference(br, &buf); - if (!result) { - result = buf; - rv = sz; - if (!br->btrb->pool) - break; - continue; - } - if (!br->btrb->pool) - break; - if (result + rv != buf) { - PARA_DEBUG_LOG("%s: pool merge impossible: %p != %p\n", - btrn->name, result + rv, buf); - break; + if (sz > omit) { /* and it's big enough */ + result = buf + omit; + rv = sz - omit; + /* + * Wrap buffers are allocated by malloc(), so the next + * buffer ref will not align nicely, so we return the + * tail of the wrap buffer. + */ + goto out; } -// PARA_CRIT_LOG("%s: inplace merge (%zu, %zu)->%zu\n", btrn->name, -// rv, sz, rv + sz); -// PARA_CRIT_LOG("%s: inplace merge %p (%zu)\n", btrn->name, -// result, sz); - rv += sz; + /* + * The next wrap_count bytes exist twice, in the wrap buffer + * and as a buffer reference in the buffer tree pool. + */ + omit += wrap_count; + } + /* + * For buffer tree pools, the buffers in the list align, i.e. the next + * buffer in the list starts directly at the end of its predecessor. In + * this case we merge adjacent buffers and return one larger buffer + * instead. + */ + FOR_EACH_BUFFER_REF(br, btrn) { + sz = btr_get_buffer_by_reference(br, &buf); + if (result) { + if (result + rv != buf) + goto out; + rv += sz; + } else if (sz > omit) { + result = buf + omit; + rv = sz - omit; + } else + omit -= sz; } + if (!result) + return 0; +out: if (bufp) *bufp = result; return rv; } +/** + * Obtain the next buffer of the input queue of a buffer tree node. + * + * \param btrn The node whose input queue is to be queried. + * \param bufp Result pointer. + * + * \return The number of bytes that can be read from buf. + * + * The call of this function is is equivalent to calling \ref + * btr_next_buffer_omit() with an \a omit value of zero. + */ +size_t btr_next_buffer(struct btr_node *btrn, char **bufp) +{ + return btr_next_buffer_omit(btrn, 0, bufp); +} + +/** + * Deallocate the given number of bytes from the input queue. + * + * \param btrn The buffer tree node. + * \param numbytes The number of bytes to be deallocated. + * + * This function must be used to get rid of existing buffer references in the + * node's input queue. If no references to a buffer remain, the underlying + * buffers are either freed (in the non-buffer pool case) or the read head of + * the buffer pool is being advanced. + * + * Note that \a numbytes may be smaller than the buffer size. In this case the + * buffer is not deallocated and subsequent calls to btr_next_buffer() return + * the remaining part of the buffer. + */ void btr_consume(struct btr_node *btrn, size_t numbytes) { struct btr_buffer_reference *br, *tmp; @@ -409,7 +714,6 @@ void btr_consume(struct btr_node *btrn, size_t numbytes) br = get_first_input_br(btrn); assert(br); - //PARA_CRIT_LOG("wrap count: %zu\n", br->wrap_count); if (br->wrap_count == 0) { /* * No wrap buffer. 
Drop buffer references whose buffer @@ -424,19 +728,17 @@ void btr_consume(struct btr_node *btrn, size_t numbytes) numbytes -= br->btrb->size - br->consumed; btr_drop_buffer_reference(br); } - assert(true); + assert(false); } /* - - We have a wrap buffer, consume from it. If in total, - i.e. including previous calls to brt_consume(), less than - wrap_count has been consumed, there's nothing more we can do. - - Otherwise we drop the wrap buffer and consume from subsequent - buffers of the input queue the correct amount of bytes. This - is the total number of bytes that have been consumed from the - wrap buffer. -*/ + * We have a wrap buffer, consume from it. If in total, i.e. including + * previous calls to brt_consume(), less than wrap_count has been + * consumed, there's nothing more we can do. + * + * Otherwise we drop the wrap buffer and consume from subsequent + * buffers of the input queue the correct amount of bytes. This is the + * total number of bytes that have been consumed from the wrap buffer. + */ PARA_DEBUG_LOG("consuming %zu/%zu bytes from wrap buffer\n", numbytes, br_available_bytes(br)); @@ -452,50 +754,99 @@ void btr_consume(struct btr_node *btrn, size_t numbytes) return btr_consume(btrn, sz); } -static void flush_input_queue(struct btr_node *btrn) +/** + * Clear the input queue of a buffer tree node. + * + * \param btrn The node whose input queue should be cleared. + */ +void btr_drain(struct btr_node *btrn) { struct btr_buffer_reference *br, *tmp; + FOR_EACH_BUFFER_REF_SAFE(br, tmp, btrn) btr_drop_buffer_reference(br); } -void btr_free_node(struct btr_node *btrn) +static void btr_free_node(struct btr_node *btrn) { - if (!btrn) - return; free(btrn->name); free(btrn); } -void btr_remove_node(struct btr_node *btrn) +/** + * Remove a node from a buffer tree. + * + * \param btrnp Determines the node to remove. + * + * This orphans all children of the node given by \a btrnp and removes this + * node from the child list of its parent. Moreover, the input queue is flushed + * and the node pointer given by \a btrp is set to \p NULL. + * + * \sa \ref btr_splice_out_node. + */ +void btr_remove_node(struct btr_node **btrnp) { struct btr_node *ch; + struct btr_node *btrn; - if (!btrn) + if (!btrnp) return; - PARA_NOTICE_LOG("removing btr node %s from buffer tree\n", btrn->name); + btrn = *btrnp; + if (!btrn) + goto out; + PARA_INFO_LOG("removing btr node %s from buffer tree\n", btrn->name); FOR_EACH_CHILD(ch, btrn) ch->parent = NULL; - flush_input_queue(btrn); + btr_drain(btrn); if (btrn->parent) list_del(&btrn->node); + btr_free_node(btrn); +out: + *btrnp = NULL; } +/** + * Return the amount of available input bytes of a buffer tree node. + * + * \param btrn The node whose input size should be computed. + * + * \return The total number of bytes available in the node's input + * queue. + * + * This simply iterates over all buffer references in the input queue and + * returns the sum of the sizes of all references. + */ size_t btr_get_input_queue_size(struct btr_node *btrn) { struct btr_buffer_reference *br; - size_t size = 0; + size_t size = 0, wrap_consumed = 0; FOR_EACH_BUFFER_REF(br, btrn) { - //PARA_CRIT_LOG("size: %zu\n", size); + if (br->wrap_count != 0) { + wrap_consumed = br->consumed; + continue; + } size += br_available_bytes(br); } + assert(wrap_consumed <= size); + size -= wrap_consumed; return size; } -void btr_splice_out_node(struct btr_node *btrn) +/** + * Remove a node from the buffer tree, reconnecting parent and children. 
+ * + * \param btrnp The node to splice out. + * + * This function is used by buffer tree nodes that do not exist during the + * whole lifetime of the buffer tree. Unlike btr_remove_node(), calling + * btr_splice_out_node() does not split the tree into disconnected components + * but reconnects the buffer tree by making all child nodes of \a btrn children + * of the parent of \a btrn. + */ +void btr_splice_out_node(struct btr_node **btrnp) { - struct btr_node *ch, *tmp; + struct btr_node *btrn = *btrnp, *ch, *tmp; assert(btrn); PARA_NOTICE_LOG("splicing out %s\n", btrn->name); @@ -508,16 +859,23 @@ void btr_splice_out_node(struct btr_node *btrn) ch->parent = btrn->parent; if (btrn->parent) list_move(&ch->node, &btrn->parent->children); + else + list_del(&ch->node); } assert(list_empty(&btrn->children)); + btr_free_node(btrn); + *btrnp = NULL; } /** - * Return the size of the largest input queue. + * Return number of queued output bytes of a buffer tree node. * - * Iterates over all children of the given node. + * \param btrn The node whose output queue size should be computed. + * + * \return This function iterates over all children of the given node and + * returns the size of the largest input queue. */ -size_t btr_bytes_pending(struct btr_node *btrn) +size_t btr_get_output_queue_size(struct btr_node *btrn) { size_t max_size = 0; struct btr_node *ch; @@ -529,39 +887,52 @@ size_t btr_bytes_pending(struct btr_node *btrn) return max_size; } -int btr_exec(struct btr_node *btrn, const char *command, char **value_result) -{ - if (!btrn) - return -ERRNO_TO_PARA_ERROR(EINVAL); - if (!btrn->execute) - return -ERRNO_TO_PARA_ERROR(ENOTSUP); - return btrn->execute(btrn, command, value_result); -} - +/** + * Execute an inter-node command on the given node or on a parent node. + * + * \param btrn The node to start looking. + * \param command The command to execute. + * \param value_result Additional arguments and result value. + * + * This function traverses the buffer tree from \a btrn upwards and looks for + * the first node that understands \a command. On this node \a command is + * executed, and the result is stored in \a value_result. + * + * \return \p -ENOTSUP if no parent node of \a btrn understands \a command. + * Otherwise the return value of the command handler is returned. + * + * \sa \ref receiver::execute, \ref filter::execute, \ref writer::execute. + */ int btr_exec_up(struct btr_node *btrn, const char *command, char **value_result) { int ret; for (; btrn; btrn = btrn->parent) { - struct btr_node *parent = btrn->parent; - if (!parent) - return -ERRNO_TO_PARA_ERROR(ENOTSUP); - if (!parent->execute) + if (!btrn->execute) continue; - PARA_INFO_LOG("parent: %s, cmd: %s\n", parent->name, command); - ret = parent->execute(parent, command, value_result); + PARA_INFO_LOG("executing %s on %s\n", command, btrn->name); + ret = btrn->execute(btrn, command, value_result); if (ret == -ERRNO_TO_PARA_ERROR(ENOTSUP)) continue; if (ret < 0) return ret; if (value_result && *value_result) - PARA_NOTICE_LOG("%s(%s): %s\n", command, parent->name, + PARA_INFO_LOG("%s(%s): %s\n", command, btrn->name, *value_result); return 1; } return -ERRNO_TO_PARA_ERROR(ENOTSUP); } +/** + * Obtain the context of a buffer node tree. + * + * \param btrn The node whose output queue size should be computed. + * + * \return A pointer to the \a context address specified at node creation time. + * + * \sa \ref btr_new_node(), struct \ref btr_node_description. 
+ */ void *btr_context(struct btr_node *btrn) { return btrn->context; @@ -582,24 +953,27 @@ static bool need_buffer_pool_merge(struct btr_node *btrn) static void merge_input_pool(struct btr_node *btrn, size_t dest_size) { - struct btr_buffer_reference *br, *wbr; + struct btr_buffer_reference *br, *wbr = NULL; int num_refs; /* including wrap buffer */ - char *buf, *buf1, *buf2 = NULL; - size_t sz, sz1, sz2 = 0, wsz; + char *buf, *buf1 = NULL, *buf2 = NULL; + size_t sz, sz1 = 0, sz2 = 0, wb_consumed = 0; - if (list_empty(&btrn->input_queue)) + br = get_first_input_br(btrn); + if (!br || br_available_bytes(br) >= dest_size) return; - num_refs = 0; FOR_EACH_BUFFER_REF(br, btrn) { num_refs++; sz = btr_get_buffer_by_reference(br, &buf); + if (sz == 0) + break; if (br->wrap_count != 0) { assert(!wbr); assert(num_refs == 1); wbr = br; if (sz >= dest_size) return; + wb_consumed = br->consumed; continue; } if (!buf1) { @@ -619,14 +993,19 @@ static void merge_input_pool(struct btr_node *btrn, size_t dest_size) assert(buf2 + sz2 == buf); sz2 += sz; next: - if (sz1 + sz2 >= dest_size) + if (sz1 + sz2 >= dest_size + wb_consumed) break; } + if (!buf2) /* nothing to do */ + return; + assert(buf1 && sz2 > 0); + /* + * If the second buffer is large, we only take the first part of it to + * avoid having to memcpy() huge buffers. + */ + sz2 = PARA_MIN(sz2, (size_t)(64 * 1024)); if (!wbr) { - assert(buf1); - if (!buf2) /* nothing to do */ - return; - /* make a new wrap buffer combining buf1 and buf 2. */ + /* Make a new wrap buffer combining buf1 and buf2. */ sz = sz1 + sz2; buf = para_malloc(sz); PARA_DEBUG_LOG("merging input buffers: (%p:%zu, %p:%zu) -> %p:%zu\n", @@ -646,13 +1025,12 @@ next: * We already have a wrap buffer, but it is too small. It might be * partially used. */ - wsz = br_available_bytes(wbr); if (wbr->wrap_count == sz1 && wbr->btrb->size >= sz1 + sz2) /* nothing we can do about it */ return; - assert(buf1 && buf2); sz = sz1 + sz2 - wbr->btrb->size; /* amount of new data */ + PARA_DEBUG_LOG("increasing wrap buffer %zu -> %zu\n", wbr->btrb->size, + wbr->btrb->size + sz); wbr->btrb->size += sz; - PARA_DEBUG_LOG("increasing wrap buffer to %zu\n", wbr->btrb->size); wbr->btrb->buf = para_realloc(wbr->btrb->buf, wbr->btrb->size); /* copy the new data to the end of the reallocated buffer */ assert(sz2 >= sz); @@ -686,6 +1064,7 @@ static int merge_input(struct btr_node *btrn) if (i == 2) break; } + assert(i == 2); /* make a new btrb that combines the two buffers and a br to it. */ sz = szs[0] + szs[1]; buf = para_malloc(sz); @@ -705,6 +1084,20 @@ static int merge_input(struct btr_node *btrn) return 2; } +/** + * Combine input queue buffers. + * + * \param btrn The buffer tree node whose input should be merged. + * \param dest_size Stop merging if a buffer of at least this size exists. + * + * Used to combine as many buffers as needed into a single buffer whose size is + * at least \a dest_size. This function is rather cheap in case the parent node + * uses buffer pools and rather expensive otherwise. + * + * Note that if less than \a dest_size bytes are available in total, this + * function does nothing and subsequent calls to btr_next_buffer() will still + * return a buffer size less than \a dest_size. 
+ */ void btr_merge(struct btr_node *btrn, size_t dest_size) { if (need_buffer_pool_merge(btrn)) @@ -720,7 +1113,7 @@ void btr_merge(struct btr_node *btrn, size_t dest_size) } } -bool btr_eof(struct btr_node *btrn) +static bool btr_eof(struct btr_node *btrn) { char *buf; size_t len = btr_next_buffer(btrn, &buf); @@ -728,7 +1121,7 @@ bool btr_eof(struct btr_node *btrn) return (len == 0 && btr_no_parent(btrn)); } -void log_tree_recursively(struct btr_node *btrn, int loglevel, int depth) +static void log_tree_recursively(struct btr_node *btrn, int loglevel, int depth) { struct btr_node *ch; const char spaces[] = " ", *space = spaces + 16 - depth; @@ -740,38 +1133,119 @@ void log_tree_recursively(struct btr_node *btrn, int loglevel, int depth) log_tree_recursively(ch, loglevel, depth + 1); } +/** + * Write the current buffer (sub-)tree to the log. + * + * \param btrn Start logging at this node. + * \param loglevel Set severity with which the tree should be logged. + */ void btr_log_tree(struct btr_node *btrn, int loglevel) { return log_tree_recursively(btrn, loglevel, 0); } +/** + * Find the node with the given name in the buffer tree. + * + * \param name The name of the node to search. + * \param root Where to start the search. + * + * \return A pointer to the node with the given name on success. If \a name is + * \p NULL, the function returns \a root. If there is no node with the given + * name, \p NULL is returned. + */ +struct btr_node *btr_search_node(const char *name, struct btr_node *root) +{ + struct btr_node *ch; + + if (!name) + return root; + if (!strcmp(root->name, name)) + return root; + FOR_EACH_CHILD(ch, root) { + struct btr_node *result = btr_search_node(name, ch); + if (result) + return result; + } + return NULL; +} + /** 640K ought to be enough for everybody ;) */ -#define BTRN_MAX_PENDING (640 * 1024) +#define BTRN_MAX_PENDING (96 * 1024) +/** + * Return the current state of a buffer tree node. + * + * \param btrn The node whose state should be queried. + * \param min_iqs The minimal input queue size. + * \param type The supposed type of \a btrn. + * + * Most users of the buffer tree subsystem call this function from both + * their pre_select and the post_select methods. + * + * \return Negative if an error condition was detected, zero if there + * is nothing to do and positive otherwise. + * + * Examples: + * + * - If a non-root node has no parent and an empty input queue, the function + * returns \p -E_BTR_EOF. Similarly, if a non-leaf node has no children, \p + * -E_BTR_NO_CHILD is returned. + * + * - If less than \a min_iqs many bytes are available in the input queue and no + * EOF condition was detected, the function returns zero. + * + * - If there's plenty of data left in the input queue of the children of \a + * btrn, the function also returns zero in order to bound the memory usage of + * the buffer tree. 
+ */ int btr_node_status(struct btr_node *btrn, size_t min_iqs, enum btr_node_type type) { size_t iqs; - if (type != BTR_NT_LEAF) { - if (btr_no_children(btrn)) - return -E_BTR_NO_CHILD; - if (btr_bytes_pending(btrn) > BTRN_MAX_PENDING) - return 0; - } - if (type != BTR_NT_ROOT) { - if (btr_eof(btrn)) - return -E_BTR_EOF; - iqs = btr_get_input_queue_size(btrn); - if (iqs == 0) /* we have a parent, because not eof */ - return 0; - if (iqs < min_iqs && !btr_no_parent(btrn)) - return 0; - } + assert(btrn); + if (type != BTR_NT_LEAF && btr_no_children(btrn)) + return -E_BTR_NO_CHILD; + if (type != BTR_NT_ROOT && btr_eof(btrn)) + return -E_BTR_EOF; + + if (btr_get_output_queue_size(btrn) > BTRN_MAX_PENDING) + return 0; + if (type == BTR_NT_ROOT) + return 1; + iqs = btr_get_input_queue_size(btrn); + if (iqs == 0) /* we have a parent, because not eof */ + return 0; + if (iqs < min_iqs && !btr_no_parent(btrn)) + return 0; return 1; } +/** + * Get the time of the first I/O for a buffer tree node. + * + * \param btrn The node whose I/O time should be obtained. + * \param tv Result pointer. + * + * Mainly useful for the time display of para_audiod. + */ void btr_get_node_start(struct btr_node *btrn, struct timeval *tv) { *tv = btrn->start; } + +/** + * Get the parent node of a buffer tree node. + * + * \param btrn The node whose parent should be returned. + * + * \a btrn must not be \p NULL. + * + * \return The parent of \a btrn, or \p NULL if \a btrn is the + * root node of the buffer tree. + */ +struct btr_node *btr_parent(struct btr_node *btrn) +{ + return btrn->parent; +}
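
With this change, btr_new_node() takes a single struct btr_node_description argument instead of four separate parameters, and btr_remove_node() takes the address of the node pointer so that it can reset it to NULL. The following is a minimal sketch of the new calling convention, not code from this commit: it assumes the description fields visible in the hunk above (name, parent, child, handler, context), the usual paraslash headers, and hypothetical node names.

	#include <stddef.h>

	#include "para.h"
	#include "buffer_tree.h"

	static void tree_example(void)
	{
		struct btr_node *root, *leaf;
		struct btr_node_description root_bnd = {
			.name = "example_root", /* hypothetical name */
		};
		struct btr_node_description leaf_bnd = {
			.name = "example_leaf", /* hypothetical name */
		};
		static const char greeting[] = "hello";

		root = btr_new_node(&root_bnd);
		leaf_bnd.parent = root;
		leaf = btr_new_node(&leaf_bnd);

		/* Feed a buffer that the tree must neither free nor modify. */
		btr_add_output_dont_free(greeting, sizeof(greeting) - 1, root);

		/* Removal now drains the input queue and NULLs the pointer. */
		btr_remove_node(&leaf);
		btr_remove_node(&root);
	}

Setting the child field in the description is how a node is inserted above an existing one: with both parent and child given, btr_new_node() links the new node in as an internal node, and with only child given it becomes the new root.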
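
The new btr_node_status() documentation says that most users call it from both their pre_select and post_select methods. A minimal sketch of such a post_select step for a leaf node follows; the helper name and the 1024-byte minimal input queue size are assumptions made for illustration.

	#include <stddef.h>

	#include "para.h"
	#include "buffer_tree.h"

	/* Hypothetical per-iteration work of a leaf node, e.g. a writer. */
	static int leaf_post_select(struct btr_node *btrn)
	{
		char *buf;
		size_t len;
		int ret;

		ret = btr_node_status(btrn, 1024, BTR_NT_LEAF);
		if (ret <= 0) /* error (e.g. -E_BTR_EOF) or nothing to do yet */
			return ret;
		/* Try to obtain at least 1024 contiguous bytes. */
		btr_merge(btrn, 1024);
		len = btr_next_buffer(btrn, &buf);
		/* ... write out or otherwise process len bytes at buf ... */
		btr_consume(btrn, len); /* drop the processed bytes */
		return 1;
	}

As documented above, btr_merge() is cheap when the parent uses a buffer pool, and btr_consume() may be called with fewer bytes than btr_next_buffer() returned; the remainder is returned again by the next call.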
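
btr_pool_get_buffers() describes the free space of the pool area as one or two iovecs, which pairs naturally with readv(), and btr_add_output_pool() allocates from the current write head, which is exactly where readv() stores the incoming data. The sketch below shows a root node filling its pool this way; the helper name and the plain blocking readv() call are assumptions, and data that crossed the wrap point is fed as two separate chunks because each pool buffer is taken contiguously from the write head.

	#include <sys/uio.h>
	#include <unistd.h>

	#include "para.h"
	#include "buffer_tree.h"

	/* Hypothetical root node helper: read from fd into the pool area. */
	static int fill_pool_from_fd(int fd, struct btr_pool *btrp,
			struct btr_node *btrn)
	{
		struct iovec iov[2];
		int iovcnt;
		ssize_t nbytes;

		iovcnt = btr_pool_get_buffers(btrp, iov);
		if (iovcnt == 0) /* pool area is full, try again later */
			return 0;
		nbytes = readv(fd, iov, iovcnt);
		if (nbytes <= 0)
			return (int)nbytes; /* EOF or read error */
		/* Feed the bytes just read to all children of btrn. */
		if ((size_t)nbytes > iov[0].iov_len) {
			btr_add_output_pool(btrp, iov[0].iov_len, btrn);
			btr_add_output_pool(btrp, nbytes - iov[0].iov_len, btrn);
		} else
			btr_add_output_pool(btrp, nbytes, btrn);
		return 1;
	}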