/* Copyright (C) 2009 Andre Noll <maan@tuebingen.mpg.de>, see file COPYING. */

/** \file buffer_tree.c Buffer tree and buffer pool implementations. */

#include "para.h"
#include "list.h"
#include "string.h"
#include "buffer_tree.h"
#include "error.h"
#include "sched.h"
/* whead = NULL means area full */
struct btr_pool {
	char *name;
	char *area_start;
	char *area_end;
	char *rhead;
	char *whead;
};

struct btr_buffer {
	char *buf;
	size_t size;
	/** The number of references to this buffer. */
	int refcount;
	/* NULL means no buffer pool but a malloced buffer. */
	struct btr_pool *pool;
	/* Only relevant if pool is NULL. */
	bool dont_free;
};
struct btr_buffer_reference {
	struct btr_buffer *btrb;
	size_t consumed;
	/* Each buffer reference belongs to the buffer queue list of some buffer tree node. */
	struct list_head node;
	size_t wrap_count;
};
struct btr_node {
	char *name;
	struct btr_node *parent;
	/* The position of this btr node in the buffer tree. */
	struct list_head node;
	/* The children nodes of this btr node are linked together in a list. */
	struct list_head children;
	/* Time of first data transfer. */
	struct timeval start;
	/**
	 * The input queue is a list of references to btr buffers. Each item on
	 * the list represents an input buffer which has not been completely
	 * used by this btr node.
	 */
	struct list_head input_queue;
	btr_command_handler execute;
	void *context;
};
/**
 * Create a new buffer pool.
 *
 * \param name The name of the new buffer pool.
 * \param area_size The size in bytes of the pool area.
 *
 * \return An opaque pointer to the newly created buffer pool. It must be
 * passed to btr_pool_free() when it is no longer needed, to deallocate all
 * resources.
 */
struct btr_pool *btr_pool_new(const char *name, size_t area_size)
{
	struct btr_pool *btrp;

	PARA_INFO_LOG("%s, %zu bytes\n", name, area_size);
	btrp = alloc(sizeof(*btrp));
	btrp->area_start = alloc(area_size);
	btrp->area_end = btrp->area_start + area_size;
	btrp->rhead = btrp->area_start;
	btrp->whead = btrp->area_start;
	btrp->name = para_strdup(name);
	return btrp;
}
/**
 * Deallocate resources used by a buffer pool.
 *
 * \param btrp A pointer obtained via btr_pool_new().
 */
void btr_pool_free(struct btr_pool *btrp)
{
	if (!btrp)
		return;
	free(btrp->area_start);
	free(btrp->name);
	free(btrp);
}
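/*
 * Usage sketch (not from the original source): the typical life cycle of a
 * buffer pool as seen by a node which produces data. The pool name, the 8K
 * area size, the node pointer "btrn" and the byte count "k" are placeholders.
 *
 *	struct btr_pool *btrp = btr_pool_new("example", 8 * 1024);
 *	char *buf;
 *	size_t n = btr_pool_get_buffer(btrp, &buf);
 *	// produce k <= n bytes into buf, then feed them to the children:
 *	btr_add_output_pool(btrp, k, btrn);
 *	// ... the children call btr_consume() as they use the data ...
 *	btr_pool_free(btrp);
 */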
/**
 * Return the size of the buffer pool area.
 *
 * \param btrp The buffer pool.
 *
 * \return The same value which was passed during creation time to
 * btr_pool_new().
 */
size_t btr_pool_size(struct btr_pool *btrp)
{
	return btrp->area_end - btrp->area_start;
}
static size_t btr_pool_filled(struct btr_pool *btrp)
{
	if (!btrp->whead)
		return btr_pool_size(btrp);
	if (btrp->rhead <= btrp->whead)
		return btrp->whead - btrp->rhead;
	return btr_pool_size(btrp) - (btrp->rhead - btrp->whead);
}
/**
 * Get the number of unused bytes in the buffer pool.
 *
 * \param btrp The pool.
 *
 * \return The number of bytes that can currently be allocated.
 *
 * Note that in general the returned number of bytes is not available as a
 * single contiguous buffer. Use btr_pool_available() to obtain the length of
 * the largest contiguous buffer that can currently be allocated from the
 * buffer pool area.
 */
size_t btr_pool_unused(struct btr_pool *btrp)
{
	return btr_pool_size(btrp) - btr_pool_filled(btrp);
}
/*
 * Return the maximal size available for one read. This is never larger than
 * the value returned by btr_pool_unused().
 */
static size_t btr_pool_available(struct btr_pool *btrp)
{
	if (!btrp->whead)
		return 0;
	if (btrp->rhead <= btrp->whead)
		return btrp->area_end - btrp->whead;
	return btrp->rhead - btrp->whead;
}
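/*
 * Worked example (added for illustration): assume a pool area of 100 bytes
 * with rhead at offset 30 and whead at offset 80. Then btr_pool_filled()
 * yields 80 - 30 = 50, btr_pool_unused() yields 100 - 50 = 50, but
 * btr_pool_available() yields only 20, the distance from whead to the end of
 * the area. In the wrapped case (whead at 10, rhead at 60), filled is
 * 100 - (60 - 10) = 50 and available equals rhead - whead = 50.
 */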
/**
 * Obtain the current write head.
 *
 * \param btrp The buffer pool.
 * \param result The write head is returned here.
 *
 * \return The maximal amount of bytes that may be written to the returned
 * buffer.
 */
size_t btr_pool_get_buffer(struct btr_pool *btrp, char **result)
{
	if (result)
		*result = btrp->whead;
	return btr_pool_available(btrp);
}
/**
 * Get references to buffers pointing to free space of the buffer pool area.
 *
 * \param btrp The buffer pool.
 * \param iov The scatter array.
 *
 * \return Zero if the buffer pool is full, one if the free space of the buffer
 * pool area is available as a single contiguous buffer, two if the free space
 * consists of two buffers. If this function returns the value n, then n
 * elements of \a iov are initialized.
 */
int btr_pool_get_buffers(struct btr_pool *btrp, struct iovec iov[2])
{
	size_t sz, unused;
	char *buf;

	sz = btr_pool_get_buffer(btrp, &buf);
	if (sz == 0)
		return 0;
	iov[0].iov_len = sz;
	iov[0].iov_base = buf;
	unused = btr_pool_unused(btrp);
	if (sz == unused)
		return 1;
	iov[1].iov_len = unused - sz;
	iov[1].iov_base = btrp->area_start;
	return 2;
}
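/*
 * Usage sketch (not from the original source): a receiver can fill the free
 * space of the pool with a single scatter read. The file descriptor "fd" and
 * the node "btrn" are placeholders.
 *
 *	struct iovec iov[2];
 *	int nvec = btr_pool_get_buffers(btrp, iov);
 *	if (nvec > 0) {
 *		ssize_t nbytes = readv(fd, iov, nvec);
 *		if (nbytes > 0)
 *			btr_add_output_pool(btrp, nbytes, btrn);
 *	}
 */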
/*
 * Mark a part of the buffer pool area as allocated.
 *
 * \param btrp The buffer pool.
 * \param size The number of bytes to be allocated.
 *
 * This is usually called after the caller wrote to the buffer obtained by
 * btr_pool_get_buffer().
 */
static void btr_pool_allocate(struct btr_pool *btrp, size_t size)
{
	char *end;

	if (size == 0)
		return;
	assert(size <= btr_pool_available(btrp));
	end = btrp->whead + size;
	assert(end <= btrp->area_end);
	if (end == btrp->area_end) {
		PARA_DEBUG_LOG("%s: end of pool area reached\n", btrp->name);
		end = btrp->area_start;
	}
	if (end == btrp->rhead) {
		PARA_DEBUG_LOG("%s btrp buffer full\n", btrp->name);
		end = NULL; /* buffer full */
	}
	btrp->whead = end;
}
static void btr_pool_deallocate(struct btr_pool *btrp, size_t size)
{
	char *end = btrp->rhead + size;

	if (size == 0)
		return;
	assert(end <= btrp->area_end);
	assert(size <= btr_pool_filled(btrp));
	if (end == btrp->area_end)
		end = btrp->area_start;
	if (!btrp->whead)
		btrp->whead = btrp->rhead;
	btrp->rhead = end;
	if (btrp->rhead == btrp->whead)
		btrp->rhead = btrp->whead = btrp->area_start;
}
#define FOR_EACH_CHILD(_tn, _btrn) list_for_each_entry((_tn), \
	&((_btrn)->children), node)
#define FOR_EACH_CHILD_SAFE(_tn, _tmp, _btrn) \
	list_for_each_entry_safe((_tn), (_tmp), &((_btrn)->children), node)

#define FOR_EACH_BUFFER_REF(_br, _btrn) \
	list_for_each_entry((_br), &(_btrn)->input_queue, node)
#define FOR_EACH_BUFFER_REF_SAFE(_br, _tmp, _btrn) \
	list_for_each_entry_safe((_br), (_tmp), &(_btrn)->input_queue, node)
/**
 * Create a new buffer tree node.
 *
 * \param bnd Specifies how to create the new node.
 *
 * \return A pointer to the newly allocated node.
 *
 * This function always succeeds (or calls exit()). The returned pointer must
 * be freed using btr_free_node() after the node has been removed from the
 * buffer tree via btr_remove_node().
 */
struct btr_node *btr_new_node(struct btr_node_description *bnd)
{
	struct btr_node *btrn = alloc(sizeof(*btrn));

	btrn->name = para_strdup(bnd->name);
	btrn->parent = bnd->parent;
	btrn->execute = bnd->handler;
	btrn->context = bnd->context;
	btrn->start.tv_sec = 0;
	btrn->start.tv_usec = 0;
	init_list_head(&btrn->children);
	init_list_head(&btrn->input_queue);
	if (!bnd->child) {
		if (bnd->parent) {
			list_add_tail(&btrn->node, &bnd->parent->children);
			PARA_INFO_LOG("new leaf node: %s (child of %s)\n",
				bnd->name, bnd->parent->name);
		} else
			PARA_INFO_LOG("added %s as btr root\n", bnd->name);
		goto out;
	}
	if (!bnd->parent) {
		assert(!bnd->child->parent);
		PARA_INFO_LOG("new root: %s (was %s)\n",
			bnd->name, bnd->child->name);
		list_add_tail(&bnd->child->node, &btrn->children);
		bnd->child->parent = btrn;
		goto out;
	}
	/* Both parent and child given: insert btrn between them. */
	list_add_tail(&btrn->node, &bnd->parent->children);
	list_move(&bnd->child->node, &btrn->children);
	bnd->child->parent = btrn;
	PARA_INFO_LOG("added %s as internal node\n", bnd->name);
out:
	return btrn;
}
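/*
 * Usage sketch (not from the original source): build a minimal two-node tree.
 * The names, the handler my_execute() and the context pointer are
 * placeholders; only the fields of struct btr_node_description referenced
 * above (name, parent, child, handler, context) are relied upon.
 *
 *	struct btr_node_description root_bnd = {.name = "receiver"};
 *	struct btr_node *root = btr_new_node(&root_bnd);
 *
 *	struct btr_node_description child_bnd = {
 *		.name = "filter",
 *		.parent = root,
 *		.handler = my_execute,
 *		.context = my_context,
 *	};
 *	struct btr_node *child = btr_new_node(&child_bnd);
 */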
/*
 * Allocate a new btr buffer.
 *
 * The freshly allocated buffer will have a zero refcount and will
 * not be associated with a btr pool.
 */
static struct btr_buffer *new_btrb(char *buf, size_t size)
{
	struct btr_buffer *btrb = zalloc(sizeof(*btrb));

	btrb->buf = buf;
	btrb->size = size;
	return btrb;
}
static void dealloc_buffer(struct btr_buffer *btrb)
{
	if (btrb->pool)
		btr_pool_deallocate(btrb->pool, btrb->size);
	else if (!btrb->dont_free)
		free(btrb->buf);
}
static struct btr_buffer_reference *get_first_input_br(struct btr_node *btrn)
{
	if (list_empty(&btrn->input_queue))
		return NULL;
	return list_first_entry(&btrn->input_queue,
		struct btr_buffer_reference, node);
}
/*
 * Deallocate the reference, release the resources if the refcount drops to zero.
 */
static void btr_drop_buffer_reference(struct btr_buffer_reference *br)
{
	struct btr_buffer *btrb = br->btrb;

	list_del(&br->node);
	free(br);
	btrb->refcount--;
	if (btrb->refcount == 0) {
		dealloc_buffer(btrb);
		free(btrb);
	}
}
static void add_btrb_to_children(struct btr_buffer *btrb,
		struct btr_node *btrn, size_t consumed)
{
	struct btr_node *ch;

	if (btrn->start.tv_sec == 0)
		btrn->start = *now;
	FOR_EACH_CHILD(ch, btrn) {
		struct btr_buffer_reference *br = zalloc(sizeof(*br));
		br->btrb = btrb;
		br->consumed = consumed;
		list_add_tail(&br->node, &ch->input_queue);
		btrb->refcount++;
		if (ch->start.tv_sec == 0)
			ch->start = *now;
	}
}
/**
 * Insert a malloced buffer into the buffer tree.
 *
 * \param buf The buffer to insert.
 * \param size The size of \a buf in bytes.
 * \param btrn Position in the buffer tree to create the output.
 *
 * This creates references to \a buf and adds these references to each child of
 * \a btrn. The buffer will be freed using standard free() once no buffer tree
 * node is referencing it any more.
 *
 * Note that this function must not be used if \a buf was obtained from a
 * buffer pool. Use btr_add_output_pool() in this case.
 */
void btr_add_output(char *buf, size_t size, struct btr_node *btrn)
{
	struct btr_buffer *btrb;

	assert(size != 0);
	if (list_empty(&btrn->children)) {
		free(buf);
		return;
	}
	btrb = new_btrb(buf, size);
	add_btrb_to_children(btrb, btrn, 0);
}
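/*
 * Usage sketch (not from the original source): a filter which writes its
 * output to a freshly malloced buffer hands ownership of that buffer to the
 * tree. The length "out_len" is a placeholder.
 *
 *	char *out = alloc(out_len);
 *	// ... fill out with out_len bytes of processed data ...
 *	btr_add_output(out, out_len, btrn);
 *	// do not free(out) here; it is freed when the last reference is dropped
 */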
/**
 * Insert a buffer into the buffer tree, non-freeing variant.
 *
 * \param buf See \ref btr_add_output().
 * \param size See \ref btr_add_output().
 * \param btrn See \ref btr_add_output().
 *
 * This is similar to btr_add_output() but additionally sets the \p dont_free
 * flag on \a buf. If the refcount for the buffer drops to zero, \a buf will
 * not be deallocated if this flag is set.
 *
 * The \p dont_free bit also prevents the children of \a btrn from modifying
 * the buffer contents inplace. Specifically, \ref btr_inplace_ok() returns
 * false if there is any buffer in the input queue with the \p dont_free bit
 * set.
 */
void btr_add_output_dont_free(const char *buf, size_t size, struct btr_node *btrn)
{
	struct btr_buffer *btrb;

	assert(size != 0);
	if (list_empty(&btrn->children))
		return;
	btrb = new_btrb((char *)buf, size);
	btrb->dont_free = true;
	add_btrb_to_children(btrb, btrn, 0);
}
/**
 * Feed data to child nodes of a buffer tree node.
 *
 * \param btrp The buffer pool.
 * \param size The number of bytes to be allocated and fed to each child.
 * \param btrn The node whose children are to be fed.
 *
 * This function allocates the given number of bytes from the buffer pool area,
 * starting at the current value of the write head, and creates buffer
 * references to the resulting part of the buffer pool area, one for each child
 * of \a btrn. The references are then fed into the input queue of each child.
 */
void btr_add_output_pool(struct btr_pool *btrp, size_t size,
		struct btr_node *btrn)
{
	struct btr_buffer *btrb;
	char *buf;
	size_t avail;

	assert(size != 0);
	if (list_empty(&btrn->children))
		return;
	avail = btr_pool_get_buffer(btrp, &buf);
	assert(avail >= size);
	btr_pool_allocate(btrp, size);
	btrb = new_btrb(buf, size);
	btrb->pool = btrp;
	add_btrb_to_children(btrb, btrn, 0);
}
/**
 * Copy data to the write head of a buffer pool and feed it to all children nodes.
 *
 * \param src The source buffer.
 * \param n The size of the source buffer in bytes.
 * \param btrp The destination buffer pool.
 * \param btrn Add the data as output of this node.
 *
 * This is expensive. The caller must make sure the data fits into the buffer
 * pool area.
 */
void btr_copy(const void *src, size_t n, struct btr_pool *btrp,
		struct btr_node *btrn)
{
	char *buf;
	size_t sz, copy;

	if (n == 0)
		return;
	assert(n <= btr_pool_unused(btrp));
	sz = btr_pool_get_buffer(btrp, &buf);
	copy = PARA_MIN(sz, n);
	memcpy(buf, src, copy);
	btr_add_output_pool(btrp, copy, btrn);
	if (copy == n)
		return;
	sz = btr_pool_get_buffer(btrp, &buf);
	assert(sz >= n - copy);
	memcpy(buf, src + copy, n - copy);
	btr_add_output_pool(btrp, n - copy, btrn);
}
static void btr_pushdown_br(struct btr_buffer_reference *br, struct btr_node *btrn)
{
	add_btrb_to_children(br->btrb, btrn, br->consumed);
	btr_drop_buffer_reference(br);
}
/**
 * Feed all buffer references of the input queue through the output channel.
 *
 * \param btrn The node whose buffer references should be pushed down.
 *
 * This function is useful for filters that do not change the contents of the
 * buffers at all, like the wav filter or the amp filter if no amplification
 * was specified. This function is rather cheap.
 *
 * \sa \ref btr_pushdown_one().
 */
void btr_pushdown(struct btr_node *btrn)
{
	struct btr_buffer_reference *br, *tmp;

	FOR_EACH_BUFFER_REF_SAFE(br, tmp, btrn)
		btr_pushdown_br(br, btrn);
}
/**
 * Feed the next buffer of the input queue through the output channel.
 *
 * \param btrn The node whose first input queue buffer should be pushed down.
 *
 * This works like \ref btr_pushdown() but pushes down only one buffer
 * reference.
 */
void btr_pushdown_one(struct btr_node *btrn)
{
	struct btr_buffer_reference *br;

	if (list_empty(&btrn->input_queue))
		return;
	br = list_first_entry(&btrn->input_queue, struct btr_buffer_reference, node);
	btr_pushdown_br(br, btrn);
}
/*
 * Find out whether a node is a leaf node.
 *
 * \param btrn The node to check.
 *
 * \return True if this node has no children. False otherwise.
 */
static bool btr_no_children(struct btr_node *btrn)
{
	return list_empty(&btrn->children);
}
/**
 * Find out whether a node is an orphan.
 *
 * \param btrn The buffer tree node.
 *
 * \return True if \a btrn has no parent.
 *
 * This function returns true for the root node and false for any other node.
 *
 * After a (non-leaf) node was removed from the tree, the function
 * returns true for all of its former child nodes.
 */
bool btr_no_parent(struct btr_node *btrn)
{
	return !btrn->parent;
}
/**
 * Find out whether it is OK to change an input buffer.
 *
 * \param btrn The buffer tree node to check.
 *
 * This is used by filters that produce exactly the same amount of output as
 * there is input. The amp filter which multiplies each sample by some number
 * is an example of such a filter. If there are no other nodes in the buffer
 * tree that read the same input stream (i.e. if \a btrn has no siblings), a
 * node may modify its input buffer directly and push down the modified buffer
 * to its children, thereby avoiding the allocation of a possibly large
 * additional buffer.
 *
 * Since the buffer tree may change at any time, this function should be called
 * during each post_monitor call.
 *
 * \return True if \a btrn has no siblings.
 */
bool btr_inplace_ok(struct btr_node *btrn)
{
	struct btr_buffer_reference *br;

	FOR_EACH_BUFFER_REF(br, btrn) {
		struct btr_buffer *btrb = br->btrb;
		if (btrb->refcount > 1)
			return false;
		if (btrb->dont_free)
			return false;
	}
	return true;
}
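/*
 * Usage sketch (not from the original source, simplified): an amp-like filter
 * may modify its input buffer directly when no sibling reads the same data.
 * apply_gain() is a placeholder for the actual sample processing.
 *
 *	char *buf;
 *	size_t len = btr_next_buffer(btrn, &buf);
 *
 *	if (len > 0 && btr_inplace_ok(btrn)) {
 *		apply_gain(buf, len);	// modify the samples in place
 *		btr_pushdown_one(btrn);	// forward the modified reference
 *	}
 */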
static inline size_t br_available_bytes(struct btr_buffer_reference *br)
{
	return br->btrb->size - br->consumed;
}
static size_t btr_get_buffer_by_reference(struct btr_buffer_reference *br, char **buf)
{
	*buf = br->btrb->buf + br->consumed;
	return br_available_bytes(br);
}
/**
 * Obtain the next buffer of the input queue, omitting data.
 *
 * \param btrn The node whose input queue is to be queried.
 * \param omit Number of bytes to be omitted.
 * \param bufp Result pointer. It is OK to pass \p NULL here.
 *
 * If a buffer tree node needs more input data but cannot consume the data it
 * already has (because it might be needed again later) this function can be
 * used instead of btr_next_buffer() to get a reference to the buffer obtained
 * by skipping the given number of bytes. Skipped input bytes are not consumed.
 *
 * With a zero \a omit argument, this function is equivalent to \ref
 * btr_next_buffer().
 *
 * \return Number of bytes in \a bufp. If at most \a omit bytes are available
 * in the input queue of the buffer tree node pointed to by \a btrn, the
 * function returns zero and the value of \a bufp is undefined.
 */
size_t btr_next_buffer_omit(struct btr_node *btrn, size_t omit, char **bufp)
{
	struct btr_buffer_reference *br;
	size_t wrap_count, sz, rv = 0;
	char *buf, *result = NULL;

	br = get_first_input_br(btrn);
	if (!br)
		return 0;
	wrap_count = br->wrap_count;
	if (wrap_count > 0) { /* we have a wrap buffer */
		sz = btr_get_buffer_by_reference(br, &buf);
		if (sz > omit) { /* and it's big enough */
			result = buf + omit;
			rv = sz - omit;
			/*
			 * Wrap buffers are allocated by malloc(), so the next
			 * buffer ref will not align nicely, so we return the
			 * tail of the wrap buffer.
			 */
			goto out;
		}
		/*
		 * The next wrap_count bytes exist twice, in the wrap buffer
		 * and as a buffer reference in the buffer tree pool.
		 */
		omit += wrap_count;
	}
	/*
	 * For buffer tree pools, the buffers in the list align, i.e. the next
	 * buffer in the list starts directly at the end of its predecessor. In
	 * this case we merge adjacent buffers and return one larger buffer
	 * instead.
	 */
	FOR_EACH_BUFFER_REF(br, btrn) {
		sz = btr_get_buffer_by_reference(br, &buf);
		if (result) {
			if (result + rv != buf)
				goto out;
			rv += sz;
		} else if (sz > omit) {
			result = buf + omit;
			rv = sz - omit;
		} else
			omit -= sz;
	}
	if (!result)
		return 0;
out:
	if (bufp)
		*bufp = result;
	return rv;
}
/**
 * Obtain the next buffer of the input queue of a buffer tree node.
 *
 * \param btrn The node whose input queue is to be queried.
 * \param bufp Result pointer.
 *
 * \return The number of bytes that can be read from the returned buffer.
 *
 * Calling this function is equivalent to calling \ref
 * btr_next_buffer_omit() with an \a omit value of zero.
 */
size_t btr_next_buffer(struct btr_node *btrn, char **bufp)
{
	return btr_next_buffer_omit(btrn, 0, bufp);
}
/**
 * Deallocate the given number of bytes from the input queue.
 *
 * \param btrn The buffer tree node.
 * \param numbytes The number of bytes to be deallocated.
 *
 * This function must be used to get rid of existing buffer references in the
 * node's input queue. If no references to a buffer remain, the underlying
 * buffers are either freed (in the non-buffer pool case) or the read head of
 * the buffer pool is advanced.
 *
 * Note that \a numbytes may be smaller than the buffer size. In this case the
 * buffer is not deallocated and subsequent calls to btr_next_buffer() return
 * the remaining part of the buffer.
 */
void btr_consume(struct btr_node *btrn, size_t numbytes)
{
	struct btr_buffer_reference *br, *tmp;
	size_t sz;

	if (numbytes == 0)
		return;
	br = get_first_input_br(btrn);
	assert(br);
	if (br->wrap_count == 0) {
		/*
		 * No wrap buffer. Drop buffer references whose buffer
		 * has been fully used.
		 */
		FOR_EACH_BUFFER_REF_SAFE(br, tmp, btrn) {
			if (br->consumed + numbytes <= br->btrb->size) {
				br->consumed += numbytes;
				if (br->consumed == br->btrb->size)
					btr_drop_buffer_reference(br);
				return;
			}
			numbytes -= br->btrb->size - br->consumed;
			btr_drop_buffer_reference(br);
		}
		assert(false);
	}
	/*
	 * We have a wrap buffer, consume from it. If in total, i.e. including
	 * previous calls to btr_consume(), less than wrap_count has been
	 * consumed, there's nothing more we can do.
	 *
	 * Otherwise we drop the wrap buffer and consume from subsequent
	 * buffers of the input queue the correct amount of bytes. This is the
	 * total number of bytes that have been consumed from the wrap buffer.
	 */
	PARA_DEBUG_LOG("consuming %zu/%zu bytes from wrap buffer\n", numbytes,
		br_available_bytes(br));
	assert(numbytes <= br_available_bytes(br));
	if (br->consumed + numbytes < br->wrap_count) {
		br->consumed += numbytes;
		return;
	}
	PARA_DEBUG_LOG("dropping wrap buffer (%zu bytes)\n", br->btrb->size);
	/* get rid of the wrap buffer */
	sz = br->consumed + numbytes;
	btr_drop_buffer_reference(br);
	return btr_consume(btrn, sz);
}
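/*
 * Usage sketch (not from the original source): the canonical consumer loop of
 * a leaf node such as a writer. process_data() is a placeholder which returns
 * the number of bytes it actually used.
 *
 *	for (;;) {
 *		char *buf;
 *		size_t len = btr_next_buffer(btrn, &buf), used;
 *
 *		if (len == 0)
 *			break;
 *		used = process_data(buf, len);
 *		if (used == 0)
 *			break;	// need more data, try again later
 *		btr_consume(btrn, used);
 *	}
 */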
/**
 * Clear the input queue of a buffer tree node.
 *
 * \param btrn The node whose input queue should be cleared.
 */
void btr_drain(struct btr_node *btrn)
{
	struct btr_buffer_reference *br, *tmp;

	FOR_EACH_BUFFER_REF_SAFE(br, tmp, btrn)
		btr_drop_buffer_reference(br);
}
static void btr_free_node(struct btr_node *btrn)
{
	if (!btrn)
		return;
	free(btrn->name);
	free(btrn);
}
/**
 * Remove a node from a buffer tree.
 *
 * \param btrnp Determines the node to remove.
 *
 * This orphans all children of the node given by \a btrnp and removes this
 * node from the child list of its parent. Moreover, the input queue is flushed
 * and the node pointer given by \a btrnp is set to \p NULL.
 *
 * \sa \ref btr_splice_out_node.
 */
void btr_remove_node(struct btr_node **btrnp)
{
	struct btr_node *ch;
	struct btr_node *btrn;

	btrn = *btrnp;
	if (!btrn)
		return;
	PARA_INFO_LOG("removing btr node %s from buffer tree\n", btrn->name);
	FOR_EACH_CHILD(ch, btrn)
		ch->parent = NULL;
	btr_drain(btrn);
	if (btrn->parent)
		list_del(&btrn->node);
	btr_free_node(btrn);
	*btrnp = NULL;
}
/**
 * Return the number of available input bytes of a buffer tree node.
 *
 * \param btrn The node whose input size should be computed.
 *
 * \return The total number of bytes available in the node's input
 * queue.
 *
 * This simply iterates over all buffer references in the input queue and
 * returns the sum of the sizes of all references.
 */
size_t btr_get_input_queue_size(struct btr_node *btrn)
{
	struct btr_buffer_reference *br;
	size_t size = 0, wrap_consumed = 0;

	FOR_EACH_BUFFER_REF(br, btrn) {
		if (br->wrap_count != 0) {
			wrap_consumed = br->consumed;
			continue;
		}
		size += br_available_bytes(br);
	}
	assert(wrap_consumed <= size);
	size -= wrap_consumed;
	return size;
}
/**
 * Remove a node from the buffer tree, reconnecting parent and children.
 *
 * \param btrnp The node to splice out.
 *
 * This function is used by buffer tree nodes that do not exist during the
 * whole lifetime of the buffer tree. Unlike btr_remove_node(), calling
 * btr_splice_out_node() does not split the tree into disconnected components
 * but reconnects the buffer tree by making all child nodes of \a btrn children
 * of the parent of \a btrn.
 */
void btr_splice_out_node(struct btr_node **btrnp)
{
	struct btr_node *btrn = *btrnp, *ch, *tmp;

	assert(btrn);
	PARA_NOTICE_LOG("splicing out %s\n", btrn->name);
	btr_pushdown(btrn);
	if (btrn->parent)
		list_del(&btrn->node);
	FOR_EACH_CHILD_SAFE(ch, tmp, btrn) {
		PARA_INFO_LOG("parent(%s): %s\n", ch->name,
			btrn->parent ? btrn->parent->name : "NULL");
		ch->parent = btrn->parent;
		if (btrn->parent)
			list_move(&ch->node, &btrn->parent->children);
		else
			list_del(&ch->node);
	}
	assert(list_empty(&btrn->children));
	btr_free_node(btrn);
	*btrnp = NULL;
}
/**
 * Return the number of queued output bytes of a buffer tree node.
 *
 * \param btrn The node whose output queue size should be computed.
 *
 * \return This function iterates over all children of the given node and
 * returns the size of the largest input queue.
 */
size_t btr_get_output_queue_size(struct btr_node *btrn)
{
	size_t max_size = 0;
	struct btr_node *ch;

	FOR_EACH_CHILD(ch, btrn) {
		size_t size = btr_get_input_queue_size(ch);
		max_size = PARA_MAX(max_size, size);
	}
	return max_size;
}
/**
 * Execute an inter-node command on the given node or on a parent node.
 *
 * \param btrn The node at which to start looking.
 * \param command The command to execute.
 * \param value_result Additional arguments and result value.
 *
 * This function traverses the buffer tree from \a btrn upwards and looks for
 * the first node that understands \a command. On this node \a command is
 * executed, and the result is stored in \a value_result.
 *
 * \return \p -ENOTSUP if no parent node of \a btrn understands \a command.
 * Otherwise the return value of the command handler is returned.
 *
 * \sa \ref receiver::execute, \ref filter::execute, \ref writer::execute.
 */
int btr_exec_up(struct btr_node *btrn, const char *command, char **value_result)
{
	int ret;

	for (; btrn; btrn = btrn->parent) {
		if (!btrn->execute)
			continue;
		PARA_INFO_LOG("executing %s on %s\n", command, btrn->name);
		ret = btrn->execute(btrn, command, value_result);
		if (ret == -ERRNO_TO_PARA_ERROR(ENOTSUP))
			continue;
		if (ret < 0)
			return ret;
		if (value_result && *value_result)
			PARA_INFO_LOG("%s(%s): %s\n", command, btrn->name,
				*value_result);
		return 1;
	}
	return -ERRNO_TO_PARA_ERROR(ENOTSUP);
}
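/*
 * Usage sketch (not from the original source): query an ancestor node for a
 * piece of metadata. The command string is only an example; which commands
 * are understood depends on the execute handlers of the parent nodes. The
 * handlers typically return a malloced string which the caller must free.
 *
 *	char *result = NULL;
 *	int ret = btr_exec_up(btr_parent(btrn), "sample_rate", &result);
 *	if (ret >= 0) {
 *		// parse result ...
 *		free(result);
 *	}
 */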
/**
 * Obtain the context of a buffer tree node.
 *
 * \param btrn The node whose context pointer should be returned.
 *
 * \return A pointer to the \a context address specified at node creation time.
 *
 * \sa \ref btr_new_node(), struct \ref btr_node_description.
 */
void *btr_context(struct btr_node *btrn)
{
	return btrn->context;
}
static bool need_buffer_pool_merge(struct btr_node *btrn)
{
	struct btr_buffer_reference *br = get_first_input_br(btrn);

	if (!br)
		return false;
	if (br->wrap_count != 0)
		return true;
	if (br->btrb->pool)
		return true;
	return false;
}
static void merge_input_pool(struct btr_node *btrn, size_t dest_size)
{
	struct btr_buffer_reference *br, *wbr = NULL;
	int num_refs; /* including wrap buffer */
	char *buf, *buf1 = NULL, *buf2 = NULL;
	size_t sz, sz1 = 0, sz2 = 0, wb_consumed = 0;

	br = get_first_input_br(btrn);
	if (!br || br_available_bytes(br) >= dest_size)
		return;
	num_refs = 0;
	FOR_EACH_BUFFER_REF(br, btrn) {
		num_refs++;
		sz = btr_get_buffer_by_reference(br, &buf);
		if (sz == 0)
			break;
		if (br->wrap_count != 0) {
			assert(!wbr);
			assert(num_refs == 1);
			wbr = br;
			if (sz >= dest_size)
				return;
			wb_consumed = br->consumed;
			continue;
		}
		if (!buf1) {
			buf1 = buf;
			sz1 = sz;
			goto next;
		}
		if (buf1 + sz1 == buf) {
			sz1 += sz;
			goto next;
		}
		if (!buf2) {
			buf2 = buf;
			sz2 = sz;
			goto next;
		}
		assert(buf2 + sz2 == buf);
		sz2 += sz;
next:
		if (sz1 + sz2 >= dest_size + wb_consumed)
			break;
	}
	if (!buf2) /* nothing to do */
		return;
	assert(buf1 && sz2 > 0);
	/*
	 * If the second buffer is large, we only take the first part of it to
	 * avoid having to memcpy() huge buffers.
	 */
	sz2 = PARA_MIN(sz2, (size_t)(64 * 1024));
	if (!wbr) {
		/* Make a new wrap buffer combining buf1 and buf2. */
		sz = sz1 + sz2;
		buf = alloc(sz);
		PARA_DEBUG_LOG("merging input buffers: (%p:%zu, %p:%zu) -> %p:%zu\n",
			buf1, sz1, buf2, sz2, buf, sz);
		memcpy(buf, buf1, sz1);
		memcpy(buf + sz1, buf2, sz2);
		br = zalloc(sizeof(*br));
		br->btrb = new_btrb(buf, sz);
		br->btrb->refcount = 1;
		/* This is a wrap buffer */
		br->wrap_count = sz1;
		para_list_add(&br->node, &btrn->input_queue);
		return;
	}
	/*
	 * We already have a wrap buffer, but it is too small. It might be
	 * partially used.
	 */
	if (wbr->wrap_count == sz1 && wbr->btrb->size >= sz1 + sz2) /* nothing we can do about it */
		return;
	sz = sz1 + sz2 - wbr->btrb->size; /* amount of new data */
	PARA_DEBUG_LOG("increasing wrap buffer %zu -> %zu\n", wbr->btrb->size,
		wbr->btrb->size + sz);
	wbr->btrb->size += sz;
	wbr->btrb->buf = para_realloc(wbr->btrb->buf, wbr->btrb->size);
	/* copy the new data to the end of the reallocated buffer */
	memcpy(wbr->btrb->buf + wbr->btrb->size - sz, buf2 + sz2 - sz, sz);
}
/*
 * Merge the first two input buffers into one.
 *
 * This is a rather expensive operation.
 *
 * \return The number of buffers that have been available (zero, one or two).
 */
static int merge_input(struct btr_node *btrn)
{
	struct btr_buffer_reference *brs[2], *br;
	char *bufs[2], *buf;
	size_t szs[2], sz;
	int i;

	if (list_empty(&btrn->input_queue))
		return 0;
	if (list_is_singular(&btrn->input_queue))
		return 1;
	i = 0;
	/* get references to the first two buffers */
	FOR_EACH_BUFFER_REF(br, btrn) {
		brs[i] = br;
		szs[i] = btr_get_buffer_by_reference(brs[i], bufs + i);
		i++;
		if (i == 2)
			break;
	}
	/* make a new btrb that combines the two buffers and a br to it. */
	sz = szs[0] + szs[1];
	buf = alloc(sz);
	PARA_DEBUG_LOG("%s: memory merging input buffers: (%zu, %zu) -> %zu\n",
		btrn->name, szs[0], szs[1], sz);
	memcpy(buf, bufs[0], szs[0]);
	memcpy(buf + szs[0], bufs[1], szs[1]);

	br = zalloc(sizeof(*br));
	br->btrb = new_btrb(buf, sz);
	br->btrb->refcount = 1;

	/* replace the first two refs by the new one */
	btr_drop_buffer_reference(brs[0]);
	btr_drop_buffer_reference(brs[1]);
	para_list_add(&br->node, &btrn->input_queue);
	return 2;
}
/**
 * Combine input queue buffers.
 *
 * \param btrn The buffer tree node whose input should be merged.
 * \param dest_size Stop merging if a buffer of at least this size exists.
 *
 * Used to combine as many buffers as needed into a single buffer whose size is
 * at least \a dest_size. This function is rather cheap in case the parent node
 * uses buffer pools and rather expensive otherwise.
 *
 * Note that if less than \a dest_size bytes are available in total, this
 * function does nothing and subsequent calls to btr_next_buffer() will still
 * return a buffer size less than \a dest_size.
 */
void btr_merge(struct btr_node *btrn, size_t dest_size)
{
	if (need_buffer_pool_merge(btrn))
		return merge_input_pool(btrn, dest_size);
	for (;;) {
		char *buf;
		size_t len = btr_next_buffer(btrn, &buf);
		if (len >= dest_size)
			return;
		PARA_DEBUG_LOG("input size = %zu < %zu = dest\n", len, dest_size);
		if (merge_input(btrn) < 2)
			return;
	}
}
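/*
 * Usage sketch (not from the original source): a decoder which needs a fixed
 * number of contiguous header bytes merges first and bails out if not enough
 * data has arrived yet. HEADER_SIZE and parse_header() are placeholders.
 *
 *	char *buf;
 *	size_t len;
 *
 *	btr_merge(btrn, HEADER_SIZE);
 *	len = btr_next_buffer(btrn, &buf);
 *	if (len < HEADER_SIZE)
 *		return 0;	// wait for more input
 *	parse_header(buf, HEADER_SIZE);
 *	btr_consume(btrn, HEADER_SIZE);
 */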
static bool btr_eof(struct btr_node *btrn)
{
	char *buf;
	size_t len = btr_next_buffer(btrn, &buf);

	return (len == 0 && btr_no_parent(btrn));
}
static void log_tree_recursively(struct btr_node *btrn, int loglevel, int depth)
{
	struct btr_node *ch;
	const char spaces[] = "                ", *space = spaces + 16 - depth;

	if (depth > 16)
		return;
	para_log(loglevel, "%s%s\n", space, btrn->name);
	FOR_EACH_CHILD(ch, btrn)
		log_tree_recursively(ch, loglevel, depth + 1);
}
/**
 * Write the current buffer (sub-)tree to the log.
 *
 * \param btrn Start logging at this node.
 * \param loglevel Set severity with which the tree should be logged.
 */
void btr_log_tree(struct btr_node *btrn, int loglevel)
{
	return log_tree_recursively(btrn, loglevel, 0);
}
/**
 * Find the node with the given name in the buffer tree.
 *
 * \param name The name of the node to search.
 * \param root Where to start the search.
 *
 * \return A pointer to the node with the given name on success. If \a name is
 * \p NULL, the function returns \a root. If there is no node with the given
 * name, \p NULL is returned.
 */
struct btr_node *btr_search_node(const char *name, struct btr_node *root)
{
	struct btr_node *ch;

	if (!name)
		return root;
	if (!strcmp(root->name, name))
		return root;
	FOR_EACH_CHILD(ch, root) {
		struct btr_node *result = btr_search_node(name, ch);
		if (result)
			return result;
	}
	return NULL;
}
/** 640K ought to be enough for everybody ;) */
#define BTRN_MAX_PENDING (96 * 1024)
/**
 * Return the current state of a buffer tree node.
 *
 * \param btrn The node whose state should be queried.
 * \param min_iqs The minimal input queue size.
 * \param type The supposed type of \a btrn.
 *
 * Most users of the buffer tree subsystem call this function from both
 * their ->pre_monitor() and ->post_monitor() methods.
 *
 * \return Negative if an error condition was detected, zero if there
 * is nothing to do and positive otherwise.
 *
 * Examples:
 *
 * - If a non-root node has no parent and an empty input queue, the function
 * returns \p -E_BTR_EOF. Similarly, if a non-leaf node has no children, \p
 * -E_BTR_NO_CHILD is returned.
 *
 * - If fewer than \a min_iqs bytes are available in the input queue and no
 * EOF condition was detected, the function returns zero.
 *
 * - If there's plenty of data left in the input queue of the children of \a
 * btrn, the function also returns zero in order to bound the memory usage of
 * the buffer tree.
 */
int btr_node_status(struct btr_node *btrn, size_t min_iqs,
		enum btr_node_type type)
{
	size_t iqs;

	assert(btrn);
	if (type != BTR_NT_LEAF && btr_no_children(btrn))
		return -E_BTR_NO_CHILD;
	if (type != BTR_NT_ROOT && btr_eof(btrn))
		return -E_BTR_EOF;

	if (btr_get_output_queue_size(btrn) > BTRN_MAX_PENDING)
		return 0;
	if (type == BTR_NT_ROOT)
		return 1;
	iqs = btr_get_input_queue_size(btrn);
	if (iqs == 0) /* we have a parent, because not eof */
		return 0;
	if (iqs < min_iqs && !btr_no_parent(btrn))
		return 0;
	return 1;
}
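/*
 * Usage sketch (not from the original source): the post_monitor method of an
 * internal (filter-type) node typically starts like this. The node type
 * constant BTR_NT_INTERNAL and the function name are assumptions; only the
 * return value convention documented above is relied upon.
 *
 *	static int my_filter_post_monitor(struct btr_node *btrn)
 *	{
 *		int ret = btr_node_status(btrn, 1, BTR_NT_INTERNAL);
 *
 *		if (ret <= 0)
 *			return ret;	// error, or nothing to do yet
 *		// consume input via btr_next_buffer()/btr_consume() and
 *		// produce output via btr_add_output() here
 *		return 1;
 *	}
 */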
/**
 * Get the time of the first I/O for a buffer tree node.
 *
 * \param btrn The node whose I/O time should be obtained.
 * \param tv Result pointer.
 *
 * Mainly useful for the time display of para_audiod.
 */
void btr_get_node_start(struct btr_node *btrn, struct timeval *tv)
{
	*tv = btrn->start;
}
/**
 * Get the parent node of a buffer tree node.
 *
 * \param btrn The node whose parent should be returned.
 *
 * \a btrn must not be \p NULL.
 *
 * \return The parent of \a btrn, or \p NULL if \a btrn is the
 * root node of the buffer tree.
 */
struct btr_node *btr_parent(struct btr_node *btrn)
{
	return btrn->parent;
}