#include <regex.h>
#include <stdbool.h>
#include <assert.h>

#include "para.h"
#include "list.h"
#include "string.h"
#include "buffer_tree.h"
#include "error.h"
#include "sched.h"

struct btr_pool {
	char *name;
	/* The memory area managed by this pool. */
	char *area_start;
	char *area_end;
	/* The current read head. */
	char *rhead;
	/* The current write head. whead = NULL means area full. */
	char *whead;
};

struct btr_buffer {
	char *buf;
	size_t size;
	/** The number of references to this buffer. */
	int refcount;
	/* NULL means no buffer pool but a malloced buffer. */
	struct btr_pool *pool;
};

struct btr_buffer_reference {
	struct btr_buffer *btrb;
	size_t consumed;
	/* Each buffer reference belongs to the buffer queue list of some buffer tree node. */
	struct list_head node;
	size_t wrap_count;
};

struct btr_node {
	char *name;
	struct btr_node *parent;
	/* The position of this btr node in the buffer tree. */
	struct list_head node;
	/* The children nodes of this btr node are linked together in a list. */
	struct list_head children;
	/* Time of first data transfer. */
	struct timeval start;
	/**
	 * The input queue is a list of references to btr buffers. Each item on
	 * the list represents an input buffer which has not been completely
	 * used by this btr node.
	 */
	struct list_head input_queue;
	btr_command_handler execute;
	void *context;
};
/**
 * Create a new buffer pool.
 *
 * \param name The name of the new buffer pool.
 * \param area_size The size in bytes of the pool area.
 *
 * \return An opaque pointer to the newly created buffer pool. It must be
 * passed to btr_pool_free() after it is no longer used to deallocate all
 * resources.
 */
struct btr_pool *btr_pool_new(const char *name, size_t area_size)
{
	struct btr_pool *btrp;

	PARA_INFO_LOG("%s, %zu bytes\n", name, area_size);
	btrp = para_malloc(sizeof(*btrp));
	btrp->area_start = para_malloc(area_size);
	btrp->area_end = btrp->area_start + area_size;
	btrp->rhead = btrp->area_start;
	btrp->whead = btrp->area_start;
	btrp->name = para_strdup(name);
	return btrp;
}
/**
 * Deallocate resources used by a buffer pool.
 *
 * \param btrp A pointer obtained via btr_pool_new().
 */
void btr_pool_free(struct btr_pool *btrp)
{
	if (!btrp)
		return;
	free(btrp->area_start);
	free(btrp->name);
	free(btrp);
}
/**
 * Return the size of the buffer pool area.
 *
 * \param btrp The buffer pool.
 *
 * \return The same value which was passed during creation time to
 * btr_pool_new().
 */
size_t btr_pool_size(struct btr_pool *btrp)
{
	return btrp->area_end - btrp->area_start;
}
size_t btr_pool_filled(struct btr_pool *btrp)
{
	if (!btrp->whead) /* whead == NULL means the area is full */
		return btr_pool_size(btrp);
	if (btrp->rhead <= btrp->whead)
		return btrp->whead - btrp->rhead;
	return btr_pool_size(btrp) - (btrp->rhead - btrp->whead);
}
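
/*
 * Worked example (illustrative numbers, not taken from real code): for a
 * pool of size 1000 with rhead at offset 700 and whead at offset 200, the
 * filled part wraps around the end of the area, so btr_pool_filled()
 * returns 1000 - (700 - 200) = 500. With rhead at 200 and whead at 700
 * nothing wraps and the result is simply 700 - 200 = 500.
 */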
/**
 * Get the number of unused bytes in the buffer pool.
 *
 * \param btrp The pool.
 *
 * \return The number of bytes that can currently be allocated.
 *
 * Note that in general the returned number of bytes is not available as a
 * single contiguous buffer. Use btr_pool_available() to obtain the length of
 * the largest contiguous buffer that can currently be allocated from the
 * buffer pool.
 */
size_t btr_pool_unused(struct btr_pool *btrp)
{
	return btr_pool_size(btrp) - btr_pool_filled(btrp);
}
/*
 * Return the maximal size available for one read. This may be smaller than
 * the value returned by btr_pool_unused().
 */
size_t btr_pool_available(struct btr_pool *btrp)
{
	if (!btrp->whead) /* area full */
		return 0;
	if (btrp->rhead <= btrp->whead)
		return btrp->area_end - btrp->whead;
	return btrp->rhead - btrp->whead;
}
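
/*
 * Example (made-up offsets): with pool size 1000, rhead = 100 and
 * whead = 900, btr_pool_unused() reports 200 free bytes, but only the 100
 * bytes between whead and the end of the area are contiguous, so
 * btr_pool_available() returns 100. The 100 bytes at the start of the area
 * become available after the write head wraps.
 */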
/**
 * Obtain the current write head.
 *
 * \param btrp The buffer pool.
 * \param result The write head is returned here.
 *
 * \return The maximal amount of bytes that may be written to the returned
 * buffer.
 */
size_t btr_pool_get_buffer(struct btr_pool *btrp, char **result)
{
	*result = btrp->whead;
	return btr_pool_available(btrp);
}
/**
 * Mark a part of the buffer pool area as allocated.
 *
 * \param btrp The buffer pool.
 * \param size The amount of bytes to be allocated.
 *
 * This is usually called after the caller wrote to the buffer obtained by
 * btr_pool_get_buffer().
 */
static void btr_pool_allocate(struct btr_pool *btrp, size_t size)
{
	char *end;

	if (size == 0)
		return;
	assert(size <= btr_pool_available(btrp));
	end = btrp->whead + size;
	assert(end <= btrp->area_end);
	if (end == btrp->area_end) {
		PARA_DEBUG_LOG("%s: end of pool area reached\n", btrp->name);
		end = btrp->area_start;
	}
	if (end == btrp->rhead) {
		PARA_DEBUG_LOG("%s btrp buffer full\n", btrp->name);
		end = NULL; /* buffer full */
	}
	btrp->whead = end;
}
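
/*
 * Usage sketch (hypothetical; fd is an assumed open file descriptor): a
 * writer fills the pool area in place and then marks the written part as
 * allocated, pairing btr_pool_get_buffer() with btr_pool_allocate():
 *
 *	char *buf;
 *	size_t n = btr_pool_get_buffer(btrp, &buf);
 *	ssize_t ret = read(fd, buf, n);
 *	if (ret > 0)
 *		btr_pool_allocate(btrp, ret);
 */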
static void btr_pool_deallocate(struct btr_pool *btrp, size_t size)
{
	char *end = btrp->rhead + size;

	if (size == 0)
		return;
	assert(end <= btrp->area_end);
	assert(size <= btr_pool_filled(btrp));
	if (end == btrp->area_end)
		end = btrp->area_start;
	if (!btrp->whead) /* the pool was full, it no longer is */
		btrp->whead = btrp->rhead;
	btrp->rhead = end;
	if (btrp->rhead == btrp->whead)
		btrp->rhead = btrp->whead = btrp->area_start;
}
#define FOR_EACH_CHILD(_tn, _btrn) list_for_each_entry((_tn), \
	&((_btrn)->children), node)
#define FOR_EACH_CHILD_SAFE(_tn, _tmp, _btrn) \
	list_for_each_entry_safe((_tn), (_tmp), &((_btrn)->children), node)

#define FOR_EACH_BUFFER_REF(_br, _btrn) \
	list_for_each_entry((_br), &(_btrn)->input_queue, node)
#define FOR_EACH_BUFFER_REF_SAFE(_br, _tmp, _btrn) \
	list_for_each_entry_safe((_br), (_tmp), &(_btrn)->input_queue, node)
/**
 * Create a new buffer tree node.
 *
 * \param bnd Specifies how to create the new node.
 *
 * This function always succeeds (or calls exit()). The returned pointer
 * must be freed using btr_free_node() after it has been removed from
 * the buffer tree via btr_remove_node().
 */
struct btr_node *btr_new_node(struct btr_node_description *bnd)
{
	struct btr_node *btrn = para_malloc(sizeof(*btrn));

	btrn->name = para_strdup(bnd->name);
	btrn->parent = bnd->parent;
	btrn->execute = bnd->handler;
	btrn->context = bnd->context;
	btrn->start.tv_sec = 0;
	btrn->start.tv_usec = 0;
	INIT_LIST_HEAD(&btrn->children);
	INIT_LIST_HEAD(&btrn->input_queue);
	if (!bnd->child) {
		if (bnd->parent) {
			list_add_tail(&btrn->node, &bnd->parent->children);
			PARA_INFO_LOG("new leaf node: %s (child of %s)\n",
				bnd->name, bnd->parent->name);
		} else
			PARA_INFO_LOG("added %s as btr root\n", bnd->name);
		return btrn;
	}
	if (!bnd->parent) {
		assert(!bnd->child->parent);
		PARA_INFO_LOG("new root: %s (was %s)\n",
			bnd->name, bnd->child->name);
		btrn->parent = NULL;
		list_add_tail(&bnd->child->node, &btrn->children);
		/* link it in */
		bnd->child->parent = btrn;
		return btrn;
	}
	PARA_EMERG_LOG("inserting internal nodes not yet supported.\n");
	exit(EXIT_FAILURE);
	assert(bnd->child->parent == bnd->parent);
	return btrn;
}
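
/*
 * Usage sketch (hypothetical names; assumes only the .name, .parent,
 * .child, .handler and .context members referenced above):
 *
 *	struct btr_node_description bnd = {.name = "reader"};
 *	struct btr_node *root = btr_new_node(&bnd);
 *	struct btr_node_description bnd2 = {.name = "filter", .parent = root};
 *	struct btr_node *filter = btr_new_node(&bnd2);
 *
 * This builds a two-node tree in which output added at "reader" shows up
 * in the input queue of "filter".
 */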
/*
 * Allocate a new btr buffer.
 *
 * The freshly allocated buffer will have a zero refcount and will
 * not be associated with a btr pool.
 */
static struct btr_buffer *new_btrb(char *buf, size_t size)
{
	struct btr_buffer *btrb = para_calloc(sizeof(*btrb));

	btrb->buf = buf;
	btrb->size = size;
	return btrb;
}
static void dealloc_buffer(struct btr_buffer *btrb)
{
	if (btrb->pool)
		btr_pool_deallocate(btrb->pool, btrb->size);
	else
		free(btrb->buf);
}
static struct btr_buffer_reference *get_first_input_br(struct btr_node *btrn)
{
	if (list_empty(&btrn->input_queue))
		return NULL;
	return list_first_entry(&btrn->input_queue,
		struct btr_buffer_reference, node);
}
/*
 * Deallocate the reference, release the resources if refcount drops to zero.
 */
static void btr_drop_buffer_reference(struct btr_buffer_reference *br)
{
	struct btr_buffer *btrb = br->btrb;

	list_del(&br->node);
	free(br);
	btrb->refcount--;
	if (btrb->refcount == 0) {
		dealloc_buffer(btrb);
		free(btrb);
	}
}
static void add_btrb_to_children(struct btr_buffer *btrb,
		struct btr_node *btrn, size_t consumed)
{
	struct btr_node *ch;

	if (btrn->start.tv_sec == 0)
		btrn->start = *now;
	FOR_EACH_CHILD(ch, btrn) {
		struct btr_buffer_reference *br = para_calloc(sizeof(*br));
		br->btrb = btrb;
		br->consumed = consumed;
		list_add_tail(&br->node, &ch->input_queue);
		btrb->refcount++;
		if (ch->start.tv_sec == 0)
			ch->start = *now;
	}
}
/**
 * Insert a malloced buffer into the buffer tree.
 *
 * \param buf The buffer to insert.
 * \param size The size of \a buf in bytes.
 * \param btrn Position in the buffer tree to create the output.
 *
 * This creates references to \a buf and adds these references to each child of
 * \a btrn. The buffer will be freed using standard free() once no buffer tree
 * node is referencing it any more.
 *
 * Note that this function must not be used if \a buf was obtained from a
 * buffer pool. Use btr_add_output_pool() in this case.
 */
void btr_add_output(char *buf, size_t size, struct btr_node *btrn)
{
	struct btr_buffer *btrb;

	assert(size != 0);
	if (list_empty(&btrn->children)) {
		free(buf);
		return;
	}
	btrb = new_btrb(buf, size);
	add_btrb_to_children(btrb, btrn, 0);
}
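
/*
 * Usage sketch (hypothetical root node; produce_data() is a made-up
 * helper): generate data into a malloced buffer and hand ownership over
 * to the buffer tree:
 *
 *	char *buf = para_malloc(1024);
 *	size_t n = produce_data(buf, 1024);
 *	if (n > 0)
 *		btr_add_output(buf, n, btrn);
 *
 * After the call the tree owns buf; it is freed once the last child has
 * consumed it (or immediately if there are no children).
 */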
/**
 * Feed data to child nodes of a buffer tree node.
 *
 * \param btrp The buffer pool.
 * \param size The number of bytes to be allocated and fed to each child.
 * \param btrn The node whose children are to be fed.
 *
 * This function allocates the amount of bytes from the buffer pool area,
 * starting at the current value of the write head, and creates buffer
 * references to the resulting part of the buffer pool area, one for each child
 * of \a btrn. The references are then fed into the input queue of each child.
 */
void btr_add_output_pool(struct btr_pool *btrp, size_t size,
		struct btr_node *btrn)
{
	struct btr_buffer *btrb;
	char *buf;
	size_t avail;

	assert(size != 0);
	if (list_empty(&btrn->children))
		return;
	avail = btr_pool_get_buffer(btrp, &buf);
	assert(avail >= size);
	btr_pool_allocate(btrp, size);
	btrb = new_btrb(buf, size);
	btrb->pool = btrp;
	add_btrb_to_children(btrb, btrn, 0);
}
/**
 * Copy data to the write head of a buffer pool and feed it to all children nodes.
 *
 * \param src The source buffer.
 * \param n The size of the source buffer in bytes.
 * \param btrp The destination buffer pool.
 * \param btrn Add the data as output of this node.
 *
 * This is expensive. The caller must make sure the data fits into the buffer
 * pool area.
 */
void btr_copy(const void *src, size_t n, struct btr_pool *btrp,
		struct btr_node *btrn)
{
	char *buf;
	size_t sz, copy;

	if (n == 0)
		return;
	assert(n <= btr_pool_unused(btrp));
	sz = btr_pool_get_buffer(btrp, &buf);
	copy = PARA_MIN(sz, n);
	memcpy(buf, src, copy);
	btr_add_output_pool(btrp, copy, btrn);
	if (copy == n)
		return;
	/* The pool wrapped around; copy the rest to the start of the area. */
	sz = btr_pool_get_buffer(btrp, &buf);
	assert(sz >= n - copy);
	memcpy(buf, (const char *)src + copy, n - copy);
	btr_add_output_pool(btrp, n - copy, btrn);
}
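
/*
 * Example (illustrative numbers): if only 300 contiguous bytes remain
 * before the end of the pool area and n = 500, the first
 * btr_add_output_pool() call above feeds 300 bytes, the write head wraps
 * to the start of the area, and the second call feeds the remaining 200
 * bytes. This is why the caller must guarantee n <= btr_pool_unused(btrp).
 */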
static void btr_pushdown_br(struct btr_buffer_reference *br, struct btr_node *btrn)
{
	add_btrb_to_children(br->btrb, btrn, br->consumed);
	btr_drop_buffer_reference(br);
}
void btr_pushdown(struct btr_node *btrn)
{
	struct btr_buffer_reference *br, *tmp;

	FOR_EACH_BUFFER_REF_SAFE(br, tmp, btrn)
		btr_pushdown_br(br, btrn);
}
int btr_pushdown_one(struct btr_node *btrn)
{
	struct btr_buffer_reference *br;

	if (list_empty(&btrn->input_queue))
		return 0;
	br = list_first_entry(&btrn->input_queue, struct btr_buffer_reference, node);
	btr_pushdown_br(br, btrn);
	return 1;
}
/*
 * Find out whether a node is a leaf node.
 *
 * \param btrn The node to check.
 *
 * \return True if this node has no children. False otherwise.
 */
static bool btr_no_children(struct btr_node *btrn)
{
	return list_empty(&btrn->children);
}
/**
 * Find out whether a node is an orphan node.
 *
 * \param btrn The buffer tree node.
 *
 * \return True if \a btrn has no parent.
 *
 * This function will always return true for the root node. However in case
 * nodes have been removed from the tree, other nodes may become orphans too.
 */
bool btr_no_parent(struct btr_node *btrn)
{
	return !btrn->parent;
}
bool btr_inplace_ok(struct btr_node *btrn)
{
	if (!btrn->parent)
		return true;
	return list_is_singular(&btrn->parent->children);
}
static inline size_t br_available_bytes(struct btr_buffer_reference *br)
{
	return br->btrb->size - br->consumed;
}
size_t btr_get_buffer_by_reference(struct btr_buffer_reference *br, char **buf)
{
	*buf = br->btrb->buf + br->consumed;
	return br_available_bytes(br);
}
/**
 * Obtain the next buffer of the input queue of a buffer tree node.
 *
 * \param btrn The node whose input queue is to be queried.
 * \param bufp Result pointer.
 *
 * \return The number of bytes that can be read from buf. Zero if the input
 * buffer queue is empty. In this case the value of \a bufp is undefined.
 */
size_t btr_next_buffer(struct btr_node *btrn, char **bufp)
{
	struct btr_buffer_reference *br;
	char *buf, *result = NULL;
	size_t sz, rv = 0;

	FOR_EACH_BUFFER_REF(br, btrn) {
		sz = btr_get_buffer_by_reference(br, &buf);
		if (!result) {
			result = buf;
			rv = sz;
			if (!br->btrb->pool)
				break;
			continue;
		}
		if (!br->btrb->pool)
			break;
		/* Merge adjacent pool buffers into one contiguous result. */
		if (result + rv != buf)
			break;
		rv += sz;
	}
	if (bufp)
		*bufp = result;
	return rv;
}
/**
 * Deallocate the given number of bytes from the input queue.
 *
 * \param btrn The buffer tree node.
 * \param numbytes The number of bytes to be deallocated.
 *
 * This function must be used to get rid of existing buffer references in the
 * node's input queue. If no references to a buffer remain, the underlying
 * buffers are either freed (in the non-buffer pool case) or the read head of
 * the buffer pool is being advanced.
 *
 * Note that \a numbytes may be smaller than the buffer size. In this case the
 * buffer is not deallocated and subsequent calls to btr_next_buffer() return
 * the remaining part of the buffer.
 */
void btr_consume(struct btr_node *btrn, size_t numbytes)
{
	struct btr_buffer_reference *br, *tmp;
	size_t sz;

	if (numbytes == 0)
		return;
	br = get_first_input_br(btrn);
	assert(br);
	if (br->wrap_count == 0) {
		/*
		 * No wrap buffer. Drop buffer references whose buffer
		 * has been fully used. */
		FOR_EACH_BUFFER_REF_SAFE(br, tmp, btrn) {
			if (br->consumed + numbytes <= br->btrb->size) {
				br->consumed += numbytes;
				if (br->consumed == br->btrb->size)
					btr_drop_buffer_reference(br);
				return;
			}
			numbytes -= br->btrb->size - br->consumed;
			btr_drop_buffer_reference(br);
		}
		assert(false); /* more bytes consumed than available */
	}
	/*
	 * We have a wrap buffer, consume from it. If in total, i.e. including
	 * previous calls to btr_consume(), less than wrap_count has been
	 * consumed, there's nothing more we can do.
	 *
	 * Otherwise we drop the wrap buffer and consume from subsequent
	 * buffers of the input queue the correct amount of bytes. This is the
	 * total number of bytes that have been consumed from the wrap buffer.
	 */
	PARA_DEBUG_LOG("consuming %zu/%zu bytes from wrap buffer\n", numbytes,
		br_available_bytes(br));
	assert(numbytes <= br_available_bytes(br));
	if (br->consumed + numbytes < br->wrap_count) {
		br->consumed += numbytes;
		return;
	}
	PARA_DEBUG_LOG("dropping wrap buffer (%zu bytes)\n", br->btrb->size);
	/* get rid of the wrap buffer */
	sz = br->consumed + numbytes;
	btr_drop_buffer_reference(br);
	return btr_consume(btrn, sz);
}
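
/*
 * Usage sketch (hypothetical leaf node; process() is a made-up callback
 * returning the number of bytes it accepted): the canonical consumer loop
 * pairs btr_next_buffer() with btr_consume():
 *
 *	for (;;) {
 *		char *buf;
 *		size_t len = btr_next_buffer(btrn, &buf);
 *		if (len == 0)
 *			break;
 *		size_t used = process(buf, len);
 *		btr_consume(btrn, used);
 *		if (used < len)
 *			break; // consumer can not take more right now
 *	}
 */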
static void flush_input_queue(struct btr_node *btrn)
{
	struct btr_buffer_reference *br, *tmp;

	FOR_EACH_BUFFER_REF_SAFE(br, tmp, btrn)
		btr_drop_buffer_reference(br);
}
void btr_free_node(struct btr_node *btrn)
{
	if (!btrn)
		return;
	free(btrn->name);
	free(btrn);
}
void btr_remove_node(struct btr_node *btrn)
{
	struct btr_node *ch;

	if (!btrn)
		return;
	PARA_NOTICE_LOG("removing btr node %s from buffer tree\n", btrn->name);
	FOR_EACH_CHILD(ch, btrn)
		ch->parent = NULL;
	flush_input_queue(btrn);
	if (btrn->parent)
		list_del(&btrn->node);
}
/**
 * Return the amount of available input bytes of a buffer tree node.
 *
 * \param btrn The node whose input size should be computed.
 *
 * \return The total number of bytes available in the node's input queue.
 *
 * This simply iterates over all buffer references in the input queue and
 * returns the sum of the sizes of all references.
 */
size_t btr_get_input_queue_size(struct btr_node *btrn)
{
	struct btr_buffer_reference *br;
	size_t size = 0, wrap_consumed = 0;

	FOR_EACH_BUFFER_REF(br, btrn) {
		if (br->wrap_count != 0) {
			wrap_consumed = br->consumed;
			continue;
		}
		size += br_available_bytes(br);
	}
	assert(wrap_consumed <= size);
	size -= wrap_consumed;
	return size;
}
void btr_splice_out_node(struct btr_node *btrn)
{
	struct btr_node *ch, *tmp;

	assert(btrn);
	PARA_NOTICE_LOG("splicing out %s\n", btrn->name);
	btr_pushdown(btrn);
	if (btrn->parent)
		list_del(&btrn->node);
	FOR_EACH_CHILD_SAFE(ch, tmp, btrn) {
		PARA_INFO_LOG("parent(%s): %s\n", ch->name,
			btrn->parent? btrn->parent->name : "NULL");
		ch->parent = btrn->parent;
		if (btrn->parent)
			list_move(&ch->node, &btrn->parent->children);
	}
	assert(list_empty(&btrn->children));
}
/*
 * Return the size of the largest input queue.
 *
 * Iterates over all children of the given node.
 */
static size_t btr_bytes_pending(struct btr_node *btrn)
{
	size_t max_size = 0;
	struct btr_node *ch;

	FOR_EACH_CHILD(ch, btrn) {
		size_t size = btr_get_input_queue_size(ch);
		max_size = PARA_MAX(max_size, size);
	}
	return max_size;
}
int btr_exec(struct btr_node *btrn, const char *command, char **value_result)
{
	if (!btrn)
		return -ERRNO_TO_PARA_ERROR(EINVAL);
	if (!btrn->execute)
		return -ERRNO_TO_PARA_ERROR(ENOTSUP);
	return btrn->execute(btrn, command, value_result);
}
/**
 * Execute an inter-node command.
 *
 * This function traverses the buffer tree upwards and asks each parent of
 * \a btrn that has a command handler to execute \a command, until one of
 * them handles it.
 */
int btr_exec_up(struct btr_node *btrn, const char *command, char **value_result)
{
	int ret;

	for (; btrn; btrn = btrn->parent) {
		struct btr_node *parent = btrn->parent;

		if (!parent)
			return -ERRNO_TO_PARA_ERROR(ENOTSUP);
		if (!parent->execute)
			continue;
		PARA_INFO_LOG("parent: %s, cmd: %s\n", parent->name, command);
		ret = parent->execute(parent, command, value_result);
		if (ret == -ERRNO_TO_PARA_ERROR(ENOTSUP))
			continue;
		if (ret < 0)
			return ret;
		if (value_result && *value_result)
			PARA_NOTICE_LOG("%s(%s): %s\n", command, parent->name,
				*value_result);
		return 1;
	}
	return -ERRNO_TO_PARA_ERROR(ENOTSUP);
}
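
/*
 * Usage sketch (hypothetical handler; the "filename" command and the
 * current_filename variable are made up): an ancestor node can answer
 * commands sent by its descendants:
 *
 *	static int reader_exec(struct btr_node *btrn, const char *cmd,
 *			char **result)
 *	{
 *		if (strcmp(cmd, "filename"))
 *			return -ERRNO_TO_PARA_ERROR(ENOTSUP);
 *		*result = para_strdup(current_filename);
 *		return 1;
 *	}
 *
 * A descendant then calls btr_exec_up(btrn, "filename", &result) and the
 * request bubbles up until some ancestor handles it.
 */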
void *btr_context(struct btr_node *btrn)
{
	return btrn->context;
}
static bool need_buffer_pool_merge(struct btr_node *btrn)
{
	struct btr_buffer_reference *br = get_first_input_br(btrn);

	if (!br)
		return false;
	if (br->wrap_count != 0)
		return true;
	if (br->btrb->pool)
		return true;
	return false;
}
static void merge_input_pool(struct btr_node *btrn, size_t dest_size)
{
	struct btr_buffer_reference *br, *wbr = NULL;
	int num_refs; /* including wrap buffer */
	char *buf, *buf1 = NULL, *buf2 = NULL;
	size_t sz, sz1 = 0, sz2 = 0, wsz;

	br = get_first_input_br(btrn);
	if (!br || br_available_bytes(br) >= dest_size)
		return;
	num_refs = 0;
	/* Find the first two non-contiguous chunks of the input queue. */
	FOR_EACH_BUFFER_REF(br, btrn) {
		num_refs++;
		sz = btr_get_buffer_by_reference(br, &buf);
		if (br->wrap_count != 0) {
			/* The wrap buffer is always the first reference. */
			assert(num_refs == 1);
			wbr = br;
			if (sz >= dest_size)
				return;
			continue;
		}
		if (!buf1) {
			buf1 = buf;
			sz1 = sz;
			goto next;
		}
		if (buf1 + sz1 == buf) { /* still contiguous */
			sz1 += sz;
			goto next;
		}
		if (!buf2) {
			buf2 = buf;
			sz2 = sz;
			goto next;
		}
		assert(buf2 + sz2 == buf);
		sz2 += sz;
next:
		if (sz1 + sz2 >= dest_size)
			break;
	}
	if (!buf2) /* nothing to do */
		return;
	assert(buf1 && sz2 > 0);
	/*
	 * If the second buffer is large, we only take the first part of it to
	 * avoid having to memcpy() huge buffers.
	 */
	sz2 = PARA_MIN(sz2, (size_t)(64 * 1024));
	if (!wbr) {
		/* Make a new wrap buffer combining buf1 and buf2. */
		sz = sz1 + sz2;
		buf = para_malloc(sz);
		PARA_DEBUG_LOG("merging input buffers: (%p:%zu, %p:%zu) -> %p:%zu\n",
			buf1, sz1, buf2, sz2, buf, sz);
		memcpy(buf, buf1, sz1);
		memcpy(buf + sz1, buf2, sz2);
		br = para_calloc(sizeof(*br));
		br->btrb = new_btrb(buf, sz);
		br->btrb->refcount = 1;
		/* This is a wrap buffer */
		br->wrap_count = sz1;
		para_list_add(&br->node, &btrn->input_queue);
		return;
	}
	PARA_DEBUG_LOG("increasing wrap buffer, sz1: %zu, sz2: %zu\n", sz1, sz2);
	/*
	 * We already have a wrap buffer, but it is too small. It might be
	 * partially used.
	 */
	wsz = br_available_bytes(wbr);
	if (wbr->wrap_count == sz1 && wbr->btrb->size >= sz1 + sz2) /* nothing we can do about it */
		return;
	sz = sz1 + sz2 - wbr->btrb->size; /* amount of new data */
	wbr->btrb->size += sz;
	wbr->btrb->buf = para_realloc(wbr->btrb->buf, wbr->btrb->size);
	/* copy the new data to the end of the reallocated buffer */
	memcpy(wbr->btrb->buf + wbr->btrb->size - sz, buf2 + sz2 - sz, sz);
}
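
/*
 * Illustration (made-up sizes): suppose dest_size is 100 and the queue
 * references 60 bytes ending at the pool's area end (buf1) followed by 80
 * bytes starting at the area start (buf2). The two chunks are not
 * contiguous in memory, so a 140 byte wrap buffer is malloced, both parts
 * are copied into it, and wrap_count is set to 60, the length of the
 * first part.
 */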
/*
 * Merge the first two input buffers into one.
 *
 * This is a quite expensive operation.
 *
 * \return The number of buffers that have been available (zero, one or two).
 */
static int merge_input(struct btr_node *btrn)
{
	struct btr_buffer_reference *brs[2], *br;
	char *bufs[2], *buf;
	size_t szs[2], sz;
	int i;

	if (list_empty(&btrn->input_queue))
		return 0;
	if (list_is_singular(&btrn->input_queue))
		return 1;
	i = 0;
	/* get references to the first two buffers */
	FOR_EACH_BUFFER_REF(br, btrn) {
		brs[i] = br;
		szs[i] = btr_get_buffer_by_reference(brs[i], bufs + i);
		i++;
		if (i == 2)
			break;
	}
	/* make a new btrb that combines the two buffers and a br to it. */
	sz = szs[0] + szs[1];
	buf = para_malloc(sz);
	PARA_DEBUG_LOG("%s: memory merging input buffers: (%zu, %zu) -> %zu\n",
		btrn->name, szs[0], szs[1], sz);
	memcpy(buf, bufs[0], szs[0]);
	memcpy(buf + szs[0], bufs[1], szs[1]);

	br = para_calloc(sizeof(*br));
	br->btrb = new_btrb(buf, sz);
	br->btrb->refcount = 1;

	/* replace the first two refs by the new one */
	btr_drop_buffer_reference(brs[0]);
	btr_drop_buffer_reference(brs[1]);
	para_list_add(&br->node, &btrn->input_queue);
	return 2;
}
void btr_merge(struct btr_node *btrn, size_t dest_size)
{
	if (need_buffer_pool_merge(btrn))
		return merge_input_pool(btrn, dest_size);
	for (;;) {
		char *buf;
		size_t len = btr_next_buffer(btrn, &buf);
		if (len >= dest_size)
			return;
		PARA_DEBUG_LOG("input size = %zu < %zu = dest\n", len, dest_size);
		if (merge_input(btrn) < 2)
			return;
	}
}
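
/*
 * Usage sketch (HEADER_SIZE is an assumed constant): a node that needs a
 * certain number of contiguous bytes, e.g. to parse a header, may call
 *
 *	btr_merge(btrn, HEADER_SIZE);
 *	len = btr_next_buffer(btrn, &buf);
 *
 * and must still check len >= HEADER_SIZE afterwards, since merging stops
 * early if the queue does not contain enough data in total.
 */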
bool btr_eof(struct btr_node *btrn)
{
	char *buf;
	size_t len = btr_next_buffer(btrn, &buf);

	return (len == 0 && btr_no_parent(btrn));
}
static void log_tree_recursively(struct btr_node *btrn, int loglevel, int depth)
{
	struct btr_node *ch;
	const char spaces[] = "                ", *space = spaces + 16 - depth;

	if (depth > 16)
		return;
	para_log(loglevel, "%s%s\n", space, btrn->name);
	FOR_EACH_CHILD(ch, btrn)
		log_tree_recursively(ch, loglevel, depth + 1);
}
void btr_log_tree(struct btr_node *btrn, int loglevel)
{
	log_tree_recursively(btrn, loglevel, 0);
}
/**
 * Find the node with the given name in the buffer tree.
 *
 * \return A pointer to the node with the given name, or NULL if no such
 * node exists. \a root if \a name is \p NULL.
 */
struct btr_node *btr_search_node(const char *name, struct btr_node *root)
{
	struct btr_node *ch;

	if (!name)
		return root;
	if (!strcmp(root->name, name))
		return root;
	FOR_EACH_CHILD(ch, root) {
		struct btr_node *result = btr_search_node(name, ch);
		if (result)
			return result;
	}
	return NULL;
}
/** 640K ought to be enough for everybody ;) */
#define BTRN_MAX_PENDING (640 * 1024)
int btr_node_status(struct btr_node *btrn, size_t min_iqs,
		enum btr_node_type type)
{
	size_t iqs;

	assert(btrn);
	if (type != BTR_NT_LEAF) {
		if (btr_no_children(btrn))
			return -E_BTR_NO_CHILD;
		if (btr_bytes_pending(btrn) > BTRN_MAX_PENDING)
			return 0;
	}
	if (type != BTR_NT_ROOT) {
		if (btr_eof(btrn))
			return -E_BTR_EOF;
		iqs = btr_get_input_queue_size(btrn);
		if (iqs == 0) /* we have a parent, because not eof */
			return 0;
		if (iqs < min_iqs && !btr_no_parent(btrn))
			return 0;
	}
	return 1;
}
/**
 * Get the time of the first data transfer of the given node.
 *
 * \param btrn The node whose start time should be returned.
 * \param tv Result pointer.
 */
void btr_get_node_start(struct btr_node *btrn, struct timeval *tv)
{
	*tv = btrn->start;
}