[btr] Only print debug message if we're really increasing the wrap buffer.
[paraslash.git] / buffer_tree.c
#include <regex.h>
#include <stdbool.h>

#include "para.h"
#include "list.h"
#include "string.h"
#include "buffer_tree.h"
#include "error.h"
#include "sched.h"

/* whead = NULL means area full */
struct btr_pool {
	char *name;
	char *area_start;
	char *area_end;
	char *rhead; /* read head */
	char *whead; /* write head */
};

struct btr_buffer {
	char *buf;
	size_t size;
	/** The number of references to this buffer. */
	int refcount;
	/* NULL means no buffer pool but a malloced buffer. */
	struct btr_pool *pool;
};

struct btr_buffer_reference {
	struct btr_buffer *btrb;
	size_t consumed;
	/*
	 * Each buffer reference belongs to the buffer queue list of some
	 * buffer tree node.
	 */
	struct list_head node;
	/*
	 * Non-zero only for wrap buffers: the number of bytes that were
	 * merged in from the first of the two combined buffers.
	 */
	size_t wrap_count;
};

struct btr_node {
	char *name;
	struct btr_node *parent;
	/* The position of this btr node in the buffer tree. */
	struct list_head node;
	/* The children nodes of this btr node are linked together in a list. */
	struct list_head children;
	/* Time of first data transfer. */
	struct timeval start;
	/**
	 * The input queue is a list of references to btr buffers. Each item on
	 * the list represents an input buffer which has not been completely
	 * used by this btr node.
	 */
	struct list_head input_queue;
	btr_command_handler execute;
	void *context;
};

/**
 * Create a new buffer pool.
 *
 * \param name The name of the new buffer pool.
 * \param area_size The size in bytes of the pool area.
 *
 * \return An opaque pointer to the newly created buffer pool. It must be
 * passed to btr_pool_free() to deallocate all resources once the pool is no
 * longer needed.
 */
struct btr_pool *btr_pool_new(const char *name, size_t area_size)
{
	struct btr_pool *btrp;

	PARA_INFO_LOG("%s, %zu bytes\n", name, area_size);
	btrp = para_malloc(sizeof(*btrp));
	btrp->area_start = para_malloc(area_size);
	btrp->area_end = btrp->area_start + area_size;
	btrp->rhead = btrp->area_start;
	btrp->whead = btrp->area_start;
	btrp->name = para_strdup(name);
	return btrp;
}

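/*
 * Example: a minimal sketch of the pool lifecycle. The pool name and the
 * 128K area size are arbitrary illustrations, not values required by the
 * API.
 *
 *	struct btr_pool *btrp = btr_pool_new("example", 128 * 1024);
 *	// ... fill via btr_pool_get_buffer()/btr_add_output_pool(),
 *	// drain via btr_consume() on the consuming nodes ...
 *	btr_pool_free(btrp);
 */
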
/**
 * Deallocate resources used by a buffer pool.
 *
 * \param btrp A pointer obtained via btr_pool_new().
 */
void btr_pool_free(struct btr_pool *btrp)
{
	if (!btrp)
		return;
	free(btrp->area_start);
	free(btrp->name);
	free(btrp);
}

/**
 * Return the size of the buffer pool area.
 *
 * \param btrp The buffer pool.
 *
 * \return The same value that was passed to btr_pool_new() at creation time.
 */
size_t btr_pool_size(struct btr_pool *btrp)
{
	return btrp->area_end - btrp->area_start;
}

/* Return the number of bytes of the pool area which are currently in use. */
size_t btr_pool_filled(struct btr_pool *btrp)
{
	if (!btrp->whead)
		return btr_pool_size(btrp);
	if (btrp->rhead <= btrp->whead)
		return btrp->whead - btrp->rhead;
	return btr_pool_size(btrp) - (btrp->rhead - btrp->whead);
}

/**
 * Get the number of unused bytes in the buffer pool.
 *
 * \param btrp The pool.
 *
 * \return The number of bytes that can currently be allocated.
 *
 * Note that in general the returned number of bytes is not available as a
 * single contiguous buffer. Use btr_pool_available() to obtain the length of
 * the largest contiguous buffer that can currently be allocated from the
 * buffer pool.
 */
size_t btr_pool_unused(struct btr_pool *btrp)
{
	return btr_pool_size(btrp) - btr_pool_filled(btrp);
}

/*
 * Return the maximal size available for one read. This is at most the value
 * returned by btr_pool_unused().
 */
size_t btr_pool_available(struct btr_pool *btrp)
{
	if (!btrp->whead)
		return 0;
	if (btrp->rhead <= btrp->whead)
		return btrp->area_end - btrp->whead;
	return btrp->rhead - btrp->whead;
}

/**
 * Obtain the current write head.
 *
 * \param btrp The buffer pool.
 * \param result The write head is returned here.
 *
 * \return The maximal number of bytes that may be written to the returned
 * buffer.
 */
size_t btr_pool_get_buffer(struct btr_pool *btrp, char **result)
{
	if (result)
		*result = btrp->whead;
	return btr_pool_available(btrp);
}

/**
 * Mark a part of the buffer pool area as allocated.
 *
 * \param btrp The buffer pool.
 * \param size The number of bytes to be allocated.
 *
 * This is usually called after the caller has written to the buffer obtained
 * by btr_pool_get_buffer().
 */
static void btr_pool_allocate(struct btr_pool *btrp, size_t size)
{
	char *end;

	if (size == 0)
		return;
	assert(size <= btr_pool_available(btrp));
	end = btrp->whead + size;
	assert(end <= btrp->area_end);

	if (end == btrp->area_end) {
		PARA_DEBUG_LOG("%s: end of pool area reached\n", btrp->name);
		end = btrp->area_start;
	}
	if (end == btrp->rhead) {
		PARA_DEBUG_LOG("%s btrp buffer full\n", btrp->name);
		end = NULL; /* buffer full */
	}
	btrp->whead = end;
}

/* Advance the read head of the pool, making room for new allocations. */
static void btr_pool_deallocate(struct btr_pool *btrp, size_t size)
{
	char *end = btrp->rhead + size;

	if (size == 0)
		return;
	assert(end <= btrp->area_end);
	assert(size <= btr_pool_filled(btrp));
	if (end == btrp->area_end)
		end = btrp->area_start;
	if (!btrp->whead)
		btrp->whead = btrp->rhead;
	btrp->rhead = end;
	if (btrp->rhead == btrp->whead)
		btrp->rhead = btrp->whead = btrp->area_start;
}

#define FOR_EACH_CHILD(_tn, _btrn) list_for_each_entry((_tn), \
	&((_btrn)->children), node)
#define FOR_EACH_CHILD_SAFE(_tn, _tmp, _btrn) \
	list_for_each_entry_safe((_tn), (_tmp), &((_btrn)->children), node)

#define FOR_EACH_BUFFER_REF(_br, _btrn) \
	list_for_each_entry((_br), &(_btrn)->input_queue, node)
#define FOR_EACH_BUFFER_REF_SAFE(_br, _tmp, _btrn) \
	list_for_each_entry_safe((_br), (_tmp), &(_btrn)->input_queue, node)

/**
 * Create a new buffer tree node.
 *
 * \param bnd Specifies how to create the new node.
 *
 * \return A pointer to the newly allocated node.
 *
 * This function always succeeds (or calls exit()). The returned pointer must
 * be freed using btr_free_node() after it has been removed from the buffer
 * tree via btr_remove_node().
 */
struct btr_node *btr_new_node(struct btr_node_description *bnd)
{
	struct btr_node *btrn = para_malloc(sizeof(*btrn));

	btrn->name = para_strdup(bnd->name);
	btrn->parent = bnd->parent;
	btrn->execute = bnd->handler;
	btrn->context = bnd->context;
	btrn->start.tv_sec = 0;
	btrn->start.tv_usec = 0;
	INIT_LIST_HEAD(&btrn->children);
	INIT_LIST_HEAD(&btrn->input_queue);
	if (!bnd->child) {
		if (bnd->parent) {
			list_add_tail(&btrn->node, &bnd->parent->children);
			PARA_INFO_LOG("new leaf node: %s (child of %s)\n",
				bnd->name, bnd->parent->name);
		} else
			PARA_INFO_LOG("added %s as btr root\n", bnd->name);
		goto out;
	}
	if (!bnd->parent) {
		assert(!bnd->child->parent);
		PARA_INFO_LOG("new root: %s (was %s)\n",
			bnd->name, bnd->child->name);
		btrn->parent = NULL;
		list_add_tail(&bnd->child->node, &btrn->children);
		/* link it in */
		bnd->child->parent = btrn;
		goto out;
	}
	PARA_EMERG_LOG("inserting internal nodes not yet supported.\n");
	exit(EXIT_FAILURE);
	assert(bnd->child->parent == bnd->parent);
out:
	return btrn;
}

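/*
 * Example: a sketch of how a small tree might be set up. The node names and
 * the NULL command handlers are placeholders; real nodes usually install an
 * execute handler and a context.
 *
 *	struct btr_node_description bnd = {
 *		.name = "reader", .parent = NULL, .child = NULL,
 *		.handler = NULL, .context = NULL,
 *	};
 *	struct btr_node *root = btr_new_node(&bnd);
 *
 *	bnd.name = "filter";
 *	bnd.parent = root;
 *	struct btr_node *child = btr_new_node(&bnd);
 */
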
/*
 * Allocate a new btr buffer.
 *
 * The freshly allocated buffer will have a zero refcount and will
 * not be associated with a btr pool.
 */
static struct btr_buffer *new_btrb(char *buf, size_t size)
{
	struct btr_buffer *btrb = para_calloc(sizeof(*btrb));

	btrb->buf = buf;
	btrb->size = size;
	return btrb;
}

static void dealloc_buffer(struct btr_buffer *btrb)
{
	if (btrb->pool)
		btr_pool_deallocate(btrb->pool, btrb->size);
	else
		free(btrb->buf);
}

static struct btr_buffer_reference *get_first_input_br(struct btr_node *btrn)
{
	if (list_empty(&btrn->input_queue))
		return NULL;
	return list_first_entry(&btrn->input_queue,
		struct btr_buffer_reference, node);
}

/*
 * Deallocate the reference, release the resources if refcount drops to zero.
 */
static void btr_drop_buffer_reference(struct btr_buffer_reference *br)
{
	struct btr_buffer *btrb = br->btrb;

	list_del(&br->node);
	free(br);
	btrb->refcount--;
	if (btrb->refcount == 0) {
		dealloc_buffer(btrb);
		free(btrb);
	}
}

static void add_btrb_to_children(struct btr_buffer *btrb,
		struct btr_node *btrn, size_t consumed)
{
	struct btr_node *ch;

	if (btrn->start.tv_sec == 0)
		btrn->start = *now;
	FOR_EACH_CHILD(ch, btrn) {
		struct btr_buffer_reference *br = para_calloc(sizeof(*br));

		br->btrb = btrb;
		br->consumed = consumed;
		list_add_tail(&br->node, &ch->input_queue);
		btrb->refcount++;
		if (ch->start.tv_sec == 0)
			ch->start = *now;
	}
}

/**
 * Insert a malloced buffer into the buffer tree.
 *
 * \param buf The buffer to insert.
 * \param size The size of \a buf in bytes.
 * \param btrn The node at which the buffer enters the tree as output.
 *
 * This creates references to \a buf and adds these references to each child of
 * \a btrn. The buffer will be freed using standard free() once no buffer tree
 * node is referencing it any more.
 *
 * Note that this function must not be used if \a buf was obtained from a
 * buffer pool. Use btr_add_output_pool() in this case.
 */
void btr_add_output(char *buf, size_t size, struct btr_node *btrn)
{
	struct btr_buffer *btrb;

	assert(size != 0);
	if (list_empty(&btrn->children)) {
		free(buf);
		return;
	}
	btrb = new_btrb(buf, size);
	add_btrb_to_children(btrb, btrn, 0);
}

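/*
 * Example: a sketch of a producer node handing a malloced buffer to its
 * children. fill_somehow() is a hypothetical producer; any nonzero number
 * of bytes works.
 *
 *	char *buf = para_malloc(4096);
 *	size_t n = fill_somehow(buf, 4096);
 *	btr_add_output(buf, n, btrn); // children now hold references to buf
 */
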
/**
 * Feed data to child nodes of a buffer tree node.
 *
 * \param btrp The buffer pool.
 * \param size The number of bytes to be allocated and fed to each child.
 * \param btrn The node whose children are to be fed.
 *
 * This function allocates \a size bytes from the buffer pool area, starting
 * at the current value of the write head, and creates buffer references to
 * the resulting part of the buffer pool area, one for each child of \a btrn.
 * The references are then fed into the input queue of each child.
 */
void btr_add_output_pool(struct btr_pool *btrp, size_t size,
		struct btr_node *btrn)
{
	struct btr_buffer *btrb;
	char *buf;
	size_t avail;

	assert(size != 0);
	if (list_empty(&btrn->children))
		return;
	avail = btr_pool_get_buffer(btrp, &buf);
	assert(avail >= size);
	btr_pool_allocate(btrp, size);
	btrb = new_btrb(buf, size);
	btrb->pool = btrp;
	add_btrb_to_children(btrb, btrn, 0);
}

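/*
 * Example: a sketch of the usual pool-backed write path. A producer obtains
 * the write head, fills at most that many bytes (read_somehow() is a
 * placeholder), then publishes the filled part to the children.
 *
 *	char *buf;
 *	size_t sz = btr_pool_get_buffer(btrp, &buf);
 *
 *	if (sz > 0) {
 *		size_t n = read_somehow(buf, sz);
 *		if (n > 0)
 *			btr_add_output_pool(btrp, n, btrn);
 *	}
 */
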
/**
 * Copy data to the write head of a buffer pool and feed it to all children
 * nodes.
 *
 * \param src The source buffer.
 * \param n The size of the source buffer in bytes.
 * \param btrp The destination buffer pool.
 * \param btrn Add the data as output of this node.
 *
 * This is expensive. The caller must make sure the data fits into the buffer
 * pool area.
 */
void btr_copy(const void *src, size_t n, struct btr_pool *btrp,
		struct btr_node *btrn)
{
	char *buf;
	size_t sz, copy;

	if (n == 0)
		return;
	assert(n <= btr_pool_unused(btrp));
	sz = btr_pool_get_buffer(btrp, &buf);
	copy = PARA_MIN(sz, n);
	memcpy(buf, src, copy);
	btr_add_output_pool(btrp, copy, btrn);
	if (copy == n)
		return;
	sz = btr_pool_get_buffer(btrp, &buf);
	assert(sz >= n - copy);
	memcpy(buf, (const char *)src + copy, n - copy);
	btr_add_output_pool(btrp, n - copy, btrn);
}

static void btr_pushdown_br(struct btr_buffer_reference *br,
		struct btr_node *btrn)
{
	add_btrb_to_children(br->btrb, btrn, br->consumed);
	btr_drop_buffer_reference(br);
}

void btr_pushdown(struct btr_node *btrn)
{
	struct btr_buffer_reference *br, *tmp;

	FOR_EACH_BUFFER_REF_SAFE(br, tmp, btrn)
		btr_pushdown_br(br, btrn);
}

int btr_pushdown_one(struct btr_node *btrn)
{
	struct btr_buffer_reference *br;

	if (list_empty(&btrn->input_queue))
		return 0;
	br = list_first_entry(&btrn->input_queue,
		struct btr_buffer_reference, node);
	btr_pushdown_br(br, btrn);
	return 1;
}

/*
 * Find out whether a node is a leaf node.
 *
 * \param btrn The node to check.
 *
 * \return True if this node has no children. False otherwise.
 */
static bool btr_no_children(struct btr_node *btrn)
{
	return list_empty(&btrn->children);
}

/**
 * Find out whether a node is an orphan node.
 *
 * \param btrn The buffer tree node.
 *
 * \return True if \a btrn has no parent.
 *
 * This function always returns true for the root node. However, if nodes
 * have been removed from the tree, other nodes may become orphans too.
 */
bool btr_no_parent(struct btr_node *btrn)
{
	return !btrn->parent;
}

/*
 * Return true if the input buffers of this node may be modified in place,
 * i.e. if no sibling node receives references to the same buffers.
 */
bool btr_inplace_ok(struct btr_node *btrn)
{
	if (!btrn->parent)
		return true;
	return list_is_singular(&btrn->parent->children);
}

static inline size_t br_available_bytes(struct btr_buffer_reference *br)
{
	return br->btrb->size - br->consumed;
}

size_t btr_get_buffer_by_reference(struct btr_buffer_reference *br, char **buf)
{
	if (buf)
		*buf = br->btrb->buf + br->consumed;
	return br_available_bytes(br);
}

/**
 * Obtain the next buffer of the input queue of a buffer tree node.
 *
 * \param btrn The node whose input queue is to be queried.
 * \param bufp Result pointer.
 *
 * \return The number of bytes that can be read from the returned buffer.
 * Zero if the input buffer queue is empty. In this case the value of
 * \a bufp is undefined.
 */
size_t btr_next_buffer(struct btr_node *btrn, char **bufp)
{
	struct btr_buffer_reference *br;
	char *buf, *result = NULL;
	size_t sz, rv = 0;

	FOR_EACH_BUFFER_REF(br, btrn) {
		sz = btr_get_buffer_by_reference(br, &buf);
		if (!result) {
			result = buf;
			rv = sz;
			if (!br->btrb->pool)
				break;
			continue;
		}
		if (!br->btrb->pool)
			break;
		if (result + rv != buf)
			break;
		rv += sz;
	}
	if (bufp)
		*bufp = result;
	return rv;
}

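/*
 * Example: a sketch of the canonical consumer loop. The node reads whatever
 * is currently buffered, processes part of it (process_somehow() is a
 * placeholder), and reports back how much it used.
 *
 *	for (;;) {
 *		char *buf;
 *		size_t used, len = btr_next_buffer(btrn, &buf);
 *
 *		if (len == 0)
 *			break;
 *		used = process_somehow(buf, len);
 *		if (used == 0)
 *			break; // need more data
 *		btr_consume(btrn, used);
 *	}
 */
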
/**
 * Deallocate the given number of bytes from the input queue.
 *
 * \param btrn The buffer tree node.
 * \param numbytes The number of bytes to be deallocated.
 *
 * This function must be used to get rid of existing buffer references in the
 * node's input queue. If no references to a buffer remain, the underlying
 * buffers are either freed (in the non-buffer pool case) or the read head of
 * the buffer pool is advanced.
 *
 * Note that \a numbytes may be smaller than the buffer size. In this case the
 * buffer is not deallocated and subsequent calls to btr_next_buffer() return
 * the remaining part of the buffer.
 */
void btr_consume(struct btr_node *btrn, size_t numbytes)
{
	struct btr_buffer_reference *br, *tmp;
	size_t sz;

	if (numbytes == 0)
		return;
	br = get_first_input_br(btrn);
	assert(br);

	if (br->wrap_count == 0) {
		/*
		 * No wrap buffer. Drop buffer references whose buffer
		 * has been fully used.
		 */
		FOR_EACH_BUFFER_REF_SAFE(br, tmp, btrn) {
			if (br->consumed + numbytes <= br->btrb->size) {
				br->consumed += numbytes;
				if (br->consumed == br->btrb->size)
					btr_drop_buffer_reference(br);
				return;
			}
			numbytes -= br->btrb->size - br->consumed;
			btr_drop_buffer_reference(br);
		}
		assert(false); /* not reached: numbytes exceeded the queue size */
	}
	/*
	 * We have a wrap buffer, consume from it. If in total, i.e. including
	 * previous calls to btr_consume(), less than wrap_count has been
	 * consumed, there's nothing more we can do.
	 *
	 * Otherwise we drop the wrap buffer and consume the correct amount of
	 * bytes from the subsequent buffers of the input queue. This is the
	 * total number of bytes that have been consumed from the wrap buffer.
	 */
	PARA_DEBUG_LOG("consuming %zu/%zu bytes from wrap buffer\n", numbytes,
		br_available_bytes(br));

	assert(numbytes <= br_available_bytes(br));
	if (br->consumed + numbytes < br->wrap_count) {
		br->consumed += numbytes;
		return;
	}
	PARA_DEBUG_LOG("dropping wrap buffer (%zu bytes)\n", br->btrb->size);
	/* get rid of the wrap buffer */
	sz = br->consumed + numbytes;
	btr_drop_buffer_reference(br);
	btr_consume(btrn, sz);
}

static void flush_input_queue(struct btr_node *btrn)
{
	struct btr_buffer_reference *br, *tmp;

	FOR_EACH_BUFFER_REF_SAFE(br, tmp, btrn)
		btr_drop_buffer_reference(br);
}

void btr_free_node(struct btr_node *btrn)
{
	if (!btrn)
		return;
	free(btrn->name);
	free(btrn);
}

void btr_remove_node(struct btr_node *btrn)
{
	struct btr_node *ch;

	if (!btrn)
		return;
	PARA_NOTICE_LOG("removing btr node %s from buffer tree\n", btrn->name);
	FOR_EACH_CHILD(ch, btrn)
		ch->parent = NULL;
	flush_input_queue(btrn);
	if (btrn->parent)
		list_del(&btrn->node);
}

/**
 * Return the amount of available input bytes of a buffer tree node.
 *
 * \param btrn The node whose input size should be computed.
 *
 * \return The total number of bytes available in the node's input
 * queue.
 *
 * This iterates over all buffer references in the input queue and returns
 * the sum of the sizes of all references, correcting for bytes which are
 * duplicated in a wrap buffer.
 */
size_t btr_get_input_queue_size(struct btr_node *btrn)
{
	struct btr_buffer_reference *br;
	size_t size = 0, wrap_consumed = 0;

	FOR_EACH_BUFFER_REF(br, btrn) {
		if (br->wrap_count != 0) {
			wrap_consumed = br->consumed;
			continue;
		}
		size += br_available_bytes(br);
	}
	assert(wrap_consumed <= size);
	size -= wrap_consumed;
	return size;
}

void btr_splice_out_node(struct btr_node *btrn)
{
	struct btr_node *ch, *tmp;

	assert(btrn);
	PARA_NOTICE_LOG("splicing out %s\n", btrn->name);
	btr_pushdown(btrn);
	if (btrn->parent)
		list_del(&btrn->node);
	FOR_EACH_CHILD_SAFE(ch, tmp, btrn) {
		PARA_INFO_LOG("parent(%s): %s\n", ch->name,
			btrn->parent? btrn->parent->name : "NULL");
		ch->parent = btrn->parent;
		if (btrn->parent)
			list_move(&ch->node, &btrn->parent->children);
	}
	assert(list_empty(&btrn->children));
}

/**
 * Return the size of the largest input queue.
 *
 * Iterates over all children of the given node.
 */
static size_t btr_bytes_pending(struct btr_node *btrn)
{
	size_t max_size = 0;
	struct btr_node *ch;

	FOR_EACH_CHILD(ch, btrn) {
		size_t size = btr_get_input_queue_size(ch);

		max_size = PARA_MAX(max_size, size);
	}
	return max_size;
}

int btr_exec(struct btr_node *btrn, const char *command, char **value_result)
{
	if (!btrn)
		return -ERRNO_TO_PARA_ERROR(EINVAL);
	if (!btrn->execute)
		return -ERRNO_TO_PARA_ERROR(ENOTSUP);
	return btrn->execute(btrn, command, value_result);
}

/**
 * Execute an inter-node command.
 *
 * Walk up the buffer tree, asking each ancestor of \a btrn in turn to
 * execute \a command, until one of them handles it.
 */
int btr_exec_up(struct btr_node *btrn, const char *command, char **value_result)
{
	int ret;

	for (; btrn; btrn = btrn->parent) {
		struct btr_node *parent = btrn->parent;

		if (!parent)
			return -ERRNO_TO_PARA_ERROR(ENOTSUP);
		if (!parent->execute)
			continue;
		PARA_INFO_LOG("parent: %s, cmd: %s\n", parent->name, command);
		ret = parent->execute(parent, command, value_result);
		if (ret == -ERRNO_TO_PARA_ERROR(ENOTSUP))
			continue;
		if (ret < 0)
			return ret;
		if (value_result && *value_result)
			PARA_NOTICE_LOG("%s(%s): %s\n", command, parent->name,
				*value_result);
		return 1;
	}
	return -ERRNO_TO_PARA_ERROR(ENOTSUP);
}

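/*
 * Example: a sketch of querying an ancestor node. The "sample_rate" command
 * string is only an illustration; which commands are understood depends on
 * the execute handlers installed in the tree. The result string, if any, is
 * assumed to be malloced by the handler.
 *
 *	char *result = NULL;
 *	int ret = btr_exec_up(btrn, "sample_rate", &result);
 *
 *	if (ret >= 0 && result) {
 *		// parse the value in result
 *		free(result);
 *	}
 */
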
/* Return the context pointer that was passed to btr_new_node(). */
void *btr_context(struct btr_node *btrn)
{
	return btrn->context;
}

static bool need_buffer_pool_merge(struct btr_node *btrn)
{
	struct btr_buffer_reference *br = get_first_input_br(btrn);

	if (!br)
		return false;
	if (br->wrap_count != 0)
		return true;
	if (br->btrb->pool)
		return true;
	return false;
}

static void merge_input_pool(struct btr_node *btrn, size_t dest_size)
{
	struct btr_buffer_reference *br, *wbr = NULL;
	int num_refs; /* including wrap buffer */
	char *buf, *buf1 = NULL, *buf2 = NULL;
	size_t sz, sz1 = 0, sz2 = 0;

	br = get_first_input_br(btrn);
	if (!br || br_available_bytes(br) >= dest_size)
		return;
	num_refs = 0;
	FOR_EACH_BUFFER_REF(br, btrn) {
		num_refs++;
		sz = btr_get_buffer_by_reference(br, &buf);
		if (sz == 0)
			break;
		if (br->wrap_count != 0) {
			assert(!wbr);
			assert(num_refs == 1);
			wbr = br;
			if (sz >= dest_size)
				return;
			continue;
		}
		if (!buf1) {
			buf1 = buf;
			sz1 = sz;
			goto next;
		}
		if (buf1 + sz1 == buf) {
			sz1 += sz;
			goto next;
		}
		if (!buf2) {
			buf2 = buf;
			sz2 = sz;
			goto next;
		}
		assert(buf2 + sz2 == buf);
		sz2 += sz;
next:
		if (sz1 + sz2 >= dest_size)
			break;
	}
	if (!buf2) /* nothing to do */
		return;
	assert(buf1 && sz2 > 0);
	/*
	 * If the second buffer is large, we only take the first part of it to
	 * avoid having to memcpy() huge buffers.
	 */
	sz2 = PARA_MIN(sz2, (size_t)(64 * 1024));
	if (!wbr) {
		/* Make a new wrap buffer combining buf1 and buf2. */
		sz = sz1 + sz2;
		buf = para_malloc(sz);
		PARA_DEBUG_LOG("merging input buffers: (%p:%zu, %p:%zu) -> %p:%zu\n",
			buf1, sz1, buf2, sz2, buf, sz);
		memcpy(buf, buf1, sz1);
		memcpy(buf + sz1, buf2, sz2);
		br = para_calloc(sizeof(*br));
		br->btrb = new_btrb(buf, sz);
		br->btrb->refcount = 1;
		br->consumed = 0;
		/* This is a wrap buffer. */
		br->wrap_count = sz1;
		para_list_add(&br->node, &btrn->input_queue);
		return;
	}
	/*
	 * We already have a wrap buffer, but it is too small. It might be
	 * partially used.
	 */
	if (wbr->wrap_count == sz1 && wbr->btrb->size >= sz1 + sz2)
		/* Nothing we can do about it. */
		return;
	sz = sz1 + sz2 - wbr->btrb->size; /* amount of new data */
	PARA_DEBUG_LOG("increasing wrap buffer %zu -> %zu\n", wbr->btrb->size,
		wbr->btrb->size + sz);
	wbr->btrb->size += sz;
	wbr->btrb->buf = para_realloc(wbr->btrb->buf, wbr->btrb->size);
	/* Copy the new data to the end of the reallocated buffer. */
	assert(sz2 >= sz);
	memcpy(wbr->btrb->buf + wbr->btrb->size - sz, buf2 + sz2 - sz, sz);
}

/**
 * Merge the first two input buffers into one.
 *
 * This is a quite expensive operation.
 *
 * \return The number of buffers that have been available (zero, one or two).
 */
static int merge_input(struct btr_node *btrn)
{
	struct btr_buffer_reference *brs[2], *br;
	char *bufs[2], *buf;
	size_t szs[2], sz;
	int i;

	if (list_empty(&btrn->input_queue))
		return 0;
	if (list_is_singular(&btrn->input_queue))
		return 1;
	i = 0;
	/* get references to the first two buffers */
	FOR_EACH_BUFFER_REF(br, btrn) {
		brs[i] = br;
		szs[i] = btr_get_buffer_by_reference(brs[i], bufs + i);
		i++;
		if (i == 2)
			break;
	}
	/* make a new btrb that combines the two buffers and a br to it. */
	sz = szs[0] + szs[1];
	buf = para_malloc(sz);
	PARA_DEBUG_LOG("%s: memory merging input buffers: (%zu, %zu) -> %zu\n",
		btrn->name, szs[0], szs[1], sz);
	memcpy(buf, bufs[0], szs[0]);
	memcpy(buf + szs[0], bufs[1], szs[1]);

	br = para_calloc(sizeof(*br));
	br->btrb = new_btrb(buf, sz);
	br->btrb->refcount = 1;

	/* replace the first two refs by the new one */
	btr_drop_buffer_reference(brs[0]);
	btr_drop_buffer_reference(brs[1]);
	para_list_add(&br->node, &btrn->input_queue);
	return 2;
}

void btr_merge(struct btr_node *btrn, size_t dest_size)
{
	if (need_buffer_pool_merge(btrn)) {
		merge_input_pool(btrn, dest_size);
		return;
	}
	for (;;) {
		char *buf;
		size_t len = btr_next_buffer(btrn, &buf);

		if (len >= dest_size)
			return;
		PARA_DEBUG_LOG("input size = %zu < %zu = dest\n", len,
			dest_size);
		if (merge_input(btrn) < 2)
			return;
	}
}

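/*
 * Example: a sketch of how a decoder might use btr_merge() to obtain a
 * contiguous view of a fixed-size header. HEADER_SIZE is a placeholder.
 *
 *	char *header;
 *	size_t len;
 *
 *	btr_merge(btrn, HEADER_SIZE);
 *	len = btr_next_buffer(btrn, &header);
 *	if (len < HEADER_SIZE)
 *		return; // not enough data buffered yet, try again later
 *	// header now points to at least HEADER_SIZE contiguous bytes
 */
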
bool btr_eof(struct btr_node *btrn)
{
	char *buf;
	size_t len = btr_next_buffer(btrn, &buf);

	return (len == 0 && btr_no_parent(btrn));
}

void log_tree_recursively(struct btr_node *btrn, int loglevel, int depth)
{
	struct btr_node *ch;
	const char spaces[] = "                ", *space = spaces + 16 - depth;

	if (depth > 16)
		return;
	para_log(loglevel, "%s%s\n", space, btrn->name);
	FOR_EACH_CHILD(ch, btrn)
		log_tree_recursively(ch, loglevel, depth + 1);
}

void btr_log_tree(struct btr_node *btrn, int loglevel)
{
	log_tree_recursively(btrn, loglevel, 0);
}

/*
 * Find a node with the given name in the tree below \a root.
 *
 * \return \a root if \a name is \p NULL, the matching node if one exists,
 * \p NULL otherwise.
 */
struct btr_node *btr_search_node(const char *name, struct btr_node *root)
{
	struct btr_node *ch;

	if (!name)
		return root;
	if (!strcmp(root->name, name))
		return root;
	FOR_EACH_CHILD(ch, root) {
		struct btr_node *result = btr_search_node(name, ch);

		if (result)
			return result;
	}
	return NULL;
}

/** 640K ought to be enough for everybody ;) */
#define BTRN_MAX_PENDING (640 * 1024)

/*
 * Check whether a node has work to do. A positive return value means the
 * node should process its input queue, zero means there is nothing to do at
 * the moment, and a negative return value indicates an error such as EOF or
 * a missing child.
 */
int btr_node_status(struct btr_node *btrn, size_t min_iqs,
		enum btr_node_type type)
{
	size_t iqs;

	assert(btrn);
	if (type != BTR_NT_LEAF) {
		if (btr_no_children(btrn))
			return -E_BTR_NO_CHILD;
		if (btr_bytes_pending(btrn) > BTRN_MAX_PENDING)
			return 0;
	}
	if (type != BTR_NT_ROOT) {
		if (btr_eof(btrn))
			return -E_BTR_EOF;
		iqs = btr_get_input_queue_size(btrn);
		if (iqs == 0) /* we have a parent, because not eof */
			return 0;
		if (iqs < min_iqs && !btr_no_parent(btrn))
			return 0;
	}
	return 1;
}

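/*
 * Example: a sketch of the typical call pattern in a node's post select
 * handling. The names fn and MIN_IQS are placeholders; BTR_NT_INTERNAL is
 * assumed to be the node type used for filter nodes.
 *
 *	int ret = btr_node_status(fn->btrn, MIN_IQS, BTR_NT_INTERNAL);
 *
 *	if (ret < 0)
 *		goto err; // e.g. -E_BTR_EOF
 *	if (ret == 0)
 *		return;   // nothing to do yet
 *	btr_merge(fn->btrn, MIN_IQS);
 *	// ...read input via btr_next_buffer(), produce output via
 *	// btr_add_output(), then btr_consume() what was used...
 */
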
/* Return the time of the first data transfer through the given node. */
void btr_get_node_start(struct btr_node *btrn, struct timeval *tv)
{
	*tv = btrn->start;
}