#include "error.h"
#include "sched.h"
/*
 * The fixed-size memory area of a buffer pool with its read and write heads.
 *
 * whead == NULL means the area is full.
 */
struct btr_pool {
	/* The name of the pool, used in log messages. */
	char *name;
	/* The beginning of the pool memory area. */
	char *area_start;
	/* Points one byte beyond the end of the pool area. */
	char *area_end;
	/* Read head: data between rhead and whead is in use. */
	char *rhead;
	/*
	 * Write head: next allocation position, or NULL if the area is full.
	 * NOTE(review): this field was missing from the struct as shown but is
	 * assigned and read throughout (btr_pool_new, btr_pool_deallocate);
	 * restored here.
	 */
	char *whead;
	/* Opaque pointer; its use is not visible in this chunk. */
	void *context;
};
-struct btr_pool *btr_pool_new(size_t area_size)
+/**
+ * Create a new buffer pool.
+ *
+ * \param name The name of the new buffer pool.
+ *
+ * \param area The size in bytes of the pool area.
+ *
+ * \return An opaque pointer to the newly created buffer pool. It must be
+ * passed to btr_pool_free() after it is no longer used to deallocate all
+ * resources.
+ */
+struct btr_pool *btr_pool_new(const char *name, size_t area_size)
{
- struct btr_pool *btrp = para_malloc(sizeof(*btrp));
+ struct btr_pool *btrp;
+ PARA_INFO_LOG("%s, %zu bytes\n", name, area_size);
+ btrp = para_malloc(sizeof(*btrp));
btrp->area_start = para_malloc(area_size);
btrp->area_end = btrp->area_start + area_size;
btrp->rhead = btrp->area_start;
btrp->whead = btrp->area_start;
+ btrp->name = para_strdup(name);
return btrp;
}
-/* whead = NULL means area full */
-
+/**
+ * Dellocate resources used by a buffer pool.
+ *
+ * \param btrp A pointer obtained via btr_pool_new().
+ */
void btr_pool_free(struct btr_pool *btrp)
{
if (!btrp)
return;
free(btrp->area_start);
+ free(btrp->name);
free(btrp);
}
+/**
+ * Return the size of the buffer pool area.
+ *
+ * \param btrp The buffer pool.
+ *
+ * \return The same value which was passed during creation time to
+ * btr_pool_new().
+ */
size_t btr_pool_size(struct btr_pool *btrp)
{
return btrp->area_end - btrp->area_start;
return btr_pool_size(btrp) - btr_pool_filled(btrp);
}
/*
 * NOTE(review): this span appears to be a garbled merge of two functions.
 * The btr_pool_available() signature is followed by what looks like the body
 * of a pool-allocate routine: "size" and "end" are not declared anywhere in
 * view, and the "return;" conflicts with the size_t return type. Code is
 * left byte-identical; reconcile against the upstream file before building.
 */
+/*
+ * Return maximal size available for one read. This is
+ * smaller than the value returned by btr_pool_unused().
+ */
size_t btr_pool_available(struct btr_pool *btrp)
{
	if (!btrp->whead)
	if (size == 0)
		return;
-	//PARA_CRIT_LOG("filled: %zu, alloc %zu\n", btr_pool_filled(btrp), size);
	assert(size <= btr_pool_available(btrp));
	end = btrp->whead + size;
	assert(end <= btrp->area_end);
	if (end == btrp->area_end) {
-		PARA_DEBUG_LOG("end of pool area reached: %p\n", end);
+		PARA_DEBUG_LOG("%s: end of pool area reached\n", btrp->name);
		/* Wrap around to the start of the pool area. */
		end = btrp->area_start;
	}
	if (end == btrp->rhead) {
-		PARA_DEBUG_LOG("btrp buffer full\n");
+		PARA_DEBUG_LOG("%s btrp buffer full\n", btrp->name);
		end = NULL; /* buffer full */
	}
	btrp->whead = end;
-	//PARA_CRIT_LOG("filled: %zu\n", btr_pool_filled(btrp));
}
/*
 * Deallocate the given number of bytes from the read head of the pool area,
 * making them available for future allocations.
 *
 * NOTE(review): no handling of the whead == NULL (pool full) case is visible
 * in this chunk — verify against the allocate path.
 */
static void btr_pool_deallocate(struct btr_pool *btrp, size_t size)
{
	char *end;

	if (size == 0)
		return;
	/*
	 * Compute the new read head only after the size check: forming a
	 * pointer outside the area (even without dereferencing it) would be
	 * undefined behavior, so assert the bound immediately.
	 */
	end = btrp->rhead + size;
	assert(end <= btrp->area_end);
	btrp->rhead = end;
	/* If the pool drained completely, reset both heads to the start. */
	if (btrp->rhead == btrp->whead)
		btrp->rhead = btrp->whead = btrp->area_start;
}
#define FOR_EACH_CHILD(_tn, _btrn) list_for_each_entry((_tn), \
#define FOR_EACH_BUFFER_REF_SAFE(_br, _tmp, _btrn) \
list_for_each_entry_safe((_br), (_tmp), &(_btrn)->input_queue, node)
-struct btr_node *btr_new_node(const char *name, struct btr_node *parent,
- btr_command_handler handler, void *context)
+struct btr_node *btr_new_node(struct btr_node_description *bnd)
{
struct btr_node *btrn = para_malloc(sizeof(*btrn));
- btrn->name = para_strdup(name);
- btrn->parent = parent;
- btrn->execute = handler;
- btrn->context = context;
+ btrn->name = para_strdup(bnd->name);
+ btrn->parent = bnd->parent;
+ btrn->execute = bnd->handler;
+ btrn->context = bnd->context;
btrn->start.tv_sec = 0;
btrn->start.tv_usec = 0;
- if (parent)
- list_add_tail(&btrn->node, &parent->children);
INIT_LIST_HEAD(&btrn->children);
INIT_LIST_HEAD(&btrn->input_queue);
- if (parent)
- PARA_INFO_LOG("added %s as child of %s\n", name, parent->name);
- else
- PARA_INFO_LOG("added %s as btr root\n", name);
+ if (!bnd->child) {
+ if (bnd->parent) {
+ list_add_tail(&btrn->node, &bnd->parent->children);
+ PARA_INFO_LOG("new leaf node: %s (child of %s)\n",
+ bnd->name, bnd->parent->name);
+ } else
+ PARA_INFO_LOG("added %s as btr root\n", bnd->name);
+ goto out;
+ }
+ if (!bnd->parent) {
+ assert(!bnd->child->parent);
+ PARA_INFO_LOG("new root: %s (was %s)\n",
+ bnd->name, bnd->child->name);
+ btrn->parent = NULL;
+ list_add_tail(&bnd->child->node, &btrn->children);
+ /* link it in */
+ bnd->child->parent = btrn;
+ goto out;
+ }
+ PARA_EMERG_LOG("inserting internal nodes not yet supported.\n");
+ exit(EXIT_FAILURE);
+ assert(bnd->child->parent == bnd->parent);
+out:
return btrn;
}
/*
* Allocate a new btr buffer.
*
- * The freshly allocated buffer will have a zero refcount.
+ * The freshly allocated buffer will have a zero refcount and will
+ * not be associated with a btr pool.
*/
static struct btr_buffer *new_btrb(char *buf, size_t size)
{
{
struct btr_buffer *btrb = br->btrb;
- //PARA_CRIT_LOG("dropping buffer reference %p\n", br);
list_del(&br->node);
free(br);
btrb->refcount--;
add_btrb_to_children(btrb, btrn, 0);
}
-void btr_add_output_pool(struct btr_pool *btrp, char *buf, size_t size,
- struct btr_node *btrn)
+void btr_add_output_pool(struct btr_pool *btrp, size_t size,
+ struct btr_node *btrn)
{
struct btr_buffer *btrb;
+ char *buf;
+ size_t avail;
assert(size != 0);
- if (list_empty(&btrn->children)) {
- btr_pool_deallocate(btrp, size);
+ if (list_empty(&btrn->children))
return;
- }
+ avail = btr_pool_get_buffer(btrp, &buf);
+ assert(avail >= size);
+ btr_pool_allocate(btrp, size);
btrb = new_btrb(buf, size);
btrb->pool = btrp;
add_btrb_to_children(btrb, btrn, 0);
}
/**
 * Copy data into a buffer pool and hand references down to the children of
 * the given node.
 *
 * \param src The data to be copied.
 * \param n The number of bytes of \a src to copy.
 * \param btrp The buffer pool receiving the copy.
 * \param btrn References to the copied data are added to this node's children.
 *
 * Since the pool area may wrap around, at most two copy operations are
 * performed. The caller must ensure at least \a n unused bytes in the pool.
 */
void btr_copy(const void *src, size_t n, struct btr_pool *btrp,
	struct btr_node *btrn)
{
	char *buf;
	size_t sz, copy;

	if (n == 0)
		return;
	assert(n <= btr_pool_unused(btrp));
	sz = btr_pool_get_buffer(btrp, &buf);
	copy = PARA_MIN(sz, n);
	memcpy(buf, src, copy);
	btr_add_output_pool(btrp, copy, btrn);
	if (copy == n)
		return;
	/* The area wrapped around: copy the remainder to the second part. */
	sz = btr_pool_get_buffer(btrp, &buf);
	assert(sz >= n - copy);
	/* Cast required: ISO C forbids arithmetic on void pointers. */
	memcpy(buf, (const char *)src + copy, n - copy);
	btr_add_output_pool(btrp, n - copy, btrn);
}
+
/*
 * Hand the buffer referenced by \a br down to the children of \a btrn,
 * preserving the number of bytes already consumed from it.
 */
static void btr_pushdown_br(struct btr_buffer_reference *br, struct btr_node *btrn)
{
	add_btrb_to_children(br->btrb, btrn, br->consumed);
}
if (!br->btrb->pool)
break;
- if (result + rv != buf) {
- PARA_DEBUG_LOG("%s: pool merge impossible: %p != %p\n",
- btrn->name, result + rv, buf);
+ if (result + rv != buf)
break;
- }
-// PARA_CRIT_LOG("%s: inplace merge (%zu, %zu)->%zu\n", btrn->name,
-// rv, sz, rv + sz);
-// PARA_CRIT_LOG("%s: inplace merge %p (%zu)\n", btrn->name,
-// result, sz);
rv += sz;
}
if (bufp)
br = get_first_input_br(btrn);
assert(br);
- //PARA_CRIT_LOG("wrap count: %zu\n", br->wrap_count);
if (br->wrap_count == 0) {
/*
* No wrap buffer. Drop buffer references whose buffer
/*
 * Return the number of bytes pending at the input queue of \a btrn.
 *
 * A reference with a non-zero wrap count is the wrap buffer, which
 * duplicates data that is also counted through the ordinary references, so
 * the amount already consumed from it must not be counted twice.
 */
size_t btr_get_input_queue_size(struct btr_node *btrn)
{
	struct btr_buffer_reference *ref;
	size_t total = 0, consumed_from_wrap_buffer = 0;

	FOR_EACH_BUFFER_REF(ref, btrn) {
		if (ref->wrap_count == 0) {
			total += br_available_bytes(ref);
			continue;
		}
		/* Wrap buffer: remember how much of it was consumed. */
		consumed_from_wrap_buffer = ref->consumed;
	}
	assert(consumed_from_wrap_buffer <= total);
	return total - consumed_from_wrap_buffer;
}
static void merge_input_pool(struct btr_node *btrn, size_t dest_size)
{
- struct btr_buffer_reference *br, *wbr;
+ struct btr_buffer_reference *br, *wbr = NULL;
int num_refs; /* including wrap buffer */
- char *buf, *buf1, *buf2 = NULL;
- size_t sz, sz1, sz2 = 0, wsz;
+ char *buf, *buf1 = NULL, *buf2 = NULL;
+ size_t sz, sz1 = 0, sz2 = 0, wsz;
- if (list_empty(&btrn->input_queue))
+ br = get_first_input_br(btrn);
+ if (!br || br_available_bytes(br) >= dest_size)
return;
-
num_refs = 0;
FOR_EACH_BUFFER_REF(br, btrn) {
num_refs++;
sz = btr_get_buffer_by_reference(br, &buf);
+ if (sz == 0)
+ break;
if (br->wrap_count != 0) {
assert(!wbr);
assert(num_refs == 1);
if (sz1 + sz2 >= dest_size)
break;
}
+ if (!buf2) /* nothing to do */
+ return;
+ assert(buf1 && sz2 > 0);
+ /*
+ * If the second buffer is large, we only take the first part of it to
+ * avoid having to memcpy() huge buffers.
+ */
+ sz2 = PARA_MIN(sz2, (size_t)(64 * 1024));
if (!wbr) {
- assert(buf1);
- if (!buf2) /* nothing to do */
- return;
- /* make a new wrap buffer combining buf1 and buf 2. */
+ /* Make a new wrap buffer combining buf1 and buf2. */
sz = sz1 + sz2;
buf = para_malloc(sz);
PARA_DEBUG_LOG("merging input buffers: (%p:%zu, %p:%zu) -> %p:%zu\n",
para_list_add(&br->node, &btrn->input_queue);
return;
}
+ PARA_DEBUG_LOG("increasing wrap buffer, sz1: %zu, sz2: %zu\n", sz1, sz2);
/*
* We already have a wrap buffer, but it is too small. It might be
* partially used.
wsz = br_available_bytes(wbr);
if (wbr->wrap_count == sz1 && wbr->btrb->size >= sz1 + sz2) /* nothing we can do about it */
return;
- assert(buf1 && buf2);
sz = sz1 + sz2 - wbr->btrb->size; /* amount of new data */
wbr->btrb->size += sz;
- PARA_DEBUG_LOG("increasing wrap buffer to %zu\n", wbr->btrb->size);
wbr->btrb->buf = para_realloc(wbr->btrb->buf, wbr->btrb->size);
/* copy the new data to the end of the reallocated buffer */
assert(sz2 >= sz);
/* make a new btrb that combines the two buffers and a br to it. */
sz = szs[0] + szs[1];
buf = para_malloc(sz);
- PARA_DEBUG_LOG("memory merging input buffers: (%zu, %zu) -> %zu\n",
- szs[0], szs[1], sz);
+ PARA_DEBUG_LOG("%s: memory merging input buffers: (%zu, %zu) -> %zu\n",
+ btrn->name, szs[0], szs[1], sz);
memcpy(buf, bufs[0], szs[0]);
memcpy(buf + szs[0], bufs[1], szs[1]);
return log_tree_recursively(btrn, loglevel, 0);
}
+/*
+ * \return \a root if \a name is \p NULL.
+ */
+struct btr_node *btr_search_node(const char *name, struct btr_node *root)
+{
+ struct btr_node *ch;
+
+ if (!name)
+ return root;
+ if (!strcmp(root->name, name))
+ return root;
+ FOR_EACH_CHILD(ch, root) {
+ struct btr_node *result = btr_search_node(name, ch);
+ if (result)
+ return result;
+ }
+ return NULL;
+}
+
/** 640K ought to be enough for everybody ;) */
#define BTRN_MAX_PENDING (640 * 1024)
{
size_t iqs;
+ assert(btrn);
if (type != BTR_NT_LEAF) {
if (btr_no_children(btrn))
return -E_BTR_NO_CHILD;