auto import from //depot/cupcake/@135843
diff --git a/libdb/Android.mk b/libdb/Android.mk
new file mode 100644
index 0000000..1594fe8
--- /dev/null
+++ b/libdb/Android.mk
@@ -0,0 +1,17 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ db_debug.c \
+ db_insert.c \
+ db_manage.c \
+ db_stat.c \
+ db_travel.c
+
+LOCAL_C_INCLUDES := \
+ $(LOCAL_PATH)/.. \
+ $(LOCAL_PATH)/../libutil
+
+LOCAL_MODULE := libdb
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/libdb/db_debug.c b/libdb/db_debug.c
new file mode 100644
index 0000000..0575570
--- /dev/null
+++ b/libdb/db_debug.c
@@ -0,0 +1,132 @@
+/**
+ * @file db_debug.c
+ * Debug routines for libdb
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author Philippe Elie
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "odb.h"
+
+static int check_circular_list(odb_data_t const * data)
+{
+	odb_node_nr_t pos;
+	int do_abort = 0;
+	unsigned char * bitmap = calloc(1, data->descr->current_size);
+	if (!bitmap) return 0; /* OOM: can't run the check, don't crash */
+
+	for (pos = 0 ; pos < data->descr->size * BUCKET_FACTOR ; ++pos) {
+
+		odb_index_t index = data->hash_base[pos];
+		if (index && !do_abort) {
+			while (index) {
+				if (bitmap[index])
+					do_abort = 1;
+
+				bitmap[index] = 1;
+				index = data->node_base[index].next;
+			}
+		}
+
+		if (do_abort) {
+			printf("circular list detected size: %u\n",
+			       data->descr->current_size);
+
+			memset(bitmap, '\0', data->descr->current_size);
+
+			index = data->hash_base[pos];
+			while (index) {
+				printf("%u ", index);
+				if (bitmap[index])
+					exit(1);
+
+				bitmap[index] = 1;
+				index = data->node_base[index].next;
+			}
+		}
+
+		/* purely an optimization: instead of memset'ing the whole
+		 * map, reset only the entries this bucket touched; without
+		 * this the test was so slow it was useless */
+		index = data->hash_base[pos];
+		while (index) {
+			bitmap[index] = 0;
+			index = data->node_base[index].next;
+		}
+	}
+
+	free(bitmap);
+
+	return do_abort;
+}
+
+static int check_redundant_key(odb_data_t const * data, odb_key_t max)
+{
+	odb_node_nr_t pos;
+
+	unsigned char * bitmap = calloc(1, max + 1);
+	if (!bitmap) return 0; /* OOM: can't run the check, don't crash */
+
+	for (pos = 1 ; pos < data->descr->current_size ; ++pos) {
+		if (bitmap[data->node_base[pos].key]) {
+			printf("redundant key found %llu\n",
+			       (unsigned long long)data->node_base[pos].key);
+			free(bitmap); return 1; /* don't leak the bitmap */
+		}
+		bitmap[data->node_base[pos].key] = 1;
+	}
+	free(bitmap);
+
+	return 0;
+}
+
+int odb_check_hash(odb_t const * odb)
+{
+	odb_node_nr_t pos;
+	odb_node_nr_t nr_node = 0;
+	odb_node_nr_t nr_node_out_of_bound = 0;
+	int ret = 0;
+	odb_key_t max = 0;
+	odb_data_t * data = odb->data;
+
+	for (pos = 0 ; pos < data->descr->size * BUCKET_FACTOR ; ++pos) {
+		odb_index_t index = data->hash_base[pos];
+		while (index) {
+			if (index >= data->descr->current_size) {
+				nr_node_out_of_bound++;
+				break;
+			}
+			++nr_node;
+
+			if (data->node_base[index].key > max)
+				max = data->node_base[index].key;
+
+			index = data->node_base[index].next;
+		}
+	}
+
+	if (nr_node != data->descr->current_size - 1) {
+		printf("hash table walk found %u node expect %u node\n",
+		       nr_node, data->descr->current_size - 1);
+		ret = 1;
+	}
+
+	if (nr_node_out_of_bound) {
+		printf("out of bound node index: %u\n", nr_node_out_of_bound);
+		ret = 1;
+	}
+
+	if (ret == 0)
+		ret = check_circular_list(data);
+
+	if (ret == 0)
+		ret = check_redundant_key(data, max);
+
+	return ret;
+}
diff --git a/libdb/db_insert.c b/libdb/db_insert.c
new file mode 100644
index 0000000..018c294
--- /dev/null
+++ b/libdb/db_insert.c
@@ -0,0 +1,102 @@
+/**
+ * @file db_insert.c
+ * Inserting a key-value pair into a DB
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author Philippe Elie
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include "odb.h"
+
+
+static inline int add_node(odb_data_t * data, odb_key_t key, odb_value_t value)
+{
+	odb_index_t new_node;
+	odb_node_t * node;
+	odb_index_t index;
+
+	/* No locking is necessary: the iteration interface retrieves data
+	 * through the node_base array. We don't increase current_size here;
+	 * that is done by odb_commit_reservation(), so the new slot becomes
+	 * visible only after the increment.
+	 */
+	if (data->descr->current_size >= data->descr->size) {
+		if (odb_grow_hashtable(data))
+			return EINVAL;
+	}
+	new_node = data->descr->current_size;
+
+	node = &data->node_base[new_node];
+	node->value = value;
+	node->key = key;
+
+	index = odb_do_hash(data, key);
+	node->next = data->hash_base[index];
+	data->hash_base[index] = new_node;
+
+	/* FIXME: we need wrmb() here */
+	odb_commit_reservation(data);
+
+	return 0;
+}
+
+int odb_update_node(odb_t * odb, odb_key_t key)
+{
+	odb_index_t index;
+	odb_node_t * node;
+	odb_data_t * data;
+
+	data = odb->data;
+	index = data->hash_base[odb_do_hash(data, key)];
+	while (index) {
+		node = &data->node_base[index];
+		if (node->key == key) {
+			if (node->value + 1 != 0) {
+				node->value += 1;
+			} else {
+				/* post-profile tools must handle overflow */
+				/* FIXME: a tricky fix would be to simply
+				 * goto right before the final return of
+				 * add_node(); that way we can no longer
+				 * overflow. It works because new nodes are
+				 * linked at the start of the node list for
+				 * this bucket, so this loop first sees a
+				 * non-overflowed node if one exists. When we
+				 * grow the hashtable the most recently
+				 * allocated node for this key is set up
+				 * last, so again it is linked at the start
+				 * of the list. pp tools look to be ok with
+				 * this change.
+				 *
+				 * This change doesn't involve any file format
+				 * change, but perhaps it's a bit hacky to do
+				 * this w/o bumping the sample file format
+				 * version. The drawback is that the added
+				 * nodes are additive, not multiplicative
+				 * (multiplicative as if we added more bits
+				 * to store a value).
+				 */
+			}
+			return 0;
+		}
+
+		index = node->next;
+	}
+
+	return add_node(data, key, 1);
+}
+
+
+int odb_add_node(odb_t * odb, odb_key_t key, odb_value_t value)
+{
+ return add_node(odb->data, key, value);
+}
diff --git a/libdb/db_manage.c b/libdb/db_manage.c
new file mode 100644
index 0000000..d8a6fcb
--- /dev/null
+++ b/libdb/db_manage.c
@@ -0,0 +1,311 @@
+/**
+ * @file db_manage.c
+ * Management of a DB file
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author Philippe Elie
+ */
+
+#define _GNU_SOURCE
+
+#include <stdlib.h>
+#ifndef ANDROID
+#include <sys/fcntl.h>
+#else
+#include <fcntl.h>
+#endif
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "odb.h"
+#include "op_string.h"
+#include "op_libiberty.h"
+
+
+static __inline odb_descr_t * odb_to_descr(odb_data_t * data)
+{
+ return (odb_descr_t *)(((char*)data->base_memory) + data->sizeof_header);
+}
+
+
+static __inline odb_node_t * odb_to_node_base(odb_data_t * data)
+{
+ return (odb_node_t *)(((char *)data->base_memory) + data->offset_node);
+}
+
+
+static __inline odb_index_t * odb_to_hash_base(odb_data_t * data)
+{
+ return (odb_index_t *)(((char *)data->base_memory) +
+ data->offset_node +
+ (data->descr->size * sizeof(odb_node_t)));
+}
+
+
+/**
+ * return the number of bytes used by hash table, node table and header.
+ */
+static unsigned int tables_size(odb_data_t const * data, odb_node_nr_t node_nr)
+{
+ size_t size;
+
+ size = node_nr * (sizeof(odb_index_t) * BUCKET_FACTOR);
+ size += node_nr * sizeof(odb_node_t);
+ size += data->offset_node;
+
+ return size;
+}
+
+
+int odb_grow_hashtable(odb_data_t * data)
+{
+	unsigned int old_file_size;
+	unsigned int new_file_size;
+	unsigned int pos;
+	void * new_map;
+
+	old_file_size = tables_size(data, data->descr->size);
+	new_file_size = tables_size(data, data->descr->size * 2);
+
+	if (ftruncate(data->fd, new_file_size))
+		return 1;
+
+	new_map = mremap(data->base_memory,
+			 old_file_size, new_file_size, MREMAP_MAYMOVE);
+
+	if (new_map == MAP_FAILED)
+		return 1;
+
+	data->base_memory = new_map;
+	data->descr = odb_to_descr(data);
+	data->descr->size *= 2;
+	data->node_base = odb_to_node_base(data);
+	data->hash_base = odb_to_hash_base(data);
+	data->hash_mask = (data->descr->size * BUCKET_FACTOR) - 1;
+
+	/* Rebuild the hash table; node zero is never used. This works
+	 * because the file layout is node table then hash table,
+	 * sizeof(node) > sizeof(bucket), and when we grow the table we
+	 * double its size ==> the old and new hash tables can't
+	 * overlap, so the new hash table lies entirely in the new
+	 * memory area (the grown part) and we know the new hash
+	 * table is zeroed. That's why we don't need to zero-init
+	 * the new table */
+	/* OK: the above is not exact:
+	 * if BUCKET_FACTOR < sizeof(odb_node_t) / sizeof(odb_index_t)
+	 * all is fine and we don't need to init the hash
+	 * table because in this case the new hash table lies completely
+	 * inside the newly grown part. Avoiding touching this memory
+	 * is useful.
+	 */
+#if 0
+	for (pos = 0 ; pos < data->descr->size*BUCKET_FACTOR ; ++pos)
+		data->hash_base[pos] = 0;
+#endif
+
+	for (pos = 1; pos < data->descr->current_size; ++pos) {
+		odb_node_t * node = &data->node_base[pos];
+		size_t index = odb_do_hash(data, node->key);
+		node->next = data->hash_base[index];
+		data->hash_base[index] = pos;
+	}
+
+	return 0;
+}
+
+
+void odb_init(odb_t * odb)
+{
+ odb->data = NULL;
+}
+
+
+/* the default number of page, calculated to fit in 4096 bytes */
+#define DEFAULT_NODE_NR(offset_node) 128
+#define FILES_HASH_SIZE 512
+
+static struct list_head files_hash[FILES_HASH_SIZE];
+
+
+static void init_hash()
+{
+ size_t i;
+ for (i = 0; i < FILES_HASH_SIZE; ++i)
+ list_init(&files_hash[i]);
+}
+
+
+static odb_data_t *
+find_samples_data(size_t hash, char const * filename)
+{
+ struct list_head * pos;
+
+ /* FIXME: maybe an initial init routine ? */
+ if (files_hash[0].next == NULL) {
+ init_hash();
+ return NULL;
+ }
+
+ list_for_each(pos, &files_hash[hash]) {
+ odb_data_t * entry = list_entry(pos, odb_data_t, list);
+ if (strcmp(entry->filename, filename) == 0)
+ return entry;
+ }
+
+ return NULL;
+}
+
+
+int odb_open(odb_t * odb, char const * filename, enum odb_rw rw,
+ size_t sizeof_header)
+{
+ struct stat stat_buf;
+ odb_node_nr_t nr_node;
+ odb_data_t * data;
+ size_t hash;
+ int err = 0;
+
+ int flags = (rw == ODB_RDWR) ? (O_CREAT | O_RDWR) : O_RDONLY;
+ int mmflags = (rw == ODB_RDWR) ? (PROT_READ | PROT_WRITE) : PROT_READ;
+
+ hash = op_hash_string(filename) % FILES_HASH_SIZE;
+ data = find_samples_data(hash, filename);
+ if (data) {
+ odb->data = data;
+ data->ref_count++;
+ return 0;
+ }
+
+ data = xmalloc(sizeof(odb_data_t));
+ memset(data, '\0', sizeof(odb_data_t));
+ list_init(&data->list);
+ data->offset_node = sizeof_header + sizeof(odb_descr_t);
+ data->sizeof_header = sizeof_header;
+ data->ref_count = 1;
+ data->filename = xstrdup(filename);
+
+ data->fd = open(filename, flags, 0644);
+ if (data->fd < 0) {
+ err = errno;
+ goto out;
+ }
+
+ if (fstat(data->fd, &stat_buf)) {
+ err = errno;
+ goto fail;
+ }
+
+ if (stat_buf.st_size == 0) {
+ size_t file_size;
+
+ if (rw == ODB_RDONLY) {
+ err = EIO;
+ goto fail;
+ }
+
+ nr_node = DEFAULT_NODE_NR(data->offset_node);
+
+ file_size = tables_size(data, nr_node);
+ if (ftruncate(data->fd, file_size)) {
+ err = errno;
+ goto fail;
+ }
+ } else {
+ /* Calculate nr node allowing a sanity check later */
+ nr_node = (stat_buf.st_size - data->offset_node) /
+ ((sizeof(odb_index_t) * BUCKET_FACTOR) + sizeof(odb_node_t));
+ }
+
+ data->base_memory = mmap(0, tables_size(data, nr_node), mmflags,
+ MAP_SHARED, data->fd, 0);
+
+ if (data->base_memory == MAP_FAILED) {
+ err = errno;
+ goto fail;
+ }
+
+ data->descr = odb_to_descr(data);
+
+ if (stat_buf.st_size == 0) {
+ data->descr->size = nr_node;
+ /* page zero is not used */
+ data->descr->current_size = 1;
+ } else {
+ /* file already exist, sanity check nr node */
+ if (nr_node != data->descr->size) {
+ err = EINVAL;
+ goto fail_unmap;
+ }
+ }
+
+ data->hash_base = odb_to_hash_base(data);
+ data->node_base = odb_to_node_base(data);
+ data->hash_mask = (data->descr->size * BUCKET_FACTOR) - 1;
+
+ list_add(&data->list, &files_hash[hash]);
+ odb->data = data;
+out:
+ return err;
+fail_unmap:
+ munmap(data->base_memory, tables_size(data, nr_node));
+fail:
+ close(data->fd);
+ free(data->filename);
+ free(data);
+ odb->data = NULL;
+ goto out;
+}
+
+
+void odb_close(odb_t * odb)
+{
+ odb_data_t * data = odb->data;
+
+ if (data) {
+ data->ref_count--;
+ if (data->ref_count == 0) {
+ size_t size = tables_size(data, data->descr->size);
+ list_del(&data->list);
+ munmap(data->base_memory, size);
+ if (data->fd >= 0)
+ close(data->fd);
+ free(data->filename);
+ free(data);
+ odb->data = NULL;
+ }
+ }
+}
+
+
+int odb_open_count(odb_t const * odb)
+{
+ if (!odb->data)
+ return 0;
+ return odb->data->ref_count;
+}
+
+
+void * odb_get_data(odb_t * odb)
+{
+ return odb->data->base_memory;
+}
+
+
+void odb_sync(odb_t const * odb)
+{
+ odb_data_t * data = odb->data;
+ size_t size;
+
+ if (!data)
+ return;
+
+ size = tables_size(data, data->descr->size);
+ msync(data->base_memory, size, MS_ASYNC);
+}
diff --git a/libdb/db_stat.c b/libdb/db_stat.c
new file mode 100644
index 0000000..6d29e9a
--- /dev/null
+++ b/libdb/db_stat.c
@@ -0,0 +1,88 @@
+/**
+ * @file db_stat.c
+ * Statistics routines for libdb
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author Philippe Elie
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "odb.h"
+#include "op_types.h"
+
+/// hold various statistics data for a db file
+struct odb_hash_stat_t {
+ odb_node_nr_t node_nr; /**< allocated node number */
+ odb_node_nr_t used_node_nr; /**< in use node number */
+ count_type total_count; /**< cumulated samples count */
+ odb_index_t hash_table_size; /**< hash table entry number */
+ odb_node_nr_t max_list_length; /**< worst case */
+ double average_list_length; /**< average case */
+ /* do we need variance ? */
+};
+
+odb_hash_stat_t * odb_hash_stat(odb_t const * odb)
+{
+	size_t max_length = 0;
+	double total_length = 0.0;
+	size_t nr_non_empty_list = 0;
+	size_t pos;
+	odb_data_t * data = odb->data;
+
+	odb_hash_stat_t * result = calloc(1, sizeof(odb_hash_stat_t));
+	if (!result) {
+		fprintf(stderr, "not enough memory\n");
+		exit(EXIT_FAILURE);
+	}
+
+	result->node_nr = data->descr->size;
+	result->used_node_nr = data->descr->current_size;
+	result->hash_table_size = data->descr->size * BUCKET_FACTOR;
+
+	/* FIXME: I'm dubious if this do right statistics for hash table
+	 * efficiency check */
+
+	for (pos = 0 ; pos < result->hash_table_size ; ++pos) {
+		size_t cur_length = 0;
+		size_t index = data->hash_base[pos];
+		while (index) {
+			result->total_count += data->node_base[index].value;
+			index = data->node_base[index].next;
+			++cur_length;
+		}
+
+		if (cur_length > max_length)
+			max_length = cur_length;
+
+		if (cur_length) {
+			total_length += cur_length;
+			++nr_non_empty_list;
+		}
+	}
+
+	result->max_list_length = max_length;
+	/* guard against 0/0 = NaN when the table is empty */
+	result->average_list_length = nr_non_empty_list ? total_length / nr_non_empty_list : 0.0;
+
+	return result;
+}
+
+
+void odb_hash_display_stat(odb_hash_stat_t const * stat)
+{
+	printf("total node number: %u\n", stat->node_nr);
+	printf("total used node: %u\n", stat->used_node_nr);
+	printf("total count: %llu\n", stat->total_count);
+	printf("hash table size: %u\n", stat->hash_table_size);
+	printf("greater list length: %u\n", stat->max_list_length);
+	printf("average non empty list length: %2.4f\n", stat->average_list_length);
+}
+
+
+void odb_hash_free_stat(odb_hash_stat_t * stat)
+{
+ free(stat);
+}
diff --git a/libdb/db_travel.c b/libdb/db_travel.c
new file mode 100644
index 0000000..3ed467b
--- /dev/null
+++ b/libdb/db_travel.c
@@ -0,0 +1,18 @@
+/**
+ * @file db_travel.c
+ * Inspection of a DB
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author Philippe Elie
+ */
+
+#include "odb.h"
+
+odb_node_t * odb_get_iterator(odb_t const * odb, odb_node_nr_t * nr)
+{
+ /* node zero is unused */
+ *nr = odb->data->descr->current_size - 1;
+ return odb->data->node_base + 1;
+}
diff --git a/libdb/odb.h b/libdb/odb.h
new file mode 100644
index 0000000..c190b57
--- /dev/null
+++ b/libdb/odb.h
@@ -0,0 +1,223 @@
+/**
+ * @file odb.h
+ * This file contains various definitions and interface for management
+ * of in-memory, through mmaped file, growable hash table, that stores
+ * sample files.
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author Philippe Elie
+ */
+
+#ifndef ODB_HASH_H
+#define ODB_HASH_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "op_list.h"
+
+/** the type of key. 64-bit because CG needs 32-bit pair {from,to} */
+typedef uint64_t odb_key_t;
+/** the type of an information in the database */
+typedef unsigned int odb_value_t;
+/** the type of index (node number), list are implemented through index */
+typedef unsigned int odb_index_t;
+/** the type store node number */
+typedef odb_index_t odb_node_nr_t;
+/** store the hash mask, hash table size are always power of two */
+typedef odb_index_t odb_hash_mask_t;
+
+/* there is (bucket factor * nr node) entry in hash table, this can seem
+ * excessive but hash coding eip don't give a good distributions and our
+ * goal is to get a O(1) amortized insert time. bucket factor must be a
+ * power of two. FIXME: see big comment in odb_hash_add_node, you must
+ * re-enable zeroing hash table if BUCKET_FACTOR > 2 (roughly exact, you
+ * want to read the comment in odb_hash_add_node() if you tune this define)
+ */
+#define BUCKET_FACTOR 1
+
+/** a db hash node */
+typedef struct {
+ odb_key_t key; /**< eip */
+ odb_value_t value; /**< samples count */
+ odb_index_t next; /**< next entry for this bucket */
+} odb_node_t;
+
+/** the minimal information which must be stored in the file to reload
+ * properly the data base, following this header is the node array then
+ * the hash table (when growing we avoid to copy node array)
+ */
+typedef struct {
+ odb_node_nr_t size; /**< in node nr (power of two) */
+ odb_node_nr_t current_size; /**< nr used node + 1, node 0 unused */
+ int padding[6]; /**< for padding and future use */
+} odb_descr_t;
+
+/** a "database". this is an in memory only description.
+ *
+ * We allow to manage a database inside a mapped file with an "header" of
+ * unknown size so odb_open get a parameter to specify the size of this header.
+ * A typical use is:
+ *
+ * struct header { int etc; ... };
+ * odb_open(&hash, filename, ODB_RW, sizeof(header));
+ * so on this library have no dependency on the header type.
+ *
+ * the internal memory layout from base_memory is:
+ * the unknown header (sizeof_header)
+ * odb_descr_t
+ * the node array: (descr->size * sizeof(odb_node_t) entries
+ * the hash table: array of odb_index_t indexing the node array
+ * (descr->size * BUCKET_FACTOR) entries
+ */
+typedef struct odb_data {
+ odb_node_t * node_base; /**< base memory area of the page */
+ odb_index_t * hash_base; /**< base memory of hash table */
+ odb_descr_t * descr; /**< the current state of database */
+	odb_hash_mask_t hash_mask;	/**< == (descr->size * BUCKET_FACTOR) - 1 */
+ unsigned int sizeof_header; /**< from base_memory to odb header */
+ unsigned int offset_node; /**< from base_memory to node array */
+ void * base_memory; /**< base memory of the maped memory */
+ int fd; /**< mmaped memory file descriptor */
+ char * filename; /**< full path name of sample file */
+ int ref_count; /**< reference count */
+ struct list_head list; /**< hash bucket list */
+} odb_data_t;
+
+typedef struct {
+ odb_data_t * data;
+} odb_t;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* db_manage.c */
+
+/** how to open the DB file */
+enum odb_rw {
+ ODB_RDONLY = 0, /**< open for read only */
+ ODB_RDWR = 1 /**< open for read and/or write */
+};
+
+/**
+ * odb_init - initialize a DB file
+ * @param odb the DB file to init
+ */
+void odb_init(odb_t * odb);
+
+/**
+ * odb_open - open a DB file
+ * @param odb the data base object to setup
+ * @param filename the file backing the mapped memory
+ * @param rw \enum ODB_RW if opening for writing, else \enum ODB_RDONLY
+ * @param sizeof_header size of the file header if any
+ *
+ * The sizeof_header parameter allows the data file to have a header
+ * at the start of the file which is skipped.
+ * odb_open() always preallocate a few number of pages.
+ * returns 0 on success, errno on failure
+ */
+int odb_open(odb_t * odb, char const * filename,
+ enum odb_rw rw, size_t sizeof_header);
+
+/** Close the given ODB file */
+void odb_close(odb_t * odb);
+
+/** return the number of times this sample file is open */
+int odb_open_count(odb_t const * odb);
+
+/** return the start of the mapped data */
+void * odb_get_data(odb_t * odb);
+
+/** issue a msync on the used size of the mmaped file */
+void odb_sync(odb_t const * odb);
+
+/**
+ * grow the hashtable in such way current_size is the index of the first free
+ * node. Take care all node pointer can be invalidated by this call.
+ *
+ * Node allocation is done in a two step way 1st) ensure a free node exist
+ * eventually, caller can setup it, 2nd) commit the node allocation with
+ * odb_commit_reservation().
+ * This is done in this way to ensure node setup is visible from another
+ * process like pp tools in an atomic way.
+ *
+ * returns 0 on success, non zero on failure in this case this function do
+ * nothing and errno is set by the first libc call failure allowing to retry
+ * after cleanup some program resource.
+ */
+int odb_grow_hashtable(odb_data_t * data);
+/**
+ * commit a previously successful node reservation. This can't fail.
+ */
+static __inline void odb_commit_reservation(odb_data_t * data)
+{
+ ++data->descr->current_size;
+}
+
+/** "impossible" node number used to indicate an error from odb_add_node() */
+#define ODB_NODE_NR_INVALID ((odb_node_nr_t)-1)
+
+/* db_debug.c */
+/** check that the hash is well built */
+int odb_check_hash(odb_t const * odb);
+
+/* db_stat.c */
+typedef struct odb_hash_stat_t odb_hash_stat_t;
+odb_hash_stat_t * odb_hash_stat(odb_t const * odb);
+void odb_hash_display_stat(odb_hash_stat_t const * stats);
+void odb_hash_free_stat(odb_hash_stat_t * stats);
+
+/* db_insert.c */
+/** update info at key by incrementing its associated value by one,
+ * if the key does not exist a new node is created and the value associated
+ * is set to one.
+ *
+ * returns 0 on success, non-zero (an errno value) on failure
+ */
+int odb_update_node(odb_t * odb, odb_key_t key);
+
+/** Add a new node w/o regarding if a node with the same key already exists
+ *
+ * returns 0 on success, non-zero (an errno value) on failure
+ */
+int odb_add_node(odb_t * odb, odb_key_t key, odb_value_t value);
+
+/* db_travel.c */
+/**
+ * return a base pointer to the node array and number of node in this array
+ * caller then will iterate through:
+ *
+ * odb_node_nr_t node_nr, pos;
+ * odb_node_t * node = odb_get_iterator(odb, &node_nr);
+ * for ( pos = 0 ; pos < node_nr ; ++pos)
+ * // do something
+ *
+ * note that callers do not need to filter the nil key as it is a valid key;
+ * The returned range is all valid (i.e. should never contain zero value).
+ */
+odb_node_t * odb_get_iterator(odb_t const * odb, odb_node_nr_t * nr);
+
+static __inline unsigned int
+odb_do_hash(odb_data_t const * data, odb_key_t value)
+{
+ /* FIXME: better hash for eip value, needs to instrument code
+ * and do a lot of tests ... */
+ /* trying to combine high order bits his a no-op: inside a binary image
+ * high order bits don't vary a lot, hash table start with 7 bits mask
+ * so this hash coding use bits 0-7, 8-15. Hash table is stored in
+ * files avoiding to rebuilding them at profiling re-start so
+ * on changing do_hash() change the file format!
+ */
+ uint32_t temp = (value >> 32) ^ value;
+ return ((temp << 0) ^ (temp >> 8)) & data->hash_mask;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !ODB_HASH_H */