diff options
author | Joern Engel <joern@logfs.org> | 2012-01-16 15:12:42 -0800 |
---|---|---|
committer | Joern Engel <joern@logfs.org> | 2012-01-16 15:14:49 -0800 |
commit | 8e82320bb8f943e3df6daaaebead65eea6c71f26 (patch) | |
tree | 3deab9c375f8f5efb18ff90e34cb934d5c9a208a | |
parent | b94965242daa03117a50f8551f9866f347b91548 (diff) | |
download | cancd-8e82320bb8f943e3df6daaaebead65eea6c71f26.tar.gz |
Add my btree library
It will be used to store per-ip data, like reverse dns lookups, whether
we previously had a newline, etc.
Signed-off-by: Joern Engel <joern@logfs.org>
-rw-r--r-- | btree.c | 765 | ||||
-rw-r--r-- | btree.h | 264 | ||||
-rw-r--r-- | kerncompat.h | 212 |
3 files changed, 1241 insertions, 0 deletions
@@ -0,0 +1,765 @@ +/* + * lib/btree.c - Simple In-memory B+Tree + * + * License: GPLv2 + * + * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org> + * + * A relatively simple B+Tree implementation. I have written it as a learning + * excercise to understand how B+Trees work. Turned out to be useful as well. + * + * B+Trees can be used similar to Linux radix trees (which don't have anything + * in common with textbook radix trees, beware). Prerequisite for them working + * well is that access to a random tree node is much faster than a large number + * of operations within each node. + * + * Disks have fulfilled the prerequisite for a long time. More recently DRAM + * has gained similar properties, as memory access times, when measured in cpu + * cycles, have increased. Cacheline sizes have increased as well, which also + * helps B+Trees. + * + * Compared to radix trees, B+Trees are more efficient when dealing with a + * sparsely populated address space. Between 25% and 50% of the memory is + * occupied with valid pointers. When densely populated, radix trees contain + * ~98% pointers - hard to beat. Very sparse radix trees contain only ~2% + * pointers. + * + * This particular implementation stores pointers identified by a long value. + * Storing NULL pointers is illegal, lookup will return NULL when no entry + * was found. + * + * One trick was used that are not commonly found in textbooks. The lowest + * values are to the right, not to the left. All used slots within a node + * are on the left, all unused slots contain NUL values. Most operations + * simply loop once over all slots and terminate on the first NUL. + */ + +#include <errno.h> +#include "btree.h" + +/* + * Depending on the ratio of lookups vs. insert and removes, it may be + * beneficial to spend more work trying to keep the tree as compact as + * possible. With roughly 50 lookups for every insert/remove, stealing + * from neighbours becomes more effective. 
If that is the case, please + * define AGGRESSIVE_COMPACTION below + */ +// #define AGGRESSIVE_COMPACTION + +#define MAX(a, b) ((a) > (b) ? (a) : (b)) +#define NODESIZE MAX(L1_CACHE_BYTES, 128) + +struct btree_geo btree_geo32 = { + .keylen = 1, + .no_pairs = NODESIZE / sizeof(long) / 2, +}; + +#define LONG_PER_U64 (64 / BITS_PER_LONG) +struct btree_geo btree_geo64 = { + .keylen = LONG_PER_U64, + .no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64), +}; + +struct btree_geo btree_geo128 = { + .keylen = 2 * LONG_PER_U64, + .no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64), +}; + +static unsigned long *btree_node_alloc(struct btree_head *head) +{ + return calloc(1, NODESIZE); +} + +static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n) +{ + size_t i; + + for (i = 0; i < n; i++) { + if (l1[i] < l2[i]) + return -1; + if (l1[i] > l2[i]) + return 1; + } + return 0; +} + +static unsigned long *longcpy(unsigned long *dest, const unsigned long *src, + size_t n) +{ + size_t i; + + for (i = 0; i < n; i++) + dest[i] = src[i]; + return dest; +} + +static unsigned long *longset(unsigned long *s, unsigned long c, size_t n) +{ + size_t i; + + for (i = 0; i < n; i++) + s[i] = c; + return s; +} + +/* + * B+Tree node format: + * [key0, key1, ..., keyN] [val0, val1, ..., valN] + * Each key is an array of unsigned longs, head->keylen in total. + * Total number of keys and vals (N) is head->no_pairs. 
+ */ + +static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n) +{ + return &node[n * geo->keylen]; +} + +static unsigned long bval(struct btree_geo *geo, unsigned long *node, int n) +{ + return node[geo->no_pairs * geo->keylen + n]; +} + +static void setkey(struct btree_geo *geo, unsigned long *node, + unsigned long *key, int n) +{ + longcpy(bkey(geo, node, n), key, geo->keylen); +} + +static void setval(struct btree_geo *geo, unsigned long *node, + unsigned long val, int n) +{ + node[geo->no_pairs * geo->keylen + n] = val; +} + +static void clearpair(struct btree_geo *geo, unsigned long *node, int n) +{ + longset(bkey(geo, node, n), 0, geo->keylen); + node[geo->no_pairs * geo->keylen + n] = 0; +} + +#if 0 +static void dumpkey(struct btree_geo *geo, unsigned long *key) +{ + int k; + + printf("(%lx", key[0]); + for (k = 1; k < geo->keylen; k++) + printf(",%lx", key[k]); + printf(")"); +} + +static void dumpnode(struct btree_geo *geo, unsigned long *node) +{ + int i; + unsigned long *key; + + printf("%p: ", node); + for (i = 0; i < geo->no_pairs; i++) { + key = bkey(geo, node, i); + dumpkey(geo, key); + printf(" %lx ", bval(geo, node, i)); + } + printf("\n"); +} + +static void __dumptree(struct btree_head *head, struct btree_geo *geo, + unsigned long *node, int height) +{ + int i; + unsigned long *child; + + if (!height) + return; + + printf("%2x ", height); + dumpnode(geo, node); + for (i = 0; i < geo->no_pairs; i++) { + child = (void *)bval(geo, node, i); + if (!child) + return; + __dumptree(head, geo, child, height - 1); + } +} + +static void dumptree(struct btree_head *head, struct btree_geo *geo) +{ + __dumptree(head, geo, head->node, head->height); +} +#endif + +static inline void __btree_init(struct btree_head *head) +{ + head->node = NULL; + head->height = 0; +} + +void btree_init(struct btree_head *head) +{ + __btree_init(head); +} + +unsigned long *btree_last(struct btree_head *head, struct btree_geo *geo) +{ + int height = 
head->height; + unsigned long *node = head->node; + + if (height == 0) + return NULL; + + for ( ; height > 1; height--) + node = (unsigned long *)bval(geo, node, 0); + + return bkey(geo, node, 0); +} + +static int keycmp(struct btree_geo *geo, unsigned long *node, int pos, + unsigned long *key) +{ + return longcmp(bkey(geo, node, pos), key, geo->keylen); +} + +void *btree_lookup(struct btree_head *head, struct btree_geo *geo, + unsigned long *key) +{ + int i, height = head->height; + unsigned long *node = head->node; + + if (height == 0) + return NULL; + + for ( ; height > 1; height--) { + for (i = 0; i < geo->no_pairs; i++) + if (keycmp(geo, node, i, key) <= 0) + break; + if (i == geo->no_pairs) + return NULL; + node = (unsigned long *)bval(geo, node, i); + if (!node) + return NULL; + } + + if (!node) + return NULL; + + for (i = 0; i < geo->no_pairs; i++) + if (keycmp(geo, node, i, key) == 0) + return (void *)bval(geo, node, i); + return NULL; +} + +static int getpos(struct btree_geo *geo, unsigned long *node, + unsigned long *key) +{ + int i; + + for (i = 0; i < geo->no_pairs; i++) { + if (keycmp(geo, node, i, key) <= 0) + break; + } + return i; +} + +static int getfill(struct btree_geo *geo, unsigned long *node, int start) +{ + int i; + + for (i = start; i < geo->no_pairs; i++) + if (bval(geo, node, i) == 0) + break; + return i; +} + +/* + * locate the correct leaf node in the btree + */ +static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, int level) +{ + unsigned long *node = head->node; + int i, height; + + for (height = head->height; height > level; height--) { + for (i = 0; i < geo->no_pairs; i++) + if (keycmp(geo, node, i, key) <= 0) + break; + + if ((i == geo->no_pairs) || !bval(geo, node, i)) { + /* right-most key is too large, update it */ + /* FIXME: If the right-most key on higher levels is + * always zero, this wouldn't be necessary. 
*/ + i--; + setkey(geo, node, key, i); + } + BUG_ON(i < 0); + node = (unsigned long *)bval(geo, node, i); + } + BUG_ON(!node); + return node; +} + +static int btree_grow(struct btree_head *head, struct btree_geo *geo) +{ + unsigned long *node; + int fill; + + node = btree_node_alloc(head); + if (!node) + return -ENOMEM; + if (head->node) { + fill = getfill(geo, head->node, 0); + setkey(geo, node, bkey(geo, head->node, fill - 1), 0); + setval(geo, node, (unsigned long)head->node, 0); + } + head->node = node; + head->height++; + return 0; +} + +static void btree_shrink(struct btree_head *head, struct btree_geo *geo, + int fill) +{ + unsigned long *node; + + if ((fill == 0) || ((fill == 1) && (head->height > 1))) { + node = head->node; + head->node = (unsigned long *)bval(geo, node, 0); + head->height--; + free(node); + } +} + +static void steal_l(struct btree_head *head, struct btree_geo *geo, int level, + unsigned long *left, int lfill, + unsigned long *right, int rfill, + unsigned long *parent, int lpos, + int no_entries) +{ + int i; + + for (i = rfill - 1; i >= 0; i--) { + /* Shift entries on the right */ + setkey(geo, right, bkey(geo, right, i), i + no_entries); + setval(geo, right, bval(geo, right, i), i + no_entries); + } + for (i = 0; i < no_entries; i++) { + /* Move some entries to the right */ + setkey(geo, right, bkey(geo, left, lfill - no_entries + i), i); + setval(geo, right, bval(geo, left, lfill - no_entries + i), i); + } + /* Set parent key */ + setkey(geo, parent, bkey(geo, left, lfill - no_entries - 1), lpos); + for (i = lfill - no_entries; i < lfill; i++) + clearpair(geo, left, i); +} + +static void steal_r(struct btree_head *head, struct btree_geo *geo, int level, + unsigned long *left, int lfill, + unsigned long *right, int rfill, + unsigned long *parent, int lpos, + int no_entries) +{ + int i; + + for (i = 0; i < no_entries; i++) { + /* Move some entries to the left */ + setkey(geo, left, bkey(geo, right, i), lfill + i); + setval(geo, left, 
bval(geo, right, i), lfill + i); + } + /* Set parent key */ + setkey(geo, parent, bkey(geo, right, no_entries - 1), lpos); + /* Shift entries on the right */ + for ( ; i < rfill; i++) { + setkey(geo, right, bkey(geo, right, i), i - no_entries); + setval(geo, right, bval(geo, right, i), i - no_entries); + } + for (i = rfill - no_entries; i < rfill; i++) + clearpair(geo, right, i); +} + +static int btree_insert_level(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, unsigned long val, int level); +static int split(struct btree_head *head, struct btree_geo *geo, + unsigned long *node, int level) +{ + unsigned long *new; + int i, err, fill = geo->no_pairs; + + new = btree_node_alloc(head); + if (!new) + return -ENOMEM; + err = btree_insert_level(head, geo, + bkey(geo, node, fill / 2 - 1), + (unsigned long)new, level + 1); + if (err) { + free(new); + return err; + } + for (i = 0; i < fill / 2; i++) { + setkey(geo, new, bkey(geo, node, i), i); + setval(geo, new, bval(geo, node, i), i); + setkey(geo, node, bkey(geo, node, i + fill / 2), i); + setval(geo, node, bval(geo, node, i + fill / 2), i); + clearpair(geo, node, i + fill / 2); + } + if (fill & 1) { + setkey(geo, node, bkey(geo, node, fill - 1), i); + setval(geo, node, bval(geo, node, fill - 1), i); + clearpair(geo, node, fill - 1); + } + return 0; +} + +static int rebalance_insert(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, unsigned long *child, int level) +{ +#ifdef AGGRESSIVE_COMPACTION + unsigned long *parent, *left, *right; + int child_no, no_left, no_right, delta; + + if (level == head->height) + goto split; + + parent = find_level(head, geo, key, level + 1); + child_no = getpos(geo, parent, key); + BUG_ON(bval(geo, parent, child_no) != (unsigned long)child); + + if (child_no > 0) { + left = (unsigned long *)bval(geo, parent, child_no - 1); + no_left = getfill(geo, left, 0); + delta = geo->no_pairs - no_left; + if (delta >= 2) { + steal_r(head, geo, level, + left, 
no_left, + child, geo->no_pairs, + parent, child_no - 1, delta / 2); + return 0; + } + } + if (child_no + 1 < getfill(geo, parent, child_no)) { + right = (unsigned long *)bval(geo, parent, child_no + 1); + no_right = getfill(geo, right, 0); + delta = geo->no_pairs - no_right; + if (delta >= 2) { + steal_l(head, geo, level, + child, geo->no_pairs, + right, no_right, + parent, child_no, delta / 2); + return 0; + } + } +split: +#endif + return split(head, geo, child, level); +} + +static int btree_insert_level(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, unsigned long val, int level) +{ + unsigned long *node; + int i, pos, fill, err; + + BUG_ON(!val); + if (head->height < level) { + err = btree_grow(head, geo); + if (err) + return err; + } + +retry: + node = find_level(head, geo, key, level); + pos = getpos(geo, node, key); + fill = getfill(geo, node, pos); + /* two identical keys are not allowed */ + BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0); + + if (fill == geo->no_pairs) { + /* need to split node */ + err = rebalance_insert(head, geo, key, node, level); + if (err) + return err; + goto retry; + } + BUG_ON(fill >= geo->no_pairs); + + /* shift and insert */ + for (i = fill; i > pos; i--) { + setkey(geo, node, bkey(geo, node, i - 1), i); + setval(geo, node, bval(geo, node, i - 1), i); + } + setkey(geo, node, key, pos); + setval(geo, node, val, pos); + + return 0; +} + +int btree_insert(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val) +{ + return btree_insert_level(head, geo, key, (unsigned long)val, 1); +} + +static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, int level); +static void merge(struct btree_head *head, struct btree_geo *geo, int level, + unsigned long *left, int lfill, + unsigned long *right, int rfill, + unsigned long *parent, int lpos) +{ + int i; + + for (i = 0; i < rfill; i++) { + /* Move all entries to the left */ + setkey(geo, left, 
bkey(geo, right, i), lfill + i); + setval(geo, left, bval(geo, right, i), lfill + i); + } + /* Exchange left and right child in parent */ + setval(geo, parent, (unsigned long)right, lpos); + setval(geo, parent, (unsigned long)left, lpos + 1); + /* Remove left (formerly right) child from parent */ + btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1); + free(right); +} + +static void rebalance(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, int level, unsigned long *child, int fill) +{ + unsigned long *parent, *left = NULL, *right = NULL; + int child_no, no_left, no_right, i; + + parent = find_level(head, geo, key, level + 1); + child_no = getpos(geo, parent, key); + BUG_ON(bval(geo, parent, child_no) != (unsigned long)child); + + if (child_no > 0) { + left = (unsigned long *)bval(geo, parent, child_no - 1); + no_left = getfill(geo, left, 0); + if (fill + no_left <= geo->no_pairs) { + /* Merge with left neighbour */ + merge(head, geo, level, + left, no_left, + child, fill, + parent, child_no - 1); + return; + } + } + if (child_no + 1 < getfill(geo, parent, child_no)) { + right = (unsigned long *)bval(geo, parent, child_no + 1); + no_right = getfill(geo, right, 0); + if (fill + no_right <= geo->no_pairs) { + /* Merge with right neighbour */ + merge(head, geo, level, + child, fill, + right, no_right, + parent, child_no); + return; + } + } + /* + * Leaving the btree in a somewhat unbalanced state can improve + * performance. Stealing entries from a neighbour is a fairly + * expensive operation. In trees where reads completely dominate + * writes, the cost will be amortized sooner or later. When the + * ratio of writes increases, they may never be amortized. + * + * So avoid stealing unless the tree would get _really_ unbalanced. 
+ */ + if (fill > 1) + return; + if (left) { + /* Steal from left neighbour */ + i = (no_left - fill) / 2; + BUG_ON(i < 1); + steal_l(head, geo, level, + left, no_left, + child, fill, + parent, child_no - 1, i); + return; + } + if (right) { + /* Steal from right neighbour */ + i = (no_right - fill) / 2; + BUG_ON(i < 1); + steal_r(head, geo, level, + child, fill, + right, no_right, + parent, child_no, i); + return; + } + BUG(); /* We should never get here */ +} + +static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, int level) +{ + unsigned long *node; + int i, pos, fill; + void *ret; + + if (level > head->height) { + /* we recursed all the way up */ + head->height = 0; + head->node = NULL; + return NULL; + } + + node = find_level(head, geo, key, level); + pos = getpos(geo, node, key); + fill = getfill(geo, node, pos); + if ((level == 1) && (keycmp(geo, node, pos, key) != 0)) + return NULL; + ret = (void *)bval(geo, node, pos); + + /* remove and shift */ + for (i = pos; i < fill - 1; i++) { + setkey(geo, node, bkey(geo, node, i + 1), i); + setval(geo, node, bval(geo, node, i + 1), i); + } + clearpair(geo, node, fill - 1); + + if (fill - 1 < geo->no_pairs / 2) { + if (level < head->height) + rebalance(head, geo, key, level, node, fill - 1); + else + btree_shrink(head, geo, fill - 1); + } + + return ret; +} + +void *btree_remove(struct btree_head *head, struct btree_geo *geo, + unsigned long *key) +{ + if (head->height == 0) + return NULL; + + return btree_remove_level(head, geo, key, 1); +} + +int btree_merge(struct btree_head *target, struct btree_head *victim, + struct btree_geo *geo, unsigned long *duplicate) +{ + unsigned long *key; + void *val; + int err; + + BUG_ON(target == victim); + + if (!(target->node)) { + /* target is empty, just copy fields over */ + target->node = victim->node; + target->height = victim->height; + __btree_init(victim); + return 0; + } + + for (;;) { + key = btree_last(victim, geo); + if 
(!key) + break; + val = btree_lookup(victim, geo, key); + err = btree_insert(target, geo, key, val); + if (err) + return err; + /* We must make a copy of the key, as the original will get + * mangled inside btree_remove. */ + longcpy(duplicate, key, geo->keylen); + btree_remove(victim, geo, duplicate); + } + return 0; +} + +static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo, + unsigned long *node, long opaque, + void (*func)(void *elem, long opaque, + unsigned long *key, size_t index, void *func2), + void *func2, int reap, int height, size_t count) +{ + int i; + unsigned long *child; + + for (i = 0; i < geo->no_pairs; i++) { + child = (void *)bval(geo, node, i); + if (!child) + break; + if (height > 1) + count = __btree_for_each(head, geo, child, opaque, + func, func2, reap, height - 1, count); + else + func(child, opaque, bkey(geo, node, i), count++, + func2); + } + if (reap) + free(node); + return count; +} + +static void empty(void *elem, long opaque, unsigned long *key, size_t index, + void *func2) +{ +} + +void visitorl(void *elem, long opaque, unsigned long *key, size_t index, + void *__func) +{ + visitorl_t func = __func; + + func(elem, opaque, *key, index); +} + +void visitor32(void *elem, long opaque, unsigned long *__key, size_t index, + void *__func) +{ + visitor32_t func = __func; + u32 *key = (void *)__key; + + func(elem, opaque, *key, index); +} + +void visitor64(void *elem, long opaque, unsigned long *__key, size_t index, + void *__func) +{ + visitor64_t func = __func; + u64 *key = (void *)__key; + + func(elem, opaque, *key, index); +} + +void visitor128(void *elem, long opaque, unsigned long *__key, size_t index, + void *__func) +{ + visitor128_t func = __func; + u64 *key = (void *)__key; + + func(elem, opaque, key[0], key[1], index); +} + +size_t btree_visitor(struct btree_head *head, struct btree_geo *geo, + long opaque, + void (*func)(void *elem, long opaque, unsigned long *key, + size_t index, void *func2), void 
*func2) +{ + size_t count = 0; + + if (!func2) + func = empty; + if (head->node) + count = __btree_for_each(head, geo, head->node, opaque, func, + func2, 0, head->height, 0); + return count; +} + +size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo, + long opaque, + void (*func)(void *elem, long opaque, unsigned long *key, + size_t index, void *func2), void *func2) +{ + size_t count = 0; + + if (!func2) + func = empty; + if (head->node) + count = __btree_for_each(head, geo, head->node, opaque, func, + func2, 1, head->height, 0); + __btree_init(head); + return count; +} @@ -0,0 +1,264 @@ +#ifndef BTREE_H +#define BTREE_H + +#include "kerncompat.h" + +/* + * B+Tree node format: + * [key0, key1, ..., keyN] [val0, val1, ..., valN] + * Each key is an array of unsigned longs, head->no_longs in total. + * Total number of keys and vals (N) is head->no_pairs. + */ + +struct btree_head { + unsigned long *node; + int height; +}; + +struct btree_geo { + int keylen; + int no_pairs; +}; +extern struct btree_geo btree_geo32; +extern struct btree_geo btree_geo64; +extern struct btree_geo btree_geo128; + +struct btree_headl { struct btree_head h; }; +struct btree_head32 { struct btree_head h; }; +struct btree_head64 { struct btree_head h; }; +struct btree_head128 { struct btree_head h; }; + +/* + * These couple of functions are all there is to it. The rest of this header + * consists only of wrappers that try to add some typesafety, make the code + * a little self-documenting and generally be nice to people. 
+ */ +void btree_free(void *element, void *pool_data); +void btree_init(struct btree_head *head); +void *btree_lookup(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); +int btree_insert(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val); +void *btree_remove(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); +int btree_merge(struct btree_head *target, struct btree_head *victim, + struct btree_geo *geo, unsigned long *duplicate); +unsigned long *btree_last(struct btree_head *head, struct btree_geo *geo); +size_t btree_visitor(struct btree_head *head, struct btree_geo *geo, + long opaque, + void (*func)(void *elem, long opaque, unsigned long *key, + size_t index, void *func2), void *func2); +size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo, + long opaque, + void (*func)(void *elem, long opaque, unsigned long *key, + size_t index, void *func2), void *func2); + +/* key is unsigned long */ +static inline void btree_initl(struct btree_headl *head) +{ + btree_init(&head->h); +} + +static inline void *btree_lookupl(struct btree_headl *head, unsigned long key) +{ + return btree_lookup(&head->h, &btree_geo32, &key); +} + +static inline int btree_insertl(struct btree_headl *head, unsigned long key, + void *val) +{ + return btree_insert(&head->h, &btree_geo32, &key, val); +} + +static inline void *btree_removel(struct btree_headl *head, unsigned long key) +{ + return btree_remove(&head->h, &btree_geo32, &key); +} + +static inline int btree_mergel(struct btree_headl *target, + struct btree_headl *victim) +{ + unsigned long scratch; + + return btree_merge(&target->h, &victim->h, &btree_geo32, &scratch); +} + +void visitorl(void *elem, long opaque, unsigned long *key, size_t index, + void *__func); + +typedef void (*visitorl_t)(void *elem, long opaque, unsigned long key, + size_t index); + +static inline size_t btree_visitorl(struct btree_headl *head, long opaque, + visitorl_t func2) +{ 
+ return btree_visitor(&head->h, &btree_geo32, opaque, visitorl, func2); +} + +static inline size_t btree_grim_visitorl(struct btree_headl *head, long opaque, + visitorl_t func2) +{ + return btree_grim_visitor(&head->h, &btree_geo32, opaque, visitorl, func2); +} + +/* key is u32 */ +static inline void btree_init32(struct btree_head32 *head) +{ + btree_init(&head->h); +} + +static inline void *btree_lookup32(struct btree_head32 *head, u32 key) +{ + return btree_lookup(&head->h, &btree_geo32, (unsigned long *)&key); +} + +static inline int btree_insert32(struct btree_head32 *head, u32 key, + void *val) +{ + return btree_insert(&head->h, &btree_geo32, (unsigned long *)&key, val); +} + +static inline void *btree_remove32(struct btree_head32 *head, u32 key) +{ + return btree_remove(&head->h, &btree_geo32, (unsigned long *)&key); +} + +static inline int btree_merge32(struct btree_head32 *target, + struct btree_head32 *victim) +{ + unsigned long scratch; + + return btree_merge(&target->h, &victim->h, &btree_geo32, &scratch); +} + +void visitor32(void *elem, long opaque, unsigned long *__key, size_t index, + void *__func); + +typedef void (*visitor32_t)(void *elem, long opaque, u32 key, size_t index); + +static inline size_t btree_visitor32(struct btree_head32 *head, long opaque, + visitor32_t func2) +{ + return btree_visitor(&head->h, &btree_geo32, opaque, visitor32, func2); +} + +static inline size_t btree_grim_visitor32(struct btree_head32 *head, long opaque, + visitor32_t func2) +{ + return btree_grim_visitor(&head->h, &btree_geo32, opaque, visitor32, func2); +} + +/* key is u64 */ +static inline void btree_init64(struct btree_head64 *head) +{ + btree_init(&head->h); +} + +static inline void *btree_lookup64(struct btree_head64 *head, u64 key) +{ + return btree_lookup(&head->h, &btree_geo64, (unsigned long *)&key); +} + +static inline int btree_insert64(struct btree_head64 *head, u64 key, + void *val) +{ + return btree_insert(&head->h, &btree_geo64, (unsigned long 
*)&key, val); +} + +static inline void *btree_remove64(struct btree_head64 *head, u64 key) +{ + return btree_remove(&head->h, &btree_geo64, (unsigned long *)&key); +} + +static inline int btree_merge64(struct btree_head64 *target, + struct btree_head64 *victim) +{ + u64 scratch; + + return btree_merge(&target->h, &victim->h, &btree_geo64, + (unsigned long *)&scratch); +} + +void visitor64(void *elem, long opaque, unsigned long *__key, size_t index, + void *__func); + +typedef void (*visitor64_t)(void *elem, long opaque, u64 key, size_t index); + +static inline size_t btree_visitor64(struct btree_head64 *head, long opaque, + visitor64_t func2) +{ + return btree_visitor(&head->h, &btree_geo64, opaque, visitor64, func2); +} + +static inline size_t btree_grim_visitor64(struct btree_head64 *head, long opaque, + visitor64_t func2) +{ + return btree_grim_visitor(&head->h, &btree_geo64, opaque, visitor64, func2); +} + +/* key is 128bit (two u64) */ +static inline void btree_init128(struct btree_head128 *head) +{ + btree_init(&head->h); +} + +static inline void *btree_lookup128(struct btree_head128 *head, u64 k1, u64 k2) +{ + u64 key[2] = {k1, k2}; + return btree_lookup(&head->h, &btree_geo128, (unsigned long *)&key); +} + +static inline int btree_insert128(struct btree_head128 *head, u64 k1, u64 k2, + void *val) +{ + u64 key[2] = {k1, k2}; + return btree_insert(&head->h, &btree_geo128, (unsigned long *)&key, val); +} + +static inline void *btree_remove128(struct btree_head128 *head, u64 k1, u64 k2) +{ + u64 key[2] = {k1, k2}; + return btree_remove(&head->h, &btree_geo128, (unsigned long *)&key); +} + +static inline void btree_last128(struct btree_head128 *head, u64 *k1, u64 *k2) +{ + u64 *key = (u64 *)btree_last(&head->h, &btree_geo128); + + if (key) { + *k1 = key[0]; + *k2 = key[1]; + } else { + *k1 = 0; + *k2 = 0; + } +} + +static inline int btree_merge128(struct btree_head128 *target, + struct btree_head128 *victim) +{ + u64 scratch[2]; + + return 
btree_merge(&target->h, &victim->h, &btree_geo128, + (unsigned long *)scratch); +} + +void visitor128(void *elem, long opaque, unsigned long *__key, size_t index, + void *__func); + +typedef void (*visitor128_t)(void *elem, long opaque, u64 key1, u64 key2, + size_t index); + +static inline size_t btree_visitor128(struct btree_head128 *head, long opaque, + visitor128_t func2) +{ + return btree_visitor(&head->h, &btree_geo128, opaque, visitor128, func2); +} + +static inline size_t btree_grim_visitor128(struct btree_head128 *head, long opaque, + visitor128_t func2) +{ + return btree_grim_visitor(&head->h, &btree_geo128, opaque, visitor128, func2); +} + +#endif diff --git a/kerncompat.h b/kerncompat.h new file mode 100644 index 0000000..521436f --- /dev/null +++ b/kerncompat.h @@ -0,0 +1,212 @@ +/* + * Copyright (C) 2007 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
/*
 * kerncompat.h - minimal userspace shims for kernel idioms used by btree.c:
 * fixed-width types, bitops, BUG()/BUG_ON(), endian conversion and simple
 * allocation wrappers.
 */

#ifndef __KERNCOMPAT
#define __KERNCOMPAT

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <endian.h>
#include <byteswap.h>

#define gfp_t int
#define get_cpu_var(p) (p)
#define __get_cpu_var(p) (p)
#define BITS_PER_LONG (sizeof(long) * 8)
#define __GFP_BITS_SHIFT 20
#define __GFP_BITS_MASK ((int)((1 << __GFP_BITS_SHIFT) - 1))
#define GFP_KERNEL 0
#define GFP_NOFS 0
#define __read_mostly
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define ULONG_MAX (~0UL)

#ifdef __CHECKER__
#define __force __attribute__((force))
#define __bitwise__ __attribute__((bitwise))
#else
#define __force
#define __bitwise__
#endif

#ifdef __CHECKER__
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
typedef unsigned int __u32;
typedef unsigned long long u64;
typedef char s8;
typedef short s16;
typedef int s32;
typedef int __s32;
typedef long long s64;
#else
#include <asm/types.h>
typedef __u8 u8;
typedef __u16 u16;
typedef __u32 u32;
typedef __u64 u64;
typedef __s8 s8;
typedef __s16 s16;
typedef __s32 s32;
typedef __s64 s64;
#endif

struct vma_shared { int prio_tree_node; };

struct vm_area_struct {
	unsigned long vm_pgoff;
	unsigned long vm_start;
	unsigned long vm_end;
	struct vma_shared shared;
};

struct page {
	unsigned long index;
};

/* Uniprocessor userspace: preemption control is a no-op. */
#define preempt_enable() do { } while (0)
#define preempt_disable() do { } while (0)

#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	*p |= mask;
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	*p &= ~mask;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}

#define BUG() do { \
	printf("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \
	abort(); \
} while (0)

#define unlikely(cond) cond
#define likely(cond) cond
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)

#undef offsetof
#ifdef __compiler_offsetof
#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
#else
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif

#define container_of(ptr, type, member) ({			\
	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})

#ifdef __CHECKER__
#define __CHECK_ENDIAN__
#define __bitwise __bitwise__
#else
#undef __bitwise
#define __bitwise
#endif

typedef u16 __bitwise __le16;
typedef u16 __bitwise __be16;
typedef u32 __bitwise __le32;
typedef u32 __bitwise __be32;
typedef u64 __bitwise __le64;
typedef u64 __bitwise __be64;

#if __BYTE_ORDER == __BIG_ENDIAN
#define cpu_to_be64(x) ((__force __be64)(u64)(x))
#define be64_to_cpu(x) ((__force u64)(__be64)(x))
#define cpu_to_be32(x) ((__force __be32)(u32)(x))
#define be32_to_cpu(x) ((__force u32)(__be32)(x))
#define cpu_to_be16(x) ((__force __be16)(u16)(x))
#define be16_to_cpu(x) ((__force u16)(__be16)(x))
#define cpu_to_le64(x) ((__force __le64)(u64)(bswap_64(x)))
#define le64_to_cpu(x) ((__force u64)(__le64)(bswap_64(x)))
#define cpu_to_le32(x) ((__force __le32)(u32)(bswap_32(x)))
#define le32_to_cpu(x) ((__force u32)(__le32)(bswap_32(x)))
#define cpu_to_le16(x) ((__force __le16)(u16)(bswap_16(x)))
#define le16_to_cpu(x) ((__force u16)(__le16)(bswap_16(x)))
#else
#define cpu_to_be64(x) ((__force __be64)(u64)(bswap_64(x)))
#define be64_to_cpu(x) ((__force u64)(__be64)(bswap_64(x)))
#define cpu_to_be32(x) ((__force __be32)(u32)(bswap_32(x)))
#define be32_to_cpu(x) ((__force u32)(__be32)(bswap_32(x)))
#define cpu_to_be16(x) ((__force __be16)(u16)(bswap_16(x)))
#define be16_to_cpu(x) ((__force u16)(__be16)(bswap_16(x)))
#define cpu_to_le64(x) ((__force __le64)(u64)(x))
#define le64_to_cpu(x) ((__force u64)(__le64)(x))
#define cpu_to_le32(x) ((__force __le32)(u32)(x))
#define le32_to_cpu(x) ((__force u32)(__le32)(x))
#define cpu_to_le16(x) ((__force __le16)(u16)(x))
#define le16_to_cpu(x) ((__force u16)(__le16)(x))
#endif

#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))

/*
 * min()/max() with fully parenthesized, single-evaluation arguments.
 * The previous `({ x < y ? x : y; })` form both double-evaluated its
 * arguments and left them unparenthesized, so e.g. min(a & b, c)
 * parsed as `a & (b < c ? ...)`.
 */
#define min(x, y) ({				\
	__typeof__(x) _min1 = (x);		\
	__typeof__(y) _min2 = (y);		\
	_min1 < _min2 ? _min1 : _min2; })

#define max(x, y) ({				\
	__typeof__(x) _max1 = (x);		\
	__typeof__(y) _max2 = (y);		\
	_max1 > _max2 ? _max1 : _max2; })

#define L1_CACHE_BYTES 256

/* malloc() + zero-fill; @flags is accepted for kernel API compatibility. */
static inline void *kzalloc(size_t size, unsigned flags)
{
	void *p;

	p = malloc(size);
	if (p)
		memset(p, 0, size);
	return p;
}

static inline void kfree(void *p)
{
	free(p);
}

#define TRACE() printf("TRACE %s:%d\n", __FILE__, __LINE__)

#endif