kernel_optimize_test/lib/rbtree_test.c


// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rbtree_augmented.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <asm/timex.h>

#define __param(type, name, init, msg)		\
	static type name = init;		\
	module_param(name, type, 0444);		\
	MODULE_PARM_DESC(name, msg);

__param(int, nnodes, 100, "Number of nodes in the rb-tree");
__param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree");
__param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
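
/*
 * Usage sketch (module object name assumed to be rbtree_test.ko; adjust
 * to the actual build target):
 *
 *   insmod rbtree_test.ko nnodes=1000 perf_loops=10000
 *   dmesg | tail
 */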

struct test_node {
	u32 key;
	struct rb_node rb;

	/* following fields used for testing augmented rbtree functionality */
	u32 val;
	u32 augmented;
};

static struct rb_root_cached root = RB_ROOT_CACHED;
static struct test_node *nodes = NULL;
static struct rnd_state rnd;
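
/*
 * Plain insertion: descend from the root comparing keys, link the node
 * at the empty slot found, then let rb_insert_color() rebalance.
 */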
static void insert(struct test_node *node, struct rb_root_cached *root)
{
	struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
	u32 key = node->key;

	while (*new) {
		parent = *new;
		if (key < rb_entry(parent, struct test_node, rb)->key)
			new = &parent->rb_left;
		else
			new = &parent->rb_right;
	}

	rb_link_node(&node->rb, parent, new);
	rb_insert_color(&node->rb, &root->rb_root);
}
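
/*
 * Cached variant: also track whether the descent ever went right; if it
 * never did, the new node is the leftmost one and rb_insert_color_cached()
 * updates the cached pointer in struct rb_root_cached.
 */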
static void insert_cached(struct test_node *node, struct rb_root_cached *root)
{
	struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
	u32 key = node->key;
	bool leftmost = true;

	while (*new) {
		parent = *new;
		if (key < rb_entry(parent, struct test_node, rb)->key)
			new = &parent->rb_left;
		else {
			new = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, parent, new);
	rb_insert_color_cached(&node->rb, root, leftmost);
}

static inline void erase(struct test_node *node, struct rb_root_cached *root)
{
	rb_erase(&node->rb, &root->rb_root);
}

static inline void erase_cached(struct test_node *node, struct rb_root_cached *root)
{
	rb_erase_cached(&node->rb, root);
}
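
/*
 * A node's augmented value is the maximum "val" over its subtree: its
 * own val, possibly raised by either child's stored subtree maximum.
 * This is the recompute callback used by the augmented rbtree machinery.
 */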
static inline u32 augment_recompute(struct test_node *node)
{
	u32 max = node->val, child_augmented;

	if (node->rb.rb_left) {
		child_augmented = rb_entry(node->rb.rb_left, struct test_node,
					   rb)->augmented;
		if (max < child_augmented)
			max = child_augmented;
	}
	if (node->rb.rb_right) {
		child_augmented = rb_entry(node->rb.rb_right, struct test_node,
					   rb)->augmented;
		if (max < child_augmented)
			max = child_augmented;
	}
	return max;
}
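
/*
 * Generate the propagate/copy/rotate callbacks that keep the
 * "augmented" field consistent across tree rotations.
 */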
RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb,
		     u32, augmented, augment_recompute)
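
/*
 * Augmented insertion: grow the stored subtree maxima by hand on the way
 * down (each visited ancestor may gain a larger maximum), then let
 * rb_insert_augmented() keep them correct through rebalancing.
 */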
static void insert_augmented(struct test_node *node,
			     struct rb_root_cached *root)
{
	struct rb_node **new = &root->rb_root.rb_node, *rb_parent = NULL;
	u32 key = node->key;
	u32 val = node->val;
	struct test_node *parent;

	while (*new) {
		rb_parent = *new;
		parent = rb_entry(rb_parent, struct test_node, rb);
		if (parent->augmented < val)
			parent->augmented = val;
		if (key < parent->key)
			new = &parent->rb.rb_left;
		else
			new = &parent->rb.rb_right;
	}

	node->augmented = val;
	rb_link_node(&node->rb, rb_parent, new);
	rb_insert_augmented(&node->rb, &root->rb_root, &augment_callbacks);
}

static void insert_augmented_cached(struct test_node *node,
				    struct rb_root_cached *root)
{
	struct rb_node **new = &root->rb_root.rb_node, *rb_parent = NULL;
	u32 key = node->key;
	u32 val = node->val;
	struct test_node *parent;
	bool leftmost = true;

	while (*new) {
		rb_parent = *new;
		parent = rb_entry(rb_parent, struct test_node, rb);
		if (parent->augmented < val)
			parent->augmented = val;
		if (key < parent->key)
			new = &parent->rb.rb_left;
		else {
			new = &parent->rb.rb_right;
			leftmost = false;
		}
	}
	node->augmented = val;

	rb_link_node(&node->rb, rb_parent, new);
	rb_insert_augmented_cached(&node->rb, root,
				   leftmost, &augment_callbacks);
}

static void erase_augmented(struct test_node *node, struct rb_root_cached *root)
{
	rb_erase_augmented(&node->rb, &root->rb_root, &augment_callbacks);
}

static void erase_augmented_cached(struct test_node *node,
				   struct rb_root_cached *root)
{
	rb_erase_augmented_cached(&node->rb, root, &augment_callbacks);
}
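
/* Fill the node array with pseudo-random keys and values. */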
static void init(void)
{
	int i;

	for (i = 0; i < nnodes; i++) {
		nodes[i].key = prandom_u32_state(&rnd);
		nodes[i].val = prandom_u32_state(&rnd);
	}
}
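
/*
 * Node colour lives in the low bit of __rb_parent_color:
 * 0 means red, 1 means black.
 */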
static bool is_red(struct rb_node *rb)
{
	return !(rb->__rb_parent_color & 1);
}

static int black_path_count(struct rb_node *rb)
{
	int count;

	for (count = 0; rb; rb = rb_parent(rb))
		count += !is_red(rb);
	return count;
}
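
/* Both postorder traversal flavours must visit exactly nr_nodes nodes. */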
static void check_postorder_foreach(int nr_nodes)
{
	struct test_node *cur, *n;
	int count = 0;

	rbtree_postorder_for_each_entry_safe(cur, n, &root.rb_root, rb)
		count++;

	WARN_ON_ONCE(count != nr_nodes);
}

static void check_postorder(int nr_nodes)
{
	struct rb_node *rb;
	int count = 0;

	for (rb = rb_first_postorder(&root.rb_root); rb; rb = rb_next_postorder(rb))
		count++;

	WARN_ON_ONCE(count != nr_nodes);
}
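
/*
 * Validate the red-black invariants over an inorder walk: keys are
 * nondecreasing, no red node has a red parent, every path to a missing
 * child sees the same number of black nodes, and the node count matches
 * both nr_nodes and the 2^(black height) - 1 minimum a valid tree implies.
 */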
static void check(int nr_nodes)
{
	struct rb_node *rb;
	int count = 0, blacks = 0;
	u32 prev_key = 0;

	for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
		struct test_node *node = rb_entry(rb, struct test_node, rb);

		WARN_ON_ONCE(node->key < prev_key);
		WARN_ON_ONCE(is_red(rb) &&
			     (!rb_parent(rb) || is_red(rb_parent(rb))));
		if (!count)
			blacks = black_path_count(rb);
		else
			WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) &&
				     blacks != black_path_count(rb));
		prev_key = node->key;
		count++;
	}

	WARN_ON_ONCE(count != nr_nodes);
	WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root.rb_root))) - 1);

	check_postorder(nr_nodes);
	check_postorder_foreach(nr_nodes);
}
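
/* On top of the plain rbtree checks, verify every stored subtree maximum. */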
static void check_augmented(int nr_nodes)
{
	struct rb_node *rb;

	check(nr_nodes);
	for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
		struct test_node *node = rb_entry(rb, struct test_node, rb);
		WARN_ON_ONCE(node->augmented != augment_recompute(node));
	}
}
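
/*
 * Module entry: run the timed insert/erase and traversal benchmarks,
 * then the correctness passes, and return -EAGAIN so the module never
 * stays loaded.
 */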
static int __init rbtree_test_init(void)
{
	int i, j;
	cycles_t time1, time2, time;
	struct rb_node *node;

	nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;

	printk(KERN_ALERT "rbtree testing");

	prandom_seed_state(&rnd, 3141592653589793238ULL);
	init();

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++) {
		for (j = 0; j < nnodes; j++)
			insert(nodes + j, &root);
		for (j = 0; j < nnodes; j++)
			erase(nodes + j, &root);
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n",
	       (unsigned long long)time);

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++) {
		for (j = 0; j < nnodes; j++)
			insert_cached(nodes + j, &root);
		for (j = 0; j < nnodes; j++)
			erase_cached(nodes + j, &root);
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n",
	       (unsigned long long)time);

	for (i = 0; i < nnodes; i++)
		insert(nodes + i, &root);

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++) {
		for (node = rb_first(&root.rb_root); node; node = rb_next(node))
			;
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 3 (latency of inorder traversal): %llu cycles\n",
	       (unsigned long long)time);

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++)
		node = rb_first(&root.rb_root);

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 4 (latency to fetch first node)\n");
	printk("        non-cached: %llu cycles\n", (unsigned long long)time);

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++)
		node = rb_first_cached(&root);

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk("        cached: %llu cycles\n", (unsigned long long)time);

	for (i = 0; i < nnodes; i++)
		erase(nodes + i, &root);

	/* run checks */
	for (i = 0; i < check_loops; i++) {
		init();
		for (j = 0; j < nnodes; j++) {
			check(j);
			insert(nodes + j, &root);
		}
		for (j = 0; j < nnodes; j++) {
			check(nnodes - j);
			erase(nodes + j, &root);
		}
		check(0);
	}
printk(KERN_ALERT "augmented rbtree testing");
init();
time1 = get_cycles();
for (i = 0; i < perf_loops; i++) {
for (j = 0; j < nnodes; j++)
insert_augmented(nodes + j, &root);
for (j = 0; j < nnodes; j++)
erase_augmented(nodes + j, &root);
}
time2 = get_cycles();
time = time2 - time1;
time = div_u64(time, perf_loops);
printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n", (unsigned long long)time);
time1 = get_cycles();
for (i = 0; i < perf_loops; i++) {
for (j = 0; j < nnodes; j++)
insert_augmented_cached(nodes + j, &root);
for (j = 0; j < nnodes; j++)
erase_augmented_cached(nodes + j, &root);
}
time2 = get_cycles();
time = time2 - time1;
time = div_u64(time, perf_loops);
printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n", (unsigned long long)time);

	for (i = 0; i < check_loops; i++) {
		init();
		for (j = 0; j < nnodes; j++) {
			check_augmented(j);
			insert_augmented(nodes + j, &root);
		}
		for (j = 0; j < nnodes; j++) {
			check_augmented(nnodes - j);
			erase_augmented(nodes + j, &root);
		}
		check_augmented(0);
	}

	kfree(nodes);

	return -EAGAIN; /* failing the init call unloads the module right away */
}

static void __exit rbtree_test_exit(void)
{
	printk(KERN_ALERT "test exit\n");
}

module_init(rbtree_test_init)
module_exit(rbtree_test_exit)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michel Lespinasse");
MODULE_DESCRIPTION("Red Black Tree test");