d40fc181eb
Switch all of the sample code to use the function names from
tools/lib/bpf so that they're consistent with that, and to declare
their own log buffers. This allows the next commit to be purely devoted
to getting rid of the duplicate library in samples/bpf.

Committer notes:

Testing it:

On a fedora rawhide container, with clang/llvm 3.9, sharing the host
linux kernel git tree:

  # make O=/tmp/build/linux/ headers_install
  # make O=/tmp/build/linux -C samples/bpf/

Since I forgot to make it privileged, just tested it outside the
container, using what it generated:

  # uname -a
  Linux jouet 4.9.0-rc8+ #1 SMP Mon Dec 12 11:20:49 BRT 2016 x86_64 x86_64 x86_64 GNU/Linux
  # cd /var/lib/docker/devicemapper/mnt/c43e09a53ff56c86a07baf79847f00e2cc2a17a1e2220e1adbf8cbc62734feda/rootfs/tmp/build/linux/samples/bpf/
  # ls -la offwaketime
  -rwxr-xr-x. 1 root root 24200 Dec 15 12:19 offwaketime
  # file offwaketime
  offwaketime: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, for GNU/Linux 2.6.32, BuildID[sha1]=c940d3f127d5e66cdd680e42d885cb0b64f8a0e4, not stripped
  # readelf -SW offwaketime_kern.o | grep PROGBITS
  [ 2] .text                         PROGBITS 0000000000000000 000040 000000 00 AX 0 0 4
  [ 3] kprobe/try_to_wake_up         PROGBITS 0000000000000000 000040 0000d8 00 AX 0 0 8
  [ 5] tracepoint/sched/sched_switch PROGBITS 0000000000000000 000118 000318 00 AX 0 0 8
  [ 7] maps                          PROGBITS 0000000000000000 000430 000050 00 WA 0 0 4
  [ 8] license                       PROGBITS 0000000000000000 000480 000004 00 WA 0 0 1
  [ 9] version                       PROGBITS 0000000000000000 000484 000004 00 WA 0 0 4
  # ./offwaketime | head -5
  swapper/1;start_secondary;cpu_startup_entry;schedule_preempt_disabled;schedule;__schedule;-;---;; 106
  CPU 0/KVM;entry_SYSCALL_64_fastpath;sys_ioctl;do_vfs_ioctl;kvm_vcpu_ioctl;kvm_arch_vcpu_ioctl_run;kvm_vcpu_block;schedule;__schedule;-;try_to_wake_up;swake_up_locked;swake_up;apic_timer_expired;apic_timer_fn;__hrtimer_run_queues;hrtimer_interrupt;local_apic_timer_interrupt;smp_apic_timer_interrupt;__irqentry_text_start;cpuidle_enter;call_cpuidle;cpu_startup_entry;start_secondary;;swapper/3 2
  Compositor;entry_SYSCALL_64_fastpath;sys_futex;do_futex;futex_wait;futex_wait_queue_me;schedule;__schedule;-;try_to_wake_up;futex_requeue;do_futex;sys_futex;entry_SYSCALL_64_fastpath;;SoftwareVsyncTh 5
  firefox;entry_SYSCALL_64_fastpath;sys_poll;do_sys_poll;poll_schedule_timeout;schedule_hrtimeout_range;schedule_hrtimeout_range_clock;schedule;__schedule;-;try_to_wake_up;pollwake;__wake_up_common;__wake_up_sync_key;pipe_write;__vfs_write;vfs_write;sys_write;entry_SYSCALL_64_fastpath;;Timer 13
  JS Helper;entry_SYSCALL_64_fastpath;sys_futex;do_futex;futex_wait;futex_wait_queue_me;schedule;__schedule;-;try_to_wake_up;do_futex;sys_futex;entry_SYSCALL_64_fastpath;;firefox 2
  #

Signed-off-by: Joe Stringer <joe@ovn.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexei Starovoitov <ast@fb.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Wang Nan <wangnan0@huawei.com>
Cc: netdev@vger.kernel.org
Link: http://lkml.kernel.org/r/20161214224342.12858-2-joe@ovn.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
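In practice the switch means each sample declares its own verifier log buffer and passes it explicitly to the tools/lib/bpf wrapper, instead of logging into the shared buffer provided by the samples/bpf copy of the library. The following is a minimal, hedged sketch of that calling convention: it assumes the bpf_load_program() prototype from tools/lib/bpf of this era, and the buffer size, the helper name load_example() and the trivial two-instruction program are illustrative assumptions, not code from this commit.

/* Hedged sketch only: load a trivial program through the tools/lib/bpf-style
 * bpf_load_program() wrapper while supplying a per-sample log buffer.
 * bpf_vlog, load_example() and the two-instruction program are illustrative
 * assumptions, not part of this commit.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <linux/bpf.h>
#include "libbpf.h"	/* assumed to resolve to the tools/lib/bpf wrappers in the samples build */

static char bpf_vlog[65536];	/* the sample's own verifier log buffer */

static int load_example(void)
{
	/* r0 = 0; exit -- about the smallest program the verifier accepts */
	struct bpf_insn prog[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	int fd;

	fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
			      sizeof(prog) / sizeof(prog[0]), "GPL", 0,
			      bpf_vlog, sizeof(bpf_vlog));
	if (fd < 0)
		fprintf(stderr, "load failed: %s\n%s\n", strerror(errno), bpf_vlog);

	return fd;
}

With the log buffer local to each sample, the duplicate library under samples/bpf and its shared log buffer can be dropped in the follow-up commit.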
542 lines
12 KiB
C
/*
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#define _GNU_SOURCE
#include <linux/types.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sched.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <stdlib.h>
#include <time.h>

#include "libbpf.h"
#include "bpf_util.h"

#define min(a, b) ((a) < (b) ? (a) : (b))
#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
#define container_of(ptr, type, member) ({ \
	const typeof( ((type *)0)->member ) *__mptr = (ptr); \
	(type *)( (char *)__mptr - offsetof(type,member) );})

static int nr_cpus;
static unsigned long long *dist_keys;
static unsigned int dist_key_counts;

struct list_head {
	struct list_head *next, *prev;
};

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}

static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

static inline void __list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}

static inline void __list_del_entry(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
}

static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del_entry(list);
	list_add(list, head);
}

#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)

#define list_last_entry(ptr, type, member) \
	list_entry((ptr)->prev, type, member)

struct pfect_lru_node {
	struct list_head list;
	unsigned long long key;
};

struct pfect_lru {
	struct list_head list;
	struct pfect_lru_node *free_nodes;
	unsigned int cur_size;
	unsigned int lru_size;
	unsigned int nr_unique;
	unsigned int nr_misses;
	unsigned int total;
	int map_fd;
};

static void pfect_lru_init(struct pfect_lru *lru, unsigned int lru_size,
			   unsigned int nr_possible_elems)
{
	lru->map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
				     sizeof(unsigned long long),
				     sizeof(struct pfect_lru_node *),
				     nr_possible_elems, 0);
	assert(lru->map_fd != -1);

	lru->free_nodes = malloc(lru_size * sizeof(struct pfect_lru_node));
	assert(lru->free_nodes);

	INIT_LIST_HEAD(&lru->list);
	lru->cur_size = 0;
	lru->lru_size = lru_size;
	lru->nr_unique = lru->nr_misses = lru->total = 0;
}

static void pfect_lru_destroy(struct pfect_lru *lru)
{
	close(lru->map_fd);
	free(lru->free_nodes);
}

static int pfect_lru_lookup_or_insert(struct pfect_lru *lru,
				      unsigned long long key)
{
	struct pfect_lru_node *node = NULL;
	int seen = 0;

	lru->total++;
	if (!bpf_map_lookup_elem(lru->map_fd, &key, &node)) {
		if (node) {
			list_move(&node->list, &lru->list);
			return 1;
		}
		seen = 1;
	}

	if (lru->cur_size < lru->lru_size) {
		node = &lru->free_nodes[lru->cur_size++];
		INIT_LIST_HEAD(&node->list);
	} else {
		struct pfect_lru_node *null_node = NULL;

		node = list_last_entry(&lru->list,
				       struct pfect_lru_node,
				       list);
		bpf_map_update_elem(lru->map_fd, &node->key, &null_node, BPF_EXIST);
	}

	node->key = key;
	list_move(&node->list, &lru->list);

	lru->nr_misses++;
	if (seen) {
		assert(!bpf_map_update_elem(lru->map_fd, &key, &node, BPF_EXIST));
	} else {
		lru->nr_unique++;
		assert(!bpf_map_update_elem(lru->map_fd, &key, &node, BPF_NOEXIST));
	}

	return seen;
}

static unsigned int read_keys(const char *dist_file,
			      unsigned long long **keys)
{
	struct stat fst;
	unsigned long long *retkeys;
	unsigned int counts = 0;
	int dist_fd;
	char *b, *l;
	int i;

	dist_fd = open(dist_file, 0);
	assert(dist_fd != -1);

	assert(fstat(dist_fd, &fst) == 0);
	b = malloc(fst.st_size);
	assert(b);

	assert(read(dist_fd, b, fst.st_size) == fst.st_size);
	close(dist_fd);
	for (i = 0; i < fst.st_size; i++) {
		if (b[i] == '\n')
			counts++;
	}
	counts++; /* in case the last line has no \n */

	retkeys = malloc(counts * sizeof(unsigned long long));
	assert(retkeys);

	counts = 0;
	for (l = strtok(b, "\n"); l; l = strtok(NULL, "\n"))
		retkeys[counts++] = strtoull(l, NULL, 10);
	free(b);

	*keys = retkeys;

	return counts;
}

static int create_map(int map_type, int map_flags, unsigned int size)
{
	int map_fd;

	map_fd = bpf_create_map(map_type, sizeof(unsigned long long),
				sizeof(unsigned long long), size, map_flags);

	if (map_fd == -1)
		perror("bpf_create_map");

	return map_fd;
}

static int sched_next_online(int pid, int next_to_try)
{
	cpu_set_t cpuset;

	if (next_to_try == nr_cpus)
		return -1;

	while (next_to_try < nr_cpus) {
		CPU_ZERO(&cpuset);
		CPU_SET(next_to_try++, &cpuset);
		if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset))
			break;
	}

	return next_to_try;
}

static void run_parallel(unsigned int tasks, void (*fn)(int i, void *data),
			 void *data)
{
	int next_sched_cpu = 0;
	pid_t pid[tasks];
	int i;

	for (i = 0; i < tasks; i++) {
		pid[i] = fork();
		if (pid[i] == 0) {
			next_sched_cpu = sched_next_online(0, next_sched_cpu);
			fn(i, data);
			exit(0);
		} else if (pid[i] == -1) {
			printf("couldn't spawn #%d process\n", i);
			exit(1);
		}
		/* This is mostly redundant and just allows the parent
		 * process to update next_sched_cpu for the next child
		 * process.
		 */
		next_sched_cpu = sched_next_online(pid[i], next_sched_cpu);
	}
	for (i = 0; i < tasks; i++) {
		int status;

		assert(waitpid(pid[i], &status, 0) == pid[i]);
		assert(status == 0);
	}
}

static void do_test_lru_dist(int task, void *data)
{
	unsigned int nr_misses = 0;
	struct pfect_lru pfect_lru;
	unsigned long long key, value = 1234;
	unsigned int i;

	unsigned int lru_map_fd = ((unsigned int *)data)[0];
	unsigned int lru_size = ((unsigned int *)data)[1];
	unsigned long long key_offset = task * dist_key_counts;

	pfect_lru_init(&pfect_lru, lru_size, dist_key_counts);

	for (i = 0; i < dist_key_counts; i++) {
		key = dist_keys[i] + key_offset;

		pfect_lru_lookup_or_insert(&pfect_lru, key);

		if (!bpf_map_lookup_elem(lru_map_fd, &key, &value))
			continue;

		if (bpf_map_update_elem(lru_map_fd, &key, &value, BPF_NOEXIST)) {
			printf("bpf_map_update_elem(lru_map_fd, %llu): errno:%d\n",
			       key, errno);
			assert(0);
		}

		nr_misses++;
	}

	printf(" task:%d BPF LRU: nr_unique:%u(/%u) nr_misses:%u(/%u)\n",
	       task, pfect_lru.nr_unique, dist_key_counts, nr_misses,
	       dist_key_counts);
	printf(" task:%d Perfect LRU: nr_unique:%u(/%u) nr_misses:%u(/%u)\n",
	       task, pfect_lru.nr_unique, pfect_lru.total,
	       pfect_lru.nr_misses, pfect_lru.total);

	pfect_lru_destroy(&pfect_lru);
	close(lru_map_fd);
}

static void test_parallel_lru_dist(int map_type, int map_flags,
				   int nr_tasks, unsigned int lru_size)
{
	int child_data[2];
	int lru_map_fd;

	printf("%s (map_type:%d map_flags:0x%X):\n", __func__, map_type,
	       map_flags);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					nr_cpus * lru_size);
	else
		lru_map_fd = create_map(map_type, map_flags,
					nr_tasks * lru_size);
	assert(lru_map_fd != -1);

	child_data[0] = lru_map_fd;
	child_data[1] = lru_size;

	run_parallel(nr_tasks, do_test_lru_dist, child_data);

	close(lru_map_fd);
}

static void test_lru_loss0(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	unsigned int old_unused_losses = 0;
	unsigned int new_unused_losses = 0;
	unsigned int used_losses = 0;
	int map_fd;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, 0) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		map_fd = create_map(map_type, map_flags, 900 * nr_cpus);
	else
		map_fd = create_map(map_type, map_flags, 900);

	assert(map_fd != -1);

	value[0] = 1234;

	for (key = 1; key <= 1000; key++) {
		int start_key, end_key;

		assert(bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST) == 0);

		start_key = 101;
		end_key = min(key, 900);

		while (start_key <= end_key) {
			bpf_map_lookup_elem(map_fd, &start_key, value);
			start_key++;
		}
	}

	for (key = 1; key <= 1000; key++) {
		if (bpf_map_lookup_elem(map_fd, &key, value)) {
			if (key <= 100)
				old_unused_losses++;
			else if (key <= 900)
				used_losses++;
			else
				new_unused_losses++;
		}
	}

	close(map_fd);

	printf("older-elem-losses:%d(/100) active-elem-losses:%d(/800) "
	       "newer-elem-losses:%d(/100)\n",
	       old_unused_losses, used_losses, new_unused_losses);
}

static void test_lru_loss1(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int map_fd;
	unsigned int nr_losses = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, 0) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		map_fd = create_map(map_type, map_flags, 1000 * nr_cpus);
	else
		map_fd = create_map(map_type, map_flags, 1000);

	assert(map_fd != -1);

	value[0] = 1234;

	for (key = 1; key <= 1000; key++)
		assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));

	for (key = 1; key <= 1000; key++) {
		if (bpf_map_lookup_elem(map_fd, &key, value))
			nr_losses++;
	}

	close(map_fd);

	printf("nr_losses:%d(/1000)\n", nr_losses);
}

static void do_test_parallel_lru_loss(int task, void *data)
{
	const unsigned int nr_stable_elems = 1000;
	const unsigned int nr_repeats = 100000;

	int map_fd = *(int *)data;
	unsigned long long stable_base;
	unsigned long long key, value[nr_cpus];
	unsigned long long next_ins_key;
	unsigned int nr_losses = 0;
	unsigned int i;

	stable_base = task * nr_repeats * 2 + 1;
	next_ins_key = stable_base;
	value[0] = 1234;
	for (i = 0; i < nr_stable_elems; i++) {
		assert(bpf_map_update_elem(map_fd, &next_ins_key, value,
					   BPF_NOEXIST) == 0);
		next_ins_key++;
	}

	for (i = 0; i < nr_repeats; i++) {
		int rn;

		rn = rand();

		if (rn % 10) {
			key = rn % nr_stable_elems + stable_base;
			bpf_map_lookup_elem(map_fd, &key, value);
		} else {
			bpf_map_update_elem(map_fd, &next_ins_key, value,
					    BPF_NOEXIST);
			next_ins_key++;
		}
	}

	key = stable_base;
	for (i = 0; i < nr_stable_elems; i++) {
		if (bpf_map_lookup_elem(map_fd, &key, value))
			nr_losses++;
		key++;
	}

	printf(" task:%d nr_losses:%u\n", task, nr_losses);
}

static void test_parallel_lru_loss(int map_type, int map_flags, int nr_tasks)
{
	int map_fd;

	printf("%s (map_type:%d map_flags:0x%X):\n", __func__, map_type,
	       map_flags);

	/* Give 20% more than the active working set */
	if (map_flags & BPF_F_NO_COMMON_LRU)
		map_fd = create_map(map_type, map_flags,
				    nr_cpus * (1000 + 200));
	else
		map_fd = create_map(map_type, map_flags,
				    nr_tasks * (1000 + 200));

	assert(map_fd != -1);

	run_parallel(nr_tasks, do_test_parallel_lru_loss, &map_fd);

	close(map_fd);
}

int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	int map_flags[] = {0, BPF_F_NO_COMMON_LRU};
	const char *dist_file;
	int nr_tasks = 1;
	int lru_size;
	int f;

	if (argc < 4) {
		printf("Usage: %s <dist-file> <lru-size> <nr-tasks>\n",
		       argv[0]);
		return -1;
	}

	dist_file = argv[1];
	lru_size = atoi(argv[2]);
	nr_tasks = atoi(argv[3]);

	setbuf(stdout, NULL);

	assert(!setrlimit(RLIMIT_MEMLOCK, &r));

	srand(time(NULL));

	nr_cpus = bpf_num_possible_cpus();
	assert(nr_cpus != -1);
	printf("nr_cpus:%d\n\n", nr_cpus);

	nr_tasks = min(nr_tasks, nr_cpus);

	dist_key_counts = read_keys(dist_file, &dist_keys);
	if (!dist_key_counts) {
		printf("%s has no key\n", dist_file);
		return -1;
	}

	for (f = 0; f < sizeof(map_flags) / sizeof(*map_flags); f++) {
		test_lru_loss0(BPF_MAP_TYPE_LRU_HASH, map_flags[f]);
		test_lru_loss1(BPF_MAP_TYPE_LRU_HASH, map_flags[f]);
		test_parallel_lru_loss(BPF_MAP_TYPE_LRU_HASH, map_flags[f],
				       nr_tasks);
		test_parallel_lru_dist(BPF_MAP_TYPE_LRU_HASH, map_flags[f],
				       nr_tasks, lru_size);
		printf("\n");
	}

	free(dist_keys);

	return 0;
}