Merge branch 'master' of git://1984.lsi.us.es/net-2.6
commit 60dbb011df
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
@@ -103,7 +103,7 @@ struct __fdb_entry {
 
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
-typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
+typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
 
 #endif
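Note on the hunk above: moving from a pointer typedef to a function typedef lets the __rcu annotation attach to the pointer declaration itself (br_should_route_hook_t __rcu *). A minimal sketch of the consumer side, matching how the bridge input path uses the hook in this era (the variable name rhook is illustrative):

	br_should_route_hook_t *rhook;

	rhook = rcu_dereference(br_should_route_hook);
	if (rhook && rhook(skb))
		return skb;	/* the hook claimed the packet; skip bridging */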
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
@@ -472,7 +472,7 @@ extern void xt_free_table_info(struct xt_table_info *info);
  * necessary for reading the counters.
  */
 struct xt_info_lock {
-	spinlock_t lock;
+	seqlock_t lock;
 	unsigned char readers;
 };
 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
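The hunk above swaps the per-CPU spinlock for a seqlock so counter readers can snapshot 64-bit values without blocking the packet path. A minimal sketch of the seqlock protocol being adopted, with hypothetical names stats_lock/stats_bytes (assume seqlock_init(&stats_lock) ran at init):

	static seqlock_t stats_lock;
	static u64 stats_bytes;

	static void stats_add(u64 delta)	/* writer: packet path */
	{
		write_seqlock(&stats_lock);
		stats_bytes += delta;
		write_sequnlock(&stats_lock);
	}

	static u64 stats_read(void)		/* reader: retries if a writer raced */
	{
		unsigned int seq;
		u64 v;

		do {
			seq = read_seqbegin(&stats_lock);
			v = stats_bytes;
		} while (read_seqretry(&stats_lock, seq));
		return v;
	}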
@@ -497,7 +497,7 @@ static inline void xt_info_rdlock_bh(void)
 	local_bh_disable();
 	lock = &__get_cpu_var(xt_info_locks);
 	if (likely(!lock->readers++))
-		spin_lock(&lock->lock);
+		write_seqlock(&lock->lock);
 }
 
 static inline void xt_info_rdunlock_bh(void)
@@ -505,7 +505,7 @@ static inline void xt_info_rdunlock_bh(void)
 	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
 
 	if (likely(!--lock->readers))
-		spin_unlock(&lock->lock);
+		write_sequnlock(&lock->lock);
 	local_bh_enable();
 }
 
@@ -516,12 +516,12 @@ static inline void xt_info_rdunlock_bh(void)
  */
 static inline void xt_info_wrlock(unsigned int cpu)
 {
-	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+	write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 static inline void xt_info_wrunlock(unsigned int cpu)
 {
-	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+	write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 /*
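For context, roughly how the packet-processing fast path brackets its per-CPU counter updates with the helpers above; the readers count keeps a nested acquisition on the same CPU from taking write_seqlock twice. Simplified from ipt_do_table, with the ruleset walk elided:

	xt_info_rdlock_bh();
	/* walk the ruleset; e points at the matched entry */
	ADD_COUNTER(e->counters, skb->len, 1);
	xt_info_rdunlock_bh();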
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
@@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t,
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
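The same retry loop recurs in the ip_tables and ip6_tables hunks below; factored out as a standalone sketch (the helper name snapshot_counter is hypothetical):

	static void snapshot_counter(const struct xt_counters *src,
				     struct xt_counters *dst, seqlock_t *lock)
	{
		u64 bcnt, pcnt;
		unsigned int start;

		do {		/* re-read if a writer updated src meanwhile */
			start = read_seqbegin(lock);
			bcnt = src->bcnt;
			pcnt = src->pcnt;
		} while (read_seqretry(lock, start));

		ADD_COUNTER(*dst, bcnt, pcnt);
	}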
@@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	 * about).
 	 */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
		return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@ static int __do_replace(struct net *net, const char *name,
 	struct arpt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
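Why the vmalloc to vzalloc swaps: the rewritten get_counters() only ever adds into counters[] (the SET_COUNTER pass for the current CPU is gone), so the buffer must start zeroed; vzalloc(), added in 2.6.37, allocates and zeroes in one call. Its effect, sketched as a hypothetical fallback for kernels without it:

	static void *my_vzalloc(unsigned long size)	/* hypothetical helper */
	{
		void *p = vmalloc(size);

		if (p)
			memset(p, 0, size);
		return p;
	}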
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
@@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t,
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU.
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i; /* macro does multi eval of i */
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 	struct ipt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
@@ -897,42 +897,25 @@ get_counters(const struct xt_table_info *t,
 	struct ip6t_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1216,7 +1199,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 	struct ip6t_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
@@ -645,25 +645,23 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
 	u_int8_t l3proto = nfmsg->nfgen_family;
 
-	rcu_read_lock();
+	spin_lock_bh(&nf_conntrack_lock);
 	last = (struct nf_conn *)cb->args[1];
 	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
-		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
+		hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
 					   hnnode) {
 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 				continue;
 			ct = nf_ct_tuplehash_to_ctrack(h);
-			if (!atomic_inc_not_zero(&ct->ct_general.use))
-				continue;
 			/* Dump entries of a given L3 protocol number.
 			 * If it is not specified, ie. l3proto == 0,
 			 * then dump everything. */
 			if (l3proto && nf_ct_l3num(ct) != l3proto)
-				goto releasect;
+				continue;
 			if (cb->args[1]) {
 				if (ct != last)
-					goto releasect;
+					continue;
 				cb->args[1] = 0;
 			}
 			if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -681,8 +679,6 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 				if (acct)
 					memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
 			}
-releasect:
-		nf_ct_put(ct);
 		}
 		if (cb->args[1]) {
 			cb->args[1] = 0;
@@ -690,7 +686,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 		}
 	}
 out:
-	rcu_read_unlock();
+	spin_unlock_bh(&nf_conntrack_lock);
 	if (last)
 		nf_ct_put(last);
 
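The three ctnetlink hunks above revert the dump loop from RCU traversal to holding nf_conntrack_lock, so entries cannot be freed mid-walk and the per-entry atomic_inc_not_zero()/releasect dance becomes unnecessary. Only the resume point still needs a reference across dump calls, roughly as below (dump_one is a hypothetical stand-in for the ctnetlink_fill_info() call):

	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[bucket], hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (dump_one(skb, ct) < 0) {
			/* buffer full: pin 'ct' so the next dump call can
			 * resume here even after the lock is dropped */
			nf_conntrack_get(&ct->ct_general);
			cb->args[1] = (unsigned long)ct;
			break;
		}
	}
	spin_unlock_bh(&nf_conntrack_lock);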
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
@@ -1325,7 +1325,8 @@ static int __init xt_init(void)
 
 	for_each_possible_cpu(i) {
 		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
-		spin_lock_init(&lock->lock);
+
+		seqlock_init(&lock->lock);
 		lock->readers = 0;
 	}
 
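Per-CPU locks get no static initializer here, hence the explicit seqlock_init() loop in xt_init() above; a file-scope seqlock could instead use the initializer macro:

	static DEFINE_SEQLOCK(example_lock);	/* hypothetical; no runtime init needed */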