Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter/IPVS fixes for net

The following patchset contains Netfilter/IPVS fixes for your net tree;
they are:

1) Put back reference on CLUSTERIP configuration structure from the
   error path, patch from Florian Westphal.

2) Put reference on CLUSTERIP configuration instead of freeing it,
   another cpu may still be walking over it, also from Florian.

3) Refetch the pointer to the IPv6 header in nf_nat_ipv6_manip_pkt(), given
   that packet manipulation may reallocate the skbuff header, from Florian.

4) Missing match size sanity checks in ebt_among, from Florian.

5) Convert BUG_ON to WARN_ON in ebtables, from Florian.

6) Sanity check userspace offsets in the ebtables kernel code, from Florian.

7) Missing checksum replace call in flowtable IPv4 DNAT, from Felix
   Fietkau.

8) Bump the right stats on checksum error from bridge netfilter,
   from Taehee Yoo.

9) Unset the interface flag in IPv6 FIB lookups, otherwise we get
   misleading routing lookup results, from Florian.

10) Missing sk_to_full_sk() in ip6_route_me_harder(), from Eric Dumazet.

11) Don't allow a device to be part of multiple flowtables at the same
    time, as this may break setups.

12) Missing netlink attribute validation in flowtable deletion.

13) Wrong array index in the nf_unregister_net_hook() call on the error
    path of flowtable addition.

14) Fix FTP IPVS helper when NAT mangling is in place, patch from
    Julian Anastasov.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 4a0c7191c7 (David S. Miller, 2018-03-02 20:32:15 -05:00)
11 files changed, 98 insertions(+), 39 deletions(-)

@@ -214,7 +214,7 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
 
         iph = ip_hdr(skb);
         if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
-                goto inhdr_error;
+                goto csum_error;
 
         len = ntohs(iph->tot_len);
         if (skb->len < len) {
@@ -236,6 +236,8 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
          */
         return 0;
 
+csum_error:
+        __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
 inhdr_error:
         __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
 drop:
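
The two labels above deliberately fall through: a checksum failure bumps
IPSTATS_MIB_CSUMERRORS and then IPSTATS_MIB_INHDRERRORS as well, the same
accounting the plain IPv4 receive path uses. A tiny userspace sketch of that
goto/fall-through counting idiom (illustrative only, not kernel code):

#include <stdio.h>

static unsigned long csum_errors, inhdr_errors;

static int validate(int bad_csum, int bad_len)
{
        if (bad_csum)
                goto csum_error;
        if (bad_len)
                goto inhdr_error;
        return 0;

csum_error:
        csum_errors++;
        /* fall through: a checksum error is also a header error */
inhdr_error:
        inhdr_errors++;
        return -1;
}

int main(void)
{
        validate(1, 0);         /* bad checksum */
        validate(0, 1);         /* bad length   */
        printf("csum=%lu inhdr=%lu\n", csum_errors, inhdr_errors);
        return 0;
}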

@@ -172,18 +172,35 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
         return true;
 }
 
+static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
+{
+        return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
+}
+
 static int ebt_among_mt_check(const struct xt_mtchk_param *par)
 {
         const struct ebt_among_info *info = par->matchinfo;
         const struct ebt_entry_match *em =
                 container_of(par->matchinfo, const struct ebt_entry_match, data);
-        int expected_length = sizeof(struct ebt_among_info);
+        unsigned int expected_length = sizeof(struct ebt_among_info);
         const struct ebt_mac_wormhash *wh_dst, *wh_src;
         int err;
 
+        if (expected_length > em->match_size)
+                return -EINVAL;
+
         wh_dst = ebt_among_wh_dst(info);
-        wh_src = ebt_among_wh_src(info);
+        if (poolsize_invalid(wh_dst))
+                return -EINVAL;
+
         expected_length += ebt_mac_wormhash_size(wh_dst);
+        if (expected_length > em->match_size)
+                return -EINVAL;
+
+        wh_src = ebt_among_wh_src(info);
+        if (poolsize_invalid(wh_src))
+                return -EINVAL;
+
         expected_length += ebt_mac_wormhash_size(wh_src);
 
         if (em->match_size != EBT_ALIGN(expected_length)) {
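
The new poolsize_invalid() helper exists because expected_length used to be a
plain int: a huge, attacker-chosen poolsize could make the wormhash byte count
wrap and slip past the match_size comparison. A userspace sketch of the idea;
struct tuple below is a stand-in, not the real ebt_mac_wormhash_tuple layout:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct ebt_mac_wormhash_tuple; only the size matters here. */
struct tuple {
        uint32_t cmp[2];
        uint32_t ip;
};

/* Same idea as the kernel helper: reject poolsizes whose byte count would
 * not fit in an int, so "header + poolsize * sizeof(tuple)" cannot wrap. */
static int poolsize_invalid(uint32_t poolsize)
{
        return poolsize >= INT_MAX / sizeof(struct tuple);
}

int main(void)
{
        uint32_t sane = 256, bogus = UINT32_MAX / 8;

        /* 64-bit math shows the size that 32-bit arithmetic would wrap. */
        printf("poolsize %u -> %llu bytes, invalid=%d\n", sane,
               (unsigned long long)sane * sizeof(struct tuple),
               poolsize_invalid(sane));
        printf("poolsize %u -> %llu bytes, invalid=%d\n", bogus,
               (unsigned long long)bogus * sizeof(struct tuple),
               poolsize_invalid(bogus));
        return 0;
}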

@@ -1641,7 +1641,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
         int off = ebt_compat_match_offset(match, m->match_size);
         compat_uint_t msize = m->match_size - off;
 
-        BUG_ON(off >= m->match_size);
+        if (WARN_ON(off >= m->match_size))
+                return -EINVAL;
 
         if (copy_to_user(cm->u.name, match->name,
                          strlen(match->name) + 1) || put_user(msize, &cm->match_size))
@@ -1671,7 +1672,8 @@ static int compat_target_to_user(struct ebt_entry_target *t,
         int off = xt_compat_target_offset(target);
         compat_uint_t tsize = t->target_size - off;
 
-        BUG_ON(off >= t->target_size);
+        if (WARN_ON(off >= t->target_size))
+                return -EINVAL;
 
         if (copy_to_user(cm->u.name, target->name,
                          strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
@@ -1902,7 +1904,8 @@ static int ebt_buf_add(struct ebt_entries_buf_state *state,
         if (state->buf_kern_start == NULL)
                 goto count_only;
 
-        BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
+        if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
+                return -EINVAL;
         memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
@@ -1915,7 +1918,8 @@ static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
 {
         char *b = state->buf_kern_start;
 
-        BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
+        if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
+                return -EINVAL;
 
         if (b != NULL && sz > 0)
                 memset(b + state->buf_kern_offset, 0, sz);
@@ -1992,8 +1996,10 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
         pad = XT_ALIGN(size_kern) - size_kern;
 
         if (pad > 0 && dst) {
-                BUG_ON(state->buf_kern_len <= pad);
-                BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
+                if (WARN_ON(state->buf_kern_len <= pad))
+                        return -EINVAL;
+                if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
+                        return -EINVAL;
                 memset(dst + size_kern, 0, pad);
         }
         return off + match_size;
@@ -2043,7 +2049,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
                 if (ret < 0)
                         return ret;
 
-                BUG_ON(ret < match32->match_size);
+                if (WARN_ON(ret < match32->match_size))
+                        return -EINVAL;
                 growth += ret - match32->match_size;
                 growth += ebt_compat_entry_padsize();
@@ -2053,7 +2060,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
                 if (match_kern)
                         match_kern->match_size = ret;
 
-                WARN_ON(type == EBT_COMPAT_TARGET && size_left);
+                if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
+                        return -EINVAL;
+
                 match32 = (struct compat_ebt_entry_mwt *) buf;
         }
@@ -2109,6 +2118,15 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
          *
          * offsets are relative to beginning of struct ebt_entry (i.e., 0).
          */
+        for (i = 0; i < 4 ; ++i) {
+                if (offsets[i] >= *total)
+                        return -EINVAL;
+                if (i == 0)
+                        continue;
+                if (offsets[i-1] > offsets[i])
+                        return -EINVAL;
+        }
+
         for (i = 0, j = 1 ; j < 4 ; j++, i++) {
                 struct compat_ebt_entry_mwt *match32;
                 unsigned int size;
@@ -2140,7 +2158,8 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
 
         startoff = state->buf_user_offset - startoff;
 
-        BUG_ON(*total < startoff);
+        if (WARN_ON(*total < startoff))
+                return -EINVAL;
         *total -= startoff;
         return 0;
 }
@@ -2267,7 +2286,8 @@ static int compat_do_replace(struct net *net, void __user *user,
         state.buf_kern_len = size64;
 
         ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
-        BUG_ON(ret < 0);        /* parses same data again */
+        if (WARN_ON(ret < 0))
+                goto out_unlock;
 
         vfree(entries_tmp);
         tmp.entries_size = size64;
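
The common thread in these hunks is turning fatal assertions into recoverable
failures: WARN_ON() evaluates to its condition, so a malformed compat request
can be rejected with -EINVAL (or unwound through a label) rather than halting
the machine as BUG_ON() would. A rough userspace approximation of the pattern;
the WARN_ON macro below is a stand-in, not the kernel one:

#include <errno.h>
#include <stdio.h>

/* Userspace stand-in: like the kernel macro, it reports and then evaluates
 * to the condition so it can drive an error return. */
#define WARN_ON(cond) ({                                                \
        int __c = !!(cond);                                             \
        if (__c)                                                        \
                fprintf(stderr, "WARNING at %s:%d: %s\n",               \
                        __FILE__, __LINE__, #cond);                     \
        __c;                                                            \
})

static int copy_match(unsigned int off, unsigned int match_size)
{
        /* After the fix, a bogus offset fails this one request ... */
        if (WARN_ON(off >= match_size))
                return -EINVAL;
        /* ... instead of crashing the whole kernel via BUG_ON(). */
        return 0;
}

int main(void)
{
        printf("sane:  %d\n", copy_match(4, 32));
        printf("bogus: %d\n", copy_match(64, 32));
        return 0;
}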

@@ -232,7 +232,6 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
         c->hash_mode = i->hash_mode;
         c->hash_initval = i->hash_initval;
         refcount_set(&c->refcount, 1);
-        refcount_set(&c->entries, 1);
 
         spin_lock_bh(&cn->lock);
         if (__clusterip_config_find(net, ip)) {
@@ -263,8 +262,10 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
 
         c->notifier.notifier_call = clusterip_netdev_event;
         err = register_netdevice_notifier(&c->notifier);
-        if (!err)
+        if (!err) {
+                refcount_set(&c->entries, 1);
                 return c;
+        }
 
 #ifdef CONFIG_PROC_FS
         proc_remove(c->pde);
@@ -273,7 +274,7 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
         spin_lock_bh(&cn->lock);
         list_del_rcu(&c->list);
         spin_unlock_bh(&cn->lock);
-        kfree(c);
+        clusterip_config_put(c);
 
         return ERR_PTR(err);
 }
@@ -496,12 +497,15 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
                         return PTR_ERR(config);
                 }
         }
-        cipinfo->config = config;
 
         ret = nf_ct_netns_get(par->net, par->family);
-        if (ret < 0)
+        if (ret < 0) {
                 pr_info("cannot load conntrack support for proto=%u\n",
                         par->family);
+                clusterip_config_entry_put(par->net, config);
+                clusterip_config_put(config);
+                return ret;
+        }
 
         if (!par->net->xt.clusterip_deprecated_warning) {
                 pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, "
@@ -509,6 +513,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
                 par->net->xt.clusterip_deprecated_warning = true;
         }
 
+        cipinfo->config = config;
         return ret;
 }
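
Both CLUSTERIP changes follow the same refcounting rule: once the config is
visible to other CPUs, an error path must drop its reference with a put and
let the last holder free it, and the entries count is only set once setup has
fully succeeded. A simplified userspace sketch of the put-instead-of-free part,
using C11 atomics rather than the kernel's refcount_t API:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct config {
        atomic_int refcount;
};

static struct config *config_get(struct config *c)
{
        atomic_fetch_add(&c->refcount, 1);
        return c;
}

static void config_put(struct config *c)
{
        /* Only the last holder frees; an error path must never free()
         * directly while another CPU may still hold a reference. */
        if (atomic_fetch_sub(&c->refcount, 1) == 1) {
                printf("last reference dropped, freeing\n");
                free(c);
        }
}

int main(void)
{
        struct config *c = calloc(1, sizeof(*c));

        if (!c)
                return 1;
        atomic_init(&c->refcount, 1);   /* creator's reference */
        config_get(c);                  /* a concurrent lookup */

        config_put(c);                  /* creator hits an error: put, not free */
        config_put(c);                  /* concurrent user finishes */
        return 0;
}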

@@ -111,6 +111,7 @@ static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
         default:
                 return -1;
         }
+        csum_replace4(&iph->check, addr, new_addr);
 
         return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
 }
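
csum_replace4() patches the IPv4 header checksum incrementally, in the spirit
of RFC 1624, rather than recomputing it from scratch; that call was missing
for the rewritten address. A standalone sketch of the arithmetic; the helpers
below only mimic the kernel ones and are not the kernel implementations:

#include <stdint.h>
#include <stdio.h>

/* Fold a wide sum into 16 bits, ones' complement style. */
static uint16_t csum_fold(uint64_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* Full checksum over 16-bit words (header length assumed even). */
static uint16_t csum_full(const uint16_t *p, int words)
{
        uint64_t sum = 0;

        while (words--)
                sum += *p++;
        return (uint16_t)~csum_fold(sum);
}

/* Incremental update: new = ~(~old - from + to) in ones' complement
 * arithmetic, which is the job csum_replace4() does for a 32-bit field. */
static uint16_t csum_replace4(uint16_t check, uint32_t from, uint32_t to)
{
        uint64_t sum = (uint16_t)~check;

        sum += (uint16_t)~(from >> 16);
        sum += (uint16_t)~(from & 0xffff);
        sum += to >> 16;
        sum += to & 0xffff;
        return (uint16_t)~csum_fold(sum);
}

int main(void)
{
        /* Toy 20-byte header as ten 16-bit words; word 5 is the checksum. */
        uint16_t hdr[10] = { 0x4500, 0x0054, 0x1c46, 0x4000, 0x4001,
                             0x0000, 0xc0a8, 0x0001, 0xc0a8, 0x00c7 };
        uint32_t old_daddr = ((uint32_t)hdr[8] << 16) | hdr[9];
        uint32_t new_daddr = 0x0a000001;        /* DNAT to 10.0.0.1 */
        uint64_t sum = 0;
        int i;

        hdr[5] = csum_full(hdr, 10);            /* valid original checksum */

        /* Rewrite the destination address and patch the checksum. */
        hdr[5] = csum_replace4(hdr[5], old_daddr, new_daddr);
        hdr[8] = (uint16_t)(new_daddr >> 16);
        hdr[9] = (uint16_t)(new_daddr & 0xffff);

        for (i = 0; i < 10; i++)
                sum += hdr[i];
        printf("verify fold = 0x%04x (0xffff means valid)\n",
               (unsigned int)csum_fold(sum));
        return 0;
}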

@@ -21,18 +21,19 @@
 int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
 {
         const struct ipv6hdr *iph = ipv6_hdr(skb);
+        struct sock *sk = sk_to_full_sk(skb->sk);
         unsigned int hh_len;
         struct dst_entry *dst;
         struct flowi6 fl6 = {
-                .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
+                .flowi6_oif = sk ? sk->sk_bound_dev_if : 0,
                 .flowi6_mark = skb->mark,
-                .flowi6_uid = sock_net_uid(net, skb->sk),
+                .flowi6_uid = sock_net_uid(net, sk),
                 .daddr = iph->daddr,
                 .saddr = iph->saddr,
         };
         int err;
 
-        dst = ip6_route_output(net, skb->sk, &fl6);
+        dst = ip6_route_output(net, sk, &fl6);
         err = dst->error;
         if (err) {
                 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
@@ -50,7 +51,7 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
         if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
             xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
                 skb_dst_set(skb, NULL);
-                dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0);
+                dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
                 if (IS_ERR(dst))
                         return PTR_ERR(dst);
                 skb_dst_set(skb, dst);
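
The reason for sk_to_full_sk() is that on a SYN-ACK the skb can carry a
request socket, which lacks full-socket fields such as the bound device and
uid; those live on the listener it points to. A much simplified illustration
of the idea; the structs and helper below are invented for the example and do
not reflect the kernel's socket layout:

#include <stdio.h>

struct full_sock {
        int bound_dev_if;
};

struct mini_sock {
        int is_request;                 /* request sock (SYN-ACK in flight) */
        struct full_sock *listener;     /* valid when is_request is set */
        struct full_sock *self;         /* valid for full sockets */
};

/* Toy version of the lookup the fix relies on: always hand back a socket
 * that actually carries the full-socket fields. */
static struct full_sock *to_full_sk(struct mini_sock *sk)
{
        if (!sk)
                return NULL;
        return sk->is_request ? sk->listener : sk->self;
}

int main(void)
{
        struct full_sock listener = { .bound_dev_if = 3 };
        struct mini_sock req = { .is_request = 1, .listener = &listener };
        struct full_sock *full = to_full_sk(&req);

        printf("bound_dev_if = %d\n", full ? full->bound_dev_if : 0);
        return 0;
}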

@@ -48,10 +48,6 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
         }
 
         fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
-        if ((flags & XT_RPFILTER_LOOSE) == 0) {
-                fl6.flowi6_oif = dev->ifindex;
-                lookup_flags |= RT6_LOOKUP_F_IFACE;
-        }
 
         rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags);
         if (rt->dst.error)

@@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
             !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
                                 target, maniptype))
                 return false;
+
+        /* must reload, offset might have changed */
+        ipv6h = (void *)skb->data + iphdroff;
+
 manip_addr:
         if (maniptype == NF_NAT_MANIP_SRC)
                 ipv6h->saddr = target->src.u3.in6;
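
The rule enforced here is a general one: a helper that may reallocate the
underlying buffer (per item 3 above, packet manipulation may reallocate the
skbuff header) invalidates any pointer derived from the old buffer, so it must
be recomputed from the saved offset. A small userspace analogy using realloc():

#include <stdlib.h>
#include <string.h>

struct pkt {
        unsigned char *data;
        size_t len;
};

/* May move the buffer; any pointer derived from the old p->data
 * (such as a cached header pointer) is stale after this succeeds. */
static int maybe_expand(struct pkt *p, size_t extra)
{
        unsigned char *n = realloc(p->data, p->len + extra);

        if (!n)
                return -1;
        p->data = n;
        p->len += extra;
        return 0;
}

int main(void)
{
        struct pkt p = { .data = calloc(64, 1), .len = 64 };
        size_t hdroff = 8;
        unsigned char *hdr;

        if (!p.data)
                return 1;
        if (maybe_expand(&p, 4096) < 0) {
                free(p.data);
                return 1;
        }

        /* Like the hunk above: recompute the header pointer from the
         * (possibly moved) buffer before writing through it. */
        hdr = p.data + hdroff;
        memset(hdr, 0xff, 16);

        free(p.data);
        return 0;
}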

@@ -180,7 +180,6 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
         }
 
         *dest = 0;
-again:
         rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags);
         if (rt->dst.error)
                 goto put_rt_err;
@@ -189,15 +188,8 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
         if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL))
                 goto put_rt_err;
 
-        if (oif && oif != rt->rt6i_idev->dev) {
-                /* multipath route? Try again with F_IFACE */
-                if ((lookup_flags & RT6_LOOKUP_F_IFACE) == 0) {
-                        lookup_flags |= RT6_LOOKUP_F_IFACE;
-                        fl6.flowi6_oif = oif->ifindex;
-                        ip6_rt_put(rt);
-                        goto again;
-                }
-        }
+        if (oif && oif != rt->rt6i_idev->dev)
+                goto put_rt_err;
 
         switch (priv->result) {
         case NFT_FIB_RESULT_OIF:

@@ -260,7 +260,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                 buf_len = strlen(buf);
 
                 ct = nf_ct_get(skb, &ctinfo);
-                if (ct && (ct->status & IPS_NAT_MASK)) {
+                if (ct) {
                         bool mangled;
 
                         /* If mangling fails this function will return 0

@@ -5037,9 +5037,9 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
 {
         const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
         const struct nf_flowtable_type *type;
+        struct nft_flowtable *flowtable, *ft;
         u8 genmask = nft_genmask_next(net);
         int family = nfmsg->nfgen_family;
-        struct nft_flowtable *flowtable;
         struct nft_table *table;
         struct nft_ctx ctx;
         int err, i, k;
@@ -5099,6 +5099,22 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
                 goto err3;
 
         for (i = 0; i < flowtable->ops_len; i++) {
+                if (!flowtable->ops[i].dev)
+                        continue;
+
+                list_for_each_entry(ft, &table->flowtables, list) {
+                        for (k = 0; k < ft->ops_len; k++) {
+                                if (!ft->ops[k].dev)
+                                        continue;
+
+                                if (flowtable->ops[i].dev == ft->ops[k].dev &&
+                                    flowtable->ops[i].pf == ft->ops[k].pf) {
+                                        err = -EBUSY;
+                                        goto err4;
+                                }
+                        }
+                }
+
                 err = nf_register_net_hook(net, &flowtable->ops[i]);
                 if (err < 0)
                         goto err4;
@@ -5120,7 +5136,7 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
         i = flowtable->ops_len;
 err4:
         for (k = i - 1; k >= 0; k--)
-                nf_unregister_net_hook(net, &flowtable->ops[i]);
+                nf_unregister_net_hook(net, &flowtable->ops[k]);
 
         kfree(flowtable->ops);
 err3:
@@ -5145,6 +5161,11 @@ static int nf_tables_delflowtable(struct net *net, struct sock *nlsk,
         struct nft_table *table;
         struct nft_ctx ctx;
 
+        if (!nla[NFTA_FLOWTABLE_TABLE] ||
+            (!nla[NFTA_FLOWTABLE_NAME] &&
+             !nla[NFTA_FLOWTABLE_HANDLE]))
+                return -EINVAL;
+
         table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE],
                                        family, genmask);
         if (IS_ERR(table))
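
The err4 change is the classic partial-registration unwind: when registering
hook i fails, the hooks that actually succeeded are 0..i-1, and the rollback
loop must use its own counter (k) rather than the failed index (i). A compact
userspace sketch of that pattern (the hook functions are placeholders):

#include <stdio.h>

static int register_hook(int idx)
{
        return idx == 3 ? -1 : 0;       /* pretend the 4th hook fails */
}

static void unregister_hook(int idx)
{
        printf("unregistering hook %d\n", idx);
}

static int register_all(int n)
{
        int i, k;

        for (i = 0; i < n; i++) {
                if (register_hook(i) < 0)
                        goto err;
        }
        return 0;
err:
        /* Unwind only what succeeded; the bug was passing i here. */
        for (k = i - 1; k >= 0; k--)
                unregister_hook(k);
        return -1;
}

int main(void)
{
        return register_all(5) ? 1 : 0;
}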