Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec
Steffen Klassert says:

====================
pull request (net): ipsec 2018-03-13

1) Refuse to insert 32 bit userspace socket policies on 64 bit systems,
   as we already do for standard policies. We don't have a compat layer,
   so inserting socket policies from 32 bit userspace will lead to a
   broken configuration.

2) Make the policy hold queue work without the flowcache. Dummy bundles
   are not cached anymore, so we need to generate a new one on each
   lookup as long as the SAs are not yet in place.

3) Fix the validation of the esn replay attribute. The sanity check in
   verify_replay() is bypassed if the XFRM_STATE_ESN flag is not set.
   Fix this by doing the sanity check unconditionally.
   From Florian Westphal.

4) After most of the dst_entry garbage collection code was removed, we
   may leak xfrm_dst entries, as they are neither cached nor tracked
   anywhere. Fix this by reusing the 'uncached_list' to track xfrm_dst
   entries too. From Xin Long.

5) Fix a rcu_read_lock/rcu_read_unlock imbalance in xfrm_get_tos().
   From Xin Long.

6) Fix an infinite loop in xfrm_get_dst_nexthop. In transport mode we
   fetch the child dst_entry after we continue, so this pointer is
   never updated. Fix this by fetching it before we continue (see the
   standalone sketch below).

7) Fix the ESN sequence number gap after IPsec GSO packets. We
   accidentally increment the sequence number counter on the xfrm_state
   by one packet too many in the ESN case. Fix this by setting the
   sequence number to the correct value (a worked example follows the
   sketch below).

8) Reset the ethernet protocol after decapsulation only if a mac header
   was set. Otherwise it breaks configurations with TUN devices.
   From Yossi Kuperman.

9) Fix a __this_cpu_read() usage in preemptible code. Use
   this_cpu_read() instead in ipcomp_alloc_tfms().
   From Greg Hackmann.

Please pull or let me know if there are problems.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
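The loop fix in item 6 is easy to demonstrate outside the kernel. Below is a
minimal standalone C sketch, not kernel code: struct node, its fields, and
last_tunnel_daddr() are invented stand-ins for the dst_entry chain,
XFRM_MODE_TRANSPORT, and xfrm_get_dst_nexthop(). It shows why a loop that
reaches "continue" before advancing its cursor spins forever, and how moving
the fetch above the continue restores progress.

/*
 * Standalone sketch of the item 6 bug (hypothetical names, not the
 * kernel's).  A chain walk that "continue"s before advancing its
 * cursor spins forever on the first transport-mode entry; fetching
 * the child first makes every iteration progress.
 */
#include <stdio.h>

struct node {                   /* stand-in for a dst_entry chain   */
	int transport_mode;     /* stand-in for XFRM_MODE_TRANSPORT */
	int daddr;              /* stand-in for xfrm->id.daddr      */
	struct node *child;     /* stand-in for xfrm_dst_child()    */
};

static int last_tunnel_daddr(const struct node *n)
{
	int daddr = 0;

	while (n) {
		const struct node *cur = n;

		n = cur->child;	/* advance BEFORE any continue; the
				 * buggy version did this at the loop
				 * bottom, after the continue below */

		if (cur->transport_mode)
			continue;
		daddr = cur->daddr;
	}
	return daddr;
}

int main(void)
{
	struct node tunnel    = { 0, 42, NULL };
	struct node transport = { 1, 0, &tunnel };

	printf("daddr = %d\n", last_tunnel_daddr(&transport)); /* 42 */
	return 0;
}

Item 7 reduces to plain arithmetic. In xfrm_replay_overflow_offload_esn(),
suppose oseq == 100 and a GSO skb carries gso_segs == 3, consuming sequence
numbers 101..103. The old code executed xo->seq.low = oseq = oseq + 1 and then
oseq += gso_segs, leaving the counter at 104, one past the last number
actually used; the fix assigns oseq + 1 to the skb without touching oseq, so
oseq += gso_segs lands on the correct 103.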
commit d2ddf628e9
include/net/ip6_route.h
@@ -179,6 +179,9 @@ void rt6_disable_ip(struct net_device *dev, unsigned long event);
 void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
 void rt6_multipath_rebalance(struct rt6_info *rt);
 
+void rt6_uncached_list_add(struct rt6_info *rt);
+void rt6_uncached_list_del(struct rt6_info *rt);
+
 static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
 {
 	const struct dst_entry *dst = skb_dst(skb);
include/net/route.h
@@ -227,6 +227,9 @@ struct in_ifaddr;
 void fib_add_ifaddr(struct in_ifaddr *);
 void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
 
+void rt_add_uncached_list(struct rtable *rt);
+void rt_del_uncached_list(struct rtable *rt);
+
 static inline void ip_rt_put(struct rtable *rt)
 {
 	/* dst_release() accepts a NULL parameter.
net/ipv4/route.c
@@ -1393,7 +1393,7 @@ struct uncached_list {
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
 
-static void rt_add_uncached_list(struct rtable *rt)
+void rt_add_uncached_list(struct rtable *rt)
 {
 	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
 
@@ -1404,14 +1404,8 @@ static void rt_add_uncached_list(struct rtable *rt)
 	spin_unlock_bh(&ul->lock);
 }
 
-static void ipv4_dst_destroy(struct dst_entry *dst)
+void rt_del_uncached_list(struct rtable *rt)
 {
-	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
-	struct rtable *rt = (struct rtable *) dst;
-
-	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
-		kfree(p);
-
 	if (!list_empty(&rt->rt_uncached)) {
 		struct uncached_list *ul = rt->rt_uncached_list;
 
@@ -1421,6 +1415,17 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 	}
 }
 
+static void ipv4_dst_destroy(struct dst_entry *dst)
+{
+	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
+	struct rtable *rt = (struct rtable *)dst;
+
+	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
+		kfree(p);
+
+	rt_del_uncached_list(rt);
+}
+
 void rt_flush_dev(struct net_device *dev)
 {
 	struct net *net = dev_net(dev);
net/ipv4/xfrm4_mode_tunnel.c
@@ -92,7 +92,8 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	skb_reset_network_header(skb);
 	skb_mac_header_rebuild(skb);
-	eth_hdr(skb)->h_proto = skb->protocol;
+	if (skb->mac_len)
+		eth_hdr(skb)->h_proto = skb->protocol;
 
 	err = 0;
 
net/ipv4/xfrm4_policy.c
@@ -102,6 +102,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 	xdst->u.rt.rt_pmtu = rt->rt_pmtu;
 	xdst->u.rt.rt_table_id = rt->rt_table_id;
 	INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
+	rt_add_uncached_list(&xdst->u.rt);
 
 	return 0;
 }
@@ -241,7 +242,8 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
 	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
 
 	dst_destroy_metrics_generic(dst);
-
+	if (xdst->u.rt.rt_uncached_list)
+		rt_del_uncached_list(&xdst->u.rt);
 	xfrm_dst_destroy(xdst);
 }
 
net/ipv6/route.c
@@ -128,7 +128,7 @@ struct uncached_list {
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
 
-static void rt6_uncached_list_add(struct rt6_info *rt)
+void rt6_uncached_list_add(struct rt6_info *rt)
 {
 	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
 
@@ -139,7 +139,7 @@ static void rt6_uncached_list_add(struct rt6_info *rt)
 	spin_unlock_bh(&ul->lock);
 }
 
-static void rt6_uncached_list_del(struct rt6_info *rt)
+void rt6_uncached_list_del(struct rt6_info *rt)
 {
 	if (!list_empty(&rt->rt6i_uncached)) {
 		struct uncached_list *ul = rt->rt6i_uncached_list;
net/ipv6/xfrm6_mode_tunnel.c
@@ -92,7 +92,8 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	skb_reset_network_header(skb);
 	skb_mac_header_rebuild(skb);
-	eth_hdr(skb)->h_proto = skb->protocol;
+	if (skb->mac_len)
+		eth_hdr(skb)->h_proto = skb->protocol;
 
 	err = 0;
 
net/ipv6/xfrm6_policy.c
@@ -113,6 +113,9 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 	xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway;
 	xdst->u.rt6.rt6i_dst = rt->rt6i_dst;
 	xdst->u.rt6.rt6i_src = rt->rt6i_src;
+	INIT_LIST_HEAD(&xdst->u.rt6.rt6i_uncached);
+	rt6_uncached_list_add(&xdst->u.rt6);
+	atomic_inc(&dev_net(dev)->ipv6.rt6_stats->fib_rt_uncache);
 
 	return 0;
 }
@@ -244,6 +247,8 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
 	if (likely(xdst->u.rt6.rt6i_idev))
 		in6_dev_put(xdst->u.rt6.rt6i_idev);
 	dst_destroy_metrics_generic(dst);
+	if (xdst->u.rt6.rt6i_uncached_list)
+		rt6_uncached_list_del(&xdst->u.rt6);
 	xfrm_dst_destroy(xdst);
 }
 
net/xfrm/xfrm_ipcomp.c
@@ -283,7 +283,7 @@ static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
 		struct crypto_comp *tfm;
 
 		/* This can be any valid CPU ID so we don't need locking. */
-		tfm = __this_cpu_read(*pos->tfms);
+		tfm = this_cpu_read(*pos->tfms);
 
 		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
 			pos->users++;
net/xfrm/xfrm_policy.c
@@ -1458,10 +1458,13 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
 static int xfrm_get_tos(const struct flowi *fl, int family)
 {
 	const struct xfrm_policy_afinfo *afinfo;
-	int tos = 0;
+	int tos;
 
 	afinfo = xfrm_policy_get_afinfo(family);
-	tos = afinfo ? afinfo->get_tos(fl) : 0;
+	if (!afinfo)
+		return 0;
+
+	tos = afinfo->get_tos(fl);
 
 	rcu_read_unlock();
 
@@ -1891,7 +1894,7 @@ static void xfrm_policy_queue_process(struct timer_list *t)
 	spin_unlock(&pq->hold_queue.lock);
 
 	dst_hold(xfrm_dst_path(dst));
-	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, 0);
+	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
 	if (IS_ERR(dst))
 		goto purge_queue;
 
@@ -2729,14 +2732,14 @@ static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
 	while (dst->xfrm) {
 		const struct xfrm_state *xfrm = dst->xfrm;
 
+		dst = xfrm_dst_child(dst);
+
 		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
 			continue;
 		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
 			daddr = xfrm->coaddr;
 		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
 			daddr = &xfrm->id.daddr;
-
-		dst = xfrm_dst_child(dst);
 	}
 	return daddr;
 }
net/xfrm/xfrm_replay.c
@@ -660,7 +660,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff *skb)
 	} else {
 		XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
 		XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
-		xo->seq.low = oseq = oseq + 1;
+		xo->seq.low = oseq + 1;
 		xo->seq.hi = oseq_hi;
 		oseq += skb_shinfo(skb)->gso_segs;
 	}
net/xfrm/xfrm_state.c
@@ -2056,6 +2056,11 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
 	struct xfrm_mgr *km;
 	struct xfrm_policy *pol = NULL;
 
+#ifdef CONFIG_COMPAT
+	if (in_compat_syscall())
+		return -EOPNOTSUPP;
+#endif
+
 	if (!optval && !optlen) {
 		xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
 		xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
net/xfrm/xfrm_user.c
@@ -121,22 +121,17 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
 	struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
 	struct xfrm_replay_state_esn *rs;
 
-	if (p->flags & XFRM_STATE_ESN) {
-		if (!rt)
-			return -EINVAL;
-
-		rs = nla_data(rt);
-
-		if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
-			return -EINVAL;
-
-		if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
-		    nla_len(rt) != sizeof(*rs))
-			return -EINVAL;
-	}
-
 	if (!rt)
-		return 0;
+		return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;
+
+	rs = nla_data(rt);
+
+	if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
+		return -EINVAL;
+
+	if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
+	    nla_len(rt) != sizeof(*rs))
+		return -EINVAL;
 
 	/* As only ESP and AH support ESN feature. */
 	if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))