2019-05-23 17:14:40 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2008-01-11 22:57:09 +08:00
|
|
|
/* SCTP kernel implementation
|
2005-04-17 06:20:36 +08:00
|
|
|
* Copyright (c) 1999-2000 Cisco, Inc.
|
|
|
|
* Copyright (c) 1999-2001 Motorola, Inc.
|
|
|
|
* Copyright (c) 2001-2002 International Business Machines, Corp.
|
|
|
|
* Copyright (c) 2001 Intel Corp.
|
|
|
|
* Copyright (c) 2001 Nokia, Inc.
|
|
|
|
* Copyright (c) 2001 La Monte H.P. Yarroll
|
|
|
|
*
|
2008-01-11 22:57:09 +08:00
|
|
|
* This file is part of the SCTP kernel implementation
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
|
|
|
* This abstraction represents an SCTP endpoint.
|
|
|
|
*
|
|
|
|
* Please send any bug reports or fixes you make to the
|
|
|
|
* email address(es):
|
2013-07-23 20:51:47 +08:00
|
|
|
* lksctp developers <linux-sctp@vger.kernel.org>
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
|
|
|
* Written or modified by:
|
|
|
|
* La Monte H.P. Yarroll <piggy@acm.org>
|
|
|
|
* Karl Knutson <karl@athena.chicago.il.us>
|
|
|
|
* Jon Grimm <jgrimm@austin.ibm.com>
|
|
|
|
* Daisy Chang <daisyc@us.ibm.com>
|
|
|
|
* Dajiang Zhang <dajiang.zhang@nokia.com>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/in.h>
|
|
|
|
#include <linux/random.h> /* get_random_bytes() */
|
|
|
|
#include <net/sock.h>
|
|
|
|
#include <net/ipv6.h>
|
|
|
|
#include <net/sctp/sctp.h>
|
|
|
|
#include <net/sctp/sm.h>
|
|
|
|
|
|
|
|
/* Forward declarations for internal helpers. */
|
2006-11-22 22:57:56 +08:00
|
|
|
static void sctp_endpoint_bh_rcv(struct work_struct *work);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize the base fields of the endpoint structure.
|
|
|
|
*/
|
|
|
|
/* Initialize the base fields of a freshly allocated (zeroed) endpoint.
 *
 * ep:  zero-initialized endpoint to set up (see sctp_endpoint_new()).
 * sk:  owning socket; this function takes a reference on it (sock_hold).
 * gfp: allocation flags for the digest buffer and the null shared key.
 *
 * Returns @ep on success, or NULL on allocation failure (in which case
 * everything allocated here has been released and the caller still owns
 * the ep memory itself).
 */
static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
						struct sock *sk,
						gfp_t gfp)
{
	struct net *net = sock_net(sk);
	struct sctp_shared_key *null_key;

	/* Scratch buffer for HMAC computation (cookie/auth signatures). */
	ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
	if (!ep->digest)
		return NULL;

	/* Snapshot the per-net sysctl values so later sysctl flips cannot
	 * change this endpoint's behavior mid-lifetime (auth_enable in
	 * particular must stay immutable to avoid a NULL hmacs deref).
	 */
	ep->asconf_enable = net->sctp.addip_enable;
	ep->auth_enable = net->sctp.auth_enable;
	if (ep->auth_enable) {
		if (sctp_auth_init(ep, gfp))
			goto nomem;
		if (ep->asconf_enable) {
			/* ASCONF chunks must be authenticated (RFC 5061). */
			sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF);
			sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF_ACK);
		}
	}

	/* Initialize the base structure. */
	/* What type of endpoint are we? */
	ep->base.type = SCTP_EP_TYPE_SOCKET;

	/* Initialize the basic object fields. */
	refcount_set(&ep->base.refcnt, 1);
	ep->base.dead = false;

	/* Create an input queue. */
	sctp_inq_init(&ep->base.inqueue);

	/* Set its top-half handler */
	sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);

	/* Initialize the bind addr area */
	sctp_bind_addr_init(&ep->base.bind_addr, 0);

	/* Create the lists of associations. */
	INIT_LIST_HEAD(&ep->asocs);

	/* Use SCTP specific send buffer space queues. */
	ep->sndbuf_policy = net->sctp.sndbuf_policy;

	/* Hook SCTP's wakeup callbacks into the socket. */
	sk->sk_data_ready = sctp_data_ready;
	sk->sk_write_space = sctp_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	/* Get the receive buffer policy for this endpoint */
	ep->rcvbuf_policy = net->sctp.rcvbuf_policy;

	/* Initialize the secret key used with cookie. */
	get_random_bytes(ep->secret_key, sizeof(ep->secret_key));

	/* SCTP-AUTH extensions*/
	INIT_LIST_HEAD(&ep->endpoint_shared_keys);
	/* Add the null key to the endpoint shared keys list (RFC 4895
	 * requires key identifier 0 to always exist).
	 */
	null_key = sctp_auth_shkey_create(0, gfp);
	if (!null_key)
		goto nomem_shkey;

	list_add(&null_key->key_list, &ep->endpoint_shared_keys);

	/* More per-net feature snapshots, same immutability rationale
	 * as auth_enable above.
	 */
	ep->prsctp_enable = net->sctp.prsctp_enable;
	ep->reconf_enable = net->sctp.reconf_enable;
	ep->ecn_enable = net->sctp.ecn_enable;

	/* Remember who we are attached to.  The endpoint holds a socket
	 * reference until sctp_endpoint_destroy_rcu() drops it.
	 */
	ep->base.sk = sk;
	ep->base.net = sock_net(sk);
	sock_hold(ep->base.sk);

	return ep;

nomem_shkey:
	/* Undo sctp_auth_init(); safe to call even if auth was disabled. */
	sctp_auth_free(ep);
nomem:
	kfree(ep->digest);
	return NULL;

}
|
|
|
|
|
|
|
|
/* Create a sctp_endpoint with all that boring stuff initialized.
|
|
|
|
* Returns NULL if there isn't enough memory.
|
|
|
|
*/
|
2005-10-07 14:46:04 +08:00
|
|
|
/* Allocate and fully initialize a new SCTP endpoint bound to @sk.
 * Returns NULL if there isn't enough memory.
 */
struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp)
{
	struct sctp_endpoint *ep = kzalloc(sizeof(*ep), gfp);

	if (!ep)
		return NULL;

	if (!sctp_endpoint_init(ep, sk, gfp)) {
		/* Init released its own partial allocations already. */
		kfree(ep);
		return NULL;
	}

	SCTP_DBG_OBJCNT_INC(ep);
	return ep;
}
|
|
|
|
|
|
|
|
/* Add an association to an endpoint. */
|
|
|
|
void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
|
|
|
|
struct sctp_association *asoc)
|
|
|
|
{
|
|
|
|
struct sock *sk = ep->base.sk;
|
|
|
|
|
2006-10-31 10:55:11 +08:00
|
|
|
/* If this is a temporary association, don't bother
|
|
|
|
* since we'll be removing it shortly and don't
|
|
|
|
* want anyone to find it anyway.
|
|
|
|
*/
|
|
|
|
if (asoc->temp)
|
|
|
|
return;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Now just add it to our list of asocs */
|
|
|
|
list_add_tail(&asoc->asocs, &ep->asocs);
|
|
|
|
|
|
|
|
/* Increment the backlog value for a TCP-style listening socket. */
|
|
|
|
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
|
2019-11-06 06:11:52 +08:00
|
|
|
sk_acceptq_added(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Free the endpoint structure. Delay cleanup until
|
|
|
|
* all users have released their reference count on this structure.
|
|
|
|
*/
|
|
|
|
/* Free the endpoint structure.  Delay cleanup until
 * all users have released their reference count on this structure.
 */
void sctp_endpoint_free(struct sctp_endpoint *ep)
{
	/* Mark dead first so concurrent lookups/bh processing bail out. */
	ep->base.dead = true;

	inet_sk_set_state(ep->base.sk, SCTP_SS_CLOSED);

	/* Unlink this endpoint, so we can't find it again! */
	sctp_unhash_endpoint(ep);

	/* Drop the initial reference; actual destruction happens in
	 * sctp_endpoint_destroy() once the refcount hits zero.
	 */
	sctp_endpoint_put(ep);
}
|
|
|
|
|
|
|
|
/* Final destructor for endpoint. */
|
sctp: use call_rcu to free endpoint
[ Upstream commit 5ec7d18d1813a5bead0b495045606c93873aecbb ]
This patch is to delay the endpoint free by calling call_rcu() to fix
another use-after-free issue in sctp_sock_dump():
BUG: KASAN: use-after-free in __lock_acquire+0x36d9/0x4c20
Call Trace:
__lock_acquire+0x36d9/0x4c20 kernel/locking/lockdep.c:3218
lock_acquire+0x1ed/0x520 kernel/locking/lockdep.c:3844
__raw_spin_lock_bh include/linux/spinlock_api_smp.h:135 [inline]
_raw_spin_lock_bh+0x31/0x40 kernel/locking/spinlock.c:168
spin_lock_bh include/linux/spinlock.h:334 [inline]
__lock_sock+0x203/0x350 net/core/sock.c:2253
lock_sock_nested+0xfe/0x120 net/core/sock.c:2774
lock_sock include/net/sock.h:1492 [inline]
sctp_sock_dump+0x122/0xb20 net/sctp/diag.c:324
sctp_for_each_transport+0x2b5/0x370 net/sctp/socket.c:5091
sctp_diag_dump+0x3ac/0x660 net/sctp/diag.c:527
__inet_diag_dump+0xa8/0x140 net/ipv4/inet_diag.c:1049
inet_diag_dump+0x9b/0x110 net/ipv4/inet_diag.c:1065
netlink_dump+0x606/0x1080 net/netlink/af_netlink.c:2244
__netlink_dump_start+0x59a/0x7c0 net/netlink/af_netlink.c:2352
netlink_dump_start include/linux/netlink.h:216 [inline]
inet_diag_handler_cmd+0x2ce/0x3f0 net/ipv4/inet_diag.c:1170
__sock_diag_cmd net/core/sock_diag.c:232 [inline]
sock_diag_rcv_msg+0x31d/0x410 net/core/sock_diag.c:263
netlink_rcv_skb+0x172/0x440 net/netlink/af_netlink.c:2477
sock_diag_rcv+0x2a/0x40 net/core/sock_diag.c:274
This issue occurs when asoc is peeled off and the old sk is freed after
getting it by asoc->base.sk and before calling lock_sock(sk).
To prevent the sk free, as a holder of the sk, ep should be alive when
calling lock_sock(). This patch uses call_rcu() and moves sock_put and
ep free into sctp_endpoint_destroy_rcu(), so that it's safe to try to
hold the ep under rcu_read_lock in sctp_transport_traverse_process().
If sctp_endpoint_hold() returns true, it means this ep is still alive
and we have held it and can continue to dump it; If it returns false,
it means this ep is dead and can be freed after rcu_read_unlock, and
we should skip it.
In sctp_sock_dump(), after locking the sk, if this ep is different from
tsp->asoc->ep, it means during this dumping, this asoc was peeled off
before calling lock_sock(), and the sk should be skipped; If this ep is
the same with tsp->asoc->ep, it means no peeloff happens on this asoc,
and due to lock_sock, no peeloff will happen either until release_sock.
Note that delaying endpoint free won't delay the port release, as the
port release happens in sctp_endpoint_destroy() before calling call_rcu().
Also, freeing endpoint by call_rcu() makes it safe to access the sk by
asoc->base.sk in sctp_assocs_seq_show() and sctp_rcv().
Thanks Jones to bring this issue up.
v1->v2:
- improve the changelog.
- add kfree(ep) into sctp_endpoint_destroy_rcu(), as Jakub noticed.
Reported-by: syzbot+9276d76e83e3bcde6c99@syzkaller.appspotmail.com
Reported-by: Lee Jones <lee.jones@linaro.org>
Fixes: d25adbeb0cdb ("sctp: fix an use-after-free issue in sctp_sock_dump")
Signed-off-by: Xin Long <lucien.xin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
2021-12-24 02:04:30 +08:00
|
|
|
/* RCU callback: final stage of endpoint destruction.
 *
 * Runs after a grace period following sctp_endpoint_destroy(), so any
 * reader that found the endpoint under rcu_read_lock() has finished.
 * Only here do we drop the socket reference and free the ep memory,
 * which keeps ep->base.sk valid for such RCU readers.
 */
static void sctp_endpoint_destroy_rcu(struct rcu_head *head)
{
	struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu);
	struct sock *sk = ep->base.sk;

	/* Detach the endpoint from the socket and release the reference
	 * taken in sctp_endpoint_init().
	 */
	sctp_sk(sk)->ep = NULL;
	sock_put(sk);

	kfree(ep);
	SCTP_DBG_OBJCNT_DEC(ep);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Tear down an endpoint whose refcount has dropped to zero.
 *
 * Releases everything sctp_endpoint_init() set up except the ep memory
 * and the socket reference, which are handed to call_rcu() so RCU
 * readers can still safely dereference ep->base.sk until a grace
 * period elapses.  Note the port IS released here (not delayed), so
 * rebinding is not held up by the RCU grace period.
 */
static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
{
	struct sock *sk;

	/* sctp_endpoint_free() must have marked us dead before the last
	 * reference could be dropped; anything else is a refcount bug.
	 */
	if (unlikely(!ep->base.dead)) {
		WARN(1, "Attempt to destroy undead endpoint %p!\n", ep);
		return;
	}

	/* Free the digest buffer */
	kfree(ep->digest);

	/* SCTP-AUTH: Free up AUTH related data such as shared keys,
	 * chunks and hmacs arrays that were allocated
	 */
	sctp_auth_destroy_keys(&ep->endpoint_shared_keys);
	sctp_auth_free(ep);

	/* Cleanup. */
	sctp_inq_free(&ep->base.inqueue);
	sctp_bind_addr_free(&ep->base.bind_addr);

	/* Scrub the cookie secret before the memory is recycled.
	 * NOTE(review): plain memset of a secret can in principle be
	 * optimized away; kernel context here presumably prevents that,
	 * but memzero_explicit() would be the belt-and-braces choice.
	 */
	memset(ep->secret_key, 0, sizeof(ep->secret_key));

	sk = ep->base.sk;
	/* Remove and free the port */
	if (sctp_sk(sk)->bind_hash)
		sctp_put_port(sk);

	/* Defer sock_put()/kfree() until after an RCU grace period. */
	call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu);
}
|
|
|
|
|
|
|
|
/* Hold a reference to an endpoint. */
|
sctp: use call_rcu to free endpoint
[ Upstream commit 5ec7d18d1813a5bead0b495045606c93873aecbb ]
This patch is to delay the endpoint free by calling call_rcu() to fix
another use-after-free issue in sctp_sock_dump():
BUG: KASAN: use-after-free in __lock_acquire+0x36d9/0x4c20
Call Trace:
__lock_acquire+0x36d9/0x4c20 kernel/locking/lockdep.c:3218
lock_acquire+0x1ed/0x520 kernel/locking/lockdep.c:3844
__raw_spin_lock_bh include/linux/spinlock_api_smp.h:135 [inline]
_raw_spin_lock_bh+0x31/0x40 kernel/locking/spinlock.c:168
spin_lock_bh include/linux/spinlock.h:334 [inline]
__lock_sock+0x203/0x350 net/core/sock.c:2253
lock_sock_nested+0xfe/0x120 net/core/sock.c:2774
lock_sock include/net/sock.h:1492 [inline]
sctp_sock_dump+0x122/0xb20 net/sctp/diag.c:324
sctp_for_each_transport+0x2b5/0x370 net/sctp/socket.c:5091
sctp_diag_dump+0x3ac/0x660 net/sctp/diag.c:527
__inet_diag_dump+0xa8/0x140 net/ipv4/inet_diag.c:1049
inet_diag_dump+0x9b/0x110 net/ipv4/inet_diag.c:1065
netlink_dump+0x606/0x1080 net/netlink/af_netlink.c:2244
__netlink_dump_start+0x59a/0x7c0 net/netlink/af_netlink.c:2352
netlink_dump_start include/linux/netlink.h:216 [inline]
inet_diag_handler_cmd+0x2ce/0x3f0 net/ipv4/inet_diag.c:1170
__sock_diag_cmd net/core/sock_diag.c:232 [inline]
sock_diag_rcv_msg+0x31d/0x410 net/core/sock_diag.c:263
netlink_rcv_skb+0x172/0x440 net/netlink/af_netlink.c:2477
sock_diag_rcv+0x2a/0x40 net/core/sock_diag.c:274
This issue occurs when asoc is peeled off and the old sk is freed after
getting it by asoc->base.sk and before calling lock_sock(sk).
To prevent the sk free, as a holder of the sk, ep should be alive when
calling lock_sock(). This patch uses call_rcu() and moves sock_put and
ep free into sctp_endpoint_destroy_rcu(), so that it's safe to try to
hold the ep under rcu_read_lock in sctp_transport_traverse_process().
If sctp_endpoint_hold() returns true, it means this ep is still alive
and we have held it and can continue to dump it; If it returns false,
it means this ep is dead and can be freed after rcu_read_unlock, and
we should skip it.
In sctp_sock_dump(), after locking the sk, if this ep is different from
tsp->asoc->ep, it means during this dumping, this asoc was peeled off
before calling lock_sock(), and the sk should be skipped; If this ep is
the same with tsp->asoc->ep, it means no peeloff happens on this asoc,
and due to lock_sock, no peeloff will happen either until release_sock.
Note that delaying endpoint free won't delay the port release, as the
port release happens in sctp_endpoint_destroy() before calling call_rcu().
Also, freeing endpoint by call_rcu() makes it safe to access the sk by
asoc->base.sk in sctp_assocs_seq_show() and sctp_rcv().
Thanks Jones to bring this issue up.
v1->v2:
- improve the changelog.
- add kfree(ep) into sctp_endpoint_destroy_rcu(), as Jakub noticed.
Reported-by: syzbot+9276d76e83e3bcde6c99@syzkaller.appspotmail.com
Reported-by: Lee Jones <lee.jones@linaro.org>
Fixes: d25adbeb0cdb ("sctp: fix an use-after-free issue in sctp_sock_dump")
Signed-off-by: Xin Long <lucien.xin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
2021-12-24 02:04:30 +08:00
|
|
|
int sctp_endpoint_hold(struct sctp_endpoint *ep)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
sctp: use call_rcu to free endpoint
[ Upstream commit 5ec7d18d1813a5bead0b495045606c93873aecbb ]
This patch is to delay the endpoint free by calling call_rcu() to fix
another use-after-free issue in sctp_sock_dump():
BUG: KASAN: use-after-free in __lock_acquire+0x36d9/0x4c20
Call Trace:
__lock_acquire+0x36d9/0x4c20 kernel/locking/lockdep.c:3218
lock_acquire+0x1ed/0x520 kernel/locking/lockdep.c:3844
__raw_spin_lock_bh include/linux/spinlock_api_smp.h:135 [inline]
_raw_spin_lock_bh+0x31/0x40 kernel/locking/spinlock.c:168
spin_lock_bh include/linux/spinlock.h:334 [inline]
__lock_sock+0x203/0x350 net/core/sock.c:2253
lock_sock_nested+0xfe/0x120 net/core/sock.c:2774
lock_sock include/net/sock.h:1492 [inline]
sctp_sock_dump+0x122/0xb20 net/sctp/diag.c:324
sctp_for_each_transport+0x2b5/0x370 net/sctp/socket.c:5091
sctp_diag_dump+0x3ac/0x660 net/sctp/diag.c:527
__inet_diag_dump+0xa8/0x140 net/ipv4/inet_diag.c:1049
inet_diag_dump+0x9b/0x110 net/ipv4/inet_diag.c:1065
netlink_dump+0x606/0x1080 net/netlink/af_netlink.c:2244
__netlink_dump_start+0x59a/0x7c0 net/netlink/af_netlink.c:2352
netlink_dump_start include/linux/netlink.h:216 [inline]
inet_diag_handler_cmd+0x2ce/0x3f0 net/ipv4/inet_diag.c:1170
__sock_diag_cmd net/core/sock_diag.c:232 [inline]
sock_diag_rcv_msg+0x31d/0x410 net/core/sock_diag.c:263
netlink_rcv_skb+0x172/0x440 net/netlink/af_netlink.c:2477
sock_diag_rcv+0x2a/0x40 net/core/sock_diag.c:274
This issue occurs when asoc is peeled off and the old sk is freed after
getting it by asoc->base.sk and before calling lock_sock(sk).
To prevent the sk free, as a holder of the sk, ep should be alive when
calling lock_sock(). This patch uses call_rcu() and moves sock_put and
ep free into sctp_endpoint_destroy_rcu(), so that it's safe to try to
hold the ep under rcu_read_lock in sctp_transport_traverse_process().
If sctp_endpoint_hold() returns true, it means this ep is still alive
and we have held it and can continue to dump it; If it returns false,
it means this ep is dead and can be freed after rcu_read_unlock, and
we should skip it.
In sctp_sock_dump(), after locking the sk, if this ep is different from
tsp->asoc->ep, it means during this dumping, this asoc was peeled off
before calling lock_sock(), and the sk should be skipped; If this ep is
the same with tsp->asoc->ep, it means no peeloff happens on this asoc,
and due to lock_sock, no peeloff will happen either until release_sock.
Note that delaying endpoint free won't delay the port release, as the
port release happens in sctp_endpoint_destroy() before calling call_rcu().
Also, freeing endpoint by call_rcu() makes it safe to access the sk by
asoc->base.sk in sctp_assocs_seq_show() and sctp_rcv().
Thanks Jones to bring this issue up.
v1->v2:
- improve the changelog.
- add kfree(ep) into sctp_endpoint_destroy_rcu(), as Jakub noticed.
Reported-by: syzbot+9276d76e83e3bcde6c99@syzkaller.appspotmail.com
Reported-by: Lee Jones <lee.jones@linaro.org>
Fixes: d25adbeb0cdb ("sctp: fix an use-after-free issue in sctp_sock_dump")
Signed-off-by: Xin Long <lucien.xin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
2021-12-24 02:04:30 +08:00
|
|
|
return refcount_inc_not_zero(&ep->base.refcnt);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Release a reference to an endpoint and clean up if there are
|
|
|
|
* no more references.
|
|
|
|
*/
|
|
|
|
void sctp_endpoint_put(struct sctp_endpoint *ep)
|
|
|
|
{
|
2017-07-04 20:53:28 +08:00
|
|
|
if (refcount_dec_and_test(&ep->base.refcnt))
|
2005-04-17 06:20:36 +08:00
|
|
|
sctp_endpoint_destroy(ep);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Is this the endpoint we are looking for? */
|
|
|
|
struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
|
2012-08-06 16:40:21 +08:00
|
|
|
struct net *net,
|
2005-04-17 06:20:36 +08:00
|
|
|
const union sctp_addr *laddr)
|
|
|
|
{
|
2007-09-17 07:03:28 +08:00
|
|
|
struct sctp_endpoint *retval = NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-08-06 16:40:21 +08:00
|
|
|
if ((htons(ep->base.bind_addr.port) == laddr->v4.sin_port) &&
|
2019-12-09 13:45:18 +08:00
|
|
|
net_eq(ep->base.net, net)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
|
2007-09-17 07:03:28 +08:00
|
|
|
sctp_sk(ep->base.sk)))
|
2005-04-17 06:20:36 +08:00
|
|
|
retval = ep;
|
|
|
|
}
|
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Find the association that goes with this chunk.
|
2015-12-30 23:50:47 +08:00
|
|
|
* We lookup the transport from hashtable at first, then get association
|
|
|
|
* through t->assoc.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2015-12-30 23:50:50 +08:00
|
|
|
struct sctp_association *sctp_endpoint_lookup_assoc(
|
2005-04-17 06:20:36 +08:00
|
|
|
const struct sctp_endpoint *ep,
|
|
|
|
const union sctp_addr *paddr,
|
|
|
|
struct sctp_transport **transport)
|
|
|
|
{
|
2007-11-10 00:41:36 +08:00
|
|
|
struct sctp_association *asoc = NULL;
|
2015-12-30 23:50:47 +08:00
|
|
|
struct sctp_transport *t;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-11-10 00:41:36 +08:00
|
|
|
*transport = NULL;
|
2011-04-20 05:29:23 +08:00
|
|
|
|
|
|
|
/* If the local port is not set, there can't be any associations
|
|
|
|
* on this endpoint.
|
|
|
|
*/
|
|
|
|
if (!ep->base.bind_addr.port)
|
2016-12-15 23:00:55 +08:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
2015-12-30 23:50:47 +08:00
|
|
|
t = sctp_epaddr_lookup_transport(ep, paddr);
|
2016-01-16 20:17:17 +08:00
|
|
|
if (!t)
|
2015-12-30 23:50:47 +08:00
|
|
|
goto out;
|
2011-04-20 05:29:23 +08:00
|
|
|
|
2015-12-30 23:50:47 +08:00
|
|
|
*transport = t;
|
|
|
|
asoc = t->asoc;
|
2011-04-20 05:29:23 +08:00
|
|
|
out:
|
2016-12-15 23:00:55 +08:00
|
|
|
rcu_read_unlock();
|
2007-11-10 00:41:36 +08:00
|
|
|
return asoc;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Look for any peeled off association from the endpoint that matches the
|
|
|
|
* given peer address.
|
|
|
|
*/
|
2018-03-26 16:55:00 +08:00
|
|
|
bool sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
|
|
|
|
const union sctp_addr *paddr)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct sctp_sockaddr_entry *addr;
|
2019-12-09 13:45:18 +08:00
|
|
|
struct net *net = ep->base.net;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct sctp_bind_addr *bp;
|
|
|
|
|
|
|
|
bp = &ep->base.bind_addr;
|
2007-09-17 07:03:28 +08:00
|
|
|
/* This function is called with the socket lock held,
|
|
|
|
* so the address_list can not change.
|
|
|
|
*/
|
|
|
|
list_for_each_entry(addr, &bp->address_list, list) {
|
2012-08-06 16:41:13 +08:00
|
|
|
if (sctp_has_association(net, &addr->a, paddr))
|
2018-03-26 16:55:00 +08:00
|
|
|
return true;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2018-03-26 16:55:00 +08:00
|
|
|
return false;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Do delayed input processing. This is scheduled by sctp_rcv().
|
|
|
|
* This may be called on BH or task time.
|
|
|
|
*/
|
2006-11-22 22:57:56 +08:00
|
|
|
/* Do delayed input processing.  This is scheduled by sctp_rcv().
 * This may be called on BH or task time.
 *
 * Drains the endpoint's input queue, resolving each chunk to its
 * association/transport and feeding it to the SCTP state machine.
 */
static void sctp_endpoint_bh_rcv(struct work_struct *work)
{
	struct sctp_endpoint *ep =
		container_of(work, struct sctp_endpoint,
			     base.inqueue.immediate);
	struct sctp_association *asoc;
	struct sock *sk;
	struct net *net;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	union sctp_subtype subtype;
	enum sctp_state state;
	int error = 0;
	int first_time = 1;	/* is this the first time through the loop */

	/* Endpoint is being torn down; nothing to process. */
	if (ep->base.dead)
		return;

	asoc = NULL;
	inqueue = &ep->base.inqueue;
	sk = ep->base.sk;
	net = sock_net(sk);

	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* If the first chunk in the packet is AUTH, do special
		 * processing specified in Section 6.3 of SCTP-AUTH spec
		 */
		if (first_time && (subtype.chunk == SCTP_CID_AUTH)) {
			struct sctp_chunkhdr *next_hdr;

			next_hdr = sctp_inq_peek(inqueue);
			if (!next_hdr)
				goto normal;

			/* If the next chunk is COOKIE-ECHO, skip the AUTH
			 * chunk while saving a pointer to it so we can do
			 * Authentication later (during cookie-echo
			 * processing).
			 */
			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
				/* NOTE(review): skb_clone() failure is not
				 * checked here; downstream cookie-echo
				 * processing presumably treats a NULL
				 * auth_chunk as "not authenticated".
				 */
				chunk->auth_chunk = skb_clone(chunk->skb,
							      GFP_ATOMIC);
				chunk->auth = 1;
				continue;
			}
		}
normal:
		/* We might have grown an association since last we
		 * looked, so try again.
		 *
		 * This happens when we've just processed our
		 * COOKIE-ECHO chunk.
		 */
		if (NULL == chunk->asoc) {
			asoc = sctp_endpoint_lookup_assoc(ep,
							  sctp_source(chunk),
							  &transport);
			chunk->asoc = asoc;
			chunk->transport = transport;
		}

		state = asoc ? asoc->state : SCTP_STATE_CLOSED;
		/* Drop chunks that require authentication but arrived
		 * unauthenticated (SCTP-AUTH).
		 */
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (asoc && sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else {
			SCTP_INC_STATS(ep->base.net, SCTP_MIB_INCTRLCHUNKS);
			if (asoc)
				asoc->stats.ictrlchunks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = ktime_get();

		/* Run the state machine on this chunk. */
		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state,
				   ep, asoc, chunk, GFP_ATOMIC);

		if (error && chunk)
			chunk->pdiscard = 1;

		/* Check to see if the endpoint is freed in response to
		 * the incoming chunk. If so, get out of the while loop.
		 */
		if (!sctp_sk(sk)->ep)
			break;

		if (first_time)
			first_time = 0;
	}
}
|