Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix several bpfilter/UMH bugs, in particular make the UMH build not
    depend upon X86 specific Kconfig symbols. From Alexei Starovoitov.

 2) Fix handling of modified context pointer in bpf verifier, from
    Daniel Borkmann.

 3) Kill regression in ifdown/ifup sequences for hv_netvsc driver, from
    Dexuan Cui.

 4) When the bonding primary member name changes, we have to
    re-evaluate the bond->force_primary setting, from Xiangning Yu.

 5) Eliminate possible padding beyond end of SKB in cdc_ncm driver,
    from Bjørn Mork.

 6) RX queue length reported for UDP sockets in procfs and socket diag
    is inaccurate, from Paolo Abeni.

 7) Fix br_fdb_find_port() locking, from Petr Machata.

 8) Limit sk_rcvlowat values properly in TCP, from Soheil Hassas
    Yeganeh.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (23 commits)
  tcp: limit sk_rcvlowat by the maximum receive buffer
  net: phy: dp83822: use BMCR_ANENABLE instead of BMSR_ANEGCAPABLE for DP83620
  socket: close race condition between sock_close() and sockfs_setattr()
  net: bridge: Fix locking in br_fdb_find_port()
  udp: fix rx queue len reported by diag and proc interface
  cdc_ncm: avoid padding beyond end of skb
  net/sched: act_simple: fix parsing of TCA_DEF_DATA
  net: fddi: fix a possible null-ptr-deref
  net: aquantia: fix unsigned numvecs comparison with less than zero
  net: stmmac: fix build failure due to missing COMMON_CLK dependency
  bpfilter: fix race in pipe access
  bpf, xdp: fix crash in xdp_umem_unaccount_pages
  xsk: Fix umem fill/completion queue mmap on 32-bit
  tools/bpf: fix selftest get_cgroup_id_user
  bpfilter: fix OUTPUT_FORMAT
  umh: fix race condition
  net: mscc: ocelot: Fix uninitialized error in ocelot_netdevice_event()
  bonding: re-evaluate force_primary when the primary slave name changes
  ip_tunnel: Fix name string concatenate in __ip_tunnel_create()
  hv_netvsc: Fix a network regression after ifdown/ifup
  ...
commit f0dc7f9c6d
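Note: several hunks below (tap, tun, virtio_net, af_packet and include/linux/virtio_net.h) revolve around one interface change: virtio_net_hdr_from_skb() grows a vlan_hlen argument so that VLAN_HLEN is only added to csum_start when the skb actually carries a VLAN tag in its metadata. A minimal sketch of the resulting call pattern, assuming kernel headers; fill_vnet_hdr() is an illustrative name, not a function from this patch:

    #include <linux/virtio_net.h>
    #include <linux/if_vlan.h>

    /* Sketch only: mirrors the updated virtio_net_hdr_from_skb() signature
     * shown in the include/linux/virtio_net.h hunk below; it is not a
     * drop-in implementation of any driver in this series.
     */
    static int fill_vnet_hdr(struct sk_buff *skb, struct virtio_net_hdr *hdr,
                             bool little_endian)
    {
            /* Account for the VLAN header only when the tag sits in skb
             * metadata and will be inserted in front of the payload.
             */
            int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;

            /* csum_start inside the header is now offset by vlan_hlen
             * instead of unconditionally by VLAN_HLEN.
             */
            return virtio_net_hdr_from_skb(skb, hdr, little_endian,
                                           false /* has_data_valid */,
                                           vlan_hlen);
    }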
@@ -120,7 +120,8 @@ static int raw_form_header(uint8_t *header,
             skb,
             vheader,
             virtio_legacy_is_little_endian(),
-            false
+            false,
+            0
         );

     return 0;
@@ -1142,6 +1142,7 @@ static int bond_option_primary_set(struct bonding *bond,
                     slave->dev->name);
             rcu_assign_pointer(bond->primary_slave, slave);
             strcpy(bond->params.primary, slave->dev->name);
+            bond->force_primary = true;
             bond_select_active_slave(bond);
             goto out;
         }
@@ -267,14 +267,13 @@ static int aq_pci_probe(struct pci_dev *pdev,
     numvecs = min(numvecs, num_online_cpus());
     /*enable interrupts */
 #if !AQ_CFG_FORCE_LEGACY_INT
-    numvecs = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
-                    PCI_IRQ_MSIX | PCI_IRQ_MSI |
-                    PCI_IRQ_LEGACY);
+    err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
+                    PCI_IRQ_MSIX | PCI_IRQ_MSI |
+                    PCI_IRQ_LEGACY);

-    if (numvecs < 0) {
-        err = numvecs;
+    if (err < 0)
         goto err_hwinit;
-    }
+    numvecs = err;
 #endif
     self->irqvecs = numvecs;

@@ -1126,7 +1126,7 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
 {
     struct netdev_notifier_changeupper_info *info = ptr;
     struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-    int ret;
+    int ret = 0;

     if (netif_is_lag_master(dev)) {
         struct net_device *slave;
@@ -33,7 +33,7 @@ config DWMAC_DWC_QOS_ETH
     select PHYLIB
     select CRC32
     select MII
-    depends on OF && HAS_DMA
+    depends on OF && COMMON_CLK && HAS_DMA
     help
       Support for chips using the snps,dwc-qos-ethernet.txt DT binding.

@@ -57,7 +57,7 @@ config DWMAC_ANARION
 config DWMAC_IPQ806X
     tristate "QCA IPQ806x DWMAC support"
     default ARCH_QCOM
-    depends on OF && (ARCH_QCOM || COMPILE_TEST)
+    depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST)
     select MFD_SYSCON
     help
       Support for QCA IPQ806X DWMAC Ethernet.
@@ -100,7 +100,7 @@ config DWMAC_OXNAS
 config DWMAC_ROCKCHIP
     tristate "Rockchip dwmac support"
     default ARCH_ROCKCHIP
-    depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
+    depends on OF && COMMON_CLK && (ARCH_ROCKCHIP || COMPILE_TEST)
     select MFD_SYSCON
     help
       Support for Ethernet controller on Rockchip RK3288 SoC.
@@ -123,7 +123,7 @@ config DWMAC_SOCFPGA
 config DWMAC_STI
     tristate "STi GMAC support"
     default ARCH_STI
-    depends on OF && (ARCH_STI || COMPILE_TEST)
+    depends on OF && COMMON_CLK && (ARCH_STI || COMPILE_TEST)
     select MFD_SYSCON
     ---help---
       Support for ethernet controller on STi SOCs.
@@ -147,7 +147,7 @@ config DWMAC_STM32
 config DWMAC_SUNXI
     tristate "Allwinner GMAC support"
     default ARCH_SUNXI
-    depends on OF && (ARCH_SUNXI || COMPILE_TEST)
+    depends on OF && COMMON_CLK && (ARCH_SUNXI || COMPILE_TEST)
     ---help---
       Support for Allwinner A20/A31 GMAC ethernet controllers.

@@ -297,11 +297,11 @@ static int skfp_init_one(struct pci_dev *pdev,
     return 0;
 err_out5:
     if (smc->os.SharedMemAddr)
-        pci_free_consistent(pdev, smc->os.SharedMemSize,
-                    smc->os.SharedMemAddr,
-                    smc->os.SharedMemDMA);
-    pci_free_consistent(pdev, MAX_FRAME_SIZE,
-                smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
+        dma_free_coherent(&pdev->dev, smc->os.SharedMemSize,
+                  smc->os.SharedMemAddr,
+                  smc->os.SharedMemDMA);
+    dma_free_coherent(&pdev->dev, MAX_FRAME_SIZE,
+              smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
 err_out4:
     free_netdev(dev);
 err_out3:
@@ -328,17 +328,17 @@ static void skfp_remove_one(struct pci_dev *pdev)
     unregister_netdev(p);

     if (lp->os.SharedMemAddr) {
-        pci_free_consistent(&lp->os.pdev,
-                    lp->os.SharedMemSize,
-                    lp->os.SharedMemAddr,
-                    lp->os.SharedMemDMA);
+        dma_free_coherent(&pdev->dev,
+                  lp->os.SharedMemSize,
+                  lp->os.SharedMemAddr,
+                  lp->os.SharedMemDMA);
         lp->os.SharedMemAddr = NULL;
     }
     if (lp->os.LocalRxBuffer) {
-        pci_free_consistent(&lp->os.pdev,
-                    MAX_FRAME_SIZE,
-                    lp->os.LocalRxBuffer,
-                    lp->os.LocalRxBufferDMA);
+        dma_free_coherent(&pdev->dev,
+                  MAX_FRAME_SIZE,
+                  lp->os.LocalRxBuffer,
+                  lp->os.LocalRxBufferDMA);
         lp->os.LocalRxBuffer = NULL;
     }
 #ifdef MEM_MAPPED_IO
@@ -394,7 +394,9 @@ static int skfp_driver_init(struct net_device *dev)
     spin_lock_init(&bp->DriverLock);

     // Allocate invalid frame
-    bp->LocalRxBuffer = pci_alloc_consistent(&bp->pdev, MAX_FRAME_SIZE, &bp->LocalRxBufferDMA);
+    bp->LocalRxBuffer = dma_alloc_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
+                           &bp->LocalRxBufferDMA,
+                           GFP_ATOMIC);
     if (!bp->LocalRxBuffer) {
         printk("could not allocate mem for ");
         printk("LocalRxBuffer: %d byte\n", MAX_FRAME_SIZE);
@@ -407,23 +409,22 @@ static int skfp_driver_init(struct net_device *dev)
     if (bp->SharedMemSize > 0) {
         bp->SharedMemSize += 16;    // for descriptor alignment

-        bp->SharedMemAddr = pci_alloc_consistent(&bp->pdev,
-                             bp->SharedMemSize,
-                             &bp->SharedMemDMA);
+        bp->SharedMemAddr = dma_zalloc_coherent(&bp->pdev.dev,
+                            bp->SharedMemSize,
+                            &bp->SharedMemDMA,
+                            GFP_ATOMIC);
         if (!bp->SharedMemAddr) {
             printk("could not allocate mem for ");
             printk("hardware module: %ld byte\n",
                    bp->SharedMemSize);
             goto fail;
         }
-        bp->SharedMemHeap = 0;    // Nothing used yet.

     } else {
         bp->SharedMemAddr = NULL;
-        bp->SharedMemHeap = 0;
-    }            // SharedMemSize > 0
+    }

-    memset(bp->SharedMemAddr, 0, bp->SharedMemSize);
+    bp->SharedMemHeap = 0;

     card_stop(smc);    // Reset adapter.

@@ -442,15 +443,15 @@ static int skfp_driver_init(struct net_device *dev)

 fail:
     if (bp->SharedMemAddr) {
-        pci_free_consistent(&bp->pdev,
-                    bp->SharedMemSize,
-                    bp->SharedMemAddr,
-                    bp->SharedMemDMA);
+        dma_free_coherent(&bp->pdev.dev,
+                  bp->SharedMemSize,
+                  bp->SharedMemAddr,
+                  bp->SharedMemDMA);
         bp->SharedMemAddr = NULL;
     }
     if (bp->LocalRxBuffer) {
-        pci_free_consistent(&bp->pdev, MAX_FRAME_SIZE,
-                    bp->LocalRxBuffer, bp->LocalRxBufferDMA);
+        dma_free_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
+                  bp->LocalRxBuffer, bp->LocalRxBufferDMA);
         bp->LocalRxBuffer = NULL;
     }
     return err;
@@ -125,8 +125,10 @@ static int netvsc_open(struct net_device *net)
     }

     rdev = nvdev->extension;
-    if (!rdev->link_state)
+    if (!rdev->link_state) {
         netif_carrier_on(net);
+        netif_tx_wake_all_queues(net);
+    }

     if (vf_netdev) {
         /* Setting synthetic device up transparently sets
@@ -74,6 +74,25 @@ static int dp83848_config_intr(struct phy_device *phydev)
     return phy_write(phydev, DP83848_MICR, control);
 }

+static int dp83848_config_init(struct phy_device *phydev)
+{
+    int err;
+    int val;
+
+    err = genphy_config_init(phydev);
+    if (err < 0)
+        return err;
+
+    /* DP83620 always reports Auto Negotiation Ability on BMSR. Instead,
+     * we check initial value of BMCR Auto negotiation enable bit
+     */
+    val = phy_read(phydev, MII_BMCR);
+    if (!(val & BMCR_ANENABLE))
+        phydev->autoneg = AUTONEG_DISABLE;
+
+    return 0;
+}
+
 static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
     { TI_DP83848C_PHY_ID, 0xfffffff0 },
     { NS_DP83848C_PHY_ID, 0xfffffff0 },
@@ -83,7 +102,7 @@ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
 };
 MODULE_DEVICE_TABLE(mdio, dp83848_tbl);

-#define DP83848_PHY_DRIVER(_id, _name) \
+#define DP83848_PHY_DRIVER(_id, _name, _config_init) \
     { \
         .phy_id = _id, \
         .phy_id_mask = 0xfffffff0, \
@@ -92,7 +111,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
         .flags = PHY_HAS_INTERRUPT, \
         \
         .soft_reset = genphy_soft_reset, \
-        .config_init = genphy_config_init, \
+        .config_init = _config_init, \
         .suspend = genphy_suspend, \
         .resume = genphy_resume, \
         \
@@ -102,10 +121,14 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
     }

 static struct phy_driver dp83848_driver[] = {
-    DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
-    DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
-    DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
-    DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
+    DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY",
+               genphy_config_init),
+    DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY",
+               genphy_config_init),
+    DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY",
+               dp83848_config_init),
+    DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY",
+               genphy_config_init),
 };
 module_phy_driver(dp83848_driver);

@@ -774,13 +774,16 @@ static ssize_t tap_put_user(struct tap_queue *q,
     int total;

     if (q->flags & IFF_VNET_HDR) {
+        int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
         struct virtio_net_hdr vnet_hdr;
+
         vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
         if (iov_iter_count(iter) < vnet_hdr_len)
             return -EINVAL;

         if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
-                        tap_is_little_endian(q), true))
+                        tap_is_little_endian(q), true,
+                        vlan_hlen))
             BUG();

         if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
@@ -2089,7 +2089,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
             return -EINVAL;

         if (virtio_net_hdr_from_skb(skb, &gso,
-                        tun_is_little_endian(tun), true)) {
+                        tun_is_little_endian(tun), true,
+                        vlan_hlen)) {
             struct skb_shared_info *sinfo = skb_shinfo(skb);
             pr_err("unexpected GSO type: "
                    "0x%x, gso_size %d, hdr_len %d\n",
@@ -1124,7 +1124,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
      * accordingly. Otherwise, we should check here.
      */
     if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
-        delayed_ndp_size = ctx->max_ndp_size;
+        delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
     else
         delayed_ndp_size = 0;

@@ -1285,7 +1285,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
     /* If requested, put NDP at end of frame. */
     if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
         nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
-        cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size);
+        cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
         nth16->wNdpIndex = cpu_to_le16(skb_out->len);
         skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);

@@ -1411,7 +1411,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
     hdr = skb_vnet_hdr(skb);

     if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
-                    virtio_is_little_endian(vi->vdev), false))
+                    virtio_is_little_endian(vi->vdev), false,
+                    0))
         BUG();

     if (vi->mergeable_rx_bufs)
@@ -58,7 +58,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
                       struct virtio_net_hdr *hdr,
                       bool little_endian,
-                      bool has_data_valid)
+                      bool has_data_valid,
+                      int vlan_hlen)
 {
     memset(hdr, 0, sizeof(*hdr)); /* no info leak */

@@ -83,12 +84,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,

     if (skb->ip_summed == CHECKSUM_PARTIAL) {
         hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-        if (skb_vlan_tag_present(skb))
-            hdr->csum_start = __cpu_to_virtio16(little_endian,
-                skb_checksum_start_offset(skb) + VLAN_HLEN);
-        else
-            hdr->csum_start = __cpu_to_virtio16(little_endian,
-                skb_checksum_start_offset(skb));
+        hdr->csum_start = __cpu_to_virtio16(little_endian,
+            skb_checksum_start_offset(skb) + vlan_hlen);
         hdr->csum_offset = __cpu_to_virtio16(little_endian,
                 skb->csum_offset);
     } else if (has_data_valid &&
@@ -45,8 +45,15 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
               struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
               struct sockcm_cookie *sockc);

-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
-                 __u16 srcp, __u16 destp, int bucket);
+void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+                   __u16 srcp, __u16 destp, int rqueue, int bucket);
+static inline void
+ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp,
+            __u16 destp, int bucket)
+{
+    __ip6_dgram_sock_seq_show(seq, sp, srcp, destp, sk_rmem_alloc_get(sp),
+                  bucket);
+}

 #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)

@@ -247,6 +247,11 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
     return htons((((u64) hash * (max - min)) >> 32) + min);
 }

+static inline int udp_rqueue_get(struct sock *sk)
+{
+    return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
+}
+
 /* net/ipv4/udp.c */
 void udp_destruct_sock(struct sock *sk);
 void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
@@ -63,8 +63,8 @@ struct xdp_statistics {
 /* Pgoff for mmaping the rings */
 #define XDP_PGOFF_RX_RING                  0
 #define XDP_PGOFF_TX_RING         0x80000000
-#define XDP_UMEM_PGOFF_FILL_RING        0x100000000
-#define XDP_UMEM_PGOFF_COMPLETION_RING  0x180000000
+#define XDP_UMEM_PGOFF_FILL_RING        0x100000000ULL
+#define XDP_UMEM_PGOFF_COMPLETION_RING  0x180000000ULL

 /* Rx/Tx descriptor */
 struct xdp_desc {
@@ -1099,9 +1099,31 @@ enum nft_log_attributes {
 #define NFTA_LOG_MAX        (__NFTA_LOG_MAX - 1)

 /**
- * LOGLEVEL_AUDIT - a pseudo log level enabling audit logging
+ * enum nft_log_level - nf_tables log levels
+ *
+ * @NFT_LOGLEVEL_EMERG: system is unusable
+ * @NFT_LOGLEVEL_ALERT: action must be taken immediately
+ * @NFT_LOGLEVEL_CRIT: critical conditions
+ * @NFT_LOGLEVEL_ERR: error conditions
+ * @NFT_LOGLEVEL_WARNING: warning conditions
+ * @NFT_LOGLEVEL_NOTICE: normal but significant condition
+ * @NFT_LOGLEVEL_INFO: informational
+ * @NFT_LOGLEVEL_DEBUG: debug-level messages
+ * @NFT_LOGLEVEL_AUDIT: enabling audit logging
  */
-#define LOGLEVEL_AUDIT        8
+enum nft_log_level {
+    NFT_LOGLEVEL_EMERG,
+    NFT_LOGLEVEL_ALERT,
+    NFT_LOGLEVEL_CRIT,
+    NFT_LOGLEVEL_ERR,
+    NFT_LOGLEVEL_WARNING,
+    NFT_LOGLEVEL_NOTICE,
+    NFT_LOGLEVEL_INFO,
+    NFT_LOGLEVEL_DEBUG,
+    NFT_LOGLEVEL_AUDIT,
+    __NFT_LOGLEVEL_MAX
+};
+#define NFT_LOGLEVEL_MAX    (__NFT_LOGLEVEL_MAX + 1)

 /**
  * enum nft_queue_attributes - nf_tables queue expression netlink attributes
@@ -1617,6 +1617,30 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env,
 }
 #endif

+static int check_ctx_reg(struct bpf_verifier_env *env,
+             const struct bpf_reg_state *reg, int regno)
+{
+    /* Access to ctx or passing it to a helper is only allowed in
+     * its original, unmodified form.
+     */
+
+    if (reg->off) {
+        verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
+            regno, reg->off);
+        return -EACCES;
+    }
+
+    if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
+        char tn_buf[48];
+
+        tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+        verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
+        return -EACCES;
+    }
+
+    return 0;
+}
+
 /* truncate register to smaller size (in bytes)
  * must be called with size < BPF_REG_SIZE
  */
@@ -1686,24 +1710,11 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
             verbose(env, "R%d leaks addr into ctx\n", value_regno);
             return -EACCES;
         }
-        /* ctx accesses must be at a fixed offset, so that we can
-         * determine what type of data were returned.
-         */
-        if (reg->off) {
-            verbose(env,
-                "dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
-                regno, reg->off, off - reg->off);
-            return -EACCES;
-        }
-        if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
-            char tn_buf[48];
-
-            tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
-            verbose(env,
-                "variable ctx access var_off=%s off=%d size=%d",
-                tn_buf, off, size);
-            return -EACCES;
-        }
+        err = check_ctx_reg(env, reg, regno);
+        if (err < 0)
+            return err;

         err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
         if (!err && t == BPF_READ && value_regno >= 0) {
             /* ctx access returns either a scalar, or a
@@ -1984,6 +1995,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
         expected_type = PTR_TO_CTX;
         if (type != expected_type)
             goto err_type;
+        err = check_ctx_reg(env, reg, regno);
+        if (err < 0)
+            return err;
     } else if (arg_type_is_mem_ptr(arg_type)) {
         expected_type = PTR_TO_STACK;
         /* One exception here. In case function allows for NULL to be
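For orientation, the verifier change above centralises the "unmodified ctx only" rule in check_ctx_reg() and now also applies it to helper arguments. A hypothetical restricted-C program of the kind the new check rejects (illustrative only, using modern libbpf-style macros; the real coverage for this patch is the raw-instruction test cases added to test_verifier.c at the end of this diff):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    char _license[] SEC("license") = "GPL";

    SEC("tc")
    int bad_ctx_use(struct __sk_buff *skb)
    {
            /* Offsetting the ctx pointer before handing it to a helper is
             * what check_ctx_reg() now rejects with
             * "dereference of modified ctx ptr".
             */
            void *ctx = (void *)skb + 4;

            bpf_csum_update((struct __sk_buff *)ctx, 0);
            return 0;
    }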
@@ -99,6 +99,7 @@ static int call_usermodehelper_exec_async(void *data)

     commit_creds(new);

+    sub_info->pid = task_pid_nr(current);
     if (sub_info->file)
         retval = do_execve_file(sub_info->file,
                     sub_info->argv, sub_info->envp);
@@ -191,8 +192,6 @@ static void call_usermodehelper_exec_work(struct work_struct *work)
         if (pid < 0) {
             sub_info->retval = pid;
             umh_complete(sub_info);
-        } else {
-            sub_info->pid = pid;
         }
     }
 }
@@ -21,7 +21,7 @@ endif
 # which bpfilter_kern.c passes further into umh blob loader at run-time
 quiet_cmd_copy_umh = GEN $@
       cmd_copy_umh = echo ':' > $(obj)/.bpfilter_umh.o.cmd; \
-      $(OBJCOPY) -I binary -O $(CONFIG_OUTPUT_FORMAT) \
+      $(OBJCOPY) -I binary -O `$(OBJDUMP) -f $<|grep format|cut -d' ' -f8` \
       -B `$(OBJDUMP) -f $<|grep architecture|cut -d, -f1|cut -d' ' -f2` \
       --rename-section .data=.init.rodata $< $@

@@ -24,17 +24,19 @@ static void shutdown_umh(struct umh_info *info)
 {
     struct task_struct *tsk;

+    if (!info->pid)
+        return;
     tsk = pid_task(find_vpid(info->pid), PIDTYPE_PID);
     if (tsk)
         force_sig(SIGKILL, tsk);
     fput(info->pipe_to_umh);
     fput(info->pipe_from_umh);
+    info->pid = 0;
 }

 static void __stop_umh(void)
 {
-    if (IS_ENABLED(CONFIG_INET) &&
-        bpfilter_process_sockopt) {
+    if (IS_ENABLED(CONFIG_INET)) {
         bpfilter_process_sockopt = NULL;
         shutdown_umh(&info);
     }
@@ -55,7 +57,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
     struct mbox_reply reply;
     loff_t pos;
     ssize_t n;
-    int ret;
+    int ret = -EFAULT;

     req.is_set = is_set;
     req.pid = current->pid;
@@ -63,6 +65,8 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
     req.addr = (long)optval;
     req.len = optlen;
     mutex_lock(&bpfilter_lock);
+    if (!info.pid)
+        goto out;
     n = __kernel_write(info.pipe_to_umh, &req, sizeof(req), &pos);
     if (n != sizeof(req)) {
         pr_err("write fail %zd\n", n);
@@ -135,9 +135,11 @@ struct net_device *br_fdb_find_port(const struct net_device *br_dev,
         return NULL;

     br = netdev_priv(br_dev);
-    f = br_fdb_find(br, addr, vid);
+    rcu_read_lock();
+    f = br_fdb_find_rcu(br, addr, vid);
     if (f && f->dst)
         dev = f->dst->dev;
+    rcu_read_unlock();

     return dev;
 }
@@ -261,8 +261,8 @@ static struct net_device *__ip_tunnel_create(struct net *net,
     } else {
         if (strlen(ops->kind) > (IFNAMSIZ - 3))
             goto failed;
-        strlcpy(name, ops->kind, IFNAMSIZ);
-        strncat(name, "%d", 2);
+        strcpy(name, ops->kind);
+        strcat(name, "%d");
     }

     ASSERT_RTNL();
@@ -1694,6 +1694,13 @@ EXPORT_SYMBOL(tcp_peek_len);
 /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
 int tcp_set_rcvlowat(struct sock *sk, int val)
 {
+    int cap;
+
+    if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
+        cap = sk->sk_rcvbuf >> 1;
+    else
+        cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
+    val = min(val, cap);
     sk->sk_rcvlowat = val ? : 1;

     /* Check if we need to signal EPOLLIN right now */
@@ -1702,12 +1709,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
     if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
         return 0;

-    /* val comes from user space and might be close to INT_MAX */
     val <<= 1;
-    if (val < 0)
-        val = INT_MAX;
-
-    val = min(val, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
     if (val > sk->sk_rcvbuf) {
         sk->sk_rcvbuf = val;
         tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val);
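From user space the value clamped above arrives via the standard SO_RCVLOWAT socket option; after this change an oversized request is capped by the receive-buffer limit instead of being stored verbatim. A small usage sketch (plain socket API, nothing specific to this patch):

    #include <stdio.h>
    #include <sys/socket.h>

    /* Ask the kernel to wake readers only once at least `bytes` bytes are
     * queued.  With the fix above, a huge value can no longer leave the
     * socket in a state where poll() never signals EPOLLIN.
     */
    static int set_rcvlowat(int fd, int bytes)
    {
            if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT,
                           &bytes, sizeof(bytes)) < 0) {
                    perror("setsockopt(SO_RCVLOWAT)");
                    return -1;
            }
            return 0;
    }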
@@ -2772,7 +2772,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
         " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
         bucket, src, srcp, dest, destp, sp->sk_state,
         sk_wmem_alloc_get(sp),
-        sk_rmem_alloc_get(sp),
+        udp_rqueue_get(sp),
         0, 0L, 0,
         from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
         0, sock_i_ino(sp),
@@ -163,7 +163,7 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
 static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
         void *info)
 {
-    r->idiag_rqueue = sk_rmem_alloc_get(sk);
+    r->idiag_rqueue = udp_rqueue_get(sk);
     r->idiag_wqueue = sk_wmem_alloc_get(sk);
 }

@@ -1019,8 +1019,8 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
 }
 EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);

-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
-                 __u16 srcp, __u16 destp, int bucket)
+void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+                   __u16 srcp, __u16 destp, int rqueue, int bucket)
 {
     const struct in6_addr *dest, *src;

@@ -1036,7 +1036,7 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
            dest->s6_addr32[2], dest->s6_addr32[3], destp,
            sp->sk_state,
            sk_wmem_alloc_get(sp),
-           sk_rmem_alloc_get(sp),
+           rqueue,
            0, 0L, 0,
            from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
            0,
@@ -1523,7 +1523,8 @@ int udp6_seq_show(struct seq_file *seq, void *v)
         struct inet_sock *inet = inet_sk(v);
         __u16 srcp = ntohs(inet->inet_sport);
         __u16 destp = ntohs(inet->inet_dport);
-        ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
+        __ip6_dgram_sock_seq_show(seq, v, srcp, destp,
+                      udp_rqueue_get(v), bucket);
     }
     return 0;
 }
@@ -111,7 +111,7 @@ static void nft_log_eval(const struct nft_expr *expr,
     const struct nft_log *priv = nft_expr_priv(expr);

     if (priv->loginfo.type == NF_LOG_TYPE_LOG &&
-        priv->loginfo.u.log.level == LOGLEVEL_AUDIT) {
+        priv->loginfo.u.log.level == NFT_LOGLEVEL_AUDIT) {
         nft_log_eval_audit(pkt);
         return;
     }
@@ -166,9 +166,9 @@ static int nft_log_init(const struct nft_ctx *ctx,
             li->u.log.level =
                 ntohl(nla_get_be32(tb[NFTA_LOG_LEVEL]));
         } else {
-            li->u.log.level = LOGLEVEL_WARNING;
+            li->u.log.level = NFT_LOGLEVEL_WARNING;
         }
-        if (li->u.log.level > LOGLEVEL_AUDIT) {
+        if (li->u.log.level > NFT_LOGLEVEL_AUDIT) {
             err = -EINVAL;
             goto err1;
         }
@@ -196,7 +196,7 @@ static int nft_log_init(const struct nft_ctx *ctx,
         break;
     }

-    if (li->u.log.level == LOGLEVEL_AUDIT)
+    if (li->u.log.level == NFT_LOGLEVEL_AUDIT)
         return 0;

     err = nf_logger_find_get(ctx->family, li->type);
@@ -220,7 +220,7 @@ static void nft_log_destroy(const struct nft_ctx *ctx,
     if (priv->prefix != nft_log_null_prefix)
         kfree(priv->prefix);

-    if (li->u.log.level == LOGLEVEL_AUDIT)
+    if (li->u.log.level == NFT_LOGLEVEL_AUDIT)
         return;

     nf_logger_put(ctx->family, li->type);
@@ -2005,7 +2005,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
         return -EINVAL;
     *len -= sizeof(vnet_hdr);

-    if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
+    if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
         return -EINVAL;

     return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
@@ -2272,7 +2272,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
     if (do_vnet) {
         if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
                         sizeof(struct virtio_net_hdr),
-                        vio_le(), true)) {
+                        vio_le(), true, 0)) {
             spin_lock(&sk->sk_receive_queue.lock);
             goto drop_n_account;
         }
@@ -53,22 +53,22 @@ static void tcf_simp_release(struct tc_action *a)
     kfree(d->tcfd_defdata);
 }

-static int alloc_defdata(struct tcf_defact *d, char *defdata)
+static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata)
 {
     d->tcfd_defdata = kzalloc(SIMP_MAX_DATA, GFP_KERNEL);
     if (unlikely(!d->tcfd_defdata))
         return -ENOMEM;
-    strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
+    nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
     return 0;
 }

-static void reset_policy(struct tcf_defact *d, char *defdata,
+static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata,
              struct tc_defact *p)
 {
     spin_lock_bh(&d->tcf_lock);
     d->tcf_action = p->action;
     memset(d->tcfd_defdata, 0, SIMP_MAX_DATA);
-    strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
+    nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
     spin_unlock_bh(&d->tcf_lock);
 }

@@ -87,7 +87,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
     struct tcf_defact *d;
     bool exists = false;
     int ret = 0, err;
-    char *defdata;

     if (nla == NULL)
         return -EINVAL;
@@ -110,8 +109,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
         return -EINVAL;
     }

-    defdata = nla_data(tb[TCA_DEF_DATA]);
-
     if (!exists) {
         ret = tcf_idr_create(tn, parm->index, est, a,
                      &act_simp_ops, bind, false);
@@ -119,7 +116,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
             return ret;

         d = to_defact(*a);
-        ret = alloc_defdata(d, defdata);
+        ret = alloc_defdata(d, tb[TCA_DEF_DATA]);
         if (ret < 0) {
             tcf_idr_release(*a, bind);
             return ret;
@@ -133,7 +130,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
         if (!ovr)
             return -EEXIST;

-        reset_policy(d, defdata, parm);
+        reset_policy(d, tb[TCA_DEF_DATA], parm);
     }

     if (ret == ACT_P_CREATED)
net/socket.c
@@ -541,7 +541,10 @@ static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
     if (!err && (iattr->ia_valid & ATTR_UID)) {
         struct socket *sock = SOCKET_I(d_inode(dentry));

-        sock->sk->sk_uid = iattr->ia_uid;
+        if (sock->sk)
+            sock->sk->sk_uid = iattr->ia_uid;
+        else
+            err = -ENOENT;
     }

     return err;
@@ -590,12 +593,16 @@ EXPORT_SYMBOL(sock_alloc);
  *    an inode not a file.
  */

-void sock_release(struct socket *sock)
+static void __sock_release(struct socket *sock, struct inode *inode)
 {
     if (sock->ops) {
         struct module *owner = sock->ops->owner;

+        if (inode)
+            inode_lock(inode);
         sock->ops->release(sock);
+        if (inode)
+            inode_unlock(inode);
         sock->ops = NULL;
         module_put(owner);
     }
@@ -609,6 +616,11 @@ void sock_release(struct socket *sock)
     }
     sock->file = NULL;
 }
+
+void sock_release(struct socket *sock)
+{
+    __sock_release(sock, NULL);
+}
 EXPORT_SYMBOL(sock_release);

 void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
@@ -1171,7 +1183,7 @@ static int sock_mmap(struct file *file, struct vm_area_struct *vma)

 static int sock_close(struct inode *inode, struct file *filp)
 {
-    sock_release(SOCKET_I(inode));
+    __sock_release(SOCKET_I(inode), inode);
     return 0;
 }

@@ -132,8 +132,10 @@ static void xdp_umem_unpin_pages(struct xdp_umem *umem)

 static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
 {
-    atomic_long_sub(umem->npgs, &umem->user->locked_vm);
-    free_uid(umem->user);
+    if (umem->user) {
+        atomic_long_sub(umem->npgs, &umem->user->locked_vm);
+        free_uid(umem->user);
+    }
 }

 static void xdp_umem_release(struct xdp_umem *umem)
@@ -643,7 +643,7 @@ static int xsk_getsockopt(struct socket *sock, int level, int optname,
 static int xsk_mmap(struct file *file, struct socket *sock,
             struct vm_area_struct *vma)
 {
-    unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+    loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
     unsigned long size = vma->vm_end - vma->vm_start;
     struct xdp_sock *xs = xdp_sk(sock->sk);
     struct xsk_queue *q = NULL;
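The loff_t cast above matters on 32-bit builds: vm_pgoff shifted left by PAGE_SHIFT overflows an unsigned long for the XDP_UMEM_PGOFF_* offsets defined earlier in this series. A standalone user-space illustration (assumes a 4 KiB page size; not kernel code):

    #include <stdio.h>

    int main(void)
    {
            /* vm_pgoff a process would pass to mmap() for the fill ring:
             * XDP_UMEM_PGOFF_FILL_RING (0x100000000ULL) >> PAGE_SHIFT.
             */
            unsigned long pgoff = 0x100000000ULL >> 12;

            unsigned long as_ulong = pgoff << 12;       /* wraps to 0 when long is 32-bit */
            long long as_loff = (long long)pgoff << 12; /* keeps the full 4 GiB offset */

            printf("unsigned long: %#lx, loff_t-style: %#llx\n",
                   as_ulong, as_loff);
            return 0;
    }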
@@ -11,12 +11,24 @@ struct bpf_map_def SEC("maps") cg_ids = {
     .max_entries = 1,
 };

+struct bpf_map_def SEC("maps") pidmap = {
+    .type = BPF_MAP_TYPE_ARRAY,
+    .key_size = sizeof(__u32),
+    .value_size = sizeof(__u32),
+    .max_entries = 1,
+};
+
 SEC("tracepoint/syscalls/sys_enter_nanosleep")
 int trace(void *ctx)
 {
-    __u32 key = 0;
+    __u32 pid = bpf_get_current_pid_tgid();
+    __u32 key = 0, *expected_pid;
     __u64 *val;

+    expected_pid = bpf_map_lookup_elem(&pidmap, &key);
+    if (!expected_pid || *expected_pid != pid)
+        return 0;
+
     val = bpf_map_lookup_elem(&cg_ids, &key);
     if (val)
         *val = bpf_get_current_cgroup_id();
@@ -50,13 +50,13 @@ int main(int argc, char **argv)
     const char *probe_name = "syscalls/sys_enter_nanosleep";
     const char *file = "get_cgroup_id_kern.o";
     int err, bytes, efd, prog_fd, pmu_fd;
+    int cgroup_fd, cgidmap_fd, pidmap_fd;
     struct perf_event_attr attr = {};
-    int cgroup_fd, cgidmap_fd;
     struct bpf_object *obj;
     __u64 kcgid = 0, ucgid;
+    __u32 key = 0, pid;
     int exit_code = 1;
     char buf[256];
-    __u32 key = 0;

     err = setup_cgroup_environment();
     if (CHECK(err, "setup_cgroup_environment", "err %d errno %d\n", err,
@@ -81,6 +81,14 @@ int main(int argc, char **argv)
               cgidmap_fd, errno))
         goto close_prog;

+    pidmap_fd = bpf_find_map(__func__, obj, "pidmap");
+    if (CHECK(pidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
+          pidmap_fd, errno))
+        goto close_prog;
+
+    pid = getpid();
+    bpf_map_update_elem(pidmap_fd, &key, &pid, 0);
+
     snprintf(buf, sizeof(buf),
          "/sys/kernel/debug/tracing/events/%s/id", probe_name);
     efd = open(buf, O_RDONLY, 0);
@@ -8647,7 +8647,7 @@ static struct bpf_test tests[] = {
                     offsetof(struct __sk_buff, mark)),
             BPF_EXIT_INSN(),
         },
-        .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
+        .errstr = "dereference of modified ctx ptr",
         .result = REJECT,
         .prog_type = BPF_PROG_TYPE_SCHED_CLS,
     },
@@ -12258,6 +12258,62 @@ static struct bpf_test tests[] = {
         .result = ACCEPT,
         .retval = 5,
     },
+    {
+        "pass unmodified ctx pointer to helper",
+        .insns = {
+            BPF_MOV64_IMM(BPF_REG_2, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                     BPF_FUNC_csum_update),
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+        .result = ACCEPT,
+    },
+    {
+        "pass modified ctx pointer to helper, 1",
+        .insns = {
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
+            BPF_MOV64_IMM(BPF_REG_2, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                     BPF_FUNC_csum_update),
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+        .result = REJECT,
+        .errstr = "dereference of modified ctx ptr",
+    },
+    {
+        "pass modified ctx pointer to helper, 2",
+        .insns = {
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                     BPF_FUNC_get_socket_cookie),
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .result_unpriv = REJECT,
+        .result = REJECT,
+        .errstr_unpriv = "dereference of modified ctx ptr",
+        .errstr = "dereference of modified ctx ptr",
+    },
+    {
+        "pass modified ctx pointer to helper, 3",
+        .insns = {
+            BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
+            BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
+            BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+            BPF_MOV64_IMM(BPF_REG_2, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                     BPF_FUNC_csum_update),
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+        .result = REJECT,
+        .errstr = "variable ctx access var_off=(0x0; 0x4)",
+    },
 };

 static int probe_filter_length(const struct bpf_insn *fp)