Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix negative error code usage in ATM layer, from Stefan Hajnoczi.

 2) If CONFIG_SYSCTL is disabled, the default TTL is not initialized properly.  From Ezequiel Garcia.

 3) Missing spinlock init in mvneta driver, from Gregory CLEMENT.

 4) Missing unlocks in hwmb error paths, also from Gregory CLEMENT.

 5) Fix deadlock on team->lock when propagating features, from Ivan Vecera.

 6) Work around buffer offset hw bug in alx chips, from Feng Tang.

 7) Fix double listing of SCTP entries in sctp_diag dumps, from Xin Long.

 8) Various statistics bug fixes in mlx4 from Eric Dumazet.

 9) Fix some randconfig build errors wrt fou ipv6 from Arnd Bergmann.

10) All of l2tp was namespace aware, but the ipv6 support code was not doing so.  From Shmulik Ladkani.

11) Handle on-stack hrtimers properly in pktgen, from Guenter Roeck.

12) Propagate MAC changes properly through VLAN devices, from Mike Manning.

13) Fix memory leak in bnx2x_init_one(), from Vitaly Kuznetsov.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (62 commits)
  sfc: Track RPS flow IDs per channel instead of per function
  usbnet: smsc95xx: fix link detection for disabled autonegotiation
  virtio_net: fix virtnet_open and virtnet_probe competing for try_fill_recv
  bnx2x: avoid leaking memory on bnx2x_init_one() failures
  fou: fix IPv6 Kconfig options
  openvswitch: update checksum in {push,pop}_mpls
  sctp: sctp_diag should dump sctp socket type
  net: fec: update dirty_tx even if no skb
  vlan: Propagate MAC address to VLANs
  atm: iphase: off by one in rx_pkt()
  atm: firestream: add more reserved strings
  vxlan: Accept user specified MTU value when create new vxlan link
  net: pktgen: Call destroy_hrtimer_on_stack()
  timer: Export destroy_hrtimer_on_stack()
  net: l2tp: Make l2tp_ip6 namespace aware
  Documentation: ip-sysctl.txt: clarify secure_redirects
  sfc: use flow dissector helpers for aRFS
  ieee802154: fix logic error in ieee802154_llsec_parse_dev_addr
  net: nps_enet: Disable interrupts before napi reschedule
  net/lapb: tuse %*ph to dump buffers
  ...
commit 6b15d6650c
@@ -369,8 +369,6 @@ does not allocate any driver private context space.
 Switch configuration
 --------------------
 
-- priv_size: additional size needed by the switch driver for its private context
-
 - tag_protocol: this is to indicate what kind of tagging protocol is supported,
   should be a valid value from the dsa_tag_protocol enum
 

@@ -416,11 +414,6 @@ PHY devices and link management
   to the switch port MDIO registers. If unavailable return a negative error
   code.
 
-- poll_link: Function invoked by DSA to query the link state of the switch
-  builtin Ethernet PHYs, per port. This function is responsible for calling
-  netif_carrier_{on,off} when appropriate, and can be used to poll all ports in a
-  single call. Executes from workqueue context.
-
 - adjust_link: Function invoked by the PHY library when a slave network device
   is attached to a PHY device. This function is responsible for appropriately
   configuring the switch port link parameters: speed, duplex, pause based on
@@ -542,6 +535,16 @@ Bridge layer
 Bridge VLAN filtering
 ---------------------
 
+- port_vlan_filtering: bridge layer function invoked when the bridge gets
+  configured for turning on or off VLAN filtering. If nothing specific needs to
+  be done at the hardware level, this callback does not need to be implemented.
+  When VLAN filtering is turned on, the hardware must be programmed with
+  rejecting 802.1Q frames which have VLAN IDs outside of the programmed allowed
+  VLAN ID map/rules. If there is no PVID programmed into the switch port,
+  untagged frames must be rejected as well. When turned off the switch must
+  accept any 802.1Q frames irrespective of their VLAN ID, and untagged frames are
+  allowed.
+
 - port_vlan_prepare: bridge layer function invoked when the bridge prepares the
   configuration of a VLAN on the given port. If the operation is not supported
   by the hardware, this function should return -EOPNOTSUPP to inform the bridge
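The port_vlan_filtering contract documented above is easiest to see in code. Below is a minimal hedged sketch of such a callback for a hypothetical driver; the mydsa_* helpers, register names and private-state type are invented for illustration, and the callback signature is assumed from the DSA driver interface of this era (verify against net/dsa.h):

	/* Hedged sketch, not from this patch: a minimal port_vlan_filtering
	 * implementation for an imaginary "mydsa" switch.
	 */
	static int mydsa_port_vlan_filtering(struct dsa_switch *ds, int port,
					     bool vlan_filtering)
	{
		struct mydsa_priv *priv = ds->priv;	/* hypothetical */

		/* Program the 802.1Q mode: in filtering mode the switch must
		 * drop 802.1Q frames whose VID is outside the allowed map and
		 * drop untagged frames when no PVID is set; otherwise it must
		 * accept everything.
		 */
		return mydsa_rmw(priv, MYDSA_PORT_CTRL(port),
				 MYDSA_8021Q_MODE_MASK,
				 vlan_filtering ? MYDSA_8021Q_SECURE :
						  MYDSA_8021Q_DISABLED);
	}

If the hardware needs nothing at this point, the documentation above says the callback can simply be left unimplemented.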
@@ -1036,15 +1036,17 @@ proxy_arp_pvlan - BOOLEAN
 
 shared_media - BOOLEAN
 	Send(router) or accept(host) RFC1620 shared media redirects.
-	Overrides ip_secure_redirects.
+	Overrides secure_redirects.
 	shared_media for the interface will be enabled if at least one of
 	conf/{all,interface}/shared_media is set to TRUE,
 	it will be disabled otherwise
 	default TRUE
 
 secure_redirects - BOOLEAN
-	Accept ICMP redirect messages only for gateways,
-	listed in default gateway list.
+	Accept ICMP redirect messages only to gateways listed in the
+	interface's current gateway list. Even if disabled, RFC1122 redirect
+	rules still apply.
+	Overridden by shared_media.
 	secure_redirects for the interface will be enabled if at least one of
 	conf/{all,interface}/secure_redirects is set to TRUE,
 	it will be disabled otherwise
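Read as code, the clarified text boils down to an OR of the all/interface knobs plus the gateway-list check, with shared_media taking precedence. A hedged userspace model of just those documented rules (helper names invented; this is not the kernel's actual route code):

	#include <stdbool.h>

	/* Model of the documented rule only. */
	static bool secure_redirects_enabled(bool conf_all, bool conf_iface)
	{
		return conf_all || conf_iface;	/* at least one set to TRUE */
	}

	static bool secure_redirects_allows(bool conf_all, bool conf_iface,
					    bool shared_media,
					    bool from_listed_gateway)
	{
		if (shared_media)	/* overrides secure_redirects */
			return true;
		if (secure_redirects_enabled(conf_all, conf_iface))
			return from_listed_gateway;
		return true;	/* RFC1122 rules still apply elsewhere */
	}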
@@ -7989,6 +7989,7 @@ Q:	http://patchwork.ozlabs.org/project/netdev/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
 S:	Odd Fixes
+F:	Documentation/devicetree/bindings/net/
 F:	drivers/net/
 F:	include/linux/if_*
 F:	include/linux/netdevice.h
@@ -181,13 +181,17 @@ static char *res_strings[] = {
 	"reserved 27",
 	"reserved 28",
 	"reserved 29",
-	"reserved 30",
+	"reserved 30", /* FIXME: The strings between 30-40 might be wrong. */
 	"reassembly abort: no buffers",
 	"receive buffer overflow",
 	"change in GFC",
 	"receive buffer full",
 	"low priority discard - no receive descriptor",
 	"low priority discard - missing end of packet",
+	"reserved 37",
+	"reserved 38",
+	"reserved 39",
+	"reseverd 40",
 	"reserved 41",
 	"reserved 42",
 	"reserved 43",
@@ -1128,7 +1128,7 @@ static int rx_pkt(struct atm_dev *dev)
 	/* make the ptr point to the corresponding buffer desc entry */
 	buf_desc_ptr += desc;
 	if (!desc || (desc > iadev->num_rx_desc) ||
-	    ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
+	    ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
 		free_desc(dev, desc);
 		IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
 		return -1;
@@ -141,7 +141,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
 	priv->bus = bus;
 	bus->priv = priv;
 	bus->parent = priv->dev;
-	bus->name = "Synopsys MII Bus",
+	bus->name = "Synopsys MII Bus";
 	bus->read = &arc_mdio_read;
 	bus->write = &arc_mdio_write;
 	bus->reset = &arc_mdio_reset;
@@ -96,6 +96,10 @@ struct alx_priv {
 	unsigned int rx_ringsz;
 	unsigned int rxbuf_size;
 
+	struct page  *rx_page;
+	unsigned int rx_page_offset;
+	unsigned int rx_frag_size;
+
 	struct napi_struct napi;
 	struct alx_tx_queue txq;
 	struct alx_rx_queue rxq;
@@ -70,6 +70,35 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
 	}
 }
 
+static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
+{
+	struct sk_buff *skb;
+	struct page *page;
+
+	if (alx->rx_frag_size > PAGE_SIZE)
+		return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+
+	page = alx->rx_page;
+	if (!page) {
+		alx->rx_page = page = alloc_page(gfp);
+		if (unlikely(!page))
+			return NULL;
+		alx->rx_page_offset = 0;
+	}
+
+	skb = build_skb(page_address(page) + alx->rx_page_offset,
+			alx->rx_frag_size);
+	if (likely(skb)) {
+		alx->rx_page_offset += alx->rx_frag_size;
+		if (alx->rx_page_offset >= PAGE_SIZE)
+			alx->rx_page = NULL;
+		else
+			get_page(page);
+	}
+	return skb;
+}
+
+
 static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 {
 	struct alx_rx_queue *rxq = &alx->rxq;

@@ -86,7 +115,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 	while (!cur_buf->skb && next != rxq->read_idx) {
 		struct alx_rfd *rfd = &rxq->rfd[cur];
 
-		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+		skb = alx_alloc_skb(alx, gfp);
 		if (!skb)
 			break;
 		dma = dma_map_single(&alx->hw.pdev->dev,

@@ -124,6 +153,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
 	}
 
+
 	return count;
 }
 

@@ -592,6 +622,11 @@ static void alx_free_rings(struct alx_priv *alx)
 	kfree(alx->txq.bufs);
 	kfree(alx->rxq.bufs);
 
+	if (alx->rx_page) {
+		put_page(alx->rx_page);
+		alx->rx_page = NULL;
+	}
+
 	dma_free_coherent(&alx->hw.pdev->dev,
 			  alx->descmem.size,
 			  alx->descmem.virt,

@@ -646,6 +681,7 @@ static int alx_request_irq(struct alx_priv *alx)
 				  alx->dev->name, alx);
 		if (!err)
 			goto out;
+
 		/* fall back to legacy interrupt */
 		pci_disable_msi(alx->hw.pdev);
 	}

@@ -689,6 +725,7 @@ static int alx_init_sw(struct alx_priv *alx)
 	struct pci_dev *pdev = alx->hw.pdev;
 	struct alx_hw *hw = &alx->hw;
 	int err;
+	unsigned int head_size;
 
 	err = alx_identify_hw(alx);
 	if (err) {

@@ -704,7 +741,12 @@ static int alx_init_sw(struct alx_priv *alx)
 
 	hw->smb_timer = 400;
 	hw->mtu = alx->dev->mtu;
+
 	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
+	head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	alx->rx_frag_size = roundup_pow_of_two(head_size);
 
 	alx->tx_ringsz = 256;
 	alx->rx_ringsz = 512;
 	hw->imt = 200;
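rx_frag_size decides whether alx_alloc_skb() above carves fixed-size buffers out of a shared page with build_skb() or falls back to __netdev_alloc_skb(). A hedged userspace re-computation for MTU 1500 — the alignment, NET_SKB_PAD and skb_shared_info sizes are assumed typical x86-64 values, and ALX_MAX_FRAME_LEN is approximated from the usual header/FCS/VLAN overhead:

	#include <stdio.h>

	#define ALIGN_TO(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	static unsigned long roundup_pow_of_two(unsigned long n)
	{
		unsigned long p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		unsigned int mtu = 1500;
		/* eth hdr + FCS + VLAN, 8-byte aligned (approximation) */
		unsigned int rxbuf_size = ALIGN_TO(mtu + 14 + 4 + 4, 8);
		unsigned int head_size =
			ALIGN_TO(rxbuf_size + 64, 64) +	/* NET_SKB_PAD */
			ALIGN_TO(320, 64);	/* assumed skb_shared_info */

		printf("rxbuf=%u head=%u frag=%lu\n", rxbuf_size, head_size,
		       roundup_pow_of_two(head_size));	/* -> frag=2048 */
		return 0;
	}

With these assumptions the frag size comes out to 2048, i.e. two RX buffers per 4 KiB page, so the shared-page path is taken; only jumbo MTUs push rx_frag_size past PAGE_SIZE and back onto the fallback allocator.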
@@ -806,6 +848,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
 {
 	struct alx_priv *alx = netdev_priv(netdev);
 	int max_frame = ALX_MAX_FRAME_LEN(mtu);
+	unsigned int head_size;
 
 	if ((max_frame < ALX_MIN_FRAME_SIZE) ||
 	    (max_frame > ALX_MAX_FRAME_SIZE))

@@ -817,6 +860,9 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
 	netdev->mtu = mtu;
 	alx->hw.mtu = mtu;
 	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
+	head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	alx->rx_frag_size = roundup_pow_of_two(head_size);
 	netdev_update_features(netdev);
 	if (netif_running(netdev))
 		alx_reinit(alx);
@@ -13941,14 +13941,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 		bp->doorbells = bnx2x_vf_doorbells(bp);
 		rc = bnx2x_vf_pci_alloc(bp);
 		if (rc)
-			goto init_one_exit;
+			goto init_one_freemem;
 	} else {
 		doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
 		if (doorbell_size > pci_resource_len(pdev, 2)) {
 			dev_err(&bp->pdev->dev,
 				"Cannot map doorbells, bar size too small, aborting\n");
 			rc = -ENOMEM;
-			goto init_one_exit;
+			goto init_one_freemem;
 		}
 		bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
 						doorbell_size);

@@ -13957,19 +13957,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 		dev_err(&bp->pdev->dev,
 			"Cannot map doorbell space, aborting\n");
 		rc = -ENOMEM;
-		goto init_one_exit;
+		goto init_one_freemem;
 	}
 
 	if (IS_VF(bp)) {
 		rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
 		if (rc)
-			goto init_one_exit;
+			goto init_one_freemem;
 	}
 
 	/* Enable SRIOV if capability found in configuration space */
 	rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
 	if (rc)
-		goto init_one_exit;
+		goto init_one_freemem;
 
 	/* calc qm_cid_count */
 	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);

@@ -13988,7 +13988,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 	rc = bnx2x_set_int_mode(bp);
 	if (rc) {
 		dev_err(&pdev->dev, "Cannot set interrupts\n");
-		goto init_one_exit;
+		goto init_one_freemem;
 	}
 	BNX2X_DEV_INFO("set interrupts successfully\n");
 

@@ -13996,7 +13996,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 	rc = register_netdev(dev);
 	if (rc) {
 		dev_err(&pdev->dev, "Cannot register net device\n");
-		goto init_one_exit;
+		goto init_one_freemem;
 	}
 	BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
 

@@ -14029,6 +14029,9 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 
 	return 0;
 
+init_one_freemem:
+	bnx2x_free_mem_bp(bp);
+
 init_one_exit:
 	bnx2x_disable_pcie_error_reporting(bp);
 
@@ -205,8 +205,10 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
 		 * re-adding ourselves to the poll list.
 		 */
 
-		if (priv->tx_skb && !tx_ctrl_ct)
+		if (priv->tx_skb && !tx_ctrl_ct) {
+			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
 			napi_reschedule(napi);
+		}
 	}
 
 	return work_done;
@@ -1197,10 +1197,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 				 fec16_to_cpu(bdp->cbd_datlen),
 				 DMA_TO_DEVICE);
 		bdp->cbd_bufaddr = cpu_to_fec32(0);
-		if (!skb) {
-			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
-			continue;
-		}
+		if (!skb)
+			goto skb_done;
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |

@@ -1239,7 +1237,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
 		/* Free the sk buffer associated with this last transmit */
 		dev_kfree_skb_any(skb);
-
+skb_done:
 		/* Make sure the update to bdp and tx_skbuff are performed
 		 * before dirty_tx
 		 */
@@ -46,7 +46,6 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
 	u32 link_stat = priv->link;
 	struct hnae_handle *h;
 
-	assert(priv && priv->ae_handle);
 	h = priv->ae_handle;
 
 	if (priv->phy) {

@@ -646,8 +645,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
 {
 	struct hns_nic_priv *priv = netdev_priv(net_dev);
 
-	assert(priv);
-
 	strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
 		sizeof(drvinfo->version));
 	drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';

@@ -720,8 +717,6 @@ static int hns_set_pauseparam(struct net_device *net_dev,
 	struct hnae_handle *h;
 	struct hnae_ae_ops *ops;
 
-	assert(priv || priv->ae_handle);
-
 	h = priv->ae_handle;
 	ops = h->dev->ops;
 

@@ -780,8 +775,6 @@ static int hns_set_coalesce(struct net_device *net_dev,
 	struct hnae_ae_ops *ops;
 	int ret;
 
-	assert(priv || priv->ae_handle);
-
 	ops = priv->ae_handle->dev->ops;
 
 	if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)

@@ -1111,8 +1104,6 @@ void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
 	struct hns_nic_priv *priv = netdev_priv(net_dev);
 	struct hnae_ae_ops *ops;
 
-	assert(priv || priv->ae_handle);
-
 	ops = priv->ae_handle->dev->ops;
 
 	cmd->version = HNS_CHIP_VERSION;

@@ -1135,8 +1126,6 @@ static int hns_get_regs_len(struct net_device *net_dev)
 	struct hns_nic_priv *priv = netdev_priv(net_dev);
 	struct hnae_ae_ops *ops;
 
-	assert(priv || priv->ae_handle);
-
 	ops = priv->ae_handle->dev->ops;
 	if (!ops->get_regs_len) {
 		netdev_err(net_dev, "ops->get_regs_len is null!\n");
@@ -189,6 +189,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 		hwbm_pool->construct = mvneta_bm_construct;
 		hwbm_pool->priv = new_pool;
+		spin_lock_init(&hwbm_pool->lock);
 
 		/* Create new pool */
 		err = mvneta_bm_pool_create(priv, new_pool);
@@ -362,7 +362,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
 
 	for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
 		if (bitmap_iterator_test(&it))
-			data[index++] = ((unsigned long *)&priv->stats)[i];
+			data[index++] = ((unsigned long *)&dev->stats)[i];
 
 	for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
 		if (bitmap_iterator_test(&it))

@@ -1296,15 +1296,16 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 }
 
 
-static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 
 	spin_lock_bh(&priv->stats_lock);
-	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
+	netdev_stats_to_stats64(stats, &dev->stats);
 	spin_unlock_bh(&priv->stats_lock);
 
-	return &priv->ret_stats;
+	return stats;
 }
 
 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)

@@ -1876,7 +1877,6 @@ static void mlx4_en_clear_stats(struct net_device *dev)
 	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
 		en_dbg(HW, priv, "Failed dumping statistics\n");
 
-	memset(&priv->stats, 0, sizeof(priv->stats));
 	memset(&priv->pstats, 0, sizeof(priv->pstats));
 	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
 	memset(&priv->port_stats, 0, sizeof(priv->port_stats));

@@ -1892,6 +1892,11 @@ static void mlx4_en_clear_stats(struct net_device *dev)
 		priv->tx_ring[i]->bytes = 0;
 		priv->tx_ring[i]->packets = 0;
 		priv->tx_ring[i]->tx_csum = 0;
+		priv->tx_ring[i]->tx_dropped = 0;
+		priv->tx_ring[i]->queue_stopped = 0;
+		priv->tx_ring[i]->wake_queue = 0;
+		priv->tx_ring[i]->tso_packets = 0;
+		priv->tx_ring[i]->xmit_more = 0;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		priv->rx_ring[i]->bytes = 0;

@@ -2482,7 +2487,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 	.ndo_stop		= mlx4_en_close,
 	.ndo_start_xmit		= mlx4_en_xmit,
 	.ndo_select_queue	= mlx4_en_select_queue,
-	.ndo_get_stats		= mlx4_en_get_stats,
+	.ndo_get_stats64	= mlx4_en_get_stats64,
 	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
 	.ndo_set_mac_address	= mlx4_en_set_mac,
 	.ndo_validate_addr	= eth_validate_addr,

@@ -2514,7 +2519,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
 	.ndo_stop		= mlx4_en_close,
 	.ndo_start_xmit		= mlx4_en_xmit,
 	.ndo_select_queue	= mlx4_en_select_queue,
-	.ndo_get_stats		= mlx4_en_get_stats,
+	.ndo_get_stats64	= mlx4_en_get_stats64,
 	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
 	.ndo_set_mac_address	= mlx4_en_set_mac,
 	.ndo_validate_addr	= eth_validate_addr,
@@ -152,8 +152,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	struct mlx4_counter tmp_counter_stats;
 	struct mlx4_en_stat_out_mbox *mlx4_en_stats;
 	struct mlx4_en_stat_out_flow_control_mbox *flowstats;
-	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
-	struct net_device_stats *stats = &priv->stats;
+	struct net_device *dev = mdev->pndev[port];
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
 	struct mlx4_cmd_mailbox *mailbox;
 	u64 in_mod = reset << 8 | port;
 	int err;

@@ -188,6 +189,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	}
 	stats->tx_packets = 0;
 	stats->tx_bytes = 0;
+	stats->tx_dropped = 0;
 	priv->port_stats.tx_chksum_offload = 0;
 	priv->port_stats.queue_stopped = 0;
 	priv->port_stats.wake_queue = 0;

@@ -199,6 +201,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 
 		stats->tx_packets += ring->packets;
 		stats->tx_bytes += ring->bytes;
+		stats->tx_dropped += ring->tx_dropped;
 		priv->port_stats.tx_chksum_offload += ring->tx_csum;
 		priv->port_stats.queue_stopped += ring->queue_stopped;
 		priv->port_stats.wake_queue += ring->wake_queue;

@@ -237,21 +240,12 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
 					  &mlx4_en_stats->MCAST_prio_1,
 					  NUM_PRIORITIES);
-	stats->collisions = 0;
 	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
 			    sw_rx_dropped;
 	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
-	stats->rx_over_errors = 0;
 	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
-	stats->rx_frame_errors = 0;
 	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
-	stats->rx_missed_errors = 0;
-	stats->tx_aborted_errors = 0;
-	stats->tx_carrier_errors = 0;
-	stats->tx_fifo_errors = 0;
-	stats->tx_heartbeat_errors = 0;
-	stats->tx_window_errors = 0;
-	stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP);
+	stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP);
 
 	/* RX stats */
 	priv->pkstats.rx_multicast_packets = stats->multicast;
@@ -726,12 +726,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	bool inline_ok;
 	u32 ring_cons;
 
-	if (!priv->port_up)
-		goto tx_drop;
-
 	tx_ind = skb_get_queue_mapping(skb);
 	ring = priv->tx_ring[tx_ind];
 
+	if (!priv->port_up)
+		goto tx_drop;
+
 	/* fetch ring->cons far ahead before needing it to avoid stall */
 	ring_cons = ACCESS_ONCE(ring->cons);
 

@@ -1030,7 +1030,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
 tx_drop:
 	dev_kfree_skb_any(skb);
-	priv->stats.tx_dropped++;
+	ring->tx_dropped++;
 	return NETDEV_TX_OK;
 }
 
@@ -270,6 +270,7 @@ struct mlx4_en_tx_ring {
 	unsigned long tx_csum;
 	unsigned long tso_packets;
 	unsigned long xmit_more;
+	unsigned int tx_dropped;
 	struct mlx4_bf bf;
 	unsigned long queue_stopped;
 

@@ -482,8 +483,6 @@ struct mlx4_en_priv {
 	struct mlx4_en_port_profile *prof;
 	struct net_device *dev;
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-	struct net_device_stats stats;
-	struct net_device_stats ret_stats;
 	struct mlx4_en_port_state port_state;
 	spinlock_t stats_lock;
 	struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
@@ -192,9 +192,10 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
 		     struct dcbx_app_priority_entry *p_tbl,
 		     u32 pri_tc_tbl, int count, bool dcbx_enabled)
 {
-	u8 tc, priority, priority_map;
+	u8 tc, priority_map;
 	enum dcbx_protocol_type type;
 	u16 protocol_id;
+	int priority;
 	bool enable;
 	int i;
 

@@ -221,7 +222,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
 			 * indication, but we only got here if there was an
 			 * app tlv for the protocol, so dcbx must be enabled.
 			 */
-			enable = !!(type == DCBX_PROTOCOL_ETH);
+			enable = !(type == DCBX_PROTOCOL_ETH);
 
 			qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
 						 priority, tc, type);
@@ -155,12 +155,14 @@ void qed_resc_free(struct qed_dev *cdev)
 	}
 }
 
-static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
 {
 	u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 	struct init_qm_port_params *p_qm_port;
 	u16 num_pqs, multi_cos_tcs = 1;
+	u8 pf_wfq = qm_info->pf_wfq;
+	u32 pf_rl = qm_info->pf_rl;
 	u16 num_vfs = 0;
 
 #ifdef CONFIG_QED_SRIOV

@@ -182,23 +184,28 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
 
 	/* PQs will be arranged as follows: First per-TC PQ then pure-LB quete.
 	 */
-	qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
-					num_pqs, GFP_KERNEL);
+	qm_info->qm_pq_params = kcalloc(num_pqs,
+					sizeof(struct init_qm_pq_params),
+					b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
 	if (!qm_info->qm_pq_params)
 		goto alloc_err;
 
-	qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
-					   num_vports, GFP_KERNEL);
+	qm_info->qm_vport_params = kcalloc(num_vports,
+					   sizeof(struct init_qm_vport_params),
+					   b_sleepable ? GFP_KERNEL
+						       : GFP_ATOMIC);
 	if (!qm_info->qm_vport_params)
 		goto alloc_err;
 
-	qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
-					  MAX_NUM_PORTS, GFP_KERNEL);
+	qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
+					  sizeof(struct init_qm_port_params),
+					  b_sleepable ? GFP_KERNEL
+						      : GFP_ATOMIC);
 	if (!qm_info->qm_port_params)
 		goto alloc_err;
 
-	qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
-				    GFP_KERNEL);
+	qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
+				    b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
 	if (!qm_info->wfq_data)
 		goto alloc_err;
 
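The new b_sleepable flag exists because one caller of qed_init_qm_info() runs in a context that must not sleep while the other may; the two call-site hunks below pass false (qed_qm_reconf) and true (qed_resc_alloc) respectively. A hedged sketch of the pattern, with an invented helper name:

	#include <linux/slab.h>

	/* Illustrative only: choose the GFP class from caller context. */
	static void *qm_alloc_table(size_t n, size_t entry_sz, bool b_sleepable)
	{
		return kcalloc(n, entry_sz,
			       b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
	}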
@@ -264,10 +271,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
 	for (i = 0; i < qm_info->num_vports; i++)
 		qm_info->qm_vport_params[i].vport_wfq = 1;
 
-	qm_info->pf_wfq = 0;
-	qm_info->pf_rl = 0;
 	qm_info->vport_rl_en = 1;
 	qm_info->vport_wfq_en = 1;
+	qm_info->pf_rl = pf_rl;
+	qm_info->pf_wfq = pf_wfq;
 
 	return 0;
 

@@ -299,7 +306,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	qed_qm_info_free(p_hwfn);
 
 	/* initialize qed's qm data structure */
-	rc = qed_init_qm_info(p_hwfn);
+	rc = qed_init_qm_info(p_hwfn, false);
 	if (rc)
 		return rc;
 

@@ -388,7 +395,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
 			goto alloc_err;
 
 		/* Prepare and process QM requirements */
-		rc = qed_init_qm_info(p_hwfn);
+		rc = qed_init_qm_info(p_hwfn, true);
 		if (rc)
 			goto alloc_err;
 

@@ -581,7 +588,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
 
 	hw_mode |= 1 << MODE_ASIC;
 
+	if (p_hwfn->cdev->num_hwfns > 1)
+		hw_mode |= 1 << MODE_100G;
+
 	p_hwfn->hw_info.hw_mode = hw_mode;
+
+	DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
+		   "Configuring function for hw_mode: 0x%08x\n",
+		   p_hwfn->hw_info.hw_mode);
 }
 
 /* Init run time data for all PFs on an engine. */

@@ -821,6 +835,11 @@ int qed_hw_init(struct qed_dev *cdev,
 	u32 load_code, param;
 	int rc, mfw_rc, i;
 
+	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+		return -EINVAL;
+	}
+
 	if (IS_PF(cdev)) {
 		rc = qed_init_fw_data(cdev, bin_fw_data);
 		if (rc != 0)

@@ -2086,6 +2105,13 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
 {
 	int i;
 
+	if (cdev->num_hwfns > 1) {
+		DP_VERBOSE(cdev,
+			   NETIF_MSG_LINK,
+			   "WFQ configuration is not supported for this device\n");
+		return;
+	}
+
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
@@ -413,15 +413,17 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
 		/* Fallthrough */
 
 	case QED_INT_MODE_MSI:
-		rc = pci_enable_msi(cdev->pdev);
-		if (!rc) {
-			int_params->out.int_mode = QED_INT_MODE_MSI;
-			goto out;
-		}
+		if (cdev->num_hwfns == 1) {
+			rc = pci_enable_msi(cdev->pdev);
+			if (!rc) {
+				int_params->out.int_mode = QED_INT_MODE_MSI;
+				goto out;
+			}
 
-		DP_NOTICE(cdev, "Failed to enable MSI\n");
-		if (force_mode)
-			goto out;
+			DP_NOTICE(cdev, "Failed to enable MSI\n");
+			if (force_mode)
+				goto out;
+		}
 		/* Fallthrough */
 
 	case QED_INT_MODE_INTA:
@@ -230,7 +230,10 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
 	case ETH_SS_PRIV_FLAGS:
 		return QEDE_PRI_FLAG_LEN;
 	case ETH_SS_TEST:
-		return QEDE_ETHTOOL_TEST_MAX;
+		if (!IS_VF(edev))
+			return QEDE_ETHTOOL_TEST_MAX;
+		else
+			return 0;
 	default:
 		DP_VERBOSE(edev, QED_MSG_DEBUG,
 			   "Unsupported stringset 0x%08x\n", stringset);
@@ -1824,7 +1824,7 @@ static int qede_set_vf_rate(struct net_device *dev, int vfidx,
 {
 	struct qede_dev *edev = netdev_priv(dev);
 
-	return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate,
+	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
 					max_tx_rate);
 }
 
@@ -2091,6 +2091,29 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
 	edev->accept_any_vlan = false;
 }
 
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	netdev_features_t changes = features ^ dev->features;
+	bool need_reload = false;
+
+	/* No action needed if hardware GRO is disabled during driver load */
+	if (changes & NETIF_F_GRO) {
+		if (dev->features & NETIF_F_GRO)
+			need_reload = !edev->gro_disable;
+		else
+			need_reload = edev->gro_disable;
+	}
+
+	if (need_reload && netif_running(edev->ndev)) {
+		dev->features = features;
+		qede_reload(edev, NULL, NULL);
+		return 1;
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_QEDE_VXLAN
 static void qede_add_vxlan_port(struct net_device *dev,
 				sa_family_t sa_family, __be16 port)

@@ -2175,6 +2198,7 @@ static const struct net_device_ops qede_netdev_ops = {
 #endif
 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+	.ndo_set_features = qede_set_features,
 	.ndo_get_stats64 = qede_get_stats64,
 #ifdef CONFIG_QED_SRIOV
 	.ndo_set_vf_link_state = qede_set_vf_link_state,
@@ -4846,7 +4846,6 @@ static void ql_eeh_close(struct net_device *ndev)
 	}
 
 	/* Disabling the timer */
-	del_timer_sync(&qdev->timer);
 	ql_cancel_all_work_sync(qdev);
 
 	for (i = 0; i < qdev->rss_ring_count; i++)

@@ -4873,6 +4872,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
 		return PCI_ERS_RESULT_CAN_RECOVER;
 	case pci_channel_io_frozen:
 		netif_device_detach(ndev);
+		del_timer_sync(&qdev->timer);
 		if (netif_running(ndev))
 			ql_eeh_close(ndev);
 		pci_disable_device(pdev);

@@ -4880,6 +4880,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
 	case pci_channel_io_perm_failure:
 		dev_err(&pdev->dev,
 			"%s: pci_channel_io_perm_failure.\n", __func__);
+		del_timer_sync(&qdev->timer);
 		ql_eeh_close(ndev);
 		set_bit(QL_EEH_FATAL, &qdev->flags);
 		return PCI_ERS_RESULT_DISCONNECT;
@@ -619,6 +619,17 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
 	return rc;
 }
 
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+
+	/* All our existing PIO buffers went away */
+	efx_for_each_channel(channel, efx)
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			tx_queue->piobuf = NULL;
+}
+
 #else /* !EFX_USE_PIO */
 
 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)

@@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
 {
 }
 
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+}
+
 #endif /* EFX_USE_PIO */
 
 static void efx_ef10_remove(struct efx_nic *efx)

@@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
 	nic_data->must_realloc_vis = true;
 	nic_data->must_restore_filters = true;
 	nic_data->must_restore_piobufs = true;
+	efx_ef10_forget_old_piobufs(efx);
 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 
 	/* Driver-created vswitches and vports must be re-created */
@@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx)
 
 #ifdef CONFIG_RFS_ACCEL
 	if (efx->type->offload_features & NETIF_F_NTUPLE) {
-		efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
-					   sizeof(*efx->rps_flow_id),
-					   GFP_KERNEL);
-		if (!efx->rps_flow_id) {
+		struct efx_channel *channel;
+		int i, success = 1;
+
+		efx_for_each_channel(channel, efx) {
+			channel->rps_flow_id =
+				kcalloc(efx->type->max_rx_ip_filters,
+					sizeof(*channel->rps_flow_id),
+					GFP_KERNEL);
+			if (!channel->rps_flow_id)
+				success = 0;
+			else
+				for (i = 0;
+				     i < efx->type->max_rx_ip_filters;
+				     ++i)
+					channel->rps_flow_id[i] =
+						RPS_FLOW_ID_INVALID;
+		}
+
+		if (!success) {
+			efx_for_each_channel(channel, efx)
+				kfree(channel->rps_flow_id);
 			efx->type->filter_table_remove(efx);
 			rc = -ENOMEM;
 			goto out_unlock;
 		}
+
+		efx->rps_expire_index = efx->rps_expire_channel = 0;
 	}
 #endif
 out_unlock:

@@ -1744,7 +1763,10 @@ static int efx_probe_filters(struct efx_nic *efx)
 static void efx_remove_filters(struct efx_nic *efx)
 {
 #ifdef CONFIG_RFS_ACCEL
-	kfree(efx->rps_flow_id);
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx)
+		kfree(channel->rps_flow_id);
 #endif
 	down_write(&efx->filter_sem);
 	efx->type->filter_table_remove(efx);
@@ -403,6 +403,8 @@ enum efx_sync_events_state {
  * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ *      indexed by filter ID
  * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
  * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
  * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors

@@ -446,6 +448,8 @@ struct efx_channel {
 	unsigned int irq_mod_score;
 #ifdef CONFIG_RFS_ACCEL
 	unsigned int rfs_filters_added;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+	u32 *rps_flow_id;
 #endif
 
 	unsigned n_rx_tobe_disc;

@@ -889,9 +893,9 @@ struct vfdi_status;
  * @filter_sem: Filter table rw_semaphore, for freeing the table
  * @filter_lock: Filter table lock, for mere content changes
  * @filter_state: Architecture-dependent filter table state
- * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
- *      indexed by filter ID
- * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @rps_expire_channel: Next channel to check for expiry
+ * @rps_expire_index: Next index to check for expiry in
+ *      @rps_expire_channel's @rps_flow_id
  * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
  * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
  *	Decremented when the efx_flush_rx_queue() is called.

@@ -1035,7 +1039,7 @@ struct efx_nic {
 	spinlock_t filter_lock;
 	void *filter_state;
 #ifdef CONFIG_RFS_ACCEL
-	u32 *rps_flow_id;
+	unsigned int rps_expire_channel;
 	unsigned int rps_expire_index;
 #endif
 
@@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_channel *channel;
 	struct efx_filter_spec spec;
-	const __be16 *ports;
-	__be16 ether_type;
-	int nhoff;
+	struct flow_keys fk;
 	int rc;
 
-	/* The core RPS/RFS code has already parsed and validated
-	 * VLAN, IP and transport headers.  We assume they are in the
-	 * header area.
-	 */
-
-	if (skb->protocol == htons(ETH_P_8021Q)) {
-		const struct vlan_hdr *vh =
-			(const struct vlan_hdr *)skb->data;
+	if (flow_id == RPS_FLOW_ID_INVALID)
+		return -EINVAL;
 
-		/* We can't filter on the IP 5-tuple and the vlan
-		 * together, so just strip the vlan header and filter
-		 * on the IP part.
-		 */
-		EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
-		ether_type = vh->h_vlan_encapsulated_proto;
-		nhoff = sizeof(struct vlan_hdr);
-	} else {
-		ether_type = skb->protocol;
-		nhoff = 0;
-	}
+	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+		return -EPROTONOSUPPORT;
 
-	if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
+	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
+		return -EPROTONOSUPPORT;
+	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
 		return -EPROTONOSUPPORT;
 
 	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,

@@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 			   EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
 			   EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
 			   EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
-	spec.ether_type = ether_type;
+	spec.ether_type = fk.basic.n_proto;
+	spec.ip_proto = fk.basic.ip_proto;
 
-	if (ether_type == htons(ETH_P_IP)) {
-		const struct iphdr *ip =
-			(const struct iphdr *)(skb->data + nhoff);
-
-		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
-		if (ip_is_fragment(ip))
-			return -EPROTONOSUPPORT;
-		spec.ip_proto = ip->protocol;
-		spec.rem_host[0] = ip->saddr;
-		spec.loc_host[0] = ip->daddr;
-		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
-		ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+	if (fk.basic.n_proto == htons(ETH_P_IP)) {
+		spec.rem_host[0] = fk.addrs.v4addrs.src;
+		spec.loc_host[0] = fk.addrs.v4addrs.dst;
 	} else {
-		const struct ipv6hdr *ip6 =
-			(const struct ipv6hdr *)(skb->data + nhoff);
-
-		EFX_BUG_ON_PARANOID(skb_headlen(skb) <
-				    nhoff + sizeof(*ip6) + 4);
-		spec.ip_proto = ip6->nexthdr;
-		memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
-		memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
-		ports = (const __be16 *)(ip6 + 1);
+		memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
+		memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
 	}
 
-	spec.rem_port = ports[0];
-	spec.loc_port = ports[1];
+	spec.rem_port = fk.ports.src;
+	spec.loc_port = fk.ports.dst;
 
 	rc = efx->type->filter_rfs_insert(efx, &spec);
 	if (rc < 0)
 		return rc;
 
 	/* Remember this so we can check whether to expire the filter later */
-	efx->rps_flow_id[rc] = flow_id;
-	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+	channel = efx_get_channel(efx, rxq_index);
+	channel->rps_flow_id[rc] = flow_id;
 	++channel->rfs_filters_added;
 
-	if (ether_type == htons(ETH_P_IP))
+	if (spec.ether_type == htons(ETH_P_IP))
 		netif_info(efx, rx_status, efx->net_dev,
 			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
 			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
-			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
-			   ntohs(ports[1]), rxq_index, flow_id, rc);
+			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+			   ntohs(spec.loc_port), rxq_index, flow_id, rc);
 	else
 		netif_info(efx, rx_status, efx->net_dev,
 			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
 			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
-			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
-			   ntohs(ports[1]), rxq_index, flow_id, rc);
+			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+			   ntohs(spec.loc_port), rxq_index, flow_id, rc);
 
 	return rc;
 }

@@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
 {
 	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
-	unsigned int index, size;
+	unsigned int channel_idx, index, size;
 	u32 flow_id;
 
 	if (!spin_trylock_bh(&efx->filter_lock))
 		return false;
 
 	expire_one = efx->type->filter_rfs_expire_one;
+	channel_idx = efx->rps_expire_channel;
 	index = efx->rps_expire_index;
 	size = efx->type->max_rx_ip_filters;
 	while (quota--) {
-		flow_id = efx->rps_flow_id[index];
-		if (expire_one(efx, flow_id, index))
+		struct efx_channel *channel = efx_get_channel(efx, channel_idx);
+		flow_id = channel->rps_flow_id[index];
+
+		if (flow_id != RPS_FLOW_ID_INVALID &&
+		    expire_one(efx, flow_id, index)) {
 			netif_info(efx, rx_status, efx->net_dev,
-				   "expired filter %d [flow %u]\n",
-				   index, flow_id);
-		if (++index == size)
+				   "expired filter %d [queue %u flow %u]\n",
+				   index, channel_idx, flow_id);
+			channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+		}
+		if (++index == size) {
+			if (++channel_idx == efx->n_channels)
+				channel_idx = 0;
 			index = 0;
+		}
 	}
+	efx->rps_expire_channel = channel_idx;
 	efx->rps_expire_index = index;
 
 	spin_unlock_bh(&efx->filter_lock);
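The expiry path above now walks a two-level (channel, index) cursor that wraps both counters, so successive quota-bounded calls resume where the last one stopped. A hedged standalone model of just that rotation (constants invented for the demo):

	#include <stdio.h>

	#define N_CHANNELS	4
	#define TABLE_SIZE	8

	int main(void)
	{
		unsigned int channel = 0, index = 0, quota = 20;

		while (quota--) {
			printf("scan channel %u slot %u\n", channel, index);
			if (++index == TABLE_SIZE) {	/* wrap slot... */
				if (++channel == N_CHANNELS)
					channel = 0;	/* ...and channel */
				index = 0;
			}
		}
		return 0;
	}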
@@ -297,7 +297,7 @@ int stmmac_mdio_register(struct net_device *ndev)
 		return -ENOMEM;
 
 	if (mdio_bus_data->irqs)
-		memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq));
+		memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq));
 
 #ifdef CONFIG_OF
 	if (priv->device->of_node)
@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
|
|||
#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
|
||||
NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
|
||||
|
||||
static void __team_compute_features(struct team *team)
|
||||
static void ___team_compute_features(struct team *team)
|
||||
{
|
||||
struct team_port *port;
|
||||
u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
|
||||
|
@ -1021,15 +1021,20 @@ static void __team_compute_features(struct team *team)
|
|||
team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
|
||||
if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
|
||||
team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
|
||||
}
|
||||
|
||||
static void __team_compute_features(struct team *team)
|
||||
{
|
||||
___team_compute_features(team);
|
||||
netdev_change_features(team->dev);
|
||||
}
|
||||
|
||||
static void team_compute_features(struct team *team)
|
||||
{
|
||||
mutex_lock(&team->lock);
|
||||
__team_compute_features(team);
|
||||
___team_compute_features(team);
|
||||
mutex_unlock(&team->lock);
|
||||
netdev_change_features(team->dev);
|
||||
}
|
||||
|
||||
static int team_port_enter(struct team *team, struct team_port *port)
|
||||
|
|
|
@@ -473,7 +473,7 @@ static void read_bulk_callback(struct urb *urb)
 		goto goon;
 	}
 
-	if (!count || count < 4)
+	if (count < 4)
 		goto goon;
 
 	rx_status = buf[count - 2];
@@ -61,6 +61,8 @@
 #define SUSPEND_ALLMODES		(SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
 					 SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
 
+#define CARRIER_CHECK_DELAY (2 * HZ)
+
 struct smsc95xx_priv {
 	u32 mac_cr;
 	u32 hash_hi;

@@ -69,6 +71,9 @@ struct smsc95xx_priv {
 	spinlock_t mac_cr_lock;
 	u8 features;
 	u8 suspend_flags;
+	bool link_ok;
+	struct delayed_work carrier_check;
+	struct usbnet *dev;
 };
 
 static bool turbo_mode = true;

@@ -624,6 +629,44 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
 			    intdata);
 }
 
+static void set_carrier(struct usbnet *dev, bool link)
+{
+	struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+	if (pdata->link_ok == link)
+		return;
+
+	pdata->link_ok = link;
+
+	if (link)
+		usbnet_link_change(dev, 1, 0);
+	else
+		usbnet_link_change(dev, 0, 0);
+}
+
+static void check_carrier(struct work_struct *work)
+{
+	struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv,
+						   carrier_check.work);
+	struct usbnet *dev = pdata->dev;
+	int ret;
+
+	if (pdata->suspend_flags != 0)
+		return;
+
+	ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR);
+	if (ret < 0) {
+		netdev_warn(dev->net, "Failed to read MII_BMSR\n");
+		return;
+	}
+	if (ret & BMSR_LSTATUS)
+		set_carrier(dev, 1);
+	else
+		set_carrier(dev, 0);
+
+	schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+}
+
 /* Enable or disable Tx & Rx checksum offload engines */
 static int smsc95xx_set_features(struct net_device *netdev,
 				 netdev_features_t features)

@@ -1165,13 +1208,20 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
 	dev->net->flags |= IFF_MULTICAST;
 	dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+	pdata->dev = dev;
+	INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier);
+	schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+
 	return 0;
 }
 
 static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
 	struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
 
 	if (pdata) {
+		cancel_delayed_work(&pdata->carrier_check);
 		netif_dbg(dev, ifdown, dev->net, "free pdata\n");
 		kfree(pdata);
 		pdata = NULL;

@@ -1695,6 +1745,7 @@ static int smsc95xx_resume(struct usb_interface *intf)
 
 	/* do this first to ensure it's cleared even in error case */
 	pdata->suspend_flags = 0;
+	schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
 
 	if (suspend_flags & SUSPEND_ALLMODES) {
 		/* clear wake-up sources */
@@ -1925,24 +1925,11 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 	virtio_device_ready(vdev);
 
-	/* Last of all, set up some receive buffers. */
-	for (i = 0; i < vi->curr_queue_pairs; i++) {
-		try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
-
-		/* If we didn't even get one input buffer, we're useless. */
-		if (vi->rq[i].vq->num_free ==
-		    virtqueue_get_vring_size(vi->rq[i].vq)) {
-			free_unused_bufs(vi);
-			err = -ENOMEM;
-			goto free_recv_bufs;
-		}
-	}
-
 	vi->nb.notifier_call = &virtnet_cpu_callback;
 	err = register_hotcpu_notifier(&vi->nb);
 	if (err) {
 		pr_debug("virtio_net: registering cpu notifier failed\n");
-		goto free_recv_bufs;
+		goto free_unregister_netdev;
 	}
 
 	/* Assume link up if device can't report link status,

@@ -1960,10 +1947,9 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 	return 0;
 
-free_recv_bufs:
+free_unregister_netdev:
 	vi->vdev->config->reset(vdev);
 
-	free_receive_bufs(vi);
 	unregister_netdev(dev);
 free_vqs:
 	cancel_delayed_work_sync(&vi->refill);
@@ -3086,6 +3086,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
 	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
 		conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
 
+	if (tb[IFLA_MTU])
+		conf.mtu = nla_get_u32(tb[IFLA_MTU]);
+
 	err = vxlan_dev_configure(src_net, dev, &conf);
 	switch (err) {
 	case -ENODEV:
@@ -208,14 +208,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 		break;
 
 	case PTP_SYS_OFFSET:
-		sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL);
-		if (!sysoff) {
-			err = -ENOMEM;
-			break;
-		}
-		if (copy_from_user(sysoff, (void __user *)arg,
-				   sizeof(*sysoff))) {
-			err = -EFAULT;
+		sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
+		if (IS_ERR(sysoff)) {
+			err = PTR_ERR(sysoff);
+			sysoff = NULL;
 			break;
 		}
 		if (sysoff->n_samples > PTP_MAX_SAMPLES) {
@@ -765,6 +765,8 @@ struct sctp_info {
 	__u8	sctpi_s_disable_fragments;
 	__u8	sctpi_s_v4mapped;
 	__u8	sctpi_s_frag_interleave;
+	__u32	sctpi_s_type;
+	__u32	__reserved3;
 };
 
 struct sctp_infox {
@@ -63,6 +63,8 @@ struct ip6_tnl_encap_ops {
 			    u8 *protocol, struct flowi6 *fl6);
 };
 
+#ifdef CONFIG_INET
+
 extern const struct ip6_tnl_encap_ops __rcu *
 		ip6tun_encaps[MAX_IPTUN_ENCAP_OPS];
 

@@ -138,7 +140,6 @@ struct net *ip6_tnl_get_link_net(const struct net_device *dev);
 int ip6_tnl_get_iflink(const struct net_device *dev);
 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
 
-#ifdef CONFIG_INET
 static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
 				  struct net_device *dev)
 {
@@ -61,6 +61,7 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
 }
 
 struct qdisc_watchdog {
+	u64		last_expires;
 	struct hrtimer	timer;
 	struct Qdisc	*qdisc;
 };
@@ -1353,6 +1353,15 @@ enum ethtool_link_mode_bit_indices {
 	ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT	= 28,
 	ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT	= 29,
 	ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT	= 30,
+	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT	= 31,
+	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT	= 32,
+	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT	= 33,
+	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT	= 34,
+	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT	= 35,
+	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT	= 36,
+	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT	= 37,
+	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT	= 38,
+	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT	= 39,
 
 	/* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
 	 * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*

@@ -1361,7 +1370,7 @@ enum ethtool_link_mode_bit_indices {
 	 */
 
 	__ETHTOOL_LINK_MODE_LAST
-	  = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+	  = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
 };
 
 #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name)	\
@@ -145,6 +145,8 @@ enum {
 	TCA_POLICE_PEAKRATE,
 	TCA_POLICE_AVRATE,
 	TCA_POLICE_RESULT,
+	TCA_POLICE_TM,
+	TCA_POLICE_PAD,
 	__TCA_POLICE_MAX
 #define TCA_POLICE_RESULT TCA_POLICE_RESULT
 };

@@ -173,7 +175,7 @@ enum {
 	TCA_U32_DIVISOR,
 	TCA_U32_SEL,
 	TCA_U32_POLICE,
-	TCA_U32_ACT,
+	TCA_U32_ACT,
 	TCA_U32_INDEV,
 	TCA_U32_PCNT,
 	TCA_U32_MARK,
@@ -365,7 +365,6 @@ static struct file_system_type bpf_fs_type = {
 	.name		= "bpf",
 	.mount		= bpf_mount,
 	.kill_sb	= kill_litter_super,
-	.fs_flags	= FS_USERNS_MOUNT,
 };
 
 MODULE_ALIAS_FS("bpf");
@@ -425,6 +425,7 @@ void destroy_hrtimer_on_stack(struct hrtimer *timer)
 {
 	debug_object_free(timer, &hrtimer_debug_descr);
 }
+EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
 
 #else
 static inline void debug_hrtimer_init(struct hrtimer *timer) { }
@@ -290,6 +290,10 @@ static void vlan_sync_address(struct net_device *dev,
 	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
 		return;
 
+	/* vlan continues to inherit address of lower device */
+	if (vlan_dev_inherit_address(vlandev, dev))
+		goto out;
+
 	/* vlan address was different from the old address and is equal to
 	 * the new address */
 	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&

@@ -302,6 +306,7 @@ static void vlan_sync_address(struct net_device *dev,
 	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
 		dev_uc_add(dev, vlandev->dev_addr);
 
+out:
 	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
 }
 

@@ -109,6 +109,8 @@ int vlan_check_real_dev(struct net_device *real_dev,
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+bool vlan_dev_inherit_address(struct net_device *dev,
+			      struct net_device *real_dev);
 
 static inline u32 vlan_get_ingress_priority(struct net_device *dev,
 					    u16 vlan_tci)
@@ -245,6 +245,17 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
 	strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
 }
 
+bool vlan_dev_inherit_address(struct net_device *dev,
+			      struct net_device *real_dev)
+{
+	if (dev->addr_assign_type != NET_ADDR_STOLEN)
+		return false;
+
+	ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	return true;
+}
+
 static int vlan_dev_open(struct net_device *dev)
 {
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);

@@ -255,7 +266,8 @@ static int vlan_dev_open(struct net_device *dev)
 	    !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
 		return -ENETDOWN;
 
-	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
+	    !vlan_dev_inherit_address(dev, real_dev)) {
 		err = dev_uc_add(real_dev, dev->dev_addr);
 		if (err < 0)
 			goto out;

@@ -560,8 +572,10 @@ static int vlan_dev_init(struct net_device *dev)
 	/* ipv6 shared card related stuff */
 	dev->dev_id = real_dev->dev_id;
 
-	if (is_zero_ether_addr(dev->dev_addr))
-		eth_hw_addr_inherit(dev, real_dev);
+	if (is_zero_ether_addr(dev->dev_addr)) {
+		ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+		dev->addr_assign_type = NET_ADDR_STOLEN;
+	}
 	if (is_zero_ether_addr(dev->broadcast))
 		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
 
@@ -124,7 +124,7 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
 		break;
 	case as_addparty:
 	case as_dropparty:
-		sk->sk_err_soft = msg->reply;
+		sk->sk_err_soft = -msg->reply;
 					/* < 0 failure, otherwise ep_ref */
 		clear_bit(ATM_VF_WAITING, &vcc->flags);
 		break;
@@ -546,7 +546,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
 		schedule();
 	}
 	finish_wait(sk_sleep(sk), &wait);
-	error = xchg(&sk->sk_err_soft, 0);
+	error = -xchg(&sk->sk_err_soft, 0);
 out:
 	release_sock(sk);
 	return error;
@@ -573,7 +573,7 @@ static int svc_dropparty(struct socket *sock, int ep_ref)
 		error = -EUNATCH;
 		goto out;
 	}
-	error = xchg(&sk->sk_err_soft, 0);
+	error = -xchg(&sk->sk_err_soft, 0);
 out:
 	release_sock(sk);
 	return error;
@@ -55,18 +55,21 @@ int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
 	spin_lock_irqsave(&bm_pool->lock, flags);
 	if (bm_pool->buf_num == bm_pool->size) {
 		pr_warn("pool already filled\n");
+		spin_unlock_irqrestore(&bm_pool->lock, flags);
 		return bm_pool->buf_num;
 	}
 
 	if (buf_num + bm_pool->buf_num > bm_pool->size) {
 		pr_warn("cannot allocate %d buffers for pool\n",
 			buf_num);
+		spin_unlock_irqrestore(&bm_pool->lock, flags);
 		return 0;
 	}
 
 	if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) {
 		pr_warn("Adding %d buffers to the %d current buffers will overflow\n",
 			buf_num, bm_pool->buf_num);
+		spin_unlock_irqrestore(&bm_pool->lock, flags);
 		return 0;
 	}
 
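The bug class fixed here is an early return taken with the pool lock still held, which deadlocks the next caller that contends on it. A runnable userspace model, with a pthread mutex standing in for the spinlock and hwbm-like semantics (the names are illustrative):

	#include <pthread.h>
	#include <stdio.h>

	struct pool {
		pthread_mutex_t lock;
		unsigned int buf_num, size;
	};

	static int pool_add(struct pool *p, unsigned int n)
	{
		pthread_mutex_lock(&p->lock);
		if (p->buf_num == p->size) {
			pthread_mutex_unlock(&p->lock);	/* the unlock the fix adds */
			return p->buf_num;
		}
		if (n + p->buf_num > p->size) {
			pthread_mutex_unlock(&p->lock);
			return 0;
		}
		p->buf_num += n;
		pthread_mutex_unlock(&p->lock);
		return p->buf_num;
	}

	int main(void)
	{
		struct pool p = { PTHREAD_MUTEX_INITIALIZER, 0, 4 };

		printf("%d\n", pool_add(&p, 2));	/* 2; a second call no longer hangs */
		return 0;
	}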
@@ -2245,10 +2245,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	hrtimer_set_expires(&t.timer, spin_until);
 
 	remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
-	if (remaining <= 0) {
-		pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
-		return;
-	}
+	if (remaining <= 0)
+		goto out;
 
 	start_time = ktime_get();
 	if (remaining < 100000) {
@@ -2273,7 +2271,9 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	}
 
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
+out:
 	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
+	destroy_hrtimer_on_stack(&t.timer);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
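These two pktgen hunks pair the on-stack hrtimer init with a destroy_hrtimer_on_stack() on every exit path, which is what debugobjects requires for stack-allocated timers; the hrtimer hunk further up exports the destroy helper so modular pktgen can link against it. A minimal sketch of the resulting lifecycle, modeled on pktgen's spin() and the 4.7-era hrtimer API (wait_until() is an illustrative name, not kernel code):

	#include <linux/hrtimer.h>
	#include <linux/sched.h>

	static void wait_until(ktime_t deadline)
	{
		struct hrtimer_sleeper t;

		hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		hrtimer_set_expires(&t.timer, deadline);
		hrtimer_init_sleeper(&t, current);

		do {
			set_current_state(TASK_INTERRUPTIBLE);
			hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
			if (likely(t.task))
				schedule();
			hrtimer_cancel(&t.timer);
		} while (t.task && !signal_pending(current));
		__set_current_state(TASK_RUNNING);

		/* must run on every return path, hence the goto-out rework above */
		destroy_hrtimer_on_stack(&t.timer);
	}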
@@ -1289,8 +1289,8 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
 			     nl802154_dev_addr_policy))
 		return -EINVAL;
 
-	if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] &&
-	    !attrs[NL802154_DEV_ADDR_ATTR_MODE] &&
+	if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
+	    !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
 	    !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
 	      attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
 		return -EINVAL;
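The one-operator change flips the guard's sense: with &&, a request was rejected only when every address attribute was missing, so a message lacking just the mode (or just the PAN id) slipped through to be dereferenced later. A runnable truth-table check of both variants (pan/mode/addr model attribute presence):

	#include <stdbool.h>
	#include <stdio.h>

	static bool old_reject(bool pan, bool mode, bool addr)
	{
		return !pan && !mode && !addr;		/* only all-absent rejected */
	}

	static bool new_reject(bool pan, bool mode, bool addr)
	{
		return !pan || !mode || !addr;		/* any missing one rejected */
	}

	int main(void)
	{
		/* PAN id and address present, mode missing: must be rejected */
		printf("old=%d new=%d\n",
		       old_reject(true, false, true),	/* 0: wrongly accepted */
		       new_reject(true, false, true));	/* 1: correctly rejected */
		return 0;
	}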
@@ -1681,6 +1681,14 @@ static __net_init int inet_init_net(struct net *net)
 	 */
 	net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
 	net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
+
+	/* Default values for sysctl-controlled parameters.
+	 * We set them here, in case sysctl is not compiled.
+	 */
+	net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
+	net->ipv4.sysctl_ip_dynaddr = 0;
+	net->ipv4.sysctl_ip_early_demux = 1;
+
 	return 0;
 }
 
@@ -999,10 +999,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 	if (!net->ipv4.sysctl_local_reserved_ports)
 		goto err_ports;
 
-	net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
-	net->ipv4.sysctl_ip_dynaddr = 0;
-	net->ipv4.sysctl_ip_early_demux = 1;
-
 	return 0;
 
 err_ports:
@@ -232,6 +232,15 @@ config IPV6_GRE
 
 	  Saying M here will produce a module called ip6_gre. If unsure, say N.
 
+config IPV6_FOU
+	tristate
+	default NET_FOU && IPV6
+
+config IPV6_FOU_TUNNEL
+	tristate
+	default NET_FOU_IP_TUNNELS && IPV6_FOU
+	select IPV6_TUNNEL
+
 config IPV6_MULTIPLE_TABLES
 	bool "IPv6: Multiple Routing Tables"
 	select FIB_RULES
@@ -42,7 +42,7 @@ obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
 obj-$(CONFIG_IPV6_SIT) += sit.o
 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
 obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
-obj-$(CONFIG_NET_FOU) += fou6.o
+obj-$(CONFIG_IPV6_FOU) += fou6.o
 
 obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o
 obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
@@ -69,7 +69,7 @@ int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
 }
 EXPORT_SYMBOL(gue6_build_header);
 
-#ifdef CONFIG_NET_FOU_IP_TUNNELS
+#if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL)
 
 static const struct ip6_tnl_encap_ops fou_ip6tun_ops = {
 	.encap_hlen = fou_encap_hlen,
@@ -712,6 +712,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
 	fl6->daddr = p->raddr;
 	fl6->flowi6_oif = p->link;
 	fl6->flowlabel = 0;
+	fl6->flowi6_proto = IPPROTO_GRE;
 
 	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
 		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
@@ -1027,6 +1028,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
 
 	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
 	dev->mtu = ETH_DATA_LEN - t_hlen;
+	if (dev->type == ARPHRD_ETHER)
+		dev->mtu -= ETH_HLEN;
 	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		dev->mtu -= 8;
 
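Spelled out, the tunnel MTU starts from the Ethernet payload size minus the tunnel headers, and an ip6gretap (ARPHRD_ETHER) device must additionally reserve room for the inner Ethernet header; the encap-limit option costs another 8 bytes. A toy recomputation with illustrative numbers:

	#include <stdio.h>

	#define ETH_DATA_LEN	1500
	#define ETH_HLEN	14

	int main(void)
	{
		int t_hlen = 48;	/* e.g. IPv6 header (40) + GRE (8) */
		int is_ether = 1;	/* ip6gretap-style device */
		int encap_limit = 1;	/* IP6_TNL_F_IGN_ENCAP_LIMIT not set */
		int mtu = ETH_DATA_LEN - t_hlen;

		if (is_ether)
			mtu -= ETH_HLEN;
		if (encap_limit)
			mtu -= 8;
		printf("mtu = %d\n", mtu);	/* 1430 */
		return 0;
	}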
@@ -128,6 +128,7 @@ static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
  */
 static int l2tp_ip6_recv(struct sk_buff *skb)
 {
+	struct net *net = dev_net(skb->dev);
 	struct sock *sk;
 	u32 session_id;
 	u32 tunnel_id;
@@ -154,7 +155,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 	}
 
 	/* Ok, this is a data packet. Lookup the session. */
-	session = l2tp_session_find(&init_net, NULL, session_id);
+	session = l2tp_session_find(net, NULL, session_id);
 	if (session == NULL)
 		goto discard;
 
@@ -188,14 +189,14 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 		goto discard;
 
 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
-	tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
+	tunnel = l2tp_tunnel_find(net, tunnel_id);
 	if (tunnel != NULL)
 		sk = tunnel->sock;
 	else {
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 
 		read_lock_bh(&l2tp_ip6_lock);
-		sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr,
+		sk = __l2tp_ip6_bind_lookup(net, &iph->daddr,
 					    0, tunnel_id);
 		read_unlock_bh(&l2tp_ip6_lock);
 	}
@@ -263,6 +264,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
+	struct net *net = sock_net(sk);
 	__be32 v4addr = 0;
 	int addr_type;
 	int err;
@@ -286,7 +288,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
 	err = -EADDRINUSE;
 	read_lock_bh(&l2tp_ip6_lock);
-	if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr,
+	if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr,
 				   sk->sk_bound_dev_if, addr->l2tp_conn_id))
 		goto out_in_use;
 	read_unlock_bh(&l2tp_ip6_lock);
@@ -456,7 +458,7 @@ static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 
 drop:
-	IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
+	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
 	kfree_skb(skb);
 	return -1;
 }
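Every l2tp_ip6 hunk applies the same idiom: derive the struct net from the object at hand rather than hardcoding &init_net, so session lookups, bind checks, and statistics land in the namespace the packet or socket actually belongs to. A sketch of the two accessors involved (the wrapper names are illustrative):

	#include <linux/skbuff.h>
	#include <net/net_namespace.h>
	#include <net/sock.h>

	/* RX path: the namespace of the device the packet arrived on */
	static struct net *rx_net(const struct sk_buff *skb)
	{
		return dev_net(skb->dev);
	}

	/* Socket paths: the namespace the socket was created in */
	static struct net *owner_net(const struct sock *sk)
	{
		return sock_net(sk);
	}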
@@ -444,10 +444,9 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
 		break;
 
 	case LAPB_FRMR:
-		lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n",
+		lapb_dbg(1, "(%p) S3 RX FRMR(%d) %5ph\n",
 			 lapb->dev, frame->pf,
-			 skb->data[0], skb->data[1], skb->data[2],
-			 skb->data[3], skb->data[4]);
+			 skb->data);
 		lapb_establish_data_link(lapb);
 		lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev);
 		lapb_requeue_frames(lapb);
@@ -148,9 +148,7 @@ void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type)
 		}
 	}
 
-	lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n",
-		 lapb->dev, lapb->state,
-		 skb->data[0], skb->data[1], skb->data[2]);
+	lapb_dbg(2, "(%p) S%d TX %3ph\n", lapb->dev, lapb->state, skb->data);
 
 	if (!lapb_data_transmit(lapb, skb))
 		kfree_skb(skb);
@@ -113,9 +113,7 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb,
 {
 	frame->type = LAPB_ILLEGAL;
 
-	lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n",
-		 lapb->dev, lapb->state,
-		 skb->data[0], skb->data[1], skb->data[2]);
+	lapb_dbg(2, "(%p) S%d RX %3ph\n", lapb->dev, lapb->state, skb->data);
 
 	/* We always need to look at 2 bytes, sometimes we need
 	 * to look at 3 and those cases are handled below.
@@ -284,10 +282,9 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
 		dptr++;
 		*dptr++ = lapb->frmr_type;
 
-		lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n",
+		lapb_dbg(1, "(%p) S%d TX FRMR %5ph\n",
 			 lapb->dev, lapb->state,
-			 skb->data[1], skb->data[2], skb->data[3],
-			 skb->data[4], skb->data[5]);
+			 &skb->data[1]);
 	} else {
 		dptr = skb_put(skb, 4);
 		*dptr++ = LAPB_FRMR;
@@ -299,9 +296,8 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
 		dptr++;
 		*dptr++ = lapb->frmr_type;
 
-		lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n",
-			 lapb->dev, lapb->state, skb->data[1],
-			 skb->data[2], skb->data[3]);
+		lapb_dbg(1, "(%p) S%d TX FRMR %3ph\n",
+			 lapb->dev, lapb->state, &skb->data[1]);
 	}
 
 	lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE);
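All five lapb conversions lean on printk's %*ph extension, which hex-dumps a small buffer (up to 64 bytes) with the byte count embedded in the specifier, replacing hand-written lists of skb->data[i] arguments. A minimal module demonstrating the output (phdemo is an illustrative name):

	#include <linux/module.h>
	#include <linux/printk.h>

	static int __init phdemo_init(void)
	{
		u8 buf[5] = { 0x01, 0x73, 0x81, 0x02, 0x03 };

		pr_info("FRMR %5ph\n", buf);	/* logs "FRMR 01 73 81 02 03" */
		pr_info("hdr %3ph\n", &buf[1]);	/* logs "hdr 73 81 02" */
		return 0;
	}

	static void __exit phdemo_exit(void)
	{
	}

	module_init(phdemo_init);
	module_exit(phdemo_exit);
	MODULE_LICENSE("GPL");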
@@ -137,11 +137,23 @@ static bool is_flow_key_valid(const struct sw_flow_key *key)
 	return !!key->eth.type;
 }
 
+static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
+			     __be16 ethertype)
+{
+	if (skb->ip_summed == CHECKSUM_COMPLETE) {
+		__be16 diff[] = { ~(hdr->h_proto), ethertype };
+
+		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+					  ~skb->csum);
+	}
+
+	hdr->h_proto = ethertype;
+}
+
 static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 		     const struct ovs_action_push_mpls *mpls)
 {
 	__be32 *new_mpls_lse;
 	struct ethhdr *hdr;
 
 	/* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */
 	if (skb->encapsulation)
@@ -160,9 +172,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 
 	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
 
-	hdr = eth_hdr(skb);
-	hdr->h_proto = mpls->mpls_ethertype;
-
+	update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
 	if (!skb->inner_protocol)
 		skb_set_inner_protocol(skb, skb->protocol);
 	skb->protocol = mpls->mpls_ethertype;
@@ -193,7 +203,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 	 * field correctly in the presence of VLAN tags.
 	 */
 	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
-	hdr->h_proto = ethertype;
+	update_ethertype(skb, hdr, ethertype);
 	if (eth_p_mpls(skb->protocol))
 		skb->protocol = ethertype;
 
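update_ethertype() relies on the RFC 1624 incremental-update identity: folding the pair { ~old, new } into a ones' complement sum equals recomputing the sum with the field replaced, which is what keeps CHECKSUM_COMPLETE skbs honest after the EtherType is rewritten. A runnable userspace check of the identity on 16-bit words:

	#include <stdint.h>
	#include <stdio.h>

	/* Fold a 32-bit accumulator back into 16 ones' complement bits. */
	static uint16_t fold(uint32_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	static uint16_t csum_add(uint16_t sum, uint16_t x)
	{
		return fold((uint32_t)sum + x);
	}

	int main(void)
	{
		uint16_t word = 0x1234;		/* some other checksummed data */
		uint16_t old_et = 0x0800;	/* ETH_P_IP */
		uint16_t new_et = 0x8847;	/* ETH_P_MPLS_UC */

		uint16_t recomputed = csum_add(word, new_et);
		uint16_t incremental = csum_add(csum_add(csum_add(word, old_et),
							 (uint16_t)~old_et), new_et);

		/* both print 9a7b */
		printf("recomputed=%04x incremental=%04x\n", recomputed, incremental);
		return 0;
	}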
@@ -239,6 +239,8 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
 	police->tcfp_t_c = ktime_get_ns();
 	police->tcf_index = parm->index ? parm->index :
 		tcf_hash_new_index(tn);
+	police->tcf_tm.install = jiffies;
+	police->tcf_tm.lastuse = jiffies;
 	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
 	spin_lock_bh(&hinfo->lock);
 	hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
@@ -268,6 +270,7 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
 	spin_lock(&police->tcf_lock);
 
 	bstats_update(&police->tcf_bstats, skb);
+	tcf_lastuse_update(&police->tcf_tm);
 
 	if (police->tcfp_ewma_rate &&
 	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
@@ -327,6 +330,7 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 		.refcnt = police->tcf_refcnt - ref,
 		.bindcnt = police->tcf_bindcnt - bind,
 	};
+	struct tcf_t t;
 
 	if (police->rate_present)
 		psched_ratecfg_getrate(&opt.rate, &police->rate);
@@ -340,6 +344,13 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 	if (police->tcfp_ewma_rate &&
 	    nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
 		goto nla_put_failure;
+
+	t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
+	t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
+	t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
+	if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
+		goto nla_put_failure;
+
 	return skb->len;
 
 nla_put_failure:
@@ -607,6 +607,10 @@ void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
 	if (throttle)
 		qdisc_throttled(wd->qdisc);
 
+	if (wd->last_expires == expires)
+		return;
+
+	wd->last_expires = expires;
 	hrtimer_start(&wd->timer,
 		      ns_to_ktime(expires),
 		      HRTIMER_MODE_ABS_PINNED);
@@ -928,17 +928,10 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 		}
 	}
 	qdisc_qstats_overlimit(sch);
-	if (likely(next_event > q->now)) {
-		if (!test_bit(__QDISC_STATE_DEACTIVATED,
-			      &qdisc_root_sleeping(q->watchdog.qdisc)->state)) {
-			ktime_t time = ns_to_ktime(next_event);
-			qdisc_throttled(q->watchdog.qdisc);
-			hrtimer_start(&q->watchdog.timer, time,
-				      HRTIMER_MODE_ABS_PINNED);
-		}
-	} else {
+	if (likely(next_event > q->now))
+		qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true);
+	else
 		schedule_work(&q->work);
-	}
 fin:
 	return skb;
 }
@@ -356,6 +356,9 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
 	if (cb->args[4] < cb->args[1])
 		goto next;
 
+	if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs))
+		goto next;
+
 	if (r->sdiag_family != AF_UNSPEC &&
 	    sk->sk_family != r->sdiag_family)
 		goto next;
@@ -4220,6 +4220,7 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
 	info->sctpi_s_disable_fragments = sp->disable_fragments;
 	info->sctpi_s_v4mapped = sp->v4mapped;
 	info->sctpi_s_frag_interleave = sp->frag_interleave;
+	info->sctpi_s_type = sp->type;
 
 	return 0;
 }
@@ -346,9 +346,15 @@ static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg,
 			       struct nlattr **attrs)
 {
 	struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1];
+	int err;
 
-	nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER],
-			 NULL);
+	if (!attrs[TIPC_NLA_BEARER])
+		return -EINVAL;
+
+	err = nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX,
+			       attrs[TIPC_NLA_BEARER], NULL);
+	if (err)
+		return err;
 
 	return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME,
 			    nla_data(bearer[TIPC_NLA_BEARER_NAME]),
@@ -460,14 +466,31 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
 	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
 	struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
 	struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
+	int err;
 
-	nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
+	if (!attrs[TIPC_NLA_LINK])
+		return -EINVAL;
+
+	err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
+			       NULL);
+	if (err)
+		return err;
 
-	nla_parse_nested(prop, TIPC_NLA_PROP_MAX, link[TIPC_NLA_LINK_PROP],
-			 NULL);
+	if (!link[TIPC_NLA_LINK_PROP])
+		return -EINVAL;
+
+	err = nla_parse_nested(prop, TIPC_NLA_PROP_MAX,
+			       link[TIPC_NLA_LINK_PROP], NULL);
+	if (err)
+		return err;
 
-	nla_parse_nested(stats, TIPC_NLA_STATS_MAX, link[TIPC_NLA_LINK_STATS],
-			 NULL);
+	if (!link[TIPC_NLA_LINK_STATS])
+		return -EINVAL;
+
+	err = nla_parse_nested(stats, TIPC_NLA_STATS_MAX,
+			       link[TIPC_NLA_LINK_STATS], NULL);
+	if (err)
+		return err;
 
 	name = (char *)TLV_DATA(msg->req);
 	if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
@@ -569,8 +592,15 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
 {
 	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
 	struct tipc_link_info link_info;
+	int err;
 
-	nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
+	if (!attrs[TIPC_NLA_LINK])
+		return -EINVAL;
+
+	err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
+			       NULL);
+	if (err)
+		return err;
 
 	link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
 	link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
@@ -758,12 +788,23 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
 	u32 node, depth, type, lowbound, upbound;
 	static const char * const scope_str[] = {"", " zone", " cluster",
 						 " node"};
+	int err;
 
-	nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
-			 attrs[TIPC_NLA_NAME_TABLE], NULL);
+	if (!attrs[TIPC_NLA_NAME_TABLE])
+		return -EINVAL;
+
+	err = nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
+			       attrs[TIPC_NLA_NAME_TABLE], NULL);
+	if (err)
+		return err;
 
-	nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, nt[TIPC_NLA_NAME_TABLE_PUBL],
-			 NULL);
+	if (!nt[TIPC_NLA_NAME_TABLE_PUBL])
+		return -EINVAL;
+
+	err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX,
+			       nt[TIPC_NLA_NAME_TABLE_PUBL], NULL);
+	if (err)
+		return err;
 
 	ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
 
@@ -815,8 +856,15 @@ static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg,
 {
 	u32 type, lower, upper;
 	struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
+	int err;
 
-	nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], NULL);
+	if (!attrs[TIPC_NLA_PUBL])
+		return -EINVAL;
+
+	err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL],
+			       NULL);
+	if (err)
+		return err;
 
 	type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]);
 	lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]);
@@ -876,7 +924,13 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg,
 	u32 sock_ref;
 	struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
 
-	nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], NULL);
+	if (!attrs[TIPC_NLA_SOCK])
+		return -EINVAL;
+
+	err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK],
+			       NULL);
+	if (err)
+		return err;
 
 	sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
 	tipc_tlv_sprintf(msg->rep, "%u:", sock_ref);
@@ -917,9 +971,15 @@ static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg,
 				     struct nlattr **attrs)
 {
 	struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1];
+	int err;
 
-	nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA],
-			 NULL);
+	if (!attrs[TIPC_NLA_MEDIA])
+		return -EINVAL;
+
+	err = nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA],
+			       NULL);
+	if (err)
+		return err;
 
 	return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME,
 			    nla_data(media[TIPC_NLA_MEDIA_NAME]),
@@ -931,8 +991,15 @@ static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg,
 {
 	struct tipc_node_info node_info;
 	struct nlattr *node[TIPC_NLA_NODE_MAX + 1];
+	int err;
 
-	nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], NULL);
+	if (!attrs[TIPC_NLA_NODE])
+		return -EINVAL;
+
+	err = nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE],
+			       NULL);
+	if (err)
+		return err;
 
 	node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR]));
 	node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP]));
@@ -971,8 +1038,16 @@ static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg,
 {
 	__be32 id;
 	struct nlattr *net[TIPC_NLA_NET_MAX + 1];
+	int err;
+
+	if (!attrs[TIPC_NLA_NET])
+		return -EINVAL;
+
+	err = nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET],
+			       NULL);
+	if (err)
+		return err;
 
-	nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], NULL);
 	id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID]));
 
 	return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id));