Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) sunhme driver lacks DMA mapping error checks, based upon a report by Meelis Roos.

 2) Fix memory leak in mvpp2 driver, from Sudip Mukherjee.

 3) DMA memory allocation sizes are wrong in systemport ethernet driver, fix from Florian Fainelli.

 4) Fix use after free in mac80211 defragmentation code, from Johannes Berg.

 5) Some networking uapi headers missing from Kbuild file, from Stephen Hemminger.

 6) TUN driver gets csum_start offset wrong when VLAN accel is enabled, and macvtap has a similar bug, from Herbert Xu.

 7) Adjust several tunneling drivers to set dev->iflink after registry, because registry sets that to -1 overwriting whatever we did.  From Steffen Klassert.

 8) Geneve forgets to set inner tunneling type, causing GSO segmentation to fail on some NICs.  From Jesse Gross.

 9) Fix several locking bugs in stmmac driver, from Fabrice Gasnier and Giuseppe CAVALLARO.

10) Fix spurious timeouts with NewReno on low traffic connections, from Marcelo Leitner.

11) Fix descriptor updates in enic driver, from Govindarajulu Varadarajan.

12) PPP calls bpf_prog_create() with locks held, which isn't kosher.  Fix from Takashi Iwai.

13) Fix NULL deref in SCTP with malformed INIT packets, from Daniel Borkmann.

14) psock_fanout selftest accesses past the end of the mmap ring, fix from Shuah Khan.

15) Fix PTP timestamping for VLAN packets, from Richard Cochran.

16) netlink_unbind() calls in netlink pass wrong initial argument, from Hiroaki SHIMODA.

17) vxlan socket reuse accidentally reuses a socket when the address family is different, so we have to explicitly check this, from Marcelo Leitner.

18) Fix missing include in nft_reject_bridge.c breaking the build on ppc and other architectures, from Guenter Roeck.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (75 commits)
  vxlan: Do not reuse sockets for a different address family
  smsc911x: power-up phydev before doing a software reset.
  lib: rhashtable - Remove weird non-ASCII characters from comments
  net/smsc911x: Fix delays in the PHY enable/disable routines
  net/smsc911x: Fix rare soft reset timeout issue due to PHY power-down mode
  netlink: Properly unbind in error conditions.
  net: ptp: fix time stamp matching logic for VLAN packets.
  cxgb4 : dcb open-lldp interop fixes
  selftests/net: psock_fanout seg faults in sock_fanout_read_ring()
  net: bcmgenet: apply MII configuration in bcmgenet_open()
  net: bcmgenet: connect and disconnect from the PHY state machine
  net: qualcomm: Fix dependency
  ixgbe: phy: fix uninitialized status in ixgbe_setup_phy_link_tnx
  net: phy: Correctly handle MII ioctl which changes autonegotiation.
  ipv6: fix IPV6_PKTINFO with v4 mapped
  net: sctp: fix memory leak in auth key management
  net: sctp: fix NULL pointer dereference in af->from_addr_param on malformed packet
  net: ppp: Don't call bpf_prog_create() in ppp_lock
  net/mlx4_en: Advertize encapsulation offloads features only when VXLAN tunnel is set
  cxgb4 : Fix bug in DCB app deletion
  ...
commit 5cf5203704
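Fix 17 above concerns a tunnel socket cache that matched on port alone and so could hand an IPv4 UDP socket to an IPv6 tunnel. The snippet below is only an illustrative sketch of that check in plain C with made-up names (it is not the kernel code from this merge): the lookup must compare the address family as well as the port before a cached socket is reused.

#include <stddef.h>
#include <sys/socket.h>         /* AF_INET, AF_INET6 */

/* Hypothetical cache entry: one UDP socket per (family, port) pair. */
struct tunnel_sock {
        int family;             /* AF_INET or AF_INET6 */
        unsigned short port;    /* UDP port, host byte order */
        int fd;                 /* cached socket descriptor */
};

/* Return a reusable socket, or NULL so the caller creates a new one.
 * Matching on the port alone was the bug pattern; the family check is
 * what makes the reuse safe. */
static struct tunnel_sock *find_tunnel_sock(struct tunnel_sock *cache,
                                            size_t n, int family,
                                            unsigned short port)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (cache[i].port == port && cache[i].family == family)
                        return &cache[i];
        return NULL;
}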
@@ -56,6 +56,13 @@ ip_forward_use_pmtu - BOOLEAN
                0 - disabled
                1 - enabled

fwmark_reflect - BOOLEAN
        Controls the fwmark of kernel-generated IPv4 reply packets that are not
        associated with a socket (for example, TCP RSTs or ICMP echo replies).
        If unset, these packets have a fwmark of zero. If set, they have the
        fwmark of the packet they are replying to.
        Default: 0

route/max_size - INTEGER
        Maximum number of routes allowed in the kernel. Increase
        this when using large numbers of interfaces and/or routes.

@@ -1201,6 +1208,13 @@ conf/all/forwarding - BOOLEAN
proxy_ndp - BOOLEAN
        Do proxy ndp.

fwmark_reflect - BOOLEAN
        Controls the fwmark of kernel-generated IPv6 reply packets that are not
        associated with a socket (for example, TCP RSTs or ICMPv6 echo replies).
        If unset, these packets have a fwmark of zero. If set, they have the
        fwmark of the packet they are replying to.
        Default: 0

conf/interface/*:
        Change special settings per interface.

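The fwmark_reflect knobs documented above live under the usual sysctl tree, so they can be flipped at runtime. A minimal userspace sketch (assuming the stock /proc/sys layout; the IPv6 twin sits at net/ipv6/fwmark_reflect):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Equivalent of: sysctl -w net.ipv4.fwmark_reflect=1 */
        int fd = open("/proc/sys/net/ipv4/fwmark_reflect", O_WRONLY);

        if (fd < 0) {
                perror("open fwmark_reflect");
                return 1;
        }
        if (write(fd, "1", 1) != 1)
                perror("write");
        close(fd);
        return 0;
}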
@@ -599,7 +599,7 @@ menet: ethernet@17020000 {
        compatible = "apm,xgene-enet";
        status = "disabled";
        reg = <0x0 0x17020000 0x0 0xd100>,
              <0x0 0X17030000 0x0 0X400>,
              <0x0 0X17030000 0x0 0Xc300>,
              <0x0 0X10000000 0x0 0X200>;
        reg-names = "enet_csr", "ring_csr", "ring_cmd";
        interrupts = <0x0 0x3c 0x4>;

@@ -624,9 +624,9 @@ menetphy: menetphy@3 {
sgenet0: ethernet@1f210000 {
        compatible = "apm,xgene-enet";
        status = "disabled";
        reg = <0x0 0x1f210000 0x0 0x10000>,
              <0x0 0x1f200000 0x0 0X10000>,
              <0x0 0x1B000000 0x0 0X20000>;
        reg = <0x0 0x1f210000 0x0 0xd100>,
              <0x0 0x1f200000 0x0 0Xc300>,
              <0x0 0x1B000000 0x0 0X200>;
        reg-names = "enet_csr", "ring_csr", "ring_cmd";
        interrupts = <0x0 0xA0 0x4>;
        dma-coherent;

@@ -639,7 +639,7 @@ xgenet: ethernet@1f610000 {
        compatible = "apm,xgene-enet";
        status = "disabled";
        reg = <0x0 0x1f610000 0x0 0xd100>,
              <0x0 0x1f600000 0x0 0X400>,
              <0x0 0x1f600000 0x0 0Xc300>,
              <0x0 0x18000000 0x0 0X200>;
        reg-names = "enet_csr", "ring_csr", "ring_cmd";
        interrupts = <0x0 0x60 0x4>;
@@ -575,10 +575,24 @@ static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
        xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}

static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
        if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
                return false;

        if (ioread32(p->ring_csr_addr + SRST_ADDR))
                return false;

        return true;
}

static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
        u32 val;

        if (!xgene_ring_mgr_init(pdata))
                return -ENODEV;

        clk_prepare_enable(pdata->clk);
        clk_disable_unprepare(pdata->clk);
        clk_prepare_enable(pdata->clk);

@@ -590,6 +604,8 @@ static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
        val |= SCAN_AUTO_INCR;
        MGMT_CLOCK_SEL_SET(&val, 1);
        xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);

        return 0;
}

static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
@@ -104,6 +104,9 @@ enum xgene_enet_rm {
#define BLOCK_ETH_MAC_OFFSET 0x0000
#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800

#define CLKEN_ADDR 0xc208
#define SRST_ADDR 0xc200

#define MAC_ADDR_REG_OFFSET 0x00
#define MAC_COMMAND_REG_OFFSET 0x04
#define MAC_WRITE_REG_OFFSET 0x08

@@ -318,6 +321,7 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,

int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);

extern struct xgene_mac_ops xgene_gmac_ops;
extern struct xgene_port_ops xgene_gport_ops;
@@ -639,9 +639,9 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
        struct xgene_enet_desc_ring *buf_pool = NULL;
        u8 cpu_bufnum = 0, eth_bufnum = 0;
        u8 bp_bufnum = 0x20;
        u16 ring_id, ring_num = 0;
        u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM;
        u8 bp_bufnum = START_BP_BUFNUM;
        u16 ring_id, ring_num = START_RING_NUM;
        int ret;

        /* allocate rx descriptor ring */

@@ -852,7 +852,9 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
        u16 dst_ring_num;
        int ret;

        pdata->port_ops->reset(pdata);
        ret = pdata->port_ops->reset(pdata);
        if (ret)
                return ret;

        ret = xgene_enet_create_desc_rings(ndev);
        if (ret) {

@@ -954,6 +956,7 @@ static int xgene_enet_probe(struct platform_device *pdev)

        return ret;
err:
        unregister_netdev(ndev);
        free_netdev(ndev);
        return ret;
}
@@ -38,6 +38,9 @@
#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
#define NUM_PKT_BUF 64
#define NUM_BUFPOOL 32
#define START_ETH_BUFNUM 2
#define START_BP_BUFNUM 0x22
#define START_RING_NUM 8

#define PHY_POLL_LINK_ON (10 * HZ)
#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)

@@ -83,7 +86,7 @@ struct xgene_mac_ops {
};

struct xgene_port_ops {
        void (*reset)(struct xgene_enet_pdata *pdata);
        int (*reset)(struct xgene_enet_pdata *pdata);
        void (*cle_bypass)(struct xgene_enet_pdata *pdata,
                           u32 dst_ring_num, u16 bufpool_id);
        void (*shutdown)(struct xgene_enet_pdata *pdata);
@@ -311,14 +311,19 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
        xgene_sgmac_rxtx(p, TX_EN, false);
}

static void xgene_enet_reset(struct xgene_enet_pdata *p)
static int xgene_enet_reset(struct xgene_enet_pdata *p)
{
        if (!xgene_ring_mgr_init(p))
                return -ENODEV;

        clk_prepare_enable(p->clk);
        clk_disable_unprepare(p->clk);
        clk_prepare_enable(p->clk);

        xgene_enet_ecc_init(p);
        xgene_enet_config_ring_if_assoc(p);

        return 0;
}

static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
@@ -252,14 +252,19 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
        xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
}

static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
        if (!xgene_ring_mgr_init(pdata))
                return -ENODEV;

        clk_prepare_enable(pdata->clk);
        clk_disable_unprepare(pdata->clk);
        clk_prepare_enable(pdata->clk);

        xgene_enet_ecc_init(pdata);
        xgene_enet_config_ring_if_assoc(pdata);

        return 0;
}

static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
@@ -1110,7 +1110,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
        /* We just need one DMA descriptor which is DMA-able, since writing to
         * the port will allocate a new descriptor in its internal linked-list
         */
        p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL);
        p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
                                GFP_KERNEL);
        if (!p) {
                netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
                return -ENOMEM;

@@ -1174,6 +1175,13 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
        if (!(reg & TDMA_DISABLED))
                netdev_warn(priv->netdev, "TDMA not stopped!\n");

        /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
         * fail, so by checking this pointer we know whether the TX ring was
         * fully initialized or not.
         */
        if (!ring->cbs)
                return;

        napi_disable(&ring->napi);
        netif_napi_del(&ring->napi);

@@ -1183,7 +1191,8 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
        ring->cbs = NULL;

        if (ring->desc_dma) {
                dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma);
                dma_free_coherent(kdev, sizeof(struct dma_desc),
                                  ring->desc_cpu, ring->desc_dma);
                ring->desc_dma = 0;
        }
        ring->size = 0;
@@ -2140,6 +2140,12 @@ static int bcmgenet_open(struct net_device *dev)
                goto err_irq0;
        }

        /* Re-configure the port multiplexer towards the PHY device */
        bcmgenet_mii_config(priv->dev, false);

        phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
                           priv->phy_interface);

        bcmgenet_netif_start(dev);

        return 0;

@@ -2184,6 +2190,9 @@ static int bcmgenet_close(struct net_device *dev)

        bcmgenet_netif_stop(dev);

        /* Really kill the PHY state machine and disconnect from it */
        phy_disconnect(priv->phydev);

        /* Disable MAC receive */
        umac_enable_set(priv, CMD_RX_EN, false);

@@ -2685,7 +2694,7 @@ static int bcmgenet_resume(struct device *d)

        phy_init_hw(priv->phydev);
        /* Speed settings must be restored */
        bcmgenet_mii_config(priv->dev);
        bcmgenet_mii_config(priv->dev, false);

        /* disable ethernet MAC while updating its registers */
        umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
@@ -617,9 +617,10 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);

/* MDIO routines */
int bcmgenet_mii_init(struct net_device *dev);
int bcmgenet_mii_config(struct net_device *dev);
int bcmgenet_mii_config(struct net_device *dev, bool init);
void bcmgenet_mii_exit(struct net_device *dev);
void bcmgenet_mii_reset(struct net_device *dev);
void bcmgenet_mii_setup(struct net_device *dev);

/* Wake-on-LAN routines */
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
@@ -77,7 +77,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
/* setup netdev link state when PHY link status change and
 * update UMAC and RGMII block when link up
 */
static void bcmgenet_mii_setup(struct net_device *dev)
void bcmgenet_mii_setup(struct net_device *dev)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;

@@ -211,7 +211,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
        bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
}

int bcmgenet_mii_config(struct net_device *dev)
int bcmgenet_mii_config(struct net_device *dev, bool init)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;

@@ -298,6 +298,7 @@ int bcmgenet_mii_config(struct net_device *dev)
                return -EINVAL;
        }

        if (init)
                dev_info(kdev, "configuring instance for %s\n", phy_name);

        return 0;

@@ -350,7 +351,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
         * PHY speed which is needed for bcmgenet_mii_config() to configure
         * things appropriately.
         */
        ret = bcmgenet_mii_config(dev);
        ret = bcmgenet_mii_config(dev, true);
        if (ret) {
                phy_disconnect(priv->phydev);
                return ret;
@ -79,8 +79,9 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
|
|||
app.protocol = dcb->app_priority[i].protocolid;
|
||||
|
||||
if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
|
||||
app.priority = dcb->app_priority[i].user_prio_map;
|
||||
app.selector = dcb->app_priority[i].sel_field + 1;
|
||||
err = dcb_ieee_setapp(dev, &app);
|
||||
err = dcb_ieee_delapp(dev, &app);
|
||||
} else {
|
||||
app.selector = !!(dcb->app_priority[i].sel_field);
|
||||
err = dcb_setapp(dev, &app);
|
||||
|
@ -122,7 +123,11 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
|
|||
case CXGB4_DCB_INPUT_FW_ENABLED: {
|
||||
/* we're going to use Firmware DCB */
|
||||
dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
|
||||
dcb->supported = CXGB4_DCBX_FW_SUPPORT;
|
||||
dcb->supported = DCB_CAP_DCBX_LLD_MANAGED;
|
||||
if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE)
|
||||
dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
|
||||
else
|
||||
dcb->supported |= DCB_CAP_DCBX_VER_CEE;
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -436,6 +441,7 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
|
|||
*up_tc_map = (1 << tc);
|
||||
|
||||
/* prio_type is link strict */
|
||||
if (*pgid != 0xF)
|
||||
*prio_type = 0x2;
|
||||
}
|
||||
|
||||
|
@ -443,7 +449,9 @@ static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
|
|||
u8 *prio_type, u8 *pgid, u8 *bw_per,
|
||||
u8 *up_tc_map)
|
||||
{
|
||||
return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1);
|
||||
/* tc 0 is written at MSB position */
|
||||
return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
|
||||
up_tc_map, 1);
|
||||
}
|
||||
|
||||
|
||||
|
@ -451,7 +459,9 @@ static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
|
|||
u8 *prio_type, u8 *pgid, u8 *bw_per,
|
||||
u8 *up_tc_map)
|
||||
{
|
||||
return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0);
|
||||
/* tc 0 is written at MSB position */
|
||||
return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
|
||||
up_tc_map, 0);
|
||||
}
|
||||
|
||||
static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
|
||||
|
@ -461,6 +471,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
|
|||
struct fw_port_cmd pcmd;
|
||||
struct port_info *pi = netdev2pinfo(dev);
|
||||
struct adapter *adap = pi->adapter;
|
||||
int fw_tc = 7 - tc;
|
||||
u32 _pgid;
|
||||
int err;
|
||||
|
||||
|
@ -479,8 +490,8 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
|
|||
}
|
||||
|
||||
_pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
|
||||
_pgid &= ~(0xF << (tc * 4));
|
||||
_pgid |= pgid << (tc * 4);
|
||||
_pgid &= ~(0xF << (fw_tc * 4));
|
||||
_pgid |= pgid << (fw_tc * 4);
|
||||
pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);
|
||||
|
||||
INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
|
||||
|
@ -593,7 +604,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
|
|||
priority >= CXGB4_MAX_PRIORITY)
|
||||
*pfccfg = 0;
|
||||
else
|
||||
*pfccfg = (pi->dcb.pfcen >> priority) & 1;
|
||||
*pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1;
|
||||
}
|
||||
|
||||
/* Enable/disable Priority Pause Frames for the specified Traffic Class
|
||||
|
@ -618,9 +629,9 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
|
|||
pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;
|
||||
|
||||
if (pfccfg)
|
||||
pcmd.u.dcb.pfc.pfcen |= (1 << priority);
|
||||
pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority));
|
||||
else
|
||||
pcmd.u.dcb.pfc.pfcen &= (~(1 << priority));
|
||||
pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority)));
|
||||
|
||||
err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
|
||||
if (err != FW_PORT_DCB_CFG_SUCCESS) {
|
||||
|
|
|
@ -2914,7 +2914,8 @@ static int t4_sge_init_hard(struct adapter *adap)
|
|||
int t4_sge_init(struct adapter *adap)
|
||||
{
|
||||
struct sge *s = &adap->sge;
|
||||
u32 sge_control, sge_conm_ctrl;
|
||||
u32 sge_control, sge_control2, sge_conm_ctrl;
|
||||
unsigned int ingpadboundary, ingpackboundary;
|
||||
int ret, egress_threshold;
|
||||
|
||||
/*
|
||||
|
@ -2924,8 +2925,31 @@ int t4_sge_init(struct adapter *adap)
|
|||
sge_control = t4_read_reg(adap, SGE_CONTROL);
|
||||
s->pktshift = PKTSHIFT_GET(sge_control);
|
||||
s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
|
||||
s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
|
||||
|
||||
/* T4 uses a single control field to specify both the PCIe Padding and
|
||||
* Packing Boundary. T5 introduced the ability to specify these
|
||||
* separately. The actual Ingress Packet Data alignment boundary
|
||||
* within Packed Buffer Mode is the maximum of these two
|
||||
* specifications.
|
||||
*/
|
||||
ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
|
||||
X_INGPADBOUNDARY_SHIFT);
|
||||
if (is_t4(adap->params.chip)) {
|
||||
s->fl_align = ingpadboundary;
|
||||
} else {
|
||||
/* T5 has a different interpretation of one of the PCIe Packing
|
||||
* Boundary values.
|
||||
*/
|
||||
sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
|
||||
ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
|
||||
if (ingpackboundary == INGPACKBOUNDARY_16B_X)
|
||||
ingpackboundary = 16;
|
||||
else
|
||||
ingpackboundary = 1 << (ingpackboundary +
|
||||
INGPACKBOUNDARY_SHIFT_X);
|
||||
|
||||
s->fl_align = max(ingpadboundary, ingpackboundary);
|
||||
}
|
||||
|
||||
if (adap->flags & USING_SOFT_PARAMS)
|
||||
ret = t4_sge_init_soft(adap);
|
||||
|
|
|
@ -3129,12 +3129,51 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
|
|||
HOSTPAGESIZEPF6(sge_hps) |
|
||||
HOSTPAGESIZEPF7(sge_hps));
|
||||
|
||||
if (is_t4(adap->params.chip)) {
|
||||
t4_set_reg_field(adap, SGE_CONTROL,
|
||||
INGPADBOUNDARY_MASK |
|
||||
EGRSTATUSPAGESIZE_MASK,
|
||||
INGPADBOUNDARY(fl_align_log - 5) |
|
||||
EGRSTATUSPAGESIZE(stat_len != 64));
|
||||
|
||||
} else {
|
||||
/* T5 introduced the separation of the Free List Padding and
|
||||
* Packing Boundaries. Thus, we can select a smaller Padding
|
||||
* Boundary to avoid uselessly chewing up PCIe Link and Memory
|
||||
* Bandwidth, and use a Packing Boundary which is large enough
|
||||
* to avoid false sharing between CPUs, etc.
|
||||
*
|
||||
* For the PCI Link, the smaller the Padding Boundary the
|
||||
* better. For the Memory Controller, a smaller Padding
|
||||
* Boundary is better until we cross under the Memory Line
|
||||
* Size (the minimum unit of transfer to/from Memory). If we
|
||||
* have a Padding Boundary which is smaller than the Memory
|
||||
* Line Size, that'll involve a Read-Modify-Write cycle on the
|
||||
* Memory Controller which is never good. For T5 the smallest
|
||||
* Padding Boundary which we can select is 32 bytes which is
|
||||
* larger than any known Memory Controller Line Size so we'll
|
||||
* use that.
|
||||
*
|
||||
* T5 has a different interpretation of the "0" value for the
|
||||
* Packing Boundary. This corresponds to 16 bytes instead of
|
||||
* the expected 32 bytes. We never have a Packing Boundary
|
||||
* less than 32 bytes so we can't use that special value but
|
||||
* on the other hand, if we wanted 32 bytes, the best we can
|
||||
* really do is 64 bytes.
|
||||
*/
|
||||
if (fl_align <= 32) {
|
||||
fl_align = 64;
|
||||
fl_align_log = 6;
|
||||
}
|
||||
t4_set_reg_field(adap, SGE_CONTROL,
|
||||
INGPADBOUNDARY_MASK |
|
||||
EGRSTATUSPAGESIZE_MASK,
|
||||
INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
|
||||
EGRSTATUSPAGESIZE(stat_len != 64));
|
||||
t4_set_reg_field(adap, SGE_CONTROL2_A,
|
||||
INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
|
||||
INGPACKBOUNDARY_V(fl_align_log -
|
||||
INGPACKBOUNDARY_SHIFT_X));
|
||||
}
|
||||
/*
|
||||
* Adjust various SGE Free List Host Buffer Sizes.
|
||||
*
|
||||
|
|
|
@ -95,6 +95,7 @@
|
|||
#define X_INGPADBOUNDARY_SHIFT 5
|
||||
|
||||
#define SGE_CONTROL 0x1008
|
||||
#define SGE_CONTROL2_A 0x1124
|
||||
#define DCASYSTYPE 0x00080000U
|
||||
#define RXPKTCPLMODE_MASK 0x00040000U
|
||||
#define RXPKTCPLMODE_SHIFT 18
|
||||
|
@ -106,6 +107,7 @@
|
|||
#define PKTSHIFT_SHIFT 10
|
||||
#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
|
||||
#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
|
||||
#define INGPCIEBOUNDARY_32B_X 0
|
||||
#define INGPCIEBOUNDARY_MASK 0x00000380U
|
||||
#define INGPCIEBOUNDARY_SHIFT 7
|
||||
#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
|
||||
|
@ -114,6 +116,14 @@
|
|||
#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
|
||||
#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
|
||||
>> INGPADBOUNDARY_SHIFT)
|
||||
#define INGPACKBOUNDARY_16B_X 0
|
||||
#define INGPACKBOUNDARY_SHIFT_X 5
|
||||
|
||||
#define INGPACKBOUNDARY_S 16
|
||||
#define INGPACKBOUNDARY_M 0x7U
|
||||
#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S)
|
||||
#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \
|
||||
& INGPACKBOUNDARY_M)
|
||||
#define EGRPCIEBOUNDARY_MASK 0x0000000eU
|
||||
#define EGRPCIEBOUNDARY_SHIFT 1
|
||||
#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
|
||||
|
|
|
@ -299,6 +299,14 @@ struct sge {
|
|||
u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */
|
||||
u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */
|
||||
|
||||
/* Decoded Adapter Parameters.
|
||||
*/
|
||||
u32 fl_pg_order; /* large page allocation size */
|
||||
u32 stat_len; /* length of status page at ring end */
|
||||
u32 pktshift; /* padding between CPL & packet data */
|
||||
u32 fl_align; /* response queue message alignment */
|
||||
u32 fl_starve_thres; /* Free List starvation threshold */
|
||||
|
||||
/*
|
||||
* Reverse maps from Absolute Queue IDs to associated queue pointers.
|
||||
* The absolute Queue IDs are in a compact range which start at a
|
||||
|
|
|
@ -50,14 +50,6 @@
|
|||
#include "../cxgb4/t4fw_api.h"
|
||||
#include "../cxgb4/t4_msg.h"
|
||||
|
||||
/*
|
||||
* Decoded Adapter Parameters.
|
||||
*/
|
||||
static u32 FL_PG_ORDER; /* large page allocation size */
|
||||
static u32 STAT_LEN; /* length of status page at ring end */
|
||||
static u32 PKTSHIFT; /* padding between CPL and packet data */
|
||||
static u32 FL_ALIGN; /* response queue message alignment */
|
||||
|
||||
/*
|
||||
* Constants ...
|
||||
*/
|
||||
|
@ -101,12 +93,6 @@ enum {
|
|||
TX_QCHECK_PERIOD = (HZ / 2),
|
||||
MAX_TIMER_TX_RECLAIM = 100,
|
||||
|
||||
/*
|
||||
* An FL with <= FL_STARVE_THRES buffers is starving and a periodic
|
||||
* timer will attempt to refill it.
|
||||
*/
|
||||
FL_STARVE_THRES = 4,
|
||||
|
||||
/*
|
||||
* Suspend an Ethernet TX queue with fewer available descriptors than
|
||||
* this. We always want to have room for a maximum sized packet:
|
||||
|
@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
|
|||
|
||||
/**
|
||||
* fl_starving - return whether a Free List is starving.
|
||||
* @adapter: pointer to the adapter
|
||||
* @fl: the Free List
|
||||
*
|
||||
* Tests specified Free List to see whether the number of buffers
|
||||
* available to the hardware has fallen below our "starvation"
|
||||
* threshold.
|
||||
*/
|
||||
static inline bool fl_starving(const struct sge_fl *fl)
|
||||
static inline bool fl_starving(const struct adapter *adapter,
|
||||
const struct sge_fl *fl)
|
||||
{
|
||||
return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
|
||||
const struct sge *s = &adapter->sge;
|
||||
|
||||
return fl->avail - fl->pend_cred <= s->fl_starve_thres;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter,
|
|||
|
||||
/**
|
||||
* get_buf_size - return the size of an RX Free List buffer.
|
||||
* @adapter: pointer to the associated adapter
|
||||
* @sdesc: pointer to the software buffer descriptor
|
||||
*/
|
||||
static inline int get_buf_size(const struct rx_sw_desc *sdesc)
|
||||
static inline int get_buf_size(const struct adapter *adapter,
|
||||
const struct rx_sw_desc *sdesc)
|
||||
{
|
||||
return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
|
||||
? (PAGE_SIZE << FL_PG_ORDER)
|
||||
: PAGE_SIZE;
|
||||
const struct sge *s = &adapter->sge;
|
||||
|
||||
return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
|
||||
? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
|
|||
|
||||
if (is_buf_mapped(sdesc))
|
||||
dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
|
||||
get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
|
||||
get_buf_size(adapter, sdesc),
|
||||
PCI_DMA_FROMDEVICE);
|
||||
put_page(sdesc->page);
|
||||
sdesc->page = NULL;
|
||||
if (++fl->cidx == fl->size)
|
||||
|
@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
|
|||
|
||||
if (is_buf_mapped(sdesc))
|
||||
dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
|
||||
get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
|
||||
get_buf_size(adapter, sdesc),
|
||||
PCI_DMA_FROMDEVICE);
|
||||
sdesc->page = NULL;
|
||||
if (++fl->cidx == fl->size)
|
||||
fl->cidx = 0;
|
||||
|
@ -589,6 +584,7 @@ static inline void poison_buf(struct page *page, size_t sz)
|
|||
static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
|
||||
int n, gfp_t gfp)
|
||||
{
|
||||
struct sge *s = &adapter->sge;
|
||||
struct page *page;
|
||||
dma_addr_t dma_addr;
|
||||
unsigned int cred = fl->avail;
|
||||
|
@ -608,12 +604,12 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
|
|||
* If we don't support large pages, drop directly into the small page
|
||||
* allocation code.
|
||||
*/
|
||||
if (FL_PG_ORDER == 0)
|
||||
if (s->fl_pg_order == 0)
|
||||
goto alloc_small_pages;
|
||||
|
||||
while (n) {
|
||||
page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
|
||||
FL_PG_ORDER);
|
||||
s->fl_pg_order);
|
||||
if (unlikely(!page)) {
|
||||
/*
|
||||
* We've failed in our attempt to allocate a "large
|
||||
|
@ -623,10 +619,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
|
|||
fl->large_alloc_failed++;
|
||||
break;
|
||||
}
|
||||
poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
|
||||
poison_buf(page, PAGE_SIZE << s->fl_pg_order);
|
||||
|
||||
dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
|
||||
PAGE_SIZE << FL_PG_ORDER,
|
||||
PAGE_SIZE << s->fl_pg_order,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
|
||||
/*
|
||||
|
@ -637,7 +633,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
|
|||
* because DMA mapping resources are typically
|
||||
* critical resources once they become scarce.
|
||||
*/
|
||||
__free_pages(page, FL_PG_ORDER);
|
||||
__free_pages(page, s->fl_pg_order);
|
||||
goto out;
|
||||
}
|
||||
dma_addr |= RX_LARGE_BUF;
|
||||
|
@ -693,7 +689,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
|
|||
fl->pend_cred += cred;
|
||||
ring_fl_db(adapter, fl);
|
||||
|
||||
if (unlikely(fl_starving(fl))) {
|
||||
if (unlikely(fl_starving(adapter, fl))) {
|
||||
smp_wmb();
|
||||
set_bit(fl->cntxt_id, adapter->sge.starving_fl);
|
||||
}
|
||||
|
@ -1468,6 +1464,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl)
|
|||
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
|
||||
const struct cpl_rx_pkt *pkt)
|
||||
{
|
||||
struct adapter *adapter = rxq->rspq.adapter;
|
||||
struct sge *s = &adapter->sge;
|
||||
int ret;
|
||||
struct sk_buff *skb;
|
||||
|
||||
|
@ -1478,8 +1476,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
|
|||
return;
|
||||
}
|
||||
|
||||
copy_frags(skb, gl, PKTSHIFT);
|
||||
skb->len = gl->tot_len - PKTSHIFT;
|
||||
copy_frags(skb, gl, s->pktshift);
|
||||
skb->len = gl->tot_len - s->pktshift;
|
||||
skb->data_len = skb->len;
|
||||
skb->truesize += skb->data_len;
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
|
@ -1516,6 +1514,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
|
|||
bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
|
||||
(rspq->netdev->features & NETIF_F_RXCSUM);
|
||||
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
|
||||
struct adapter *adapter = rspq->adapter;
|
||||
struct sge *s = &adapter->sge;
|
||||
|
||||
/*
|
||||
* If this is a good TCP packet and we have Generic Receive Offload
|
||||
|
@ -1537,7 +1537,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
|
|||
rxq->stats.rx_drops++;
|
||||
return 0;
|
||||
}
|
||||
__skb_pull(skb, PKTSHIFT);
|
||||
__skb_pull(skb, s->pktshift);
|
||||
skb->protocol = eth_type_trans(skb, rspq->netdev);
|
||||
skb_record_rx_queue(skb, rspq->idx);
|
||||
rxq->stats.pkts++;
|
||||
|
@ -1648,6 +1648,8 @@ static inline void rspq_next(struct sge_rspq *rspq)
|
|||
static int process_responses(struct sge_rspq *rspq, int budget)
|
||||
{
|
||||
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
|
||||
struct adapter *adapter = rspq->adapter;
|
||||
struct sge *s = &adapter->sge;
|
||||
int budget_left = budget;
|
||||
|
||||
while (likely(budget_left)) {
|
||||
|
@ -1697,7 +1699,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
|
|||
BUG_ON(frag >= MAX_SKB_FRAGS);
|
||||
BUG_ON(rxq->fl.avail == 0);
|
||||
sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
|
||||
bufsz = get_buf_size(sdesc);
|
||||
bufsz = get_buf_size(adapter, sdesc);
|
||||
fp->page = sdesc->page;
|
||||
fp->offset = rspq->offset;
|
||||
fp->size = min(bufsz, len);
|
||||
|
@ -1726,7 +1728,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
|
|||
*/
|
||||
ret = rspq->handler(rspq, rspq->cur_desc, &gl);
|
||||
if (likely(ret == 0))
|
||||
rspq->offset += ALIGN(fp->size, FL_ALIGN);
|
||||
rspq->offset += ALIGN(fp->size, s->fl_align);
|
||||
else
|
||||
restore_rx_bufs(&gl, &rxq->fl, frag);
|
||||
} else if (likely(rsp_type == RSP_TYPE_CPL)) {
|
||||
|
@ -1963,7 +1965,7 @@ static void sge_rx_timer_cb(unsigned long data)
|
|||
* schedule napi but the FL is no longer starving.
|
||||
* No biggie.
|
||||
*/
|
||||
if (fl_starving(fl)) {
|
||||
if (fl_starving(adapter, fl)) {
|
||||
struct sge_eth_rxq *rxq;
|
||||
|
||||
rxq = container_of(fl, struct sge_eth_rxq, fl);
|
||||
|
@ -2047,6 +2049,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
|
|||
int intr_dest,
|
||||
struct sge_fl *fl, rspq_handler_t hnd)
|
||||
{
|
||||
struct sge *s = &adapter->sge;
|
||||
struct port_info *pi = netdev_priv(dev);
|
||||
struct fw_iq_cmd cmd, rpl;
|
||||
int ret, iqandst, flsz = 0;
|
||||
|
@ -2117,7 +2120,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
|
|||
fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
|
||||
fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
|
||||
sizeof(__be64), sizeof(struct rx_sw_desc),
|
||||
&fl->addr, &fl->sdesc, STAT_LEN);
|
||||
&fl->addr, &fl->sdesc, s->stat_len);
|
||||
if (!fl->desc) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
|
@ -2129,7 +2132,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
|
|||
* free list ring) in Egress Queue Units.
|
||||
*/
|
||||
flsz = (fl->size / FL_PER_EQ_UNIT +
|
||||
STAT_LEN / EQ_UNIT);
|
||||
s->stat_len / EQ_UNIT);
|
||||
|
||||
/*
|
||||
* Fill in all the relevant firmware Ingress Queue Command
|
||||
|
@ -2217,6 +2220,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
|
|||
struct net_device *dev, struct netdev_queue *devq,
|
||||
unsigned int iqid)
|
||||
{
|
||||
struct sge *s = &adapter->sge;
|
||||
int ret, nentries;
|
||||
struct fw_eq_eth_cmd cmd, rpl;
|
||||
struct port_info *pi = netdev_priv(dev);
|
||||
|
@ -2225,7 +2229,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
|
|||
* Calculate the size of the hardware TX Queue (including the Status
|
||||
* Page on the end of the TX Queue) in units of TX Descriptors.
|
||||
*/
|
||||
nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
|
||||
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
|
||||
|
||||
/*
|
||||
* Allocate the hardware ring for the TX ring (with space for its
|
||||
|
@ -2234,7 +2238,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
|
|||
txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
|
||||
sizeof(struct tx_desc),
|
||||
sizeof(struct tx_sw_desc),
|
||||
&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
|
||||
&txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
|
||||
if (!txq->q.desc)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -2307,8 +2311,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
|
|||
*/
|
||||
static void free_txq(struct adapter *adapter, struct sge_txq *tq)
|
||||
{
|
||||
struct sge *s = &adapter->sge;
|
||||
|
||||
dma_free_coherent(adapter->pdev_dev,
|
||||
tq->size * sizeof(*tq->desc) + STAT_LEN,
|
||||
tq->size * sizeof(*tq->desc) + s->stat_len,
|
||||
tq->desc, tq->phys_addr);
|
||||
tq->cntxt_id = 0;
|
||||
tq->sdesc = NULL;
|
||||
|
@ -2322,6 +2328,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq)
|
|||
static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
|
||||
struct sge_fl *fl)
|
||||
{
|
||||
struct sge *s = &adapter->sge;
|
||||
unsigned int flid = fl ? fl->cntxt_id : 0xffff;
|
||||
|
||||
t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
|
||||
|
@ -2337,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
|
|||
if (fl) {
|
||||
free_rx_bufs(adapter, fl, fl->avail);
|
||||
dma_free_coherent(adapter->pdev_dev,
|
||||
fl->size * sizeof(*fl->desc) + STAT_LEN,
|
||||
fl->size * sizeof(*fl->desc) + s->stat_len,
|
||||
fl->desc, fl->addr);
|
||||
kfree(fl->sdesc);
|
||||
fl->sdesc = NULL;
|
||||
|
@ -2423,6 +2430,7 @@ int t4vf_sge_init(struct adapter *adapter)
|
|||
u32 fl0 = sge_params->sge_fl_buffer_size[0];
|
||||
u32 fl1 = sge_params->sge_fl_buffer_size[1];
|
||||
struct sge *s = &adapter->sge;
|
||||
unsigned int ingpadboundary, ingpackboundary;
|
||||
|
||||
/*
|
||||
* Start by vetting the basic SGE parameters which have been set up by
|
||||
|
@ -2443,12 +2451,48 @@ int t4vf_sge_init(struct adapter *adapter)
|
|||
* Now translate the adapter parameters into our internal forms.
|
||||
*/
|
||||
if (fl1)
|
||||
FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
|
||||
STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
|
||||
s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
|
||||
s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
|
||||
? 128 : 64);
|
||||
PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
|
||||
FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
|
||||
SGE_INGPADBOUNDARY_SHIFT);
|
||||
s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
|
||||
|
||||
/* T4 uses a single control field to specify both the PCIe Padding and
|
||||
* Packing Boundary. T5 introduced the ability to specify these
|
||||
* separately. The actual Ingress Packet Data alignment boundary
|
||||
* within Packed Buffer Mode is the maximum of these two
|
||||
* specifications. (Note that it makes no real practical sense to
|
||||
* have the Padding Boundary be larger than the Packing Boundary but you
|
||||
* could set the chip up that way and, in fact, legacy T4 code would
|
||||
* end up doing this because it would initialize the Padding Boundary and
|
||||
* leave the Packing Boundary initialized to 0 (16 bytes).)
|
||||
*/
|
||||
ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
|
||||
X_INGPADBOUNDARY_SHIFT);
|
||||
if (is_t4(adapter->params.chip)) {
|
||||
s->fl_align = ingpadboundary;
|
||||
} else {
|
||||
/* T5 has a different interpretation of one of the PCIe Packing
|
||||
* Boundary values.
|
||||
*/
|
||||
ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
|
||||
if (ingpackboundary == INGPACKBOUNDARY_16B_X)
|
||||
ingpackboundary = 16;
|
||||
else
|
||||
ingpackboundary = 1 << (ingpackboundary +
|
||||
INGPACKBOUNDARY_SHIFT_X);
|
||||
|
||||
s->fl_align = max(ingpadboundary, ingpackboundary);
|
||||
}
|
||||
|
||||
/* A FL with <= fl_starve_thres buffers is starving and a periodic
|
||||
* timer will attempt to refill it. This needs to be larger than the
|
||||
* SGE's Egress Congestion Threshold. If it isn't, then we can get
|
||||
* stuck waiting for new packets while the SGE is waiting for us to
|
||||
* give it more Free List entries. (Note that the SGE's Egress
|
||||
* Congestion Threshold is in units of 2 Free List pointers.)
|
||||
*/
|
||||
s->fl_starve_thres
|
||||
= EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
|
||||
|
||||
/*
|
||||
* Set up tasklet timers.
|
||||
|
|
|
@ -134,11 +134,13 @@ struct dev_params {
|
|||
*/
|
||||
struct sge_params {
|
||||
u32 sge_control; /* padding, boundaries, lengths, etc. */
|
||||
u32 sge_control2; /* T5: more of the same */
|
||||
u32 sge_host_page_size; /* RDMA page sizes */
|
||||
u32 sge_queues_per_page; /* RDMA queues/page */
|
||||
u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */
|
||||
u32 sge_fl_buffer_size[16]; /* free list buffer sizes */
|
||||
u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */
|
||||
u32 sge_congestion_control; /* congestion thresholds, etc. */
|
||||
u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */
|
||||
u32 sge_timer_value_2_and_3;
|
||||
u32 sge_timer_value_4_and_5;
|
||||
|
|
|
@ -468,12 +468,38 @@ int t4vf_get_sge_params(struct adapter *adapter)
|
|||
sge_params->sge_timer_value_2_and_3 = vals[5];
|
||||
sge_params->sge_timer_value_4_and_5 = vals[6];
|
||||
|
||||
/* T4 uses a single control field to specify both the PCIe Padding and
|
||||
* Packing Boundary. T5 introduced the ability to specify these
|
||||
* separately with the Padding Boundary in SGE_CONTROL and and Packing
|
||||
* Boundary in SGE_CONTROL2. So for T5 and later we need to grab
|
||||
* SGE_CONTROL in order to determine how ingress packet data will be
|
||||
* laid out in Packed Buffer Mode. Unfortunately, older versions of
|
||||
* the firmware won't let us retrieve SGE_CONTROL2 so if we get a
|
||||
* failure grabbing it we throw an error since we can't figure out the
|
||||
* right value.
|
||||
*/
|
||||
if (!is_t4(adapter->params.chip)) {
|
||||
params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
|
||||
FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A));
|
||||
v = t4vf_query_params(adapter, 1, params, vals);
|
||||
if (v != FW_SUCCESS) {
|
||||
dev_err(adapter->pdev_dev,
|
||||
"Unable to get SGE Control2; "
|
||||
"probably old firmware.\n");
|
||||
return v;
|
||||
}
|
||||
sge_params->sge_control2 = vals[0];
|
||||
}
|
||||
|
||||
params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
|
||||
FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
|
||||
v = t4vf_query_params(adapter, 1, params, vals);
|
||||
params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
|
||||
FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL));
|
||||
v = t4vf_query_params(adapter, 2, params, vals);
|
||||
if (v)
|
||||
return v;
|
||||
sge_params->sge_ingress_rx_threshold = vals[0];
|
||||
sge_params->sge_congestion_control = vals[1];
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -940,18 +940,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
|
|||
struct vnic_rq_buf *buf = rq->to_use;
|
||||
|
||||
if (buf->os_buf) {
|
||||
buf = buf->next;
|
||||
rq->to_use = buf;
|
||||
rq->ring.desc_avail--;
|
||||
if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
|
||||
/* Adding write memory barrier prevents compiler and/or
|
||||
* CPU reordering, thus avoiding descriptor posting
|
||||
* before descriptor is initialized. Otherwise, hardware
|
||||
* can read stale descriptor fields.
|
||||
*/
|
||||
wmb();
|
||||
iowrite32(buf->index, &rq->ctrl->posted_index);
|
||||
}
|
||||
enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
|
||||
buf->len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1037,7 +1027,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
|
|||
enic->rq_truncated_pkts++;
|
||||
}
|
||||
|
||||
pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
dev_kfree_skb_any(skb);
|
||||
buf->os_buf = NULL;
|
||||
|
||||
return;
|
||||
}
|
||||
|
@ -1088,7 +1081,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
|
|||
/* Buffer overflow
|
||||
*/
|
||||
|
||||
pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
dev_kfree_skb_any(skb);
|
||||
buf->os_buf = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -298,6 +298,16 @@ static void *swap_buffer(void *bufaddr, int len)
|
|||
return bufaddr;
|
||||
}
|
||||
|
||||
static void swap_buffer2(void *dst_buf, void *src_buf, int len)
|
||||
{
|
||||
int i;
|
||||
unsigned int *src = src_buf;
|
||||
unsigned int *dst = dst_buf;
|
||||
|
||||
for (i = 0; i < len; i += 4, src++, dst++)
|
||||
*dst = swab32p(src);
|
||||
}
|
||||
|
||||
static void fec_dump(struct net_device *ndev)
|
||||
{
|
||||
struct fec_enet_private *fep = netdev_priv(ndev);
|
||||
|
@ -1307,7 +1317,7 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
|
|||
}
|
||||
|
||||
static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
|
||||
struct bufdesc *bdp, u32 length)
|
||||
struct bufdesc *bdp, u32 length, bool swap)
|
||||
{
|
||||
struct fec_enet_private *fep = netdev_priv(ndev);
|
||||
struct sk_buff *new_skb;
|
||||
|
@ -1322,7 +1332,10 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
|
|||
dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
|
||||
FEC_ENET_RX_FRSIZE - fep->rx_align,
|
||||
DMA_FROM_DEVICE);
|
||||
if (!swap)
|
||||
memcpy(new_skb->data, (*skb)->data, length);
|
||||
else
|
||||
swap_buffer2(new_skb->data, (*skb)->data, length);
|
||||
*skb = new_skb;
|
||||
|
||||
return true;
|
||||
|
@ -1352,6 +1365,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
|
|||
u16 vlan_tag;
|
||||
int index = 0;
|
||||
bool is_copybreak;
|
||||
bool need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME;
|
||||
|
||||
#ifdef CONFIG_M532x
|
||||
flush_cache_all();
|
||||
|
@ -1415,7 +1429,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
|
|||
* include that when passing upstream as it messes up
|
||||
* bridging applications.
|
||||
*/
|
||||
is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4);
|
||||
is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
|
||||
need_swap);
|
||||
if (!is_copybreak) {
|
||||
skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
|
||||
if (unlikely(!skb_new)) {
|
||||
|
@ -1430,7 +1445,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
|
|||
prefetch(skb->data - NET_IP_ALIGN);
|
||||
skb_put(skb, pkt_len - 4);
|
||||
data = skb->data;
|
||||
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
|
||||
if (!is_copybreak && need_swap)
|
||||
swap_buffer(data, pkt_len);
|
||||
|
||||
/* Extract the enhanced buffer descriptor */
|
||||
|
@ -3343,11 +3358,10 @@ static int __maybe_unused fec_suspend(struct device *dev)
|
|||
netif_device_detach(ndev);
|
||||
netif_tx_unlock_bh(ndev);
|
||||
fec_stop(ndev);
|
||||
}
|
||||
rtnl_unlock();
|
||||
|
||||
fec_enet_clk_enable(ndev, false);
|
||||
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
|
||||
}
|
||||
rtnl_unlock();
|
||||
|
||||
if (fep->reg_phy)
|
||||
regulator_disable(fep->reg_phy);
|
||||
|
@ -3367,13 +3381,14 @@ static int __maybe_unused fec_resume(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
pinctrl_pm_select_default_state(&fep->pdev->dev);
|
||||
ret = fec_enet_clk_enable(ndev, true);
|
||||
if (ret)
|
||||
goto failed_clk;
|
||||
|
||||
rtnl_lock();
|
||||
if (netif_running(ndev)) {
|
||||
pinctrl_pm_select_default_state(&fep->pdev->dev);
|
||||
ret = fec_enet_clk_enable(ndev, true);
|
||||
if (ret) {
|
||||
rtnl_unlock();
|
||||
goto failed_clk;
|
||||
}
|
||||
fec_restart(ndev);
|
||||
netif_tx_lock_bh(ndev);
|
||||
netif_device_attach(ndev);
|
||||
|
|
|
@@ -635,7 +635,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 **/
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
        s32 status;
        u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
        bool autoneg = false;
        ixgbe_link_speed speed;

@@ -700,8 +699,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)

        hw->phy.ops.write_reg(hw, MDIO_CTRL1,
                              MDIO_MMD_AN, autoneg_reg);

        return status;
        return 0;
}

/**
@ -1047,7 +1047,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
|
|||
int tx_index;
|
||||
struct tx_desc *desc;
|
||||
u32 cmd_sts;
|
||||
struct sk_buff *skb;
|
||||
|
||||
tx_index = txq->tx_used_desc;
|
||||
desc = &txq->tx_desc_area[tx_index];
|
||||
|
@ -1066,19 +1065,22 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
|
|||
reclaimed++;
|
||||
txq->tx_desc_count--;
|
||||
|
||||
skb = NULL;
|
||||
if (cmd_sts & TX_LAST_DESC)
|
||||
skb = __skb_dequeue(&txq->tx_skb);
|
||||
if (!IS_TSO_HEADER(txq, desc->buf_ptr))
|
||||
dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
|
||||
desc->byte_cnt, DMA_TO_DEVICE);
|
||||
|
||||
if (cmd_sts & TX_ENABLE_INTERRUPT) {
|
||||
struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
|
||||
|
||||
if (!WARN_ON(!skb))
|
||||
dev_kfree_skb(skb);
|
||||
}
|
||||
|
||||
if (cmd_sts & ERROR_SUMMARY) {
|
||||
netdev_info(mp->dev, "tx error\n");
|
||||
mp->dev->stats.tx_errors++;
|
||||
}
|
||||
|
||||
if (!IS_TSO_HEADER(txq, desc->buf_ptr))
|
||||
dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
|
||||
desc->byte_cnt, DMA_TO_DEVICE);
|
||||
dev_kfree_skb(skb);
|
||||
}
|
||||
|
||||
__netif_tx_unlock_bh(nq);
|
||||
|
|
|
@ -1692,6 +1692,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
|
|||
{
|
||||
struct mvpp2_prs_entry *pe;
|
||||
int tid_aux, tid;
|
||||
int ret = 0;
|
||||
|
||||
pe = mvpp2_prs_vlan_find(priv, tpid, ai);
|
||||
|
||||
|
@ -1723,8 +1724,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
|
|||
break;
|
||||
}
|
||||
|
||||
if (tid <= tid_aux)
|
||||
return -EINVAL;
|
||||
if (tid <= tid_aux) {
|
||||
ret = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
|
||||
memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
|
||||
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
|
||||
|
@ -1756,9 +1759,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
|
|||
|
||||
mvpp2_prs_hw_write(priv, pe);
|
||||
|
||||
error:
|
||||
kfree(pe);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Get first free double vlan ai number */
|
||||
|
@ -1821,7 +1825,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
|
|||
unsigned int port_map)
|
||||
{
|
||||
struct mvpp2_prs_entry *pe;
|
||||
int tid_aux, tid, ai;
|
||||
int tid_aux, tid, ai, ret = 0;
|
||||
|
||||
pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
|
||||
|
||||
|
@ -1838,8 +1842,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
|
|||
|
||||
/* Set ai value for new double vlan entry */
|
||||
ai = mvpp2_prs_double_vlan_ai_free_get(priv);
|
||||
if (ai < 0)
|
||||
return ai;
|
||||
if (ai < 0) {
|
||||
ret = ai;
|
||||
goto error;
|
||||
}
|
||||
|
||||
/* Get first single/triple vlan tid */
|
||||
for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
|
||||
|
@ -1859,8 +1865,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
|
|||
break;
|
||||
}
|
||||
|
||||
if (tid >= tid_aux)
|
||||
return -ERANGE;
|
||||
if (tid >= tid_aux) {
|
||||
ret = -ERANGE;
|
||||
goto error;
|
||||
}
|
||||
|
||||
memset(pe, 0, sizeof(struct mvpp2_prs_entry));
|
||||
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
|
||||
|
@ -1887,8 +1895,9 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
|
|||
mvpp2_prs_tcam_port_map_set(pe, port_map);
|
||||
mvpp2_prs_hw_write(priv, pe);
|
||||
|
||||
error:
|
||||
kfree(pe);
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* IPv4 header parsing for fragmentation and L4 offset */
|
||||
|
|
|
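The mvpp2 hunks above plug a memory leak by turning early returns into jumps to a common exit that frees the temporary parser entry. A stand-alone sketch of that single-exit cleanup pattern in plain C (hypothetical names, not the driver code):

#include <stdlib.h>

struct prs_entry {
        int index;
};

/* Allocate a scratch entry, validate, program it, and free the scratch
 * copy on every path.  Returning early after the allocation (the old
 * behaviour) is what leaked the entry. */
static int vlan_entry_add(int tid, int tid_aux)
{
        struct prs_entry *pe;
        int ret = 0;

        pe = malloc(sizeof(*pe));
        if (!pe)
                return -1;              /* nothing to clean up yet */

        if (tid <= tid_aux) {
                ret = -1;               /* early return here would leak pe */
                goto error;
        }

        pe->index = tid;
        /* ... write the entry to hardware here ... */

error:
        free(pe);                       /* reached on success and on failure */
        return ret;
}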
@ -2281,8 +2281,16 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
|
|||
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
|
||||
VXLAN_STEER_BY_OUTER_MAC, 1);
|
||||
out:
|
||||
if (ret)
|
||||
if (ret) {
|
||||
en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
|
||||
return;
|
||||
}
|
||||
|
||||
/* set offloads */
|
||||
priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
|
||||
NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
|
||||
priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
|
||||
priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
|
||||
}
|
||||
|
||||
static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
|
||||
|
@ -2290,6 +2298,11 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
|
|||
int ret;
|
||||
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
|
||||
vxlan_del_task);
|
||||
/* unset offloads */
|
||||
priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
|
||||
NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
|
||||
priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
|
||||
priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
|
||||
|
||||
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
|
||||
VXLAN_STEER_BY_OUTER_MAC, 0);
|
||||
|
@ -2568,13 +2581,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
|
|||
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
|
||||
dev->priv_flags |= IFF_UNICAST_FLT;
|
||||
|
||||
if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
|
||||
dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
|
||||
NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
|
||||
dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
|
||||
dev->features |= NETIF_F_GSO_UDP_TUNNEL;
|
||||
}
|
||||
|
||||
mdev->pndev[port] = dev;
|
||||
|
||||
netif_carrier_off(dev);
|
||||
|
|
|
@ -374,15 +374,14 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
|
|||
snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
|
||||
name, pci_name(dev->pdev));
|
||||
eq->eqn = out.eq_number;
|
||||
eq->irqn = vecidx;
|
||||
eq->dev = dev;
|
||||
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
|
||||
err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
|
||||
eq->name, eq);
|
||||
if (err)
|
||||
goto err_eq;
|
||||
|
||||
eq->irqn = vecidx;
|
||||
eq->dev = dev;
|
||||
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
|
||||
|
||||
err = mlx5_debug_eq_add(dev, eq);
|
||||
if (err)
|
||||
goto err_irq;
|
||||
|
|
|
@ -864,14 +864,14 @@ static int init_one(struct pci_dev *pdev,
|
|||
dev->profile = &profile[prof_sel];
|
||||
dev->event = mlx5_core_event;
|
||||
|
||||
INIT_LIST_HEAD(&priv->ctx_list);
|
||||
spin_lock_init(&priv->ctx_lock);
|
||||
err = mlx5_dev_init(dev, pdev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&priv->ctx_list);
|
||||
spin_lock_init(&priv->ctx_lock);
|
||||
err = mlx5_register_device(dev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
|
||||
|
|
|
@@ -2762,7 +2762,8 @@ netxen_fw_poll_work(struct work_struct *work)
        if (test_bit(__NX_RESETTING, &adapter->state))
                goto reschedule;

        if (test_bit(__NX_DEV_UP, &adapter->state)) {
        if (test_bit(__NX_DEV_UP, &adapter->state) &&
            !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) {
                if (!adapter->has_link_events) {

                        netxen_nic_handle_phy_intr(adapter);
@@ -5,7 +5,6 @@
config NET_VENDOR_QUALCOMM
        bool "Qualcomm devices"
        default y
        depends on SPI_MASTER && OF_GPIO
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y
          and read the Ethernet-HOWTO, available from

@@ -20,7 +19,7 @@ if NET_VENDOR_QUALCOMM

config QCA7000
        tristate "Qualcomm Atheros QCA7000 support"
        depends on SPI_MASTER && OF_GPIO
        depends on SPI_MASTER && OF
        ---help---
          This SPI protocol driver supports the Qualcomm Atheros QCA7000.

@@ -180,7 +180,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
                        EFX_MAX_CHANNELS,
                        resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
                        (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
        BUG_ON(efx->max_channels == 0);
        if (WARN_ON(efx->max_channels == 0))
                return -EIO;

        nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
        if (!nic_data)
@ -2243,9 +2243,10 @@ static int smc_drv_probe(struct platform_device *pdev)
|
|||
const struct of_device_id *match = NULL;
|
||||
struct smc_local *lp;
|
||||
struct net_device *ndev;
|
||||
struct resource *res, *ires;
|
||||
struct resource *res;
|
||||
unsigned int __iomem *addr;
|
||||
unsigned long irq_flags = SMC_IRQ_FLAGS;
|
||||
unsigned long irq_resflags;
|
||||
int ret;
|
||||
|
||||
ndev = alloc_etherdev(sizeof(struct smc_local));
|
||||
|
@ -2337,16 +2338,19 @@ static int smc_drv_probe(struct platform_device *pdev)
|
|||
goto out_free_netdev;
|
||||
}
|
||||
|
||||
ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
|
||||
if (!ires) {
|
||||
ndev->irq = platform_get_irq(pdev, 0);
|
||||
if (ndev->irq <= 0) {
|
||||
ret = -ENODEV;
|
||||
goto out_release_io;
|
||||
}
|
||||
|
||||
ndev->irq = ires->start;
|
||||
|
||||
if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
|
||||
irq_flags = ires->flags & IRQF_TRIGGER_MASK;
|
||||
/*
|
||||
* If this platform does not specify any special irqflags, or if
|
||||
* the resource supplies a trigger, override the irqflags with
|
||||
* the trigger flags from the resource.
|
||||
*/
|
||||
irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
|
||||
if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
|
||||
irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
|
||||
|
||||
ret = smc_request_attrib(pdev, ndev);
|
||||
if (ret)
|
||||
|
|
|
@ -1342,6 +1342,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
|
|||
spin_unlock(&pdata->mac_lock);
|
||||
}
|
||||
|
||||
static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
if (!pdata->phy_dev)
|
||||
return rc;
|
||||
|
||||
/* If the internal PHY is in General Power-Down mode, all, except the
|
||||
* management interface, is powered-down and stays in that condition as
|
||||
* long as Phy register bit 0.11 is HIGH.
|
||||
*
|
||||
* In that case, clear the bit 0.11, so the PHY powers up and we can
|
||||
* access to the phy registers.
|
||||
*/
|
||||
rc = phy_read(pdata->phy_dev, MII_BMCR);
|
||||
if (rc < 0) {
|
||||
SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* If the PHY general power-down bit is not set is not necessary to
|
||||
* disable the general power down-mode.
|
||||
*/
|
||||
if (rc & BMCR_PDOWN) {
|
||||
rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
|
||||
if (rc < 0) {
|
||||
SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
|
||||
return rc;
|
||||
}
|
||||
|
||||
usleep_range(1000, 1500);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
|
||||
{
|
||||
int rc = 0;
|
||||
|
@ -1356,12 +1392,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
|
|||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* If energy is detected the PHY is already awake so is not necessary
|
||||
* to disable the energy detect power-down mode.
|
||||
*/
|
||||
if ((rc & MII_LAN83C185_EDPWRDOWN) &&
|
||||
!(rc & MII_LAN83C185_ENERGYON)) {
|
||||
/* Only disable if energy detect mode is already enabled */
|
||||
if (rc & MII_LAN83C185_EDPWRDOWN) {
|
||||
/* Disable energy detect mode for this SMSC Transceivers */
|
||||
rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
|
||||
rc & (~MII_LAN83C185_EDPWRDOWN));
|
||||
|
@ -1370,8 +1402,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
|
|||
SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
|
||||
return rc;
|
||||
}
|
||||
|
||||
mdelay(1);
|
||||
/* Allow PHY to wakeup */
|
||||
mdelay(2);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -1393,7 +1425,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
|
|||
|
||||
/* Only enable if energy detect mode is already disabled */
|
||||
if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
|
||||
mdelay(100);
|
||||
/* Enable energy detect mode for this SMSC Transceivers */
|
||||
rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
|
||||
rc | MII_LAN83C185_EDPWRDOWN);
|
||||
|
@ -1402,8 +1433,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
|
|||
SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
|
||||
return rc;
|
||||
}
|
||||
|
||||
mdelay(1);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -1414,6 +1443,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
|
|||
unsigned int temp;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Make sure to power-up the PHY chip before doing a reset, otherwise
|
||||
* the reset fails.
|
||||
*/
|
||||
ret = smsc911x_phy_general_power_up(pdata);
|
||||
if (ret) {
|
||||
SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that
|
||||
* are initialized in a Energy Detect Power-Down mode that prevents
|
||||
|
|
|
@ -276,6 +276,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
|
|||
bool stmmac_eee_init(struct stmmac_priv *priv)
|
||||
{
|
||||
char *phy_bus_name = priv->plat->phy_bus_name;
|
||||
unsigned long flags;
|
||||
bool ret = false;
|
||||
|
||||
/* Using PCS we cannot dial with the phy registers at this stage
|
||||
|
@ -300,6 +301,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
|
|||
* changed).
|
||||
* In that case the driver disable own timers.
|
||||
*/
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
if (priv->eee_active) {
|
||||
pr_debug("stmmac: disable EEE\n");
|
||||
del_timer_sync(&priv->eee_ctrl_timer);
|
||||
|
@ -307,9 +309,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
|
|||
tx_lpi_timer);
|
||||
}
|
||||
priv->eee_active = 0;
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
goto out;
|
||||
}
|
||||
/* Activate the EEE and start timers */
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
if (!priv->eee_active) {
|
||||
priv->eee_active = 1;
|
||||
init_timer(&priv->eee_ctrl_timer);
|
||||
|
@ -325,9 +329,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
|
|||
/* Set HW EEE according to the speed */
|
||||
priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
|
||||
|
||||
pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
|
||||
|
||||
ret = true;
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
|
||||
}
|
||||
out:
|
||||
return ret;
|
||||
|
@ -760,12 +765,12 @@ static void stmmac_adjust_link(struct net_device *dev)
|
|||
if (new_state && netif_msg_link(priv))
|
||||
phy_print_status(phydev);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
/* At this stage, it could be needed to setup the EEE or adjust some
|
||||
* MAC related HW registers.
|
||||
*/
|
||||
priv->eee_enabled = stmmac_eee_init(priv);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -959,12 +964,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
|
|||
}
|
||||
|
||||
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
|
||||
int i)
|
||||
int i, gfp_t flags)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
|
||||
GFP_KERNEL);
|
||||
flags);
|
||||
if (!skb) {
|
||||
pr_err("%s: Rx init fails; skb is NULL\n", __func__);
|
||||
return -ENOMEM;
|
||||
|
@ -1006,7 +1011,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
|
|||
* and allocates the socket buffers. It suppors the chained and ring
|
||||
* modes.
|
||||
*/
|
||||
static int init_dma_desc_rings(struct net_device *dev)
|
||||
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
|
||||
{
|
||||
int i;
|
||||
struct stmmac_priv *priv = netdev_priv(dev);
|
||||
|
@ -1041,7 +1046,7 @@ static int init_dma_desc_rings(struct net_device *dev)
|
|||
else
|
||||
p = priv->dma_rx + i;
|
||||
|
||||
ret = stmmac_init_rx_buffers(priv, p, i);
|
||||
ret = stmmac_init_rx_buffers(priv, p, i, flags);
|
||||
if (ret)
|
||||
goto err_init_rx_buffers;
|
||||
|
||||
|
@ -1647,11 +1652,6 @@ static int stmmac_hw_setup(struct net_device *dev)
|
|||
struct stmmac_priv *priv = netdev_priv(dev);
|
||||
int ret;
|
||||
|
||||
ret = init_dma_desc_rings(dev);
|
||||
if (ret < 0) {
|
||||
pr_err("%s: DMA descriptors initialization failed\n", __func__);
|
||||
return ret;
|
||||
}
|
||||
/* DMA initialization and SW reset */
|
||||
ret = stmmac_init_dma_engine(priv);
|
||||
if (ret < 0) {
|
||||
|
@ -1705,10 +1705,6 @@ static int stmmac_hw_setup(struct net_device *dev)
|
|||
}
|
||||
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
|
||||
|
||||
priv->eee_enabled = stmmac_eee_init(priv);
|
||||
|
||||
stmmac_init_tx_coalesce(priv);
|
||||
|
||||
if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
|
||||
priv->rx_riwt = MAX_DMA_RIWT;
|
||||
priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
|
||||
|
@ -1761,12 +1757,20 @@ static int stmmac_open(struct net_device *dev)
|
|||
goto dma_desc_error;
|
||||
}
|
||||
|
||||
ret = init_dma_desc_rings(dev, GFP_KERNEL);
|
||||
if (ret < 0) {
|
||||
pr_err("%s: DMA descriptors initialization failed\n", __func__);
|
||||
goto init_error;
|
||||
}
|
||||
|
||||
ret = stmmac_hw_setup(dev);
|
||||
if (ret < 0) {
|
||||
pr_err("%s: Hw setup failed\n", __func__);
|
||||
goto init_error;
|
||||
}
|
||||
|
||||
stmmac_init_tx_coalesce(priv);
|
||||
|
||||
if (priv->phydev)
|
||||
phy_start(priv->phydev);
|
||||
|
||||
|
@ -1894,7 +1898,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
unsigned int nopaged_len = skb_headlen(skb);
|
||||
unsigned int enh_desc = priv->plat->enh_desc;
|
||||
|
||||
spin_lock(&priv->tx_lock);
|
||||
|
||||
if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
|
||||
spin_unlock(&priv->tx_lock);
|
||||
if (!netif_queue_stopped(dev)) {
|
||||
netif_stop_queue(dev);
|
||||
/* This is a hard error, log it. */
|
||||
|
@ -1903,8 +1910,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
spin_lock(&priv->tx_lock);
|
||||
|
||||
if (priv->tx_path_in_lpi_mode)
|
||||
stmmac_disable_eee_mode(priv);
|
||||
|
||||
|
@ -2025,6 +2030,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
return NETDEV_TX_OK;
|
||||
|
||||
dma_map_err:
|
||||
spin_unlock(&priv->tx_lock);
|
||||
dev_err(priv->device, "Tx dma map failed\n");
|
||||
dev_kfree_skb(skb);
|
||||
priv->dev->stats.tx_dropped++;
|
||||
|
@ -2281,9 +2287,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
|
|||
{
|
||||
struct stmmac_priv *priv = netdev_priv(dev);
|
||||
|
||||
spin_lock(&priv->lock);
|
||||
priv->hw->mac->set_filter(priv->hw, dev);
|
||||
spin_unlock(&priv->lock);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2950,7 +2954,7 @@ int stmmac_suspend(struct net_device *ndev)
|
|||
stmmac_set_mac(priv->ioaddr, false);
|
||||
pinctrl_pm_select_sleep_state(priv->device);
|
||||
/* Disable clock in case of PWM is off */
|
||||
clk_disable_unprepare(priv->stmmac_clk);
|
||||
clk_disable(priv->stmmac_clk);
|
||||
}
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
|
@ -2982,7 +2986,7 @@ int stmmac_resume(struct net_device *ndev)
|
|||
} else {
|
||||
pinctrl_pm_select_default_state(priv->device);
|
||||
/* enable the clk prevously disabled */
|
||||
clk_prepare_enable(priv->stmmac_clk);
|
||||
clk_enable(priv->stmmac_clk);
|
||||
/* reset the phy so that it's ready */
|
||||
if (priv->mii)
|
||||
stmmac_mdio_reset(priv->mii);
|
||||
|
@ -2990,7 +2994,9 @@ int stmmac_resume(struct net_device *ndev)
|
|||
|
||||
netif_device_attach(ndev);
|
||||
|
||||
init_dma_desc_rings(ndev, GFP_ATOMIC);
|
||||
stmmac_hw_setup(ndev);
|
||||
stmmac_init_tx_coalesce(priv);
|
||||
|
||||
napi_enable(&priv->napi);
|
||||
|
||||
|
|
|
@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
|
|||
HMD(("init rxring, "));
|
||||
for (i = 0; i < RX_RING_SIZE; i++) {
|
||||
struct sk_buff *skb;
|
||||
u32 mapping;
|
||||
|
||||
skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
|
||||
if (!skb) {
|
||||
|
@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp)
|
|||
|
||||
/* Because we reserve afterwards. */
|
||||
skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
|
||||
mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
|
||||
DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(hp->dma_dev, mapping)) {
|
||||
dev_kfree_skb_any(skb);
|
||||
hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
|
||||
continue;
|
||||
}
|
||||
hme_write_rxd(hp, &hb->happy_meal_rxd[i],
|
||||
(RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
|
||||
dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
|
||||
DMA_FROM_DEVICE));
|
||||
mapping);
|
||||
skb_reserve(skb, RX_OFFSET);
|
||||
}
|
||||
|
||||
|
@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
|
|||
skb = hp->rx_skbs[elem];
|
||||
if (len > RX_COPY_THRESHOLD) {
|
||||
struct sk_buff *new_skb;
|
||||
u32 mapping;
|
||||
|
||||
/* Now refill the entry, if we can. */
|
||||
new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
|
||||
|
@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
|
|||
drops++;
|
||||
goto drop_it;
|
||||
}
|
||||
skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
|
||||
mapping = dma_map_single(hp->dma_dev, new_skb->data,
|
||||
RX_BUF_ALLOC_SIZE,
|
||||
DMA_FROM_DEVICE);
|
||||
if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
|
||||
dev_kfree_skb_any(new_skb);
|
||||
drops++;
|
||||
goto drop_it;
|
||||
}
|
||||
|
||||
dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
|
||||
hp->rx_skbs[elem] = new_skb;
|
||||
skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
|
||||
hme_write_rxd(hp, this,
|
||||
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
|
||||
dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE,
|
||||
DMA_FROM_DEVICE));
|
||||
mapping);
|
||||
skb_reserve(new_skb, RX_OFFSET);
|
||||
|
||||
/* Trim the original skb for the netif. */
|
||||
|
@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev)
|
|||
netif_wake_queue(dev);
|
||||
}
|
||||
|
||||
static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
|
||||
u32 first_len, u32 first_entry, u32 entry)
|
||||
{
|
||||
struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
|
||||
|
||||
dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
|
||||
|
||||
first_entry = NEXT_TX(first_entry);
|
||||
while (first_entry != entry) {
|
||||
struct happy_meal_txd *this = &txbase[first_entry];
|
||||
u32 addr, len;
|
||||
|
||||
addr = hme_read_desc32(hp, &this->tx_addr);
|
||||
len = hme_read_desc32(hp, &this->tx_flags);
|
||||
len &= TXFLAG_SIZE;
|
||||
dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
|
||||
}
|
||||
}
|
||||
|
||||
static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
|
||||
struct net_device *dev)
|
||||
{
|
||||
|
@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
|
|||
|
||||
len = skb->len;
|
||||
mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
|
||||
goto out_dma_error;
|
||||
tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
|
||||
hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
|
||||
(tx_flags | (len & TXFLAG_SIZE)),
|
||||
|
@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
|
|||
first_len = skb_headlen(skb);
|
||||
first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
|
||||
DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
|
||||
goto out_dma_error;
|
||||
entry = NEXT_TX(entry);
|
||||
|
||||
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
|
||||
|
@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
|
|||
len = skb_frag_size(this_frag);
|
||||
mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
|
||||
0, len, DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
|
||||
unmap_partial_tx_skb(hp, first_mapping, first_len,
|
||||
first_entry, entry);
|
||||
goto out_dma_error;
|
||||
}
|
||||
this_txflags = tx_flags;
|
||||
if (frag == skb_shinfo(skb)->nr_frags - 1)
|
||||
this_txflags |= TXFLAG_EOP;
|
||||
|
@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
|
|||
|
||||
tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
out_dma_error:
|
||||
hp->tx_skbs[hp->tx_new] = NULL;
|
||||
spin_unlock_irq(&hp->happy_lock);
|
||||
|
||||
dev_kfree_skb_any(skb);
|
||||
dev->stats.tx_dropped++;
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
|
||||
|
|
|
@ -785,7 +785,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
|
|||
{
|
||||
if (!ale)
|
||||
return -EINVAL;
|
||||
cpsw_ale_stop(ale);
|
||||
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
|
||||
kfree(ale);
|
||||
return 0;
|
||||
|
|
|
@ -264,7 +264,7 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
|
|||
|
||||
switch (ptp_class & PTP_CLASS_PMASK) {
|
||||
case PTP_CLASS_IPV4:
|
||||
offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
|
||||
offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
|
||||
break;
|
||||
case PTP_CLASS_IPV6:
|
||||
offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
|
||||
|
|
|
@ -629,6 +629,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
|
|||
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
||||
vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
|
||||
vnet_hdr->csum_start = skb_checksum_start_offset(skb);
|
||||
if (vlan_tx_tag_present(skb))
|
||||
vnet_hdr->csum_start += VLAN_HLEN;
|
||||
vnet_hdr->csum_offset = skb->csum_offset;
|
||||
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
|
||||
vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
|
||||
|
|
|
@ -791,7 +791,7 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
|
|||
|
||||
switch (type & PTP_CLASS_PMASK) {
|
||||
case PTP_CLASS_IPV4:
|
||||
offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
|
||||
offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
|
||||
break;
|
||||
case PTP_CLASS_IPV6:
|
||||
offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
|
||||
|
@ -934,7 +934,7 @@ static int is_sync(struct sk_buff *skb, int type)
|
|||
|
||||
switch (type & PTP_CLASS_PMASK) {
|
||||
case PTP_CLASS_IPV4:
|
||||
offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
|
||||
offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
|
||||
break;
|
||||
case PTP_CLASS_IPV6:
|
||||
offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
|
||||
|
|
|
@ -352,6 +352,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
|
|||
{
|
||||
struct mii_ioctl_data *mii_data = if_mii(ifr);
|
||||
u16 val = mii_data->val_in;
|
||||
bool change_autoneg = false;
|
||||
|
||||
switch (cmd) {
|
||||
case SIOCGMIIPHY:
|
||||
|
@ -367,22 +368,29 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
|
|||
if (mii_data->phy_id == phydev->addr) {
|
||||
switch (mii_data->reg_num) {
|
||||
case MII_BMCR:
|
||||
if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0)
|
||||
if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
|
||||
if (phydev->autoneg == AUTONEG_ENABLE)
|
||||
change_autoneg = true;
|
||||
phydev->autoneg = AUTONEG_DISABLE;
|
||||
else
|
||||
phydev->autoneg = AUTONEG_ENABLE;
|
||||
if (!phydev->autoneg && (val & BMCR_FULLDPLX))
|
||||
if (val & BMCR_FULLDPLX)
|
||||
phydev->duplex = DUPLEX_FULL;
|
||||
else
|
||||
phydev->duplex = DUPLEX_HALF;
|
||||
if (!phydev->autoneg && (val & BMCR_SPEED1000))
|
||||
if (val & BMCR_SPEED1000)
|
||||
phydev->speed = SPEED_1000;
|
||||
else if (!phydev->autoneg &&
|
||||
(val & BMCR_SPEED100))
|
||||
else if (val & BMCR_SPEED100)
|
||||
phydev->speed = SPEED_100;
|
||||
else phydev->speed = SPEED_10;
|
||||
}
|
||||
else {
|
||||
if (phydev->autoneg == AUTONEG_DISABLE)
|
||||
change_autoneg = true;
|
||||
phydev->autoneg = AUTONEG_ENABLE;
|
||||
}
|
||||
break;
|
||||
case MII_ADVERTISE:
|
||||
phydev->advertising = val;
|
||||
phydev->advertising = mii_adv_to_ethtool_adv_t(val);
|
||||
change_autoneg = true;
|
||||
break;
|
||||
default:
|
||||
/* do nothing */
|
||||
|
@ -396,6 +404,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
|
|||
if (mii_data->reg_num == MII_BMCR &&
|
||||
val & BMCR_RESET)
|
||||
return phy_init_hw(phydev);
|
||||
|
||||
if (change_autoneg)
|
||||
return phy_start_aneg(phydev);
|
||||
|
||||
return 0;
|
||||
|
||||
case SIOCSHWTSTAMP:
|
||||
|
|
|
@ -755,24 +755,24 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|||
|
||||
err = get_filter(argp, &code);
|
||||
if (err >= 0) {
|
||||
struct bpf_prog *pass_filter = NULL;
|
||||
struct sock_fprog_kern fprog = {
|
||||
.len = err,
|
||||
.filter = code,
|
||||
};
|
||||
|
||||
ppp_lock(ppp);
|
||||
if (ppp->pass_filter) {
|
||||
bpf_prog_destroy(ppp->pass_filter);
|
||||
ppp->pass_filter = NULL;
|
||||
}
|
||||
if (fprog.filter != NULL)
|
||||
err = bpf_prog_create(&ppp->pass_filter,
|
||||
&fprog);
|
||||
else
|
||||
err = 0;
|
||||
kfree(code);
|
||||
if (fprog.filter)
|
||||
err = bpf_prog_create(&pass_filter, &fprog);
|
||||
if (!err) {
|
||||
ppp_lock(ppp);
|
||||
if (ppp->pass_filter)
|
||||
bpf_prog_destroy(ppp->pass_filter);
|
||||
ppp->pass_filter = pass_filter;
|
||||
ppp_unlock(ppp);
|
||||
}
|
||||
kfree(code);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case PPPIOCSACTIVE:
|
||||
|
@ -781,24 +781,24 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|||
|
||||
err = get_filter(argp, &code);
|
||||
if (err >= 0) {
|
||||
struct bpf_prog *active_filter = NULL;
|
||||
struct sock_fprog_kern fprog = {
|
||||
.len = err,
|
||||
.filter = code,
|
||||
};
|
||||
|
||||
ppp_lock(ppp);
|
||||
if (ppp->active_filter) {
|
||||
bpf_prog_destroy(ppp->active_filter);
|
||||
ppp->active_filter = NULL;
|
||||
}
|
||||
if (fprog.filter != NULL)
|
||||
err = bpf_prog_create(&ppp->active_filter,
|
||||
&fprog);
|
||||
else
|
||||
err = 0;
|
||||
kfree(code);
|
||||
if (fprog.filter)
|
||||
err = bpf_prog_create(&active_filter, &fprog);
|
||||
if (!err) {
|
||||
ppp_lock(ppp);
|
||||
if (ppp->active_filter)
|
||||
bpf_prog_destroy(ppp->active_filter);
|
||||
ppp->active_filter = active_filter;
|
||||
ppp_unlock(ppp);
|
||||
}
|
||||
kfree(code);
|
||||
}
|
||||
break;
|
||||
}
|
||||
#endif /* CONFIG_PPP_FILTER */
|
||||
|
|
|
@ -1235,12 +1235,20 @@ static ssize_t tun_put_user(struct tun_struct *tun,
|
|||
struct tun_pi pi = { 0, skb->protocol };
|
||||
ssize_t total = 0;
|
||||
int vlan_offset = 0, copied;
|
||||
int vlan_hlen = 0;
|
||||
int vnet_hdr_sz = 0;
|
||||
|
||||
if (vlan_tx_tag_present(skb))
|
||||
vlan_hlen = VLAN_HLEN;
|
||||
|
||||
if (tun->flags & TUN_VNET_HDR)
|
||||
vnet_hdr_sz = tun->vnet_hdr_sz;
|
||||
|
||||
if (!(tun->flags & TUN_NO_PI)) {
|
||||
if ((len -= sizeof(pi)) < 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (len < skb->len) {
|
||||
if (len < skb->len + vlan_hlen + vnet_hdr_sz) {
|
||||
/* Packet will be striped */
|
||||
pi.flags |= TUN_PKT_STRIP;
|
||||
}
|
||||
|
@ -1250,9 +1258,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
|
|||
total += sizeof(pi);
|
||||
}
|
||||
|
||||
if (tun->flags & TUN_VNET_HDR) {
|
||||
if (vnet_hdr_sz) {
|
||||
struct virtio_net_hdr gso = { 0 }; /* no info leak */
|
||||
if ((len -= tun->vnet_hdr_sz) < 0)
|
||||
if ((len -= vnet_hdr_sz) < 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (skb_is_gso(skb)) {
|
||||
|
@ -1284,7 +1292,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
|
|||
|
||||
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
||||
gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
|
||||
gso.csum_start = skb_checksum_start_offset(skb);
|
||||
gso.csum_start = skb_checksum_start_offset(skb) +
|
||||
vlan_hlen;
|
||||
gso.csum_offset = skb->csum_offset;
|
||||
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
|
||||
gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
|
||||
|
@ -1293,14 +1302,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
|
|||
if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
|
||||
sizeof(gso))))
|
||||
return -EFAULT;
|
||||
total += tun->vnet_hdr_sz;
|
||||
total += vnet_hdr_sz;
|
||||
}
|
||||
|
||||
copied = total;
|
||||
total += skb->len;
|
||||
if (!vlan_tx_tag_present(skb)) {
|
||||
len = min_t(int, skb->len, len);
|
||||
} else {
|
||||
len = min_t(int, skb->len + vlan_hlen, len);
|
||||
total += skb->len + vlan_hlen;
|
||||
if (vlan_hlen) {
|
||||
int copy, ret;
|
||||
struct {
|
||||
__be16 h_vlan_proto;
|
||||
|
@ -1311,8 +1319,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
|
|||
veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
|
||||
|
||||
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
|
||||
len = min_t(int, skb->len + VLAN_HLEN, len);
|
||||
total += VLAN_HLEN;
|
||||
|
||||
copy = min_t(int, vlan_offset, len);
|
||||
ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
|
||||
|
|
|
@ -465,19 +465,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
|
|||
return ret;
|
||||
}
|
||||
|
||||
ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
msleep(150);
|
||||
|
||||
ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
msleep(150);
|
||||
|
||||
ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
|
||||
ax88772_reset(dev);
|
||||
|
||||
/* Read PHYID register *AFTER* the PHY was reset properly */
|
||||
phyid = asix_get_phyid(dev);
|
||||
|
|
|
@ -275,13 +275,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
|
|||
return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
|
||||
}
|
||||
|
||||
/* Find VXLAN socket based on network namespace and UDP port */
|
||||
static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
|
||||
/* Find VXLAN socket based on network namespace, address family and UDP port */
|
||||
static struct vxlan_sock *vxlan_find_sock(struct net *net,
|
||||
sa_family_t family, __be16 port)
|
||||
{
|
||||
struct vxlan_sock *vs;
|
||||
|
||||
hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
|
||||
if (inet_sk(vs->sock->sk)->inet_sport == port)
|
||||
if (inet_sk(vs->sock->sk)->inet_sport == port &&
|
||||
inet_sk(vs->sock->sk)->sk.sk_family == family)
|
||||
return vs;
|
||||
}
|
||||
return NULL;
|
||||
|
@ -300,11 +302,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
|
|||
}
|
||||
|
||||
/* Look up VNI in a per net namespace table */
|
||||
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
|
||||
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
|
||||
sa_family_t family, __be16 port)
|
||||
{
|
||||
struct vxlan_sock *vs;
|
||||
|
||||
vs = vxlan_find_sock(net, port);
|
||||
vs = vxlan_find_sock(net, family, port);
|
||||
if (!vs)
|
||||
return NULL;
|
||||
|
||||
|
@ -621,6 +624,8 @@ static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
|
|||
int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
|
||||
int err = -ENOSYS;
|
||||
|
||||
udp_tunnel_gro_complete(skb, nhoff);
|
||||
|
||||
eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
|
||||
type = eh->h_proto;
|
||||
|
||||
|
@ -1771,7 +1776,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
|
|||
struct vxlan_dev *dst_vxlan;
|
||||
|
||||
ip_rt_put(rt);
|
||||
dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
|
||||
dst_vxlan = vxlan_find_vni(vxlan->net, vni,
|
||||
dst->sa.sa_family, dst_port);
|
||||
if (!dst_vxlan)
|
||||
goto tx_error;
|
||||
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
|
||||
|
@ -1825,7 +1831,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
|
|||
struct vxlan_dev *dst_vxlan;
|
||||
|
||||
dst_release(ndst);
|
||||
dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
|
||||
dst_vxlan = vxlan_find_vni(vxlan->net, vni,
|
||||
dst->sa.sa_family, dst_port);
|
||||
if (!dst_vxlan)
|
||||
goto tx_error;
|
||||
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
|
||||
|
@ -1985,13 +1992,15 @@ static int vxlan_init(struct net_device *dev)
|
|||
struct vxlan_dev *vxlan = netdev_priv(dev);
|
||||
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
|
||||
struct vxlan_sock *vs;
|
||||
bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
|
||||
|
||||
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
|
||||
if (!dev->tstats)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock(&vn->sock_lock);
|
||||
vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
|
||||
vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
|
||||
vxlan->dst_port);
|
||||
if (vs) {
|
||||
/* If we have a socket with same port already, reuse it */
|
||||
atomic_inc(&vs->refcnt);
|
||||
|
@ -2382,6 +2391,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
|
|||
{
|
||||
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
|
||||
struct vxlan_sock *vs;
|
||||
bool ipv6 = flags & VXLAN_F_IPV6;
|
||||
|
||||
vs = vxlan_socket_create(net, port, rcv, data, flags);
|
||||
if (!IS_ERR(vs))
|
||||
|
@ -2391,7 +2401,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
|
|||
return vs;
|
||||
|
||||
spin_lock(&vn->sock_lock);
|
||||
vs = vxlan_find_sock(net, port);
|
||||
vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
|
||||
if (vs) {
|
||||
if (vs->rcv == rcv)
|
||||
atomic_inc(&vs->refcnt);
|
||||
|
@ -2550,7 +2560,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
|
|||
nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
|
||||
vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
|
||||
|
||||
if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
|
||||
if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
|
||||
vxlan->dst_port)) {
|
||||
pr_info("duplicate VNI %u\n", vni);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
|
|
@ -284,7 +284,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
|
|||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
if (WARN_ON_ONCE(mvm->init_ucode_complete))
|
||||
if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating))
|
||||
return 0;
|
||||
|
||||
iwl_init_notification_wait(&mvm->notif_wait,
|
||||
|
@ -334,6 +334,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
|
|||
goto out;
|
||||
}
|
||||
|
||||
mvm->calibrating = true;
|
||||
|
||||
/* Send TX valid antennas before triggering calibrations */
|
||||
ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
|
||||
if (ret)
|
||||
|
@ -358,11 +360,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
|
|||
MVM_UCODE_CALIB_TIMEOUT);
|
||||
if (!ret)
|
||||
mvm->init_ucode_complete = true;
|
||||
|
||||
if (ret && iwl_mvm_is_radio_killed(mvm)) {
|
||||
IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
|
||||
ret = 1;
|
||||
}
|
||||
goto out;
|
||||
|
||||
error:
|
||||
iwl_remove_notification(&mvm->notif_wait, &calib_wait);
|
||||
out:
|
||||
mvm->calibrating = false;
|
||||
if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
|
||||
/* we want to debug INIT and we have no NVM - fake */
|
||||
mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
|
||||
|
|
|
@ -788,6 +788,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
|
|||
|
||||
mvm->scan_status = IWL_MVM_SCAN_NONE;
|
||||
mvm->ps_disabled = false;
|
||||
mvm->calibrating = false;
|
||||
|
||||
/* just in case one was running */
|
||||
ieee80211_remain_on_channel_expired(mvm->hw);
|
||||
|
|
|
@ -548,6 +548,7 @@ struct iwl_mvm {
|
|||
enum iwl_ucode_type cur_ucode;
|
||||
bool ucode_loaded;
|
||||
bool init_ucode_complete;
|
||||
bool calibrating;
|
||||
u32 error_event_table;
|
||||
u32 log_event_table;
|
||||
u32 umac_error_event_table;
|
||||
|
|
|
@ -424,6 +424,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
|||
}
|
||||
mvm->sf_state = SF_UNINIT;
|
||||
mvm->low_latency_agg_frame_limit = 6;
|
||||
mvm->cur_ucode = IWL_UCODE_INIT;
|
||||
|
||||
mutex_init(&mvm->mutex);
|
||||
mutex_init(&mvm->d0i3_suspend_mutex);
|
||||
|
@ -752,6 +753,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
|
|||
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
|
||||
{
|
||||
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
|
||||
bool calibrating = ACCESS_ONCE(mvm->calibrating);
|
||||
|
||||
if (state)
|
||||
set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
|
||||
|
@ -760,7 +762,15 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
|
|||
|
||||
wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
|
||||
|
||||
return state && mvm->cur_ucode != IWL_UCODE_INIT;
|
||||
/* iwl_run_init_mvm_ucode is waiting for results, abort it */
|
||||
if (calibrating)
|
||||
iwl_abort_notification_waits(&mvm->notif_wait);
|
||||
|
||||
/*
|
||||
* Stop the device if we run OPERATIONAL firmware or if we are in the
|
||||
* middle of the calibrations.
|
||||
*/
|
||||
return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
|
||||
}
|
||||
|
||||
static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
|
||||
|
|
|
@ -915,7 +915,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
|
|||
* restart. So don't process again if the device is
|
||||
* already dead.
|
||||
*/
|
||||
if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
|
||||
if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
|
||||
IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
|
||||
iwl_pcie_tx_stop(trans);
|
||||
iwl_pcie_rx_stop(trans);
|
||||
|
||||
|
@ -945,7 +946,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
|
|||
/* clear all status bits */
|
||||
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
|
||||
clear_bit(STATUS_INT_ENABLED, &trans->status);
|
||||
clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
|
||||
clear_bit(STATUS_TPOWER_PMI, &trans->status);
|
||||
clear_bit(STATUS_RFKILL, &trans->status);
|
||||
|
||||
|
|
|
@ -1987,7 +1987,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
|
|||
if (err != 0) {
|
||||
printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n",
|
||||
err);
|
||||
goto failed_hw;
|
||||
goto failed_bind;
|
||||
}
|
||||
|
||||
skb_queue_head_init(&data->pending);
|
||||
|
@ -2183,6 +2183,8 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
|
|||
return idx;
|
||||
|
||||
failed_hw:
|
||||
device_release_driver(data->dev);
|
||||
failed_bind:
|
||||
device_unregister(data->dev);
|
||||
failed_drvdata:
|
||||
ieee80211_free_hw(hw);
|
||||
|
|
|
@ -256,7 +256,7 @@ struct ucred {
|
|||
#define MSG_EOF MSG_FIN
|
||||
|
||||
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
|
||||
#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file
|
||||
#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file
|
||||
descriptor received through
|
||||
SCM_RIGHTS */
|
||||
#if defined(CONFIG_COMPAT)
|
||||
|
|
|
@ -34,7 +34,6 @@
|
|||
* @list: used to maintain a list of currently available transports
|
||||
* @name: the human-readable name of the transport
|
||||
* @maxsize: transport provided maximum packet size
|
||||
* @pref: Preferences of this transport
|
||||
* @def: set if this transport should be considered the default
|
||||
* @create: member function to create a new connection on this transport
|
||||
* @close: member function to discard a connection on this transport
|
||||
|
|
|
@ -100,6 +100,15 @@ static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
|
|||
return iptunnel_handle_offloads(skb, udp_csum, type);
|
||||
}
|
||||
|
||||
static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
|
||||
{
|
||||
struct udphdr *uh;
|
||||
|
||||
uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr));
|
||||
skb_shinfo(skb)->gso_type |= uh->check ?
|
||||
SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
|
||||
}
|
||||
|
||||
static inline void udp_tunnel_encap_enable(struct socket *sock)
|
||||
{
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
|
|
|
@ -125,6 +125,7 @@ header-y += filter.h
|
|||
header-y += firewire-cdev.h
|
||||
header-y += firewire-constants.h
|
||||
header-y += flat.h
|
||||
header-y += fou.h
|
||||
header-y += fs.h
|
||||
header-y += fsl_hypervisor.h
|
||||
header-y += fuse.h
|
||||
|
@ -141,6 +142,7 @@ header-y += hid.h
|
|||
header-y += hiddev.h
|
||||
header-y += hidraw.h
|
||||
header-y += hpet.h
|
||||
header-y += hsr_netlink.h
|
||||
header-y += hyperv.h
|
||||
header-y += hysdn_if.h
|
||||
header-y += i2c-dev.h
|
||||
|
@ -251,6 +253,7 @@ header-y += mii.h
|
|||
header-y += minix_fs.h
|
||||
header-y += mman.h
|
||||
header-y += mmtimer.h
|
||||
header-y += mpls.h
|
||||
header-y += mqueue.h
|
||||
header-y += mroute.h
|
||||
header-y += mroute6.h
|
||||
|
@ -424,6 +427,7 @@ header-y += virtio_net.h
|
|||
header-y += virtio_pci.h
|
||||
header-y += virtio_ring.h
|
||||
header-y += virtio_rng.h
|
||||
header=y += vm_sockets.h
|
||||
header-y += vt.h
|
||||
header-y += wait.h
|
||||
header-y += wanrouter.h
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
#include <linux/types.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/in6.h>
|
||||
|
||||
#define SYSFS_BRIDGE_ATTR "bridge"
|
||||
#define SYSFS_BRIDGE_FDB "brforward"
|
||||
|
|
|
@ -230,7 +230,7 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
|
|||
ht->shift++;
|
||||
|
||||
/* For each new bucket, search the corresponding old bucket
|
||||
* for the first entry that hashes to the new bucket, and
|
||||
* for the first entry that hashes to the new bucket, and
|
||||
* link the new bucket to that entry. Since all the entries
|
||||
* which will end up in the new bucket appear in the same
|
||||
* old bucket, this constructs an entirely valid new hash
|
||||
|
@ -248,8 +248,8 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
|
|||
}
|
||||
|
||||
/* Publish the new table pointer. Lookups may now traverse
|
||||
* the new table, but they will not benefit from any
|
||||
* additional efficiency until later steps unzip the buckets.
|
||||
* the new table, but they will not benefit from any
|
||||
* additional efficiency until later steps unzip the buckets.
|
||||
*/
|
||||
rcu_assign_pointer(ht->tbl, new_tbl);
|
||||
|
||||
|
@ -306,14 +306,14 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
|
|||
|
||||
ht->shift--;
|
||||
|
||||
/* Link each bucket in the new table to the first bucket
|
||||
/* Link each bucket in the new table to the first bucket
|
||||
* in the old table that contains entries which will hash
|
||||
* to the new bucket.
|
||||
*/
|
||||
for (i = 0; i < ntbl->size; i++) {
|
||||
ntbl->buckets[i] = tbl->buckets[i];
|
||||
|
||||
/* Link each bucket in the new table to the first bucket
|
||||
/* Link each bucket in the new table to the first bucket
|
||||
* in the old table that contains entries which will hash
|
||||
* to the new bucket.
|
||||
*/
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include <net/netfilter/ipv6/nf_reject.h>
|
||||
#include <linux/ip.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/ip6_checksum.h>
|
||||
#include <linux/netfilter_bridge.h>
|
||||
#include "../br_private.h"
|
||||
|
||||
|
|
|
@ -489,11 +489,14 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
|
|||
/* We could not connect to a designated PHY, so use the switch internal
|
||||
* MDIO bus instead
|
||||
*/
|
||||
if (!p->phy)
|
||||
if (!p->phy) {
|
||||
p->phy = ds->slave_mii_bus->phy_map[p->port];
|
||||
else
|
||||
phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
|
||||
p->phy_interface);
|
||||
} else {
|
||||
pr_info("attached PHY at address %d [%s]\n",
|
||||
p->phy->addr, p->phy->drv->name);
|
||||
}
|
||||
}
|
||||
|
||||
int dsa_slave_suspend(struct net_device *slave_dev)
|
||||
|
|
|
@ -133,6 +133,8 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff)
|
|||
int err = -ENOSYS;
|
||||
const struct net_offload **offloads;
|
||||
|
||||
udp_tunnel_gro_complete(skb, nhoff);
|
||||
|
||||
rcu_read_lock();
|
||||
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
|
||||
ops = rcu_dereference(offloads[proto]);
|
||||
|
|
|
@ -144,6 +144,8 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
|
|||
gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
|
||||
geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
|
||||
|
||||
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
|
||||
|
||||
return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
|
||||
tos, ttl, df, src_port, dst_port, xnet);
|
||||
}
|
||||
|
@ -364,6 +366,7 @@ late_initcall(geneve_init_module);
|
|||
static void __exit geneve_cleanup_module(void)
|
||||
{
|
||||
destroy_workqueue(geneve_wq);
|
||||
unregister_pernet_subsys(&geneve_net_ops);
|
||||
}
|
||||
module_exit(geneve_cleanup_module);
|
||||
|
||||
|
|
|
@ -195,7 +195,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
|
|||
for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
|
||||
if (!CMSG_OK(msg, cmsg))
|
||||
return -EINVAL;
|
||||
#if defined(CONFIG_IPV6)
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
if (allow_ipv6 &&
|
||||
cmsg->cmsg_level == SOL_IPV6 &&
|
||||
cmsg->cmsg_type == IPV6_PKTINFO) {
|
||||
|
|
|
@ -2315,6 +2315,35 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
|
|||
|
||||
/* Undo procedures. */
|
||||
|
||||
/* We can clear retrans_stamp when there are no retransmissions in the
|
||||
* window. It would seem that it is trivially available for us in
|
||||
* tp->retrans_out, however, that kind of assumptions doesn't consider
|
||||
* what will happen if errors occur when sending retransmission for the
|
||||
* second time. ...It could the that such segment has only
|
||||
* TCPCB_EVER_RETRANS set at the present time. It seems that checking
|
||||
* the head skb is enough except for some reneging corner cases that
|
||||
* are not worth the effort.
|
||||
*
|
||||
* Main reason for all this complexity is the fact that connection dying
|
||||
* time now depends on the validity of the retrans_stamp, in particular,
|
||||
* that successive retransmissions of a segment must not advance
|
||||
* retrans_stamp under any conditions.
|
||||
*/
|
||||
static bool tcp_any_retrans_done(const struct sock *sk)
|
||||
{
|
||||
const struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (tp->retrans_out)
|
||||
return true;
|
||||
|
||||
skb = tcp_write_queue_head(sk);
|
||||
if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
#if FASTRETRANS_DEBUG > 1
|
||||
static void DBGUNDO(struct sock *sk, const char *msg)
|
||||
{
|
||||
|
@ -2410,6 +2439,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
|
|||
* is ACKed. For Reno it is MUST to prevent false
|
||||
* fast retransmits (RFC2582). SACK TCP is safe. */
|
||||
tcp_moderate_cwnd(tp);
|
||||
if (!tcp_any_retrans_done(sk))
|
||||
tp->retrans_stamp = 0;
|
||||
return true;
|
||||
}
|
||||
tcp_set_ca_state(sk, TCP_CA_Open);
|
||||
|
@ -2430,35 +2461,6 @@ static bool tcp_try_undo_dsack(struct sock *sk)
|
|||
return false;
|
||||
}
|
||||
|
||||
/* We can clear retrans_stamp when there are no retransmissions in the
|
||||
* window. It would seem that it is trivially available for us in
|
||||
* tp->retrans_out, however, that kind of assumptions doesn't consider
|
||||
* what will happen if errors occur when sending retransmission for the
|
||||
* second time. ...It could the that such segment has only
|
||||
* TCPCB_EVER_RETRANS set at the present time. It seems that checking
|
||||
* the head skb is enough except for some reneging corner cases that
|
||||
* are not worth the effort.
|
||||
*
|
||||
* Main reason for all this complexity is the fact that connection dying
|
||||
* time now depends on the validity of the retrans_stamp, in particular,
|
||||
* that successive retransmissions of a segment must not advance
|
||||
* retrans_stamp under any conditions.
|
||||
*/
|
||||
static bool tcp_any_retrans_done(const struct sock *sk)
|
||||
{
|
||||
const struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (tp->retrans_out)
|
||||
return true;
|
||||
|
||||
skb = tcp_write_queue_head(sk);
|
||||
if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Undo during loss recovery after partial ACK or using F-RTO. */
|
||||
static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
|
||||
{
|
||||
|
|
|
@ -961,8 +961,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
|
|||
else
|
||||
dev->flags &= ~IFF_POINTOPOINT;
|
||||
|
||||
dev->iflink = p->link;
|
||||
|
||||
/* Precalculate GRE options length */
|
||||
if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
|
||||
if (t->parms.o_flags&GRE_CSUM)
|
||||
|
@ -1272,6 +1270,7 @@ static int ip6gre_tunnel_init(struct net_device *dev)
|
|||
u64_stats_init(&ip6gre_tunnel_stats->syncp);
|
||||
}
|
||||
|
||||
dev->iflink = tunnel->parms.link;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1481,6 +1480,8 @@ static int ip6gre_tap_init(struct net_device *dev)
|
|||
if (!dev->tstats)
|
||||
return -ENOMEM;
|
||||
|
||||
dev->iflink = tunnel->parms.link;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -272,9 +272,6 @@ static int ip6_tnl_create2(struct net_device *dev)
|
|||
int err;
|
||||
|
||||
t = netdev_priv(dev);
|
||||
err = ip6_tnl_dev_init(dev);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
err = register_netdevice(dev);
|
||||
if (err < 0)
|
||||
|
@ -1462,6 +1459,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
|
|||
|
||||
|
||||
static const struct net_device_ops ip6_tnl_netdev_ops = {
|
||||
.ndo_init = ip6_tnl_dev_init,
|
||||
.ndo_uninit = ip6_tnl_dev_uninit,
|
||||
.ndo_start_xmit = ip6_tnl_xmit,
|
||||
.ndo_do_ioctl = ip6_tnl_ioctl,
|
||||
|
@ -1546,16 +1544,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
|
|||
struct ip6_tnl *t = netdev_priv(dev);
|
||||
struct net *net = dev_net(dev);
|
||||
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
|
||||
int err = ip6_tnl_dev_init_gen(dev);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
t->parms.proto = IPPROTO_IPV6;
|
||||
dev_hold(dev);
|
||||
|
||||
ip6_tnl_link_config(t);
|
||||
|
||||
rcu_assign_pointer(ip6n->tnls_wc[0], t);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -172,10 +172,6 @@ static int vti6_tnl_create2(struct net_device *dev)
|
|||
struct vti6_net *ip6n = net_generic(net, vti6_net_id);
|
||||
int err;
|
||||
|
||||
err = vti6_dev_init(dev);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
err = register_netdevice(dev);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
|
@ -783,6 +779,7 @@ static int vti6_change_mtu(struct net_device *dev, int new_mtu)
|
|||
}
|
||||
|
||||
static const struct net_device_ops vti6_netdev_ops = {
|
||||
.ndo_init = vti6_dev_init,
|
||||
.ndo_uninit = vti6_dev_uninit,
|
||||
.ndo_start_xmit = vti6_tnl_xmit,
|
||||
.ndo_do_ioctl = vti6_ioctl,
|
||||
|
@ -852,16 +849,10 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
|
|||
struct ip6_tnl *t = netdev_priv(dev);
|
||||
struct net *net = dev_net(dev);
|
||||
struct vti6_net *ip6n = net_generic(net, vti6_net_id);
|
||||
int err = vti6_dev_init_gen(dev);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
t->parms.proto = IPPROTO_IPV6;
|
||||
dev_hold(dev);
|
||||
|
||||
vti6_link_config(t);
|
||||
|
||||
rcu_assign_pointer(ip6n->tnls_wc[0], t);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
|
|||
struct sit_net *sitn = net_generic(net, sit_net_id);
|
||||
int err;
|
||||
|
||||
err = ipip6_tunnel_init(dev);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
ipip6_tunnel_clone_6rd(dev, sitn);
|
||||
memcpy(dev->dev_addr, &t->parms.iph.saddr, 4);
|
||||
memcpy(dev->broadcast, &t->parms.iph.daddr, 4);
|
||||
|
||||
if ((__force u16)t->parms.i_flags & SIT_ISATAP)
|
||||
dev->priv_flags |= IFF_ISATAP;
|
||||
|
@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
|
|||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
strcpy(t->parms.name, dev->name);
|
||||
ipip6_tunnel_clone_6rd(dev, sitn);
|
||||
|
||||
dev->rtnl_link_ops = &sit_link_ops;
|
||||
|
||||
dev_hold(dev);
|
||||
|
@ -1330,6 +1329,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
|
|||
}
|
||||
|
||||
static const struct net_device_ops ipip6_netdev_ops = {
|
||||
.ndo_init = ipip6_tunnel_init,
|
||||
.ndo_uninit = ipip6_tunnel_uninit,
|
||||
.ndo_start_xmit = sit_tunnel_xmit,
|
||||
.ndo_do_ioctl = ipip6_tunnel_ioctl,
|
||||
|
@ -1378,9 +1378,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
|
|||
|
||||
tunnel->dev = dev;
|
||||
tunnel->net = dev_net(dev);
|
||||
|
||||
memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
|
||||
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
|
||||
strcpy(tunnel->parms.name, dev->name);
|
||||
|
||||
ipip6_tunnel_bind_dev(dev);
|
||||
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
|
||||
|
@ -1405,7 +1403,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
|
|||
|
||||
tunnel->dev = dev;
|
||||
tunnel->net = dev_net(dev);
|
||||
strcpy(tunnel->parms.name, dev->name);
|
||||
|
||||
iph->version = 4;
|
||||
iph->protocol = IPPROTO_IPV6;
|
||||
|
|
|
@ -805,7 +805,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
|
|||
|
||||
memset(¶ms, 0, sizeof(params));
|
||||
memset(&csa_ie, 0, sizeof(csa_ie));
|
||||
err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon,
|
||||
err = ieee80211_parse_ch_switch_ie(sdata, elems,
|
||||
ifibss->chandef.chan->band,
|
||||
sta_flags, ifibss->bssid, &csa_ie);
|
||||
/* can't switch to destination channel, fail */
|
||||
|
|
|
@ -1642,7 +1642,6 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
|
|||
* ieee80211_parse_ch_switch_ie - parses channel switch IEs
|
||||
* @sdata: the sdata of the interface which has received the frame
|
||||
* @elems: parsed 802.11 elements received with the frame
|
||||
* @beacon: indicates if the frame was a beacon or probe response
|
||||
* @current_band: indicates the current band
|
||||
* @sta_flags: contains information about own capabilities and restrictions
|
||||
* to decide which channel switch announcements can be accepted. Only the
|
||||
|
@ -1656,7 +1655,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
|
|||
* Return: 0 on success, <0 on error and >0 if there is nothing to parse.
|
||||
*/
|
||||
int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
|
||||
struct ieee802_11_elems *elems, bool beacon,
|
||||
struct ieee802_11_elems *elems,
|
||||
enum ieee80211_band current_band,
|
||||
u32 sta_flags, u8 *bssid,
|
||||
struct ieee80211_csa_ie *csa_ie);
|
||||
|
|
|
@ -766,10 +766,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
|
|||
int i, flushed;
|
||||
struct ps_data *ps;
|
||||
struct cfg80211_chan_def chandef;
|
||||
bool cancel_scan;
|
||||
|
||||
clear_bit(SDATA_STATE_RUNNING, &sdata->state);
|
||||
|
||||
if (rcu_access_pointer(local->scan_sdata) == sdata)
|
||||
cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata;
|
||||
if (cancel_scan)
|
||||
ieee80211_scan_cancel(local);
|
||||
|
||||
/*
|
||||
|
@ -898,6 +900,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
|
|||
list_del(&sdata->u.vlan.list);
|
||||
mutex_unlock(&local->mtx);
|
||||
RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL);
|
||||
/* see comment in the default case below */
|
||||
ieee80211_free_keys(sdata, true);
|
||||
/* no need to tell driver */
|
||||
break;
|
||||
case NL80211_IFTYPE_MONITOR:
|
||||
|
@ -923,17 +927,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
|
|||
/*
|
||||
* When we get here, the interface is marked down.
|
||||
* Free the remaining keys, if there are any
|
||||
* (shouldn't be, except maybe in WDS mode?)
|
||||
* (which can happen in AP mode if userspace sets
|
||||
* keys before the interface is operating, and maybe
|
||||
* also in WDS mode)
|
||||
*
|
||||
* Force the key freeing to always synchronize_net()
|
||||
* to wait for the RX path in case it is using this
|
||||
* interface enqueuing frames * at this very time on
|
||||
* interface enqueuing frames at this very time on
|
||||
* another CPU.
|
||||
*/
|
||||
ieee80211_free_keys(sdata, true);
|
||||
|
||||
/* fall through */
|
||||
case NL80211_IFTYPE_AP:
|
||||
skb_queue_purge(&sdata->skb_queue);
|
||||
}
|
||||
|
||||
|
@ -991,6 +994,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
|
|||
|
||||
ieee80211_recalc_ps(local, -1);
|
||||
|
||||
if (cancel_scan)
|
||||
flush_delayed_work(&local->scan_work);
|
||||
|
||||
if (local->open_count == 0) {
|
||||
ieee80211_stop_device(local);
|
||||
|
||||
|
|
|
@ -874,7 +874,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
|
|||
|
||||
memset(¶ms, 0, sizeof(params));
|
||||
memset(&csa_ie, 0, sizeof(csa_ie));
|
||||
err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, band,
|
||||
err = ieee80211_parse_ch_switch_ie(sdata, elems, band,
|
||||
sta_flags, sdata->vif.addr,
|
||||
&csa_ie);
|
||||
if (err < 0)
|
||||
|
|
|
@ -1072,7 +1072,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
|
|||
|
||||
current_band = cbss->channel->band;
|
||||
memset(&csa_ie, 0, sizeof(csa_ie));
|
||||
res = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, current_band,
|
||||
res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
|
||||
ifmgd->flags,
|
||||
ifmgd->associated->bssid, &csa_ie);
|
||||
if (res < 0)
|
||||
|
@ -1168,7 +1168,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
|
|||
ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work);
|
||||
else
|
||||
mod_timer(&ifmgd->chswitch_timer,
|
||||
TU_TO_EXP_TIME(csa_ie.count * cbss->beacon_interval));
|
||||
TU_TO_EXP_TIME((csa_ie.count - 1) *
|
||||
cbss->beacon_interval));
|
||||
}
|
||||
|
||||
static bool
|
||||
|
|
|
@ -1678,11 +1678,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
|
|||
sc = le16_to_cpu(hdr->seq_ctrl);
|
||||
frag = sc & IEEE80211_SCTL_FRAG;
|
||||
|
||||
if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
|
||||
is_multicast_ether_addr(hdr->addr1))) {
|
||||
/* not fragmented */
|
||||
if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
|
||||
goto out;
|
||||
|
||||
if (is_multicast_ether_addr(hdr->addr1)) {
|
||||
rx->local->dot11MulticastReceivedFrameCount++;
|
||||
goto out;
|
||||
}
|
||||
|
||||
I802_DEBUG_INC(rx->local->rx_handlers_fragments);
|
||||
|
||||
if (skb_linearize(rx->skb))
|
||||
|
@ -1775,9 +1778,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
|
|||
out:
|
||||
if (rx->sta)
|
||||
rx->sta->rx_packets++;
|
||||
if (is_multicast_ether_addr(hdr->addr1))
|
||||
rx->local->dot11MulticastReceivedFrameCount++;
|
||||
else
|
||||
ieee80211_led_rx(rx->local);
|
||||
return RX_CONTINUE;
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
#include "wme.h"
|
||||
|
||||
int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
|
||||
struct ieee802_11_elems *elems, bool beacon,
|
||||
struct ieee802_11_elems *elems,
|
||||
enum ieee80211_band current_band,
|
||||
u32 sta_flags, u8 *bssid,
|
||||
struct ieee80211_csa_ie *csa_ie)
|
||||
|
@ -91,18 +91,12 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!beacon && sec_chan_offs) {
|
||||
if (sec_chan_offs) {
|
||||
secondary_channel_offset = sec_chan_offs->sec_chan_offs;
|
||||
} else if (beacon && ht_oper) {
|
||||
secondary_channel_offset =
|
||||
ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
|
||||
} else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) {
|
||||
/* If it's not a beacon, HT is enabled and the IE not present,
|
||||
* it's 20 MHz, 802.11-2012 8.5.2.6:
|
||||
* This element [the Secondary Channel Offset Element] is
|
||||
* present when switching to a 40 MHz channel. It may be
|
||||
* present when switching to a 20 MHz channel (in which
|
||||
* case the secondary channel offset is set to SCN).
|
||||
/* If the secondary channel offset IE is not present,
|
||||
* we can't know what's the post-CSA offset, so the
|
||||
* best we can do is use 20MHz.
|
||||
*/
|
||||
secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
|
||||
}
|
||||
|
|
|
@ -1440,7 +1440,7 @@ static void netlink_unbind(int group, long unsigned int groups,
|
|||
return;
|
||||
|
||||
for (undo = 0; undo < group; undo++)
|
||||
if (test_bit(group, &groups))
|
||||
if (test_bit(undo, &groups))
|
||||
nlk->netlink_unbind(undo);
|
||||
}
|
||||
|
||||
|
@ -1492,7 +1492,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
|
|||
netlink_insert(sk, net, nladdr->nl_pid) :
|
||||
netlink_autobind(sock);
|
||||
if (err) {
|
||||
netlink_unbind(nlk->ngroups - 1, groups, nlk);
|
||||
netlink_unbind(nlk->ngroups, groups, nlk);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
@ -2509,6 +2509,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
|
|||
nl_table[unit].module = module;
|
||||
if (cfg) {
|
||||
nl_table[unit].bind = cfg->bind;
|
||||
nl_table[unit].unbind = cfg->unbind;
|
||||
nl_table[unit].flags = cfg->flags;
|
||||
if (cfg->compare)
|
||||
nl_table[unit].compare = cfg->compare;
|
||||
|
|
|
@ -862,8 +862,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
|
|||
list_add(&cur_key->key_list, sh_keys);
|
||||
|
||||
cur_key->key = key;
|
||||
sctp_auth_key_hold(key);
|
||||
|
||||
return 0;
|
||||
nomem:
|
||||
if (!replace)
|
||||
|
|
|
@ -2609,6 +2609,9 @@ static int sctp_process_param(struct sctp_association *asoc,
|
|||
addr_param = param.v + sizeof(sctp_addip_param_t);
|
||||
|
||||
af = sctp_get_af_specific(param_type2af(param.p->type));
|
||||
if (af == NULL)
|
||||
break;
|
||||
|
||||
af->from_addr_param(&addr, addr_param,
|
||||
htons(asoc->peer.port), 0);
|
||||
|
||||
|
|
|
@ -128,7 +128,7 @@ static int sock_fanout_read_ring(int fd, void *ring)
|
|||
struct tpacket2_hdr *header = ring;
|
||||
int count = 0;
|
||||
|
||||
while (header->tp_status & TP_STATUS_USER && count < RING_NUM_FRAMES) {
|
||||
while (count < RING_NUM_FRAMES && header->tp_status & TP_STATUS_USER) {
|
||||
count++;
|
||||
header = ring + (count * getpagesize());
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue
Block a user