net: Add generic ndo_select_queue functions
This patch adds generic versions of the ndo_select_queue functions that either return 0 or select a queue based on the processor ID. This is mainly intended to reduce the number of functions we have to change in the future when the ndo_select_queue signature changes.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
commit a4ea8a3dac
parent eadec877ce
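For illustration only, a hedged sketch (not part of this patch) of how a hypothetical single-queue driver could point its net_device_ops at the new helpers instead of carrying its own trivial callback. The foo_* names are placeholders; only dev_pick_tx_zero and dev_pick_tx_cpu_id come from this change.

/* Hypothetical driver glue, assuming the signatures added by this patch.
 * The foo_* callbacks are placeholders, not part of this commit.
 */
static const struct net_device_ops foo_netdev_ops = {
	.ndo_open         = foo_open,
	.ndo_stop         = foo_stop,
	.ndo_start_xmit   = foo_start_xmit,
	/* single-queue device: always transmit on queue 0 */
	.ndo_select_queue = dev_pick_tx_zero,
	/* a multi-queue device could instead spread by CPU:
	 * .ndo_select_queue = dev_pick_tx_cpu_id,
	 */
};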
drivers/net/ethernet/lantiq_etop.c
@@ -563,14 +563,6 @@ ltq_etop_set_multicast_list(struct net_device *dev)
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static u16
-ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
-		      void *accel_priv, select_queue_fallback_t fallback)
-{
-	/* we are currently only using the first queue */
-	return 0;
-}
-
 static int
 ltq_etop_init(struct net_device *dev)
 {
@@ -641,7 +633,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
 	.ndo_set_mac_address = ltq_etop_set_mac_address,
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_set_rx_mode = ltq_etop_set_multicast_list,
-	.ndo_select_queue = ltq_etop_select_queue,
+	.ndo_select_queue = dev_pick_tx_zero,
 	.ndo_init = ltq_etop_init,
 	.ndo_tx_timeout = ltq_etop_tx_timeout,
 };
drivers/net/ethernet/ti/netcp_core.c
@@ -1889,13 +1889,6 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
 	return err;
 }
 
-static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
-			      void *accel_priv,
-			      select_queue_fallback_t fallback)
-{
-	return 0;
-}
-
 static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
 			  void *type_data)
 {
@@ -1972,7 +1965,7 @@ static const struct net_device_ops netcp_netdev_ops = {
 	.ndo_vlan_rx_add_vid = netcp_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = netcp_rx_kill_vid,
 	.ndo_tx_timeout = netcp_ndo_tx_timeout,
-	.ndo_select_queue = netcp_select_queue,
+	.ndo_select_queue = dev_pick_tx_zero,
 	.ndo_setup_tc = netcp_setup_tc,
 };
drivers/staging/netlogic/xlr_net.c
@@ -290,13 +290,6 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
-static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
-				void *accel_priv,
-				select_queue_fallback_t fallback)
-{
-	return (u16)smp_processor_id();
-}
-
 static void xlr_hw_set_mac_addr(struct net_device *ndev)
 {
 	struct xlr_net_priv *priv = netdev_priv(ndev);
@@ -403,7 +396,7 @@ static const struct net_device_ops xlr_netdev_ops = {
 	.ndo_open = xlr_net_open,
 	.ndo_stop = xlr_net_stop,
 	.ndo_start_xmit = xlr_net_start_xmit,
-	.ndo_select_queue = xlr_net_select_queue,
+	.ndo_select_queue = dev_pick_tx_cpu_id,
 	.ndo_set_mac_address = xlr_net_set_mac_addr,
 	.ndo_set_rx_mode = xlr_set_rx_mode,
 	.ndo_get_stats64 = xlr_stats,
include/linux/netdevice.h
@@ -2567,6 +2567,10 @@ void dev_close(struct net_device *dev);
 void dev_close_many(struct list_head *head, bool unlink);
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+		     void *accel_priv, select_queue_fallback_t fallback);
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv, select_queue_fallback_t fallback);
 int dev_queue_xmit(struct sk_buff *skb);
 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
net/core/dev.c
@@ -3617,6 +3617,20 @@ static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
 #endif
 }
 
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+		     void *accel_priv, select_queue_fallback_t fallback)
+{
+	return 0;
+}
+EXPORT_SYMBOL(dev_pick_tx_zero);
+
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv, select_queue_fallback_t fallback)
+{
+	return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
+}
+EXPORT_SYMBOL(dev_pick_tx_cpu_id);
+
 static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
 			     struct net_device *sb_dev)
 {
net/packet/af_packet.c
@@ -277,7 +277,7 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
 
 static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
 {
-	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
+	return dev_pick_tx_cpu_id(dev, skb, NULL, NULL);
 }
 
 static u16 packet_pick_tx_queue(struct sk_buff *skb)