forked from luck/tmp_suning_uos_patched
net: Restrict receive packets queuing to housekeeping CPUs
With the existing implementation of store_rps_map(), packets are queued in the receive path on the backlog queues of other CPUs irrespective of whether they are isolated or not. This could add a latency overhead to any RT workload that is running on the same CPU.

Ensure that store_rps_map() only uses available housekeeping CPUs for storing the rps_map.

Signed-off-by: Alex Belits <abelits@marvell.com>
Signed-off-by: Nitesh Narayan Lal <nitesh@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200625223443.2684-4-nitesh@redhat.com
This commit is contained in: parent 69a18b1869, commit 07bbecb341.
|
@ -11,6 +11,7 @@
|
|||
#include <linux/if_arp.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sched/signal.h>
|
||||
#include <linux/sched/isolation.h>
|
||||
#include <linux/nsproxy.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/net_namespace.h>
|
||||
|
@ -741,7 +742,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
|
|||
{
|
||||
struct rps_map *old_map, *map;
|
||||
cpumask_var_t mask;
|
||||
int err, cpu, i;
|
||||
int err, cpu, i, hk_flags;
|
||||
static DEFINE_MUTEX(rps_map_mutex);
|
||||
|
||||
if (!capable(CAP_NET_ADMIN))
|
||||
|
@ -756,6 +757,13 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
|
|||
return err;
|
||||
}
|
||||
|
||||
hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
|
||||
cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
|
||||
if (cpumask_empty(mask)) {
|
||||
free_cpumask_var(mask);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
map = kzalloc(max_t(unsigned int,
|
||||
RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
|
||||
GFP_KERNEL);
|
||||
|
|
(End of scraped commit view; remaining page elements — "Loading…", "Reference in New Issue", "Block a user" — are repository-viewer UI chrome, not part of the patch.)