i40e: protect ring accesses with READ- and WRITE_ONCE

READ_ONCE should be used when reading rings prior to accessing the
statistics pointer. Introduce this as well as the corresponding WRITE_ONCE
usage when allocating and freeing the rings, to ensure protected access.

Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
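
The read-side pattern being introduced, as a minimal stand-alone sketch: one marked load of the shared ring pointer, a NULL check, then the usual u64_stats seqcount loop. The demo_ring structure, demo_read_stats() helper and bare rings array below are hypothetical stand-ins for the i40e VSI ring arrays, not driver code; only READ_ONCE(), u64_stats_fetch_begin_irq() and u64_stats_fetch_retry_irq() are the real kernel APIs the driver uses.

/* Hypothetical, self-contained sketch of the read-side pattern;
 * demo_ring and demo_read_stats() are illustrative names only.
 */
#include <linux/compiler.h>        /* READ_ONCE() */
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_ring {
	struct u64_stats_sync syncp;
	struct {
		u64 packets;
		u64 bytes;
	} stats;
	struct rcu_head rcu;       /* for kfree_rcu() on teardown */
};

static void demo_read_stats(struct demo_ring **rings, int n,
			    u64 *packets, u64 *bytes)
{
	int i;

	for (i = 0; i < n; i++) {
		struct demo_ring *ring;
		unsigned int start;
		u64 p, b;

		/* One marked load of the shared slot: a concurrent
		 * writer may store NULL here, so load once, check,
		 * and never touch the array slot again.
		 */
		ring = READ_ONCE(rings[i]);
		if (!ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			p = ring->stats.packets;
			b = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		*packets += p;
		*bytes += b;
	}
}

This is the shape the hunks below give to i40e_get_netdev_stats_struct() and i40e_update_vsi_stats().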
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -439,11 +439,15 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
 		i40e_get_netdev_stats_struct_tx(ring, stats);
 
 		if (i40e_enabled_xdp_vsi(vsi)) {
-			ring++;
+			ring = READ_ONCE(vsi->xdp_rings[i]);
+			if (!ring)
+				continue;
 			i40e_get_netdev_stats_struct_tx(ring, stats);
 		}
 
-		ring++;
+		ring = READ_ONCE(vsi->rx_rings[i]);
+		if (!ring)
+			continue;
 		do {
 			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			packets = ring->stats.packets;
@@ -787,6 +791,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	for (q = 0; q < vsi->num_queue_pairs; q++) {
 		/* locate Tx ring */
 		p = READ_ONCE(vsi->tx_rings[q]);
+		if (!p)
+			continue;
 
 		do {
 			start = u64_stats_fetch_begin_irq(&p->syncp);
@@ -800,8 +806,11 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 		tx_linearize += p->tx_stats.tx_linearize;
 		tx_force_wb += p->tx_stats.tx_force_wb;
 
-		/* Rx queue is part of the same block as Tx queue */
-		p = &p[1];
+		/* locate Rx ring */
+		p = READ_ONCE(vsi->rx_rings[q]);
+		if (!p)
+			continue;
+
 		do {
 			start = u64_stats_fetch_begin_irq(&p->syncp);
 			packets = p->stats.packets;
@@ -10824,10 +10833,10 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
 	if (vsi->tx_rings && vsi->tx_rings[0]) {
 		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
 			kfree_rcu(vsi->tx_rings[i], rcu);
-			vsi->tx_rings[i] = NULL;
-			vsi->rx_rings[i] = NULL;
+			WRITE_ONCE(vsi->tx_rings[i], NULL);
+			WRITE_ONCE(vsi->rx_rings[i], NULL);
 			if (vsi->xdp_rings)
-				vsi->xdp_rings[i] = NULL;
+				WRITE_ONCE(vsi->xdp_rings[i], NULL);
 		}
 	}
 }
@@ -10861,7 +10870,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
 		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
 			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
 		ring->itr_setting = pf->tx_itr_default;
-		vsi->tx_rings[i] = ring++;
+		WRITE_ONCE(vsi->tx_rings[i], ring++);
 
 		if (!i40e_enabled_xdp_vsi(vsi))
 			goto setup_rx;
@@ -10879,7 +10888,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
 			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
 		set_ring_xdp(ring);
 		ring->itr_setting = pf->tx_itr_default;
-		vsi->xdp_rings[i] = ring++;
+		WRITE_ONCE(vsi->xdp_rings[i], ring++);
 
 setup_rx:
 		ring->queue_index = i;
@@ -10892,7 +10901,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
 		ring->size = 0;
 		ring->dcb_tc = 0;
 		ring->itr_setting = pf->rx_itr_default;
-		vsi->rx_rings[i] = ring;
+		WRITE_ONCE(vsi->rx_rings[i], ring);
 	}
 
 	return 0;