dpaa2-eth: add XDP_REDIRECT support

Implement support for the XDP_REDIRECT action. The redirected frame is
transmitted and confirmed on the regular Tx/Tx conf queues. The frame is
marked with the "XDP" type in the software annotation, since it requires
special treatment.

We don't have good hardware support for Tx batching, so the XDP_XMIT_FLUSH
flag doesn't make a difference for now; ndo_xdp_xmit performs the actual
Tx operation on the spot.

Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e3fdf6ba09
commit d678be1dc1
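For context, the run_xdp() changes below are exercised whenever the attached XDP program returns XDP_REDIRECT, and frames redirected towards a dpaa2-eth interface are transmitted through the new ndo_xdp_xmit callback. The following libbpf-style program is a minimal illustrative sketch and not part of this patch; the program name, section annotation and egress ifindex (3) are arbitrary placeholders.

/* Illustration only: redirect every received frame to another netdev.
 * Attaching this to a dpaa2-eth port exercises the XDP_REDIRECT path
 * added by this patch.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_redirect_all(struct xdp_md *ctx)
{
	/* 3 is a placeholder egress ifindex. The helper returns
	 * XDP_REDIRECT; the driver then calls xdp_do_redirect() and the
	 * frame is sent out via the egress driver's ndo_xdp_xmit.
	 */
	return bpf_redirect(3, 0);
}

char _license[] SEC("license") = "GPL";

Successful redirects on the Rx side are accounted in the new per-channel xdp_redirect counter, visible as "[drv] xdp redirect" in ethtool -S.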
@@ -296,6 +296,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
 	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
 	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
 	xdp_set_data_meta_invalid(&xdp);
+	xdp.rxq = &ch->xdp_rxq;
 
 	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
@@ -328,8 +329,20 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
 		xdp_release_buf(priv, ch, addr);
 		ch->stats.xdp_drop++;
 		break;
+	case XDP_REDIRECT:
+		dma_unmap_page(priv->net_dev->dev.parent, addr,
+			       DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+		ch->buf_count--;
+		xdp.data_hard_start = vaddr;
+		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
+		if (unlikely(err))
+			ch->stats.xdp_drop++;
+		else
+			ch->stats.xdp_redirect++;
+		break;
 	}
 
+	ch->xdp.res |= xdp_act;
 out:
 	rcu_read_unlock();
 	return xdp_act;
@@ -657,27 +670,35 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
  * dpaa2_eth_tx().
  */
 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
+		       struct dpaa2_eth_fq *fq,
 		       const struct dpaa2_fd *fd, bool in_napi)
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	dma_addr_t fd_addr;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	unsigned char *buffer_start;
 	struct dpaa2_eth_swa *swa;
 	u8 fd_format = dpaa2_fd_get_format(fd);
+	u32 fd_len = dpaa2_fd_get_len(fd);
 
 	fd_addr = dpaa2_fd_get_addr(fd);
 	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
 	swa = (struct dpaa2_eth_swa *)buffer_start;
 
 	if (fd_format == dpaa2_fd_single) {
-		skb = swa->single.skb;
-		/* Accessing the skb buffer is safe before dma unmap, because
-		 * we didn't map the actual skb shell.
-		 */
-		dma_unmap_single(dev, fd_addr,
-				 skb_tail_pointer(skb) - buffer_start,
-				 DMA_BIDIRECTIONAL);
+		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
+			skb = swa->single.skb;
+			/* Accessing the skb buffer is safe before dma unmap,
+			 * because we didn't map the actual skb shell.
+			 */
+			dma_unmap_single(dev, fd_addr,
+					 skb_tail_pointer(skb) - buffer_start,
+					 DMA_BIDIRECTIONAL);
+		} else {
+			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
+			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
+					 DMA_BIDIRECTIONAL);
+		}
 	} else if (fd_format == dpaa2_fd_sg) {
 		skb = swa->sg.skb;
 
@@ -694,6 +715,16 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
 		return;
 	}
 
+	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
+		fq->dq_frames++;
+		fq->dq_bytes += fd_len;
+	}
+
+	if (swa->type == DPAA2_ETH_SWA_XDP) {
+		xdp_return_frame(swa->xdp.xdpf);
+		return;
+	}
+
 	/* Get the timestamp value */
 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
 		struct skb_shared_hwtstamps shhwtstamps;
@@ -793,7 +824,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 	if (unlikely(err < 0)) {
 		percpu_stats->tx_errors++;
 		/* Clean up everything, including freeing the skb */
-		free_tx_fd(priv, &fd, false);
+		free_tx_fd(priv, fq, &fd, false);
 	} else {
 		fd_len = dpaa2_fd_get_len(&fd);
 		percpu_stats->tx_packets++;
@@ -830,12 +861,9 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
 	percpu_extras->tx_conf_frames++;
 	percpu_extras->tx_conf_bytes += fd_len;
 
-	fq->dq_frames++;
-	fq->dq_bytes += fd_len;
-
 	/* Check frame errors in the FD field */
 	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
-	free_tx_fd(priv, fd, true);
+	free_tx_fd(priv, fq, fd, true);
 
 	if (likely(!fd_errors))
 		return;
@@ -1083,6 +1111,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 	int err;
 
 	ch = container_of(napi, struct dpaa2_eth_channel, napi);
+	ch->xdp.res = 0;
 	priv = ch->priv;
 
 	do {
@@ -1128,7 +1157,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 	work_done = max(rx_cleaned, 1);
 
 out:
-	if (txc_fq) {
+	if (txc_fq && txc_fq->dq_frames) {
 		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
 		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
 					  txc_fq->dq_bytes);
@@ -1136,6 +1165,9 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 		txc_fq->dq_bytes = 0;
 	}
 
+	if (ch->xdp.res & XDP_REDIRECT)
+		xdp_do_flush_map();
+
 	return work_done;
 }
 
@@ -1730,6 +1762,105 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	return 0;
 }
 
+static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
+				    struct xdp_frame *xdpf)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct device *dev = net_dev->dev.parent;
+	struct rtnl_link_stats64 *percpu_stats;
+	struct dpaa2_eth_drv_stats *percpu_extras;
+	unsigned int needed_headroom;
+	struct dpaa2_eth_swa *swa;
+	struct dpaa2_eth_fq *fq;
+	struct dpaa2_fd fd;
+	void *buffer_start, *aligned_start;
+	dma_addr_t addr;
+	int err, i;
+
+	/* We require a minimum headroom to be able to transmit the frame.
+	 * Otherwise return an error and let the original net_device handle it
+	 */
+	needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
+	if (xdpf->headroom < needed_headroom)
+		return -EINVAL;
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+	percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+	/* Setup the FD fields */
+	memset(&fd, 0, sizeof(fd));
+
+	/* Align FD address, if possible */
+	buffer_start = xdpf->data - needed_headroom;
+	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+				  DPAA2_ETH_TX_BUF_ALIGN);
+	if (aligned_start >= xdpf->data - xdpf->headroom)
+		buffer_start = aligned_start;
+
+	swa = (struct dpaa2_eth_swa *)buffer_start;
+	/* fill in necessary fields here */
+	swa->type = DPAA2_ETH_SWA_XDP;
+	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
+	swa->xdp.xdpf = xdpf;
+
+	addr = dma_map_single(dev, buffer_start,
+			      swa->xdp.dma_size,
+			      DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(dev, addr))) {
+		percpu_stats->tx_dropped++;
+		return -ENOMEM;
+	}
+
+	dpaa2_fd_set_addr(&fd, addr);
+	dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
+	dpaa2_fd_set_len(&fd, xdpf->len);
+	dpaa2_fd_set_format(&fd, dpaa2_fd_single);
+	dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
+
+	fq = &priv->fq[smp_processor_id()];
+	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+		err = priv->enqueue(priv, fq, &fd, 0);
+		if (err != -EBUSY)
+			break;
+	}
+	percpu_extras->tx_portal_busy += i;
+	if (unlikely(err < 0)) {
+		percpu_stats->tx_errors++;
+		/* let the Rx device handle the cleanup */
+		return err;
+	}
+
+	percpu_stats->tx_packets++;
+	percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+
+	return 0;
+}
+
+static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
+			      struct xdp_frame **frames, u32 flags)
+{
+	int drops = 0;
+	int i, err;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	if (!netif_running(net_dev))
+		return -ENETDOWN;
+
+	for (i = 0; i < n; i++) {
+		struct xdp_frame *xdpf = frames[i];
+
+		err = dpaa2_eth_xdp_xmit_frame(net_dev, xdpf);
+		if (err) {
+			xdp_return_frame_rx_napi(xdpf);
+			drops++;
+		}
+	}
+
+	return n - drops;
+}
+
 static const struct net_device_ops dpaa2_eth_ops = {
 	.ndo_open = dpaa2_eth_open,
 	.ndo_start_xmit = dpaa2_eth_tx,
@@ -1741,6 +1872,7 @@ static const struct net_device_ops dpaa2_eth_ops = {
 	.ndo_do_ioctl = dpaa2_eth_ioctl,
 	.ndo_change_mtu = dpaa2_eth_change_mtu,
 	.ndo_bpf = dpaa2_eth_xdp,
+	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
 };
 
 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -2353,6 +2485,21 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
 		return err;
 	}
 
+	/* xdp_rxq setup */
+	err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
+			       fq->flowid);
+	if (err) {
+		dev_err(dev, "xdp_rxq_info_reg failed\n");
+		return err;
+	}
+
+	err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
+					 MEM_TYPE_PAGE_ORDER0, NULL);
+	if (err) {
+		dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
+		return err;
+	}
+
 	return 0;
 }
 
@@ -95,6 +95,7 @@
 enum dpaa2_eth_swa_type {
 	DPAA2_ETH_SWA_SINGLE,
 	DPAA2_ETH_SWA_SG,
+	DPAA2_ETH_SWA_XDP,
 };
 
 /* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
@@ -110,6 +111,10 @@ struct dpaa2_eth_swa {
 			int num_sg;
 			int sgt_size;
 		} sg;
+		struct {
+			int dma_size;
+			struct xdp_frame *xdpf;
+		} xdp;
 	};
 };
 
@@ -273,6 +278,7 @@ struct dpaa2_eth_ch_stats {
 	__u64 xdp_drop;
 	__u64 xdp_tx;
 	__u64 xdp_tx_err;
+	__u64 xdp_redirect;
 };
 
 /* Maximum number of queues associated with a DPNI */
@@ -312,6 +318,7 @@ struct dpaa2_eth_ch_xdp {
 	struct bpf_prog *prog;
 	u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD];
 	int drop_cnt;
+	unsigned int res;
 };
 
 struct dpaa2_eth_channel {
@@ -326,6 +333,7 @@ struct dpaa2_eth_channel {
 	int buf_count;
 	struct dpaa2_eth_ch_stats stats;
 	struct dpaa2_eth_ch_xdp xdp;
+	struct xdp_rxq_info xdp_rxq;
 };
 
 struct dpaa2_eth_dist_fields {
@@ -446,6 +454,12 @@ unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
 {
 	unsigned int headroom = DPAA2_ETH_SWA_SIZE;
 
+	/* If we don't have an skb (e.g. XDP buffer), we only need space for
+	 * the software annotation area
+	 */
+	if (!skb)
+		return headroom;
+
 	/* For non-linear skbs we have no headroom requirement, as we build a
 	 * SG frame with a newly allocated SGT buffer
 	 */
@@ -48,6 +48,7 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
 	"[drv] xdp drop",
 	"[drv] xdp tx",
 	"[drv] xdp tx errors",
+	"[drv] xdp redirect",
 	/* FQ stats */
 	"[qbman] rx pending frames",
 	"[qbman] rx pending bytes",