Merge branch 'Add-PTP-support-for-Octeontx2'

Subbaraya Sundeep says:

====================
Add PTP support for Octeontx2

This patchset adds PTP support for the Octeontx2 platform.
PTP is an independent coprocessor block from which the
CGX block fetches the timestamp and prepends it to the
packet before sending it to the NIX block. The patches are
as follows:

Patch 1: Patch to enable/disable packet timestamping
         in CGX upon mailbox request. It also adjusts
         the packet parser (NPC) for the 8-byte timestamp
         prepended to the packet.

Patch 2: Patch adding the PTP PCI driver which configures
         the PTP block and hooks it up to the RVU AF driver.
         It also exposes a mailbox call to adjust the PTP
         hardware clock.

Patch 3: Patch adding the PTP clock driver for the PF netdev.

v8:
 Added the missing header file to patch 2, as reported
 by the kernel test robot
v7:
 As per Jesse Brandeburg's comments:
 Simplified functions in patch 1
 Replaced magic numbers with macros
 Added Copyrights
 Added code comments wherever required
 Modified commit description of patch 2
v6:
 Resent after net-next is open
v5:
 As suggested by David, separated the fix (adding rtnl lock/unlock)
 and submitted it to net.
 https://www.spinics.net/lists/netdev/msg669617.html
v4:
 Added rtnl_lock/unlock in otx2_reset to protect against
 network stack ndo_open and close calls
 Added NULL check after ptp_clock_register in otx2_ptp.c
v3:
 Fixed sparse error in otx2_txrx.c
 Removed static inlines in otx2_txrx.c
v2:
 Fixed the error reported by the kernel build robot by
 adding timecounter.h to otx2_common.h
====================

Acked-by: Richard Cochran <richardcochran@gmail.com>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2020-08-24 18:15:45 -07:00
commit 64d123fc25
20 changed files with 1031 additions and 10 deletions
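
The series exposes timestamping through the standard kernel interfaces, so once
it is applied the feature can be exercised from userspace with the usual
SIOCSHWTSTAMP ioctl (handled by otx2_config_hwtstamp() in the PF driver below).
A minimal sketch, assuming an interface named "eth0" (placeholder) and omitting
most error handling:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,      /* timestamp transmitted packets */
		.rx_filter = HWTSTAMP_FILTER_ALL, /* the driver reports FILTER_ALL */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("granted rx_filter: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}

TX timestamps are then delivered on the socket error queue when requested via
SO_TIMESTAMPING, and the PHC registered by otx2_ptp.c can be disciplined with
tools such as ptp4l using the clock index reported by ethtool -T.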


@ -8,4 +8,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
octeontx2_mbox-y := mbox.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o


@ -468,6 +468,35 @@ static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
}
}
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
u64 cfg;
if (!cgx)
return;
if (enable) {
/* Enable inbound PTP timestamping */
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
} else {
/* Disable inbound PTP stamping */
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
}
}
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{


@ -58,8 +58,10 @@
#define CGXX_SMUX_RX_FRM_CTL 0x20020
#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
#define CGX_SMUX_RX_FRM_CTL_PTP_MODE BIT_ULL(12)
#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
#define CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE BIT_ULL(12)
#define CGXX_SMUX_TX_CTL 0x20178
#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
@ -139,4 +141,6 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
u8 *tx_pause, u8 *rx_pause);
int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause);
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
#endif /* CGX_H */


@ -127,6 +127,7 @@ M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
@ -143,6 +144,8 @@ M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
cgx_pause_frm_cfg) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
@ -213,6 +216,8 @@ M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \
M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \
M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
nix_bp_cfg_rsp) \
M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
@ -858,4 +863,20 @@ struct npc_get_kex_cfg_rsp {
u8 mkex_pfl_name[MKEX_NAME_LEN];
};
enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
};
struct ptp_req {
struct mbox_msghdr hdr;
u8 op;
s64 scaled_ppm;
};
struct ptp_rsp {
struct mbox_msghdr hdr;
u64 clk;
};
#endif /* MBOX_H */


@ -0,0 +1,275 @@
// SPDX-License-Identifier: GPL-2.0
/* Marvell PTP driver
*
* Copyright (C) 2020 Marvell International Ltd.
*/
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "ptp.h"
#include "mbox.h"
#include "rvu.h"
#define DRV_NAME "Marvell PTP Driver"
#define PCI_DEVID_OCTEONTX2_PTP 0xA00C
#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100
#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200
#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300
#define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400
#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500
#define PCI_DEVID_OCTEONTX2_RST 0xA085
#define PCI_PTP_BAR_NO 0
#define PCI_RST_BAR_NO 0
#define PTP_CLOCK_CFG 0xF00ULL
#define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0)
#define PTP_CLOCK_LO 0xF08ULL
#define PTP_CLOCK_HI 0xF10ULL
#define PTP_CLOCK_COMP 0xF18ULL
#define RST_BOOT 0x1600ULL
#define RST_MUL_BITS GENMASK_ULL(38, 33)
#define CLOCK_BASE_RATE 50000000ULL
static u64 get_clock_rate(void)
{
u64 cfg, ret = CLOCK_BASE_RATE * 16;
struct pci_dev *pdev;
void __iomem *base;
/* To get the input clock frequency at which the PTP coprocessor
* block is running, the base frequency (50 MHz) needs to be
* multiplied by the multiplier bits present in the RST_BOOT register
* of the RESET block. Hence the code below gets the multiplier bits
* from the RESET PCI device present in the system.
*/
pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OCTEONTX2_RST, NULL);
if (!pdev)
goto error;
base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
if (!base)
goto error_put_pdev;
cfg = readq(base + RST_BOOT);
ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg);
iounmap(base);
error_put_pdev:
pci_dev_put(pdev);
error:
return ret;
}
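For illustration (multiplier value hypothetical): FIELD_GET(RST_MUL_BITS, cfg)
returns bits [38:33] of RST_BOOT shifted down to bit 0, so a multiplier of 20
would give 50000000 * 20 = 1 GHz, while the fallback used when the RESET device
cannot be found or mapped is the preset 50000000 * 16 = 800 MHz.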
struct ptp *ptp_get(void)
{
struct pci_dev *pdev;
struct ptp *ptp;
/* If the PTP PCI device is found on the system and the ptp
* driver is bound to it then the PTP PCI device is returned
* to the caller (rvu driver).
*/
pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OCTEONTX2_PTP, NULL);
if (!pdev)
return ERR_PTR(-ENODEV);
ptp = pci_get_drvdata(pdev);
if (!ptp)
ptp = ERR_PTR(-EPROBE_DEFER);
if (IS_ERR(ptp))
pci_dev_put(pdev);
return ptp;
}
void ptp_put(struct ptp *ptp)
{
if (!ptp)
return;
pci_dev_put(ptp->pdev);
}
static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
{
bool neg_adj = false;
u64 comp;
u64 adj;
s64 ppb;
if (scaled_ppm < 0) {
neg_adj = true;
scaled_ppm = -scaled_ppm;
}
/* The hardware adds the clock compensation value to the PTP clock
* on every coprocessor clock cycle. Typical convention is that it
* represents the number of nanoseconds between each cycle. In this
* convention the compensation value is in 64 bit fixed-point
* representation where the upper 32 bits are the number of nanoseconds
* and the lower 32 bits are fractions of a nanosecond.
* The scaled_ppm represents the ratio in "parts per million" by which
* the compensation value should be corrected.
* To calculate the new compensation value we use 64 bit fixed point
* arithmetic on the following formula
* comp = tbase + tbase * scaled_ppm / (1M * 2^16)
* where tbase is the basic compensation value calculated
* initially in the probe function.
*/
comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
/* convert scaled_ppm to ppb */
ppb = 1 + scaled_ppm;
ppb *= 125;
ppb >>= 13;
adj = comp * ppb;
adj = div_u64(adj, 1000000000ull);
comp = neg_adj ? comp - adj : comp + adj;
writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
return 0;
}
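As a standalone check of the arithmetic above (illustration only, not part of
the patch; it assumes a hypothetical 1 GHz coprocessor clock so the base
compensation is exactly 2^32, i.e. 1 ns per cycle in 32.32 fixed point):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t comp = (1000000000ull << 32) / 1000000000ull; /* 2^32 */
	long scaled_ppm = 65536;                      /* request +1 ppm */
	int64_t ppb = ((1 + scaled_ppm) * 125) >> 13; /* 125/2^13 == 1000/2^16 */
	uint64_t adj = comp * ppb / 1000000000ull;

	/* prints "ppb=1000 adj=4294", i.e. roughly one ppm of 2^32 */
	printf("ppb=%lld adj=%llu\n", (long long)ppb, (unsigned long long)adj);
	return 0;
}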
static int ptp_get_clock(struct ptp *ptp, u64 *clk)
{
/* Return the current PTP clock */
*clk = readq(ptp->reg_base + PTP_CLOCK_HI);
return 0;
}
static int ptp_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ptp *ptp;
u64 clock_comp;
u64 clock_cfg;
int err;
ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
if (!ptp) {
err = -ENOMEM;
goto error;
}
ptp->pdev = pdev;
err = pcim_enable_device(pdev);
if (err)
goto error_free;
err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
if (err)
goto error_free;
ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
ptp->clock_rate = get_clock_rate();
/* Enable PTP clock */
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
/* Initial compensation value to start the nanosecs counter */
writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
pci_set_drvdata(pdev, ptp);
return 0;
error_free:
devm_kfree(dev, ptp);
error:
/* For `ptp_get()` we need to differentiate between the case
* when the core has not tried to probe this device and the case when
* the probe failed. In the latter case we pretend that the
* initialization was successful and keep the error in
* `dev->driver_data`.
*/
pci_set_drvdata(pdev, ERR_PTR(err));
return 0;
}
static void ptp_remove(struct pci_dev *pdev)
{
struct ptp *ptp = pci_get_drvdata(pdev);
u64 clock_cfg;
if (IS_ERR_OR_NULL(ptp))
return;
/* Disable PTP clock */
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
}
static const struct pci_device_id ptp_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
PCI_SUBSYS_DEVID_OCTX2_98xx_PTP) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
PCI_SUBSYS_DEVID_OCTX2_96XX_PTP) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
{ 0, }
};
struct pci_driver ptp_driver = {
.name = DRV_NAME,
.id_table = ptp_id_table,
.probe = ptp_probe,
.remove = ptp_remove,
};
int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
struct ptp_rsp *rsp)
{
int err = 0;
/* This function is the PTP mailbox handler invoked by AF
* consumers/netdev drivers via the mailbox mechanism. It is used by
* the netdev driver to get the PTP clock and to set frequency
* adjustments. Since the mailbox can be called without knowing
* whether the driver is bound to the ptp device, the validation
* below is needed as the first step.
*/
if (!rvu->ptp)
return -ENODEV;
switch (req->op) {
case PTP_OP_ADJFINE:
err = ptp_adjfine(rvu->ptp, req->scaled_ppm);
break;
case PTP_OP_GET_CLOCK:
err = ptp_get_clock(rvu->ptp, &rsp->clk);
break;
default:
err = -EINVAL;
break;
}
return err;
}


@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell PTP driver
*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef PTP_H
#define PTP_H
#include <linux/timecounter.h>
#include <linux/time64.h>
#include <linux/spinlock.h>
struct ptp {
struct pci_dev *pdev;
void __iomem *reg_base;
u32 clock_rate;
};
struct ptp *ptp_get(void);
void ptp_put(struct ptp *ptp);
extern struct pci_driver ptp_driver;
#endif


@ -18,6 +18,7 @@
#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"
#include "ptp.h"
#define DRV_NAME "octeontx2-af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
@ -2565,13 +2566,21 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
rvu->ptp = ptp_get();
if (IS_ERR(rvu->ptp)) {
err = PTR_ERR(rvu->ptp);
if (err == -EPROBE_DEFER)
goto err_release_regions;
rvu->ptp = NULL;
}
/* Map Admin function CSRs */
rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
if (!rvu->afreg_base || !rvu->pfreg_base) {
dev_err(dev, "Unable to map admin function CSRs, aborting\n");
err = -ENOMEM;
goto err_release_regions;
goto err_put_ptp;
}
/* Store module params in rvu structure */
@ -2586,7 +2595,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = rvu_setup_hw_resources(rvu);
if (err)
goto err_release_regions;
goto err_put_ptp;
/* Init mailbox btw AF and PFs */
err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
@ -2626,6 +2635,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
err_put_ptp:
ptp_put(rvu->ptp);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
@ -2651,6 +2662,7 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
ptp_put(rvu->ptp);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@ -2676,9 +2688,19 @@ static int __init rvu_init_module(void)
if (err < 0)
return err;
err = pci_register_driver(&ptp_driver);
if (err < 0)
goto ptp_err;
err = pci_register_driver(&rvu_driver);
if (err < 0)
pci_unregister_driver(&cgx_driver);
goto rvu_err;
return 0;
rvu_err:
pci_unregister_driver(&ptp_driver);
ptp_err:
pci_unregister_driver(&cgx_driver);
return err;
}
@ -2686,6 +2708,7 @@ static int __init rvu_init_module(void)
static void __exit rvu_cleanup_module(void)
{
pci_unregister_driver(&rvu_driver);
pci_unregister_driver(&ptp_driver);
pci_unregister_driver(&cgx_driver);
}


@ -289,6 +289,8 @@ struct rvu_fwdata {
u64 reserved[FWDATA_RESERVED_MEM];
};
struct ptp;
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@ -337,6 +339,8 @@ struct rvu {
/* Firmware data */
struct rvu_fwdata *fwdata;
struct ptp *ptp;
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
@ -469,6 +473,7 @@ int rvu_npc_init(struct rvu *rvu);
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,


@ -509,6 +509,45 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
void *cgxd;
/* This msg is expected only from PFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
cgx_lmac_ptp_config(cgxd, lmac_id, enable);
/* If PTP is enabled then inform NPC that packets to be
* parsed by this PF will have their data shifted by 8 bytes
* and if PTP is disabled then no shift is required
*/
if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
return -EINVAL;
return 0;
}
int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}
int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);


@ -3319,6 +3319,49 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
nix_ctx_free(rvu, pfvf);
}
#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
int blkaddr;
int nixlf;
u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
block = &hw->block[blkaddr];
nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
if (enable)
cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
else
cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
return 0;
}
int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
}
int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
}
int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
struct nix_lso_format_cfg *req,
struct nix_lso_format_cfg_rsp *rsp)


@ -27,6 +27,7 @@
#define NIXLF_PROMISC_ENTRY 2
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
#define NPC_HW_TSTAMP_OFFSET 8
static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 pcifunc);
@ -61,6 +62,36 @@ int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
return -1;
}
#define NPC_AF_ACTION0_PTR_ADVANCE GENMASK_ULL(27, 20)
int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
{
int pkind, blkaddr;
u64 val;
pkind = rvu_npc_get_pkind(rvu, pf);
if (pkind < 0) {
dev_err(rvu->dev, "%s: pkind not mapped\n", __func__);
return -EINVAL;
}
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
if (blkaddr < 0) {
dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
return -EINVAL;
}
val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
val &= ~NPC_AF_ACTION0_PTR_ADVANCE;
/* If timestamp is enabled then configure NPC to shift 8 bytes */
if (enable)
val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE,
NPC_HW_TSTAMP_OFFSET);
rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
return 0;
}
static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
u16 pcifunc, int nixlf, int type)
{


@ -6,7 +6,8 @@
obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o
octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_ptp.o
octeontx2_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af


@ -671,6 +671,13 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (!sq->sg)
return -ENOMEM;
if (pfvf->ptp) {
err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
sizeof(*sq->timestamps));
if (err)
return err;
}
sq->head = 0;
sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;


@ -13,6 +13,9 @@
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <mbox.h>
#include "otx2_reg.h"
@ -209,6 +212,17 @@ struct refill_work {
struct otx2_nic *pf;
};
struct otx2_ptp {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
struct otx2_nic *nic;
struct cyclecounter cycle_counter;
struct timecounter time_counter;
};
#define OTX2_HW_TIMESTAMP_LEN 8
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@ -216,6 +230,8 @@ struct otx2_nic {
u16 max_frs;
u16 rbsize; /* Receive buffer size */
#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
@ -251,6 +267,9 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
struct otx2_ptp *ptp;
struct hwtstamp_config tstamp;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)


@ -13,8 +13,10 @@
#include <linux/stddef.h>
#include <linux/etherdevice.h>
#include <linux/log2.h>
#include <linux/net_tstamp.h>
#include "otx2_common.h"
#include "otx2_ptp.h"
#define DRV_NAME "octeontx2-nicpf"
#define DRV_VF_NAME "octeontx2-nicvf"
@ -663,6 +665,31 @@ static u32 otx2_get_link(struct net_device *netdev)
return pfvf->linfo.link_up;
}
static int otx2_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
if (!pfvf->ptp)
return ethtool_op_get_ts_info(netdev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = otx2_ptp_clock_index(pfvf);
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
return 0;
}
static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@ -687,6 +714,7 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
.get_ts_info = otx2_get_ts_info,
};
void otx2_set_ethtool_ops(struct net_device *netdev)


@ -21,6 +21,7 @@
#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"
#include "otx2_ptp.h"
#define DRV_NAME "octeontx2-nicpf"
#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
@ -41,6 +42,9 @@ enum {
TYPE_PFVF,
};
static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
bool if_up = netif_running(netdev);
@ -1281,7 +1285,8 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
/* Get the size of receive buffers to allocate */
pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);
pf->rbsize = RCV_FRAG_LEN(OTX2_HW_TIMESTAMP_LEN + pf->netdev->mtu +
OTX2_ETH_HLEN);
mutex_lock(&mbox->lock);
/* NPA init */
@ -1547,6 +1552,16 @@ int otx2_open(struct net_device *netdev)
otx2_set_cints_affinity(pf);
/* When reinitializing, enable time stamping if it was enabled before */
if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
otx2_config_hw_tx_tstamp(pf, true);
}
if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
otx2_config_hw_rx_tstamp(pf, true);
}
pf->flags &= ~OTX2_FLAG_INTF_DOWN;
/* 'intf_down' may be checked on any cpu */
smp_wmb();
@ -1738,6 +1753,143 @@ static void otx2_reset_task(struct work_struct *work)
rtnl_unlock();
}
static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
{
struct msg_req *req;
int err;
if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
return 0;
mutex_lock(&pfvf->mbox.lock);
if (enable)
req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
else
req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
if (!req) {
mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) {
mutex_unlock(&pfvf->mbox.lock);
return err;
}
mutex_unlock(&pfvf->mbox.lock);
if (enable)
pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
else
pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
return 0;
}
static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
{
struct msg_req *req;
int err;
if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
return 0;
mutex_lock(&pfvf->mbox.lock);
if (enable)
req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
else
req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
if (!req) {
mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) {
mutex_unlock(&pfvf->mbox.lock);
return err;
}
mutex_unlock(&pfvf->mbox.lock);
if (enable)
pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
else
pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
return 0;
}
static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct hwtstamp_config config;
if (!pfvf->ptp)
return -ENODEV;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
/* reserved for future extensions */
if (config.flags)
return -EINVAL;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
otx2_config_hw_tx_tstamp(pfvf, false);
break;
case HWTSTAMP_TX_ON:
otx2_config_hw_tx_tstamp(pfvf, true);
break;
default:
return -ERANGE;
}
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
otx2_config_hw_rx_tstamp(pfvf, false);
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
otx2_config_hw_rx_tstamp(pfvf, true);
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
memcpy(&pfvf->tstamp, &config, sizeof(config));
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
}
static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct hwtstamp_config *cfg = &pfvf->tstamp;
switch (cmd) {
case SIOCSHWTSTAMP:
return otx2_config_hwtstamp(netdev, req);
case SIOCGHWTSTAMP:
return copy_to_user(req->ifr_data, cfg,
sizeof(*cfg)) ? -EFAULT : 0;
default:
return -EOPNOTSUPP;
}
}
static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
@ -1748,6 +1900,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_features = otx2_set_features,
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_do_ioctl = otx2_ioctl,
};
static int otx2_wq_init(struct otx2_nic *pf)
@ -1920,6 +2073,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
/* Don't check for error. Proceed without ptp */
otx2_ptp_init(pf);
/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
* HW allocates buffer pointer from stack and uses it for DMA'ing
* ingress packet. In some scenarios HW can free back allocated buffer
@ -1952,7 +2108,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
goto err_detach_rsrc;
goto err_ptp_destroy;
}
err = otx2_wq_init(pf);
@ -1972,6 +2128,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
@ -2113,6 +2271,11 @@ static void otx2_remove(struct pci_dev *pdev)
pf = netdev_priv(netdev);
if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
otx2_config_hw_tx_tstamp(pf, false);
if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
otx2_config_hw_rx_tstamp(pf, false);
cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
@ -2122,6 +2285,7 @@ static void otx2_remove(struct pci_dev *pdev)
if (pf->otx2_wq)
destroy_workqueue(pf->otx2_wq);
otx2_ptp_destroy(pf);
otx2_detach_resources(&pf->mbox);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);


@ -0,0 +1,212 @@
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 PTP support for ethernet driver
*
* Copyright (C) 2020 Marvell International Ltd.
*/
#include "otx2_common.h"
#include "otx2_ptp.h"
static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct ptp_req *req;
int err;
if (!ptp->nic)
return -ENODEV;
req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
if (!req)
return -ENOMEM;
req->op = PTP_OP_ADJFINE;
req->scaled_ppm = scaled_ppm;
err = otx2_sync_mbox_msg(&ptp->nic->mbox);
if (err)
return err;
return 0;
}
static u64 ptp_cc_read(const struct cyclecounter *cc)
{
struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
struct ptp_req *req;
struct ptp_rsp *rsp;
int err;
if (!ptp->nic)
return 0;
req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
if (!req)
return 0;
req->op = PTP_OP_GET_CLOCK;
err = otx2_sync_mbox_msg(&ptp->nic->mbox);
if (err)
return 0;
rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
&req->hdr);
if (IS_ERR(rsp))
return 0;
return rsp->clk;
}
static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct otx2_nic *pfvf = ptp->nic;
mutex_lock(&pfvf->mbox.lock);
timecounter_adjtime(&ptp->time_counter, delta);
mutex_unlock(&pfvf->mbox.lock);
return 0;
}
static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
struct timespec64 *ts)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct otx2_nic *pfvf = ptp->nic;
u64 nsec;
mutex_lock(&pfvf->mbox.lock);
nsec = timecounter_read(&ptp->time_counter);
mutex_unlock(&pfvf->mbox.lock);
*ts = ns_to_timespec64(nsec);
return 0;
}
static int otx2_ptp_settime(struct ptp_clock_info *ptp_info,
const struct timespec64 *ts)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct otx2_nic *pfvf = ptp->nic;
u64 nsec;
nsec = timespec64_to_ns(ts);
mutex_lock(&pfvf->mbox.lock);
timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec);
mutex_unlock(&pfvf->mbox.lock);
return 0;
}
static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
struct ptp_clock_request *rq, int on)
{
return -EOPNOTSUPP;
}
int otx2_ptp_init(struct otx2_nic *pfvf)
{
struct otx2_ptp *ptp_ptr;
struct cyclecounter *cc;
struct ptp_req *req;
int err;
mutex_lock(&pfvf->mbox.lock);
/* check if PTP block is available */
req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
if (!req) {
mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
req->op = PTP_OP_GET_CLOCK;
err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) {
mutex_unlock(&pfvf->mbox.lock);
return err;
}
mutex_unlock(&pfvf->mbox.lock);
ptp_ptr = kzalloc(sizeof(*ptp_ptr), GFP_KERNEL);
if (!ptp_ptr) {
err = -ENOMEM;
goto error;
}
ptp_ptr->nic = pfvf;
cc = &ptp_ptr->cycle_counter;
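/* The PTP counter returned by the AF is already in nanoseconds (the
* initial compensation programmed in the AF ptp.c makes it advance
* 1 ns per count), so an identity cyclecounter (mult = 1, shift = 0)
* is sufficient.
*/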
cc->read = ptp_cc_read;
cc->mask = CYCLECOUNTER_MASK(64);
cc->mult = 1;
cc->shift = 0;
timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
ktime_to_ns(ktime_get_real()));
ptp_ptr->ptp_info = (struct ptp_clock_info) {
.owner = THIS_MODULE,
.name = "OcteonTX2 PTP",
.max_adj = 1000000000ull,
.n_ext_ts = 0,
.n_pins = 0,
.pps = 0,
.adjfine = otx2_ptp_adjfine,
.adjtime = otx2_ptp_adjtime,
.gettime64 = otx2_ptp_gettime,
.settime64 = otx2_ptp_settime,
.enable = otx2_ptp_enable,
};
ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) {
err = ptp_ptr->ptp_clock ?
PTR_ERR(ptp_ptr->ptp_clock) : -ENODEV;
kfree(ptp_ptr);
goto error;
}
pfvf->ptp = ptp_ptr;
error:
return err;
}
void otx2_ptp_destroy(struct otx2_nic *pfvf)
{
struct otx2_ptp *ptp = pfvf->ptp;
if (!ptp)
return;
ptp_clock_unregister(ptp->ptp_clock);
kfree(ptp);
pfvf->ptp = NULL;
}
int otx2_ptp_clock_index(struct otx2_nic *pfvf)
{
if (!pfvf->ptp)
return -ENODEV;
return ptp_clock_index(pfvf->ptp->ptp_clock);
}
int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
{
if (!pfvf->ptp)
return -ENODEV;
*tsns = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
return 0;
}


@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 PTP support for ethernet driver */
#ifndef OTX2_PTP_H
#define OTX2_PTP_H
int otx2_ptp_init(struct otx2_nic *pfvf);
void otx2_ptp_destroy(struct otx2_nic *pfvf);
int otx2_ptp_clock_index(struct otx2_nic *pfvf);
int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns);
#endif


@ -16,6 +16,7 @@
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
#include "otx2_ptp.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
@ -81,8 +82,11 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
int budget, int *tx_pkts, int *tx_bytes)
{
struct nix_send_comp_s *snd_comp = &cqe->comp;
struct skb_shared_hwtstamps ts;
struct sk_buff *skb = NULL;
u64 timestamp, tsns;
struct sg_list *sg;
int err;
if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
@ -94,6 +98,18 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
if (unlikely(!skb))
return;
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
if (timestamp != 1) {
err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
if (!err) {
memset(&ts, 0, sizeof(ts));
ts.hwtstamp = ns_to_ktime(tsns);
skb_tstamp_tx(skb, &ts);
}
}
}
*tx_bytes += skb->len;
(*tx_pkts)++;
otx2_dma_unmap_skb_frags(pfvf, sg);
@ -101,16 +117,47 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
sg->skb = (u64)NULL;
}
static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
struct sk_buff *skb, void *data)
{
u64 tsns;
int err;
if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
return;
/* The first 8 bytes is the timestamp */
err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
if (err)
return;
skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}
static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
u64 iova, int len)
u64 iova, int len, struct nix_rx_parse_s *parse)
{
struct page *page;
int off = 0;
void *va;
va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
if (likely(!skb_shinfo(skb)->nr_frags)) {
/* Check if data starts at some nonzero offset
* from the start of the buffer. For now the
* only possible offset is 8 bytes in the case
* where packet is prepended by a timestamp.
*/
if (parse->laptr) {
otx2_set_rxtstamp(pfvf, skb, va);
off = OTX2_HW_TIMESTAMP_LEN;
}
}
page = virt_to_page(va);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
va - page_address(page), len, pfvf->rbsize);
va - page_address(page) + off, len - off, pfvf->rbsize);
otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
pfvf->rbsize, DMA_FROM_DEVICE);
@ -239,7 +286,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
if (unlikely(!skb))
return;
otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size);
otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size, parse);
cq->pool_ptrs++;
otx2_set_rxhash(pfvf, cqe, skb);
@ -482,10 +529,27 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
ipv6_hdr(skb)->payload_len =
htons(ext->lso_sb - skb_network_offset(skb));
}
} else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
ext->tstmp = 1;
}
*offset += sizeof(*ext);
}
static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
int alg, u64 iova)
{
struct nix_sqe_mem_s *mem;
mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
mem->subdc = NIX_SUBDC_MEM;
mem->alg = alg;
mem->wmem = 1; /* wait for the memory operation */
mem->addr = iova;
*offset += sizeof(*mem);
}
/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct nix_sqe_hdr_s *sqe_hdr,
@ -736,6 +800,21 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs;
}
static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
struct otx2_snd_queue *sq, int *offset)
{
u64 iova;
if (!skb_shinfo(skb)->gso_size &&
skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
iova = sq->timestamps->iova + (sq->head * sizeof(u64));
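/* The MEM subdescriptor below makes NIX write the TX timestamp to
* sq->timestamps[sq->head]; otx2_snd_pkt_handler() reads it back by
* sqe_id on completion and reports it via skb_tstamp_tx().
*/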
otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
} else {
skb_tx_timestamp(skb);
}
}
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx)
{
@ -789,6 +868,8 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
return false;
}
otx2_set_txtstamp(pfvf, skb, sq, &offset);
sqe_hdr->sizem1 = (offset / 16) - 1;
netdev_tx_sent_queue(txq, skb->len);


@ -91,6 +91,7 @@ struct otx2_snd_queue {
struct qmem *sqe;
struct qmem *tso_hdrs;
struct sg_list *sg;
struct qmem *timestamps;
struct queue_stats stats;
u16 sqb_count;
u64 *sqb_ptrs;