octeontx2-af: Modify SMQ flush sequence to drop packets
[ Upstream commit 019aba0 ]

The current implementation of the SMQ flush sequence waits for the packets
in the TM pipeline to be transmitted out of the link. This sequence does
not succeed in HW when there is any issue with the link, such as a lack of
link credits, link down, or other traffic that fully occupies the link
bandwidth (QoS). This patch modifies the SMQ flush sequence to drop the
packets after the TL1 level (SQM) instead of polling for the packets to be
sent out of the RPM/CGX link. A standalone sketch of the new ordering
follows the change summary below.

Fixes: 5d9b976 ("octeontx2-af: Support fixed transmit scheduler topology")
Signed-off-by: Naveen Mamindlapalli <[email protected]>
Reviewed-by: Sunil Kovvuri Goutham <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Paolo Abeni <[email protected]>
Signed-off-by: Sasha Levin <[email protected]>
Naveen Mamindlapalli authored and gregkh committed Sep 18, 2024
1 parent aca06c6 commit 57db476
Showing 2 changed files with 48 additions and 14 deletions.
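For orientation before reading the diff, here is a standalone user-space model of the new flush ordering. The fake register variables, the NUM_LINKS constant, and the function name smq_flush_drop() are invented for illustration; only the bit positions (enqueue XOFF = bit 50 and flush = bit 49 of NIX_AF_SMQX_CFG, ENA = bit 12 of NIX_AF_TL3_TL2X_LINKX_CFG) and the save/clear/flush/restore ordering come from the patch itself.

    /* Standalone model of the reworked flush sequence. All "registers"
     * here are plain variables; in the driver these are rvu_read64()/
     * rvu_write64() accesses, and flush completion is polled, not instant.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n)    (1ULL << (n))
    #define SMQ_FLUSH     BIT_ULL(49)  /* NIX_AF_SMQX_CFG[49]: flush */
    #define SMQ_ENQ_XOFF  BIT_ULL(50)  /* NIX_AF_SMQX_CFG[50]: enqueue xoff */
    #define LINK_CFG_ENA  BIT_ULL(12)  /* NIX_AF_TL3_TL2X_LINKX_CFG[12] */
    #define NUM_LINKS     4            /* stand-in for cgx_links + lbk_links */

    static uint64_t smq_cfg;             /* models NIX_AF_SMQX_CFG */
    static uint64_t link_cfg[NUM_LINKS]; /* models per-link TL3_TL2 LINK_CFG */

    static void smq_flush_drop(void)
    {
            uint64_t bmap = 0;
            int i;

            /* 1. Stop new enqueues into the SMQ before touching the links */
            smq_cfg |= SMQ_ENQ_XOFF;

            /* 2. Drop point: clear LINK_CFG[ENA] on every enabled link and
             *    record which ones were enabled, so packets reaching the end
             *    of the TM pipeline are dropped instead of sent on the wire.
             */
            for (i = 0; i < NUM_LINKS; i++) {
                    if (!(link_cfg[i] & LINK_CFG_ENA))
                            continue;
                    bmap |= BIT_ULL(i);
                    link_cfg[i] &= ~LINK_CFG_ENA;
            }

            /* 3. Trigger the flush; the driver polls bit 49 until HW clears it */
            smq_cfg |= SMQ_FLUSH;
            smq_cfg &= ~SMQ_FLUSH;  /* model completion as instantaneous */

            /* 4. Re-enable only the links that were enabled before the flush */
            for (i = 0; i < NUM_LINKS; i++)
                    if (bmap & BIT_ULL(i))
                            link_cfg[i] |= LINK_CFG_ENA;
    }

    int main(void)
    {
            link_cfg[0] = link_cfg[2] = LINK_CFG_ENA;  /* links 0 and 2 enabled */
            smq_flush_drop();
            printf("link ENA after flush: %d %d %d %d\n",
                   !!(link_cfg[0] & LINK_CFG_ENA), !!(link_cfg[1] & LINK_CFG_ENA),
                   !!(link_cfg[2] & LINK_CFG_ENA), !!(link_cfg[3] & LINK_CFG_ENA));
            return 0;
    }

The actual driver additionally XOFFs sibling TL2s of the same PF_FUNC, suspends CIR/PIR rate limiting, and disables link backpressure before this point, and undoes all of that afterwards, as the rvu_nix.c hunks below show.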
3 changes: 1 addition & 2 deletions drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -318,6 +318,7 @@ struct nix_mark_format {
 
 /* smq(flush) to tl1 cir/pir info */
 struct nix_smq_tree_ctx {
+        u16 schq;
         u64 cir_off;
         u64 cir_val;
         u64 pir_off;
@@ -327,8 +328,6 @@ struct nix_smq_tree_ctx {
 /* smq flush context */
 struct nix_smq_flush_ctx {
         int smq;
-        u16 tl1_schq;
-        u16 tl2_schq;
         struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
 };
 
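With the per-level schq now stored in nix_smq_tree_ctx, the dedicated tl1_schq/tl2_schq fields become redundant: any level's queue can be looked up uniformly from the tree context. The rvu_nix.c changes below use exactly this pattern, for example:

    tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
    link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;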
59 changes: 47 additions & 12 deletions drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2259,14 +2259,13 @@ static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
         schq = smq;
         for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
                 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
+                smq_tree_ctx->schq = schq;
                 if (lvl == NIX_TXSCH_LVL_TL1) {
-                        smq_flush_ctx->tl1_schq = schq;
                         smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
                         smq_tree_ctx->pir_off = 0;
                         smq_tree_ctx->pir_val = 0;
                         parent_off = 0;
                 } else if (lvl == NIX_TXSCH_LVL_TL2) {
-                        smq_flush_ctx->tl2_schq = schq;
                         smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
                         smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
                         parent_off = NIX_AF_TL2X_PARENT(schq);
@@ -2301,25 +2300,26 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
 {
         struct nix_txsch *txsch;
         struct nix_hw *nix_hw;
+        int tl2, tl2_schq;
         u64 regoff;
-        int tl2;
 
         nix_hw = get_nix_hw(rvu->hw, blkaddr);
         if (!nix_hw)
                 return;
 
         /* loop through all TL2s with matching PF_FUNC */
         txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+        tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
         for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
                 /* skip the smq(flush) TL2 */
-                if (tl2 == smq_flush_ctx->tl2_schq)
+                if (tl2 == tl2_schq)
                         continue;
                 /* skip unused TL2s */
                 if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
                         continue;
                 /* skip if PF_FUNC doesn't match */
                 if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
-                    (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
+                    (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] &
                      ~RVU_PFVF_FUNC_MASK)))
                         continue;
                 /* enable/disable XOFF */
@@ -2361,10 +2361,12 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
                          int smq, u16 pcifunc, int nixlf)
 {
         struct nix_smq_flush_ctx *smq_flush_ctx;
+        int err, restore_tx_en = 0, i;
         int pf = rvu_get_pf(pcifunc);
         u8 cgx_id = 0, lmac_id = 0;
-        int err, restore_tx_en = 0;
-        u64 cfg;
+        u16 tl2_tl3_link_schq;
+        u8 link, link_level;
+        u64 cfg, bmap = 0;
 
         if (!is_rvu_otx2(rvu)) {
                 /* Skip SMQ flush if pkt count is zero */
@@ -2388,16 +2390,38 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
         nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
         nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
 
-        cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
-        /* Do SMQ flush and set enqueue xoff */
-        cfg |= BIT_ULL(50) | BIT_ULL(49);
-        rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
-
         /* Disable backpressure from physical link,
          * otherwise SMQ flush may stall.
          */
         rvu_cgx_enadis_rx_bp(rvu, pf, false);
 
+        link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+                        NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+        tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
+        link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
+
+        /* SMQ set enqueue xoff */
+        cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+        cfg |= BIT_ULL(50);
+        rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
+        /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
+        for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
+                cfg = rvu_read64(rvu, blkaddr,
+                                 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+                if (!(cfg & BIT_ULL(12)))
+                        continue;
+                bmap |= (1 << i);
+                cfg &= ~BIT_ULL(12);
+                rvu_write64(rvu, blkaddr,
+                            NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
+        }
+
+        /* Do SMQ flush and set enqueue xoff */
+        cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+        cfg |= BIT_ULL(50) | BIT_ULL(49);
+        rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
         /* Wait for flush to complete */
         err = rvu_poll_reg(rvu, blkaddr,
                            NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
@@ -2406,6 +2430,17 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
                          "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
                          nixlf, smq);
 
+        /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
+        for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
+                if (!(bmap & (1 << i)))
+                        continue;
+                cfg = rvu_read64(rvu, blkaddr,
+                                 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+                cfg |= BIT_ULL(12);
+                rvu_write64(rvu, blkaddr,
+                            NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
+        }
+
         /* clear XOFF on TL2s */
         nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
         nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
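On the restore side, note that a flush timeout is only logged ("NIXLF%d: SMQ%d flush failed, txlink might be busy") and does not abort the function: the saved bmap is still walked, so LINK_CFG[ENA] is re-enabled only on the links that were enabled before the flush, and links that were already down stay down.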
