net: mana: Refactor RX buffer allocation code to prepare for various MTU
Move the common buffer allocation code out of mana_process_rx_cqe() and
mana_alloc_rx_wqe() into helper functions.
Refactor the related variables so they can be changed in one place and the
buffer sizes stay in sync.

Signed-off-by: Haiyang Zhang <[email protected]>
Reviewed-by: Jesse Brandeburg <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
haiyangz authored and davem330 committed Apr 14, 2023
1 parent ce518bc commit a2917b2
Showing 2 changed files with 91 additions and 69 deletions.
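
For orientation before the diff: the patch introduces three helpers, and the hunks below route all RX buffer handling through them. A condensed sketch of the resulting call flow, using only names added by this commit:

/* RX buffer allocation after this patch (condensed from the diff below):
 *
 *   setup:    mana_alloc_rx_wqe()   -> mana_fill_rx_oob()  -> mana_get_rxfrag(..., false)
 *   datapath: mana_process_rx_cqe() -> mana_refill_rxoob() -> mana_get_rxfrag(..., true)
 *   XDP drop: mana_rx_skb() parks the buffer in rxq->xdp_save_va, and
 *             mana_get_rxfrag() reuses it on the next allocation.
 */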
154 changes: 90 additions & 64 deletions drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1282,14 +1282,64 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
u64_stats_update_end(&rx_stats->syncp);

drop:
WARN_ON_ONCE(rxq->xdp_save_page);
rxq->xdp_save_page = virt_to_page(buf_va);
WARN_ON_ONCE(rxq->xdp_save_va);
/* Save for reuse */
rxq->xdp_save_va = buf_va;

++ndev->stats.rx_dropped;

return;
}

static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
dma_addr_t *da, bool is_napi)
{
struct page *page;
void *va;

/* Reuse XDP dropped page if available */
if (rxq->xdp_save_va) {
va = rxq->xdp_save_va;
rxq->xdp_save_va = NULL;
} else {
page = dev_alloc_page();
if (!page)
return NULL;

va = page_to_virt(page);
}

*da = dma_map_single(dev, va + XDP_PACKET_HEADROOM, rxq->datasize,
DMA_FROM_DEVICE);

if (dma_mapping_error(dev, *da)) {
put_page(virt_to_head_page(va));
return NULL;
}

return va;
}
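
A minimal caller sketch for the helper above (mirroring mana_refill_rxoob() and mana_fill_rx_oob() below; it relies on the driver's internal types, so it is not standalone). Note that the is_napi argument is not consulted by the helper as added here; it looks like groundwork for the follow-up MTU patches named in the subject:

dma_addr_t da;
void *va;

va = mana_get_rxfrag(rxq, dev, &da, true);
if (!va)
	return;	/* out of memory: keep the currently posted buffer */

/* va is the buffer start; da maps va + XDP_PACKET_HEADROOM for rxq->datasize bytes */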

/* Allocate frag for rx buffer, and save the old buf */
static void mana_refill_rxoob(struct device *dev, struct mana_rxq *rxq,
struct mana_recv_buf_oob *rxoob, void **old_buf)
{
dma_addr_t da;
void *va;

va = mana_get_rxfrag(rxq, dev, &da, true);

if (!va)
return;

dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
DMA_FROM_DEVICE);
*old_buf = rxoob->buf_va;

rxoob->buf_va = va;
rxoob->sgl[0].address = da;
}
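
The failure contract matters for the caller: when mana_get_rxfrag() fails, the helper returns without touching *old_buf or the currently mapped buffer, so the caller's NULL-initialized pointer reaches mana_rx_skb() and the packet is dropped while the old buffer stays posted. Condensed from mana_process_rx_cqe() below:

void *old_buf = NULL;

mana_refill_rxoob(dev, rxq, rxbuf_oob, &old_buf);

/* old_buf == NULL means the refill failed; mana_rx_skb() then drops the packet */
mana_rx_skb(old_buf, oob, rxq);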

static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
struct gdma_comp *cqe)
{
@@ -1299,10 +1349,8 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
struct mana_recv_buf_oob *rxbuf_oob;
struct mana_port_context *apc;
struct device *dev = gc->dev;
void *new_buf, *old_buf;
struct page *new_page;
void *old_buf = NULL;
u32 curr, pktlen;
dma_addr_t da;

apc = netdev_priv(ndev);

@@ -1345,40 +1393,11 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
rxbuf_oob = &rxq->rx_oobs[curr];
WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);

/* Reuse XDP dropped page if available */
if (rxq->xdp_save_page) {
new_page = rxq->xdp_save_page;
rxq->xdp_save_page = NULL;
} else {
new_page = alloc_page(GFP_ATOMIC);
}

if (new_page) {
da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
DMA_FROM_DEVICE);

if (dma_mapping_error(dev, da)) {
__free_page(new_page);
new_page = NULL;
}
}

new_buf = new_page ? page_to_virt(new_page) : NULL;

if (new_buf) {
dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
DMA_FROM_DEVICE);

old_buf = rxbuf_oob->buf_va;

/* refresh the rxbuf_oob with the new page */
rxbuf_oob->buf_va = new_buf;
rxbuf_oob->buf_dma_addr = da;
rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
} else {
old_buf = NULL; /* drop the packet if no memory */
}
mana_refill_rxoob(dev, rxq, rxbuf_oob, &old_buf);

/* Unsuccessful refill will have old_buf == NULL.
* In this case, mana_rx_skb() will drop the packet.
*/
mana_rx_skb(old_buf, oob, rxq);

drop:
@@ -1659,19 +1678,19 @@ static void mana_destroy_rxq(struct mana_port_context *apc,

mana_deinit_cq(apc, &rxq->rx_cq);

if (rxq->xdp_save_page)
__free_page(rxq->xdp_save_page);
if (rxq->xdp_save_va)
put_page(virt_to_head_page(rxq->xdp_save_va));

for (i = 0; i < rxq->num_rx_buf; i++) {
rx_oob = &rxq->rx_oobs[i];

if (!rx_oob->buf_va)
continue;

dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
DMA_FROM_DEVICE);
dma_unmap_single(dev, rx_oob->sgl[0].address,
rx_oob->sgl[0].size, DMA_FROM_DEVICE);

free_page((unsigned long)rx_oob->buf_va);
put_page(virt_to_head_page(rx_oob->buf_va));
rx_oob->buf_va = NULL;
}
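
With buffers now tracked by virtual address instead of struct page, allocation and release are paired as in the sketch below throughout the patch; put_page(virt_to_head_page(va)) replaces the earlier __free_page()/free_page() calls:

/* allocate */
struct page *page = dev_alloc_page();
void *va = page ? page_to_virt(page) : NULL;

/* release: recover the head page from the virtual address */
if (va)
	put_page(virt_to_head_page(va));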

@@ -1681,6 +1700,26 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
kfree(rxq);
}

static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
struct mana_rxq *rxq, struct device *dev)
{
dma_addr_t da;
void *va;

va = mana_get_rxfrag(rxq, dev, &da, false);

if (!va)
return -ENOMEM;

rx_oob->buf_va = va;

rx_oob->sgl[0].address = da;
rx_oob->sgl[0].size = rxq->datasize;
rx_oob->sgl[0].mem_key = mem_key;

return 0;
}
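
Because buf_dma_addr is removed from struct mana_recv_buf_oob (see the mana.h hunk below), the SGE set up here carries both the mapping address and its size; the teardown path in mana_destroy_rxq() above unmaps with exactly those fields:

dma_unmap_single(dev, rx_oob->sgl[0].address,
		 rx_oob->sgl[0].size, DMA_FROM_DEVICE);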

#define MANA_WQE_HEADER_SIZE 16
#define MANA_WQE_SGE_SIZE 16

@@ -1690,9 +1729,8 @@ static int mana_alloc_rx_wqe(struct mana_port_context *apc,
struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
struct mana_recv_buf_oob *rx_oob;
struct device *dev = gc->dev;
struct page *page;
dma_addr_t da;
u32 buf_idx;
int ret;

WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);

@@ -1703,25 +1741,12 @@ static int mana_alloc_rx_wqe(struct mana_port_context *apc,
rx_oob = &rxq->rx_oobs[buf_idx];
memset(rx_oob, 0, sizeof(*rx_oob));

page = alloc_page(GFP_KERNEL);
if (!page)
return -ENOMEM;

da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
DMA_FROM_DEVICE);

if (dma_mapping_error(dev, da)) {
__free_page(page);
return -ENOMEM;
}

rx_oob->buf_va = page_to_virt(page);
rx_oob->buf_dma_addr = da;

rx_oob->num_sge = 1;
rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
rx_oob->sgl[0].size = rxq->datasize;
rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;

ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
dev);
if (ret)
return ret;

rx_oob->wqe_req.sgl = rx_oob->sgl;
rx_oob->wqe_req.num_sge = rx_oob->num_sge;
@@ -1780,9 +1805,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
rxq->ndev = ndev;
rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
rxq->rxq_idx = rxq_idx;
rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
rxq->rxobj = INVALID_MANA_HANDLE;

rxq->datasize = ALIGN(ETH_FRAME_LEN, 64);

err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
if (err)
goto out;
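The receive buffer size itself does not change in this patch; only the constant it is derived from moves from the driver-private MAX_FRAME_SIZE to the standard ETH_FRAME_LEN. A worked check of the arithmetic (ETH_FRAME_LEN = 1500 + 14 = 1514):

old: ALIGN(MAX_FRAME_SIZE, 64) = ALIGN(1500 + 14, 64) = ALIGN(1514, 64) = 1536
new: ALIGN(ETH_FRAME_LEN, 64)  = ALIGN(1514, 64)                        = 1536
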
6 changes: 1 addition & 5 deletions include/net/mana/mana.h
@@ -36,9 +36,6 @@ enum TRI_STATE {

#define COMP_ENTRY_SIZE 64

#define ADAPTER_MTU_SIZE 1500
#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14)

#define RX_BUFFERS_PER_QUEUE 512

#define MAX_SEND_BUFFERS_PER_QUEUE 256
@@ -282,7 +279,6 @@ struct mana_recv_buf_oob {
struct gdma_wqe_request wqe_req;

void *buf_va;
dma_addr_t buf_dma_addr;

/* SGL of the buffer going to be sent has part of the work request. */
u32 num_sge;
@@ -322,7 +318,7 @@ struct mana_rxq {

struct bpf_prog __rcu *bpf_prog;
struct xdp_rxq_info xdp_rxq;
struct page *xdp_save_page;
void *xdp_save_va; /* for reusing */
bool xdp_flush;
int xdp_rc; /* XDP redirect return code */

