virtio_net: introduce receive_small_xdp()
The purpose of this patch is to simplify receive_small() by moving all of
its XDP handling into a separate function, receive_small_xdp().
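
For reference, a condensed view of the resulting XDP branch of receive_small(),
taken from the diff below: the branch collapses to a single call into the new
helper, and XDP_TX, XDP_REDIRECT and all error cases return NULL from inside
receive_small_xdp(), which also does the xdp_drops/drops accounting itself.

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		/* Buffer linearization, running the BPF program, skb
		 * construction and drop accounting all happen in the helper.
		 */
		skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
					xdp_headroom, len, xdp_xmit, stats);
		rcu_read_unlock();
		return skb;
	}
	rcu_read_unlock();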

Signed-off-by: Xuan Zhuo <[email protected]>
Acked-by: Jason Wang <[email protected]>
Signed-off-by: Jakub Kicinski <[email protected]>
fengidri authored and kuba-moo committed May 10, 2023
1 parent 59ba3b1 commit c5f3e72
165 changes: 100 additions & 65 deletions drivers/net/virtio_net.c
@@ -931,6 +931,99 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
return NULL;
}

static struct sk_buff *receive_small_xdp(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
struct bpf_prog *xdp_prog,
void *buf,
unsigned int xdp_headroom,
unsigned int len,
unsigned int *xdp_xmit,
struct virtnet_rq_stats *stats)
{
unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
unsigned int headroom = vi->hdr_len + header_offset;
struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
struct page *page = virt_to_head_page(buf);
struct page *xdp_page;
unsigned int buflen;
struct xdp_buff xdp;
struct sk_buff *skb;
unsigned int delta = 0;
unsigned int metasize = 0;
void *orig_data;
u32 act;

if (unlikely(hdr->hdr.gso_type))
goto err_xdp;

buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
int offset = buf - page_address(page) + header_offset;
unsigned int tlen = len + vi->hdr_len;
int num_buf = 1;

xdp_headroom = virtnet_get_headroom(vi);
header_offset = VIRTNET_RX_PAD + xdp_headroom;
headroom = vi->hdr_len + header_offset;
buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
xdp_page = xdp_linearize_page(rq, &num_buf, page,
offset, header_offset,
&tlen);
if (!xdp_page)
goto err_xdp;

buf = page_address(xdp_page);
put_page(page);
page = xdp_page;
}

xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
xdp_headroom, len, true);
orig_data = xdp.data;

act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);

switch (act) {
case XDP_PASS:
/* Recalculate length in case bpf program changed it */
delta = orig_data - xdp.data;
len = xdp.data_end - xdp.data;
metasize = xdp.data - xdp.data_meta;
break;

case XDP_TX:
case XDP_REDIRECT:
goto xdp_xmit;

default:
goto err_xdp;
}

skb = build_skb(buf, buflen);
if (!skb)
goto err;

skb_reserve(skb, headroom - delta);
skb_put(skb, len);
if (metasize)
skb_metadata_set(skb, metasize);

return skb;

err_xdp:
stats->xdp_drops++;
err:
stats->drops++;
put_page(page);
xdp_xmit:
return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
@@ -947,9 +1040,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
struct page *page = virt_to_head_page(buf);
unsigned int delta = 0;
struct page *xdp_page;
unsigned int metasize = 0;

len -= vi->hdr_len;
stats->bytes += len;
@@ -969,82 +1059,27 @@ static struct sk_buff *receive_small(struct net_device *dev,
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
struct xdp_buff xdp;
void *orig_data;
u32 act;

if (unlikely(hdr->hdr.gso_type))
goto err_xdp;

if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
int offset = buf - page_address(page) + header_offset;
unsigned int tlen = len + vi->hdr_len;
int num_buf = 1;

xdp_headroom = virtnet_get_headroom(vi);
header_offset = VIRTNET_RX_PAD + xdp_headroom;
headroom = vi->hdr_len + header_offset;
buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
xdp_page = xdp_linearize_page(rq, &num_buf, page,
offset, header_offset,
&tlen);
if (!xdp_page)
goto err_xdp;

buf = page_address(xdp_page);
put_page(page);
page = xdp_page;
}

xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
xdp_headroom, len, true);
orig_data = xdp.data;

act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);

switch (act) {
case XDP_PASS:
/* Recalculate length in case bpf program changed it */
delta = orig_data - xdp.data;
len = xdp.data_end - xdp.data;
metasize = xdp.data - xdp.data_meta;
break;
case XDP_TX:
case XDP_REDIRECT:
rcu_read_unlock();
goto xdp_xmit;
default:
goto err_xdp;
}
skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf, xdp_headroom,
len, xdp_xmit, stats);
rcu_read_unlock();
return skb;
}
rcu_read_unlock();

skip_xdp:
skb = build_skb(buf, buflen);
if (!skb)
goto err;
skb_reserve(skb, headroom - delta);
skb_reserve(skb, headroom);
skb_put(skb, len);
if (!xdp_prog) {
buf += header_offset;
memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
} /* keep zeroed vnet hdr since XDP is loaded */

if (metasize)
skb_metadata_set(skb, metasize);

buf += header_offset;
memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
return skb;

err_xdp:
rcu_read_unlock();
stats->xdp_drops++;
err:
stats->drops++;
put_page(page);
xdp_xmit:
return NULL;
}
