net: Store virtual address instead of page in netdev_alloc_cache
This change makes it so that we store the virtual address of the page
in the netdev_alloc_cache instead of the page pointer. The idea behind
this is to avoid multiple calls to page_address(), since the virtual
address is required for every access while the page pointer is only
needed at allocation or reset of the page.
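
As a rough sketch of the access pattern this enables (a userspace model with hypothetical names, not the kernel code): the base virtual address is computed once per refill, and every fragment handed out afterwards is plain pointer arithmetic.

#include <stdlib.h>

/* Userspace model: cache the buffer's virtual address at refill time so
 * the per-fragment hot path never recomputes it.  In the kernel the
 * refill would be alloc_pages() + page_address(), and the slow path
 * would recover the struct page with virt_to_page(nc->va).  Recycling
 * and refcounting are omitted here, so refills simply leak.
 */
struct frag_cache {
	void *va;		/* base virtual address, set once per refill */
	unsigned int offset;	/* remaining space, consumed top-down */
};

static void *frag_alloc(struct frag_cache *nc, unsigned int fragsz,
			unsigned int size)
{
	if (!nc->va || nc->offset < fragsz) {
		nc->va = malloc(size);	/* stand-in for a page refill */
		if (!nc->va)
			return NULL;
		nc->offset = size;
	}
	nc->offset -= fragsz;
	return (char *)nc->va + nc->offset;	/* pure pointer math */
}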

While I was at it, I also reordered the netdev_alloc_cache structure a
bit so that its size is always 16 bytes, by dropping the size member in
the case where PAGE_SIZE is greater than or equal to 32KB.
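
The 16-byte figure is easy to check with a standalone layout model (an approximation: the kernel's __u16/__u32 replaced with <stdint.h> types, the trailing pfmemalloc flag omitted, 64-bit pointers assumed):

#include <stdint.h>
#include <stdio.h>

struct cache_small_page {		/* PAGE_SIZE < 32KB: size can vary */
	void *va;			/* 8 bytes on 64-bit */
	uint16_t offset;		/* 2 bytes */
	uint16_t size;			/* 2 bytes */
	unsigned int pagecnt_bias;	/* 4 bytes -> 16 total, no padding */
};

struct cache_large_page {		/* PAGE_SIZE >= 32KB: size is fixed */
	void *va;
	uint32_t offset;		/* 16 bits could not hold the offset */
	unsigned int pagecnt_bias;	/* 8 + 4 + 4 = 16 bytes */
};

int main(void)
{
	printf("small-page layout: %zu bytes\n", sizeof(struct cache_small_page));
	printf("large-page layout: %zu bytes\n", sizeof(struct cache_large_page));
	return 0;
}

Both layouts come out to 16 bytes on a typical 64-bit target, which is why the size member can simply be dropped when PAGE_SIZE already covers the maximum fragment page.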

Signed-off-by: Alexander Duyck <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
Alexander Duyck authored and davem330 committed May 12, 2015
1 parent 2ee52ad commit 0e39250
Showing 2 changed files with 34 additions and 26 deletions.
5 changes: 2 additions & 3 deletions include/linux/skbuff.h
@@ -2128,9 +2128,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 	kfree_skb(skb);
 }
 
-#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
-#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
-#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE
+#define NETDEV_FRAG_PAGE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
+#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(NETDEV_FRAG_PAGE_MAX_SIZE)
 
 void *netdev_alloc_frag(unsigned int fragsz);
 
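For reference, __ALIGN_MASK(x, mask) is ((x + mask) & ~mask), so the new NETDEV_FRAG_PAGE_MAX_SIZE is 32768 rounded up to a multiple of PAGE_SIZE, and the allocation order is now derived from the size rather than the other way around. A standalone sanity check (the two kernel macros re-implemented here purely for illustration):

#include <stdio.h>

/* Userspace re-implementations of __ALIGN_MASK() and get_order(),
 * mirroring the kernel versions closely enough for a sanity check.
 */
#define ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

static unsigned int get_order(unsigned long size, unsigned long page_size)
{
	unsigned int order = 0;

	size = (size - 1) / page_size;	/* round up to whole pages */
	while (size) {			/* order = ceil(log2(pages)) */
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long page_sizes[] = { 4096, 16384, 65536 };

	for (int i = 0; i < 3; i++) {
		unsigned long ps = page_sizes[i];
		unsigned long max_size = ALIGN_MASK(32768ul, ps - 1);

		/* 4K pages:  MAX_SIZE = 32768, order = 3
		 * 16K pages: MAX_SIZE = 32768, order = 1
		 * 64K pages: MAX_SIZE = 65536, order = 0 (single page)
		 */
		printf("PAGE_SIZE %6lu -> MAX_SIZE %6lu, order %u\n",
		       ps, max_size, get_order(max_size, ps));
	}
	return 0;
}

Note how the 64K-page case collapses to order 0: PAGE_SIZE >= NETDEV_FRAG_PAGE_MAX_SIZE there, which is exactly the configuration where the new code drops the size member and the high-order allocation path.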
55 changes: 32 additions & 23 deletions net/core/skbuff.c
@@ -348,7 +348,13 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
 EXPORT_SYMBOL(build_skb);
 
 struct netdev_alloc_cache {
-	struct page_frag frag;
+	void * va;
+#if (PAGE_SIZE < NETDEV_FRAG_PAGE_MAX_SIZE)
+	__u16 offset;
+	__u16 size;
+#else
+	__u32 offset;
+#endif
 	/* we maintain a pagecount bias, so that we dont dirty cache line
 	 * containing page->_count every time we allocate a fragment.
 	 */
@@ -361,41 +367,41 @@ static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);
 static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
 				       gfp_t gfp_mask)
 {
-	const unsigned int order = NETDEV_FRAG_PAGE_MAX_ORDER;
 	struct page *page = NULL;
 	gfp_t gfp = gfp_mask;
 
-	if (order) {
-		gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
-			    __GFP_NOMEMALLOC;
-		page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
-		nc->frag.size = PAGE_SIZE << (page ? order : 0);
-	}
-
+#if (PAGE_SIZE < NETDEV_FRAG_PAGE_MAX_SIZE)
+	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
+		    __GFP_NOMEMALLOC;
+	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
+				NETDEV_FRAG_PAGE_MAX_ORDER);
+	nc->size = page ? NETDEV_FRAG_PAGE_MAX_SIZE : PAGE_SIZE;
+#endif
 	if (unlikely(!page))
 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
 
-	nc->frag.page = page;
+	nc->va = page ? page_address(page) : NULL;
 
 	return page;
 }
 
 static void *__alloc_page_frag(struct netdev_alloc_cache *nc,
 			       unsigned int fragsz, gfp_t gfp_mask)
 {
-	struct page *page = nc->frag.page;
-	unsigned int size;
+	unsigned int size = PAGE_SIZE;
+	struct page *page;
 	int offset;
 
-	if (unlikely(!page)) {
+	if (unlikely(!nc->va)) {
 refill:
 		page = __page_frag_refill(nc, gfp_mask);
 		if (!page)
 			return NULL;
 
-		/* if size can vary use frag.size else just use PAGE_SIZE */
-		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
-
+#if (PAGE_SIZE < NETDEV_FRAG_PAGE_MAX_SIZE)
+		/* if size can vary use size else just use PAGE_SIZE */
+		size = nc->size;
+#endif
 		/* Even if we own the page, we do not use atomic_set().
 		 * This would break get_page_unless_zero() users.
 		 */
@@ -404,17 +410,20 @@ static void *__alloc_page_frag(struct netdev_alloc_cache *nc,
 		/* reset page count bias and offset to start of new frag */
 		nc->pfmemalloc = page->pfmemalloc;
 		nc->pagecnt_bias = size;
-		nc->frag.offset = size;
+		nc->offset = size;
 	}
 
-	offset = nc->frag.offset - fragsz;
+	offset = nc->offset - fragsz;
 	if (unlikely(offset < 0)) {
+		page = virt_to_page(nc->va);
+
 		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
 			goto refill;
 
-		/* if size can vary use frag.size else just use PAGE_SIZE */
-		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
-
+#if (PAGE_SIZE < NETDEV_FRAG_PAGE_MAX_SIZE)
+		/* if size can vary use size else just use PAGE_SIZE */
+		size = nc->size;
+#endif
 		/* OK, page count is 0, we can safely set it */
 		atomic_set(&page->_count, size);
 
@@ -424,9 +433,9 @@ static void *__alloc_page_frag(struct netdev_alloc_cache *nc,
 	}
 
 	nc->pagecnt_bias--;
-	nc->frag.offset = offset;
+	nc->offset = offset;
 
-	return page_address(page) + offset;
+	return nc->va + offset;
 }
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
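
The other piece worth calling out is the pagecnt_bias scheme the rewritten slow path preserves: the page's refcount is charged "size" references up front, a private bias counter pays for each fragment handed out, and once the page is exhausted atomic_sub_and_test() of the leftover bias reveals whether any consumer still holds a reference; if not, the page is recycled in place. A compact userspace model of just that bookkeeping (C11 atomics, hypothetical names, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>

/* Model of the pagecnt_bias trick: instead of bumping the shared
 * refcount once per fragment, charge it "size" references up front and
 * pay them back from a private counter.  The shared count is only
 * touched again when the buffer is exhausted.
 */
struct frag_page {
	atomic_uint count;		/* stands in for page->_count */
};

struct frag_cache {
	struct frag_page *page;
	unsigned int pagecnt_bias;	/* references we still hold locally */
	unsigned int offset;
	unsigned int size;
};

static void refill(struct frag_cache *nc)
{
	atomic_store(&nc->page->count, nc->size);	/* pre-charge refs */
	nc->pagecnt_bias = nc->size;
	nc->offset = nc->size;
}

/* Returns true if the exhausted buffer can be recycled in place:
 * subtracting our remaining bias leaves count == 0 only when no
 * fragment consumer still holds a reference.
 */
static bool try_recycle(struct frag_cache *nc)
{
	return atomic_fetch_sub(&nc->page->count, nc->pagecnt_bias)
	       == nc->pagecnt_bias;
}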
