mm: percpu-vmap fix RCU list walking
RCU list walking of the per-cpu vmap cache was broken: it did not use
RCU primitives, and the union of free_list and rcu_head was also clearly
wrong, since free_list is the very list we are walking under RCU.
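
For context, a minimal sketch of the pattern this fix restores (illustrative
only, not verbatim mm/vmalloc.c; the rcu_free_vb callback name is assumed
here).  Readers walk vbq->free under rcu_read_lock() with the _rcu list
iterator, writers add and remove entries with the _rcu list helpers under
vbq->lock, and the block is only reclaimed after a grace period via a
separate rcu_head -- which is why free_list and rcu_head must not share
storage in a union:

        /* Reader side (e.g. the vb_alloc() fast path): no vbq->lock taken. */
        rcu_read_lock();
        list_for_each_entry_rcu(vb, &vbq->free, free_list) {
                spin_lock(&vb->lock);
                /* ... try to carve the allocation out of this block ... */
                spin_unlock(&vb->lock);
        }
        rcu_read_unlock();

        /* Writer side: publish/unpublish with the RCU list helpers. */
        spin_lock(&vbq->lock);
        list_add_rcu(&vb->free_list, &vbq->free);       /* new_vmap_block() */
        spin_unlock(&vbq->lock);

        spin_lock(&vbq->lock);
        list_del_rcu(&vb->free_list);                   /* block exhausted */
        spin_unlock(&vbq->lock);

        /*
         * Teardown: a reader may still be traversing free_list when the
         * block is retired, so reclamation must wait for a grace period.
         */
        call_rcu(&vb->rcu_head, rcu_free_vb);           /* callback frees vb */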

While we are at it, remove a couple of unused fields left over from an
earlier iteration.

These APIs aren't actually used anywhere yet, because of problems with the
XFS conversion; Christoph has now verified that those problems are solved
with these patches.  It is also an exported interface, so I think it is
worth merging now (and Christoph wants to get the XFS changes into their
local tree).

Cc: [email protected]
Cc: [email protected]
Tested-by: Christoph Hellwig <[email protected]>
Signed-off-by: Nick Piggin <[email protected]>
--
Signed-off-by: Linus Torvalds <[email protected]>
Nick Piggin authored and torvalds committed Feb 2, 2010
1 parent 489b24f commit de56042
Showing 1 changed file with 6 additions and 14 deletions.
20 changes: 6 additions & 14 deletions mm/vmalloc.c
@@ -667,8 +667,6 @@ static bool vmap_initialized __read_mostly = false;
 struct vmap_block_queue {
         spinlock_t lock;
         struct list_head free;
-        struct list_head dirty;
-        unsigned int nr_dirty;
 };
 
 struct vmap_block {
@@ -678,10 +676,8 @@ struct vmap_block {
         unsigned long free, dirty;
         DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
         DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
-        union {
-                struct list_head free_list;
-                struct rcu_head rcu_head;
-        };
+        struct list_head free_list;
+        struct rcu_head rcu_head;
 };
 
 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
@@ -757,7 +753,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
         vbq = &get_cpu_var(vmap_block_queue);
         vb->vbq = vbq;
         spin_lock(&vbq->lock);
-        list_add(&vb->free_list, &vbq->free);
+        list_add_rcu(&vb->free_list, &vbq->free);
         spin_unlock(&vbq->lock);
         put_cpu_var(vmap_block_queue);
 
@@ -776,8 +772,6 @@ static void free_vmap_block(struct vmap_block *vb)
         struct vmap_block *tmp;
         unsigned long vb_idx;
 
-        BUG_ON(!list_empty(&vb->free_list));
-
         vb_idx = addr_to_vb_idx(vb->va->va_start);
         spin_lock(&vmap_block_tree_lock);
         tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
@@ -816,7 +810,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
                         vb->free -= 1UL << order;
                         if (vb->free == 0) {
                                 spin_lock(&vbq->lock);
-                                list_del_init(&vb->free_list);
+                                list_del_rcu(&vb->free_list);
                                 spin_unlock(&vbq->lock);
                         }
                         spin_unlock(&vb->lock);
@@ -860,11 +854,11 @@ static void vb_free(const void *addr, unsigned long size)
         BUG_ON(!vb);
 
         spin_lock(&vb->lock);
-        bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
+        BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
 
         vb->dirty += 1UL << order;
         if (vb->dirty == VMAP_BBMAP_BITS) {
-                BUG_ON(vb->free || !list_empty(&vb->free_list));
+                BUG_ON(vb->free);
                 spin_unlock(&vb->lock);
                 free_vmap_block(vb);
         } else
@@ -1033,8 +1027,6 @@ void __init vmalloc_init(void)
                 vbq = &per_cpu(vmap_block_queue, i);
                 spin_lock_init(&vbq->lock);
                 INIT_LIST_HEAD(&vbq->free);
-                INIT_LIST_HEAD(&vbq->dirty);
-                vbq->nr_dirty = 0;
         }
 
         /* Import existing vmlist entries. */
