percpu: allow pcpu_alloc() to be called with IRQs off
pcpu_alloc() and pcpu_extend_area_map() perform a series of
spin_lock_irq()/spin_unlock_irq() calls, which makes them unsafe
to call from contexts which have IRQs off.

This patch converts the code to save and restore IRQ flags instead,
allowing pcpu_alloc() (and therefore __alloc_percpu()) to be called
from the early kernel startup stage, where IRQs are off.
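
As background, a minimal sketch (not part of the patch) contrasting the
two locking idioms; demo_lock and the function names are illustrative:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Unsafe if the caller already has IRQs disabled: spin_unlock_irq()
 * unconditionally re-enables interrupts on exit, corrupting the
 * caller's IRQ state. */
static void demo_irq_variant(void)
{
        spin_lock_irq(&demo_lock);
        /* ... critical section ... */
        spin_unlock_irq(&demo_lock);    /* IRQs forced back on */
}

/* Safe from any context: the caller's IRQ state is saved in 'flags'
 * and restored exactly as it was. */
static void demo_irqsave_variant(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}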

This is needed for proper initialization of per-cpu rq_weight data from
sched_init().

tj: added comment explaining why irqsave/restore is used in alloc path.

Signed-off-by: Jiri Kosina <[email protected]>
Acked-by: Ingo Molnar <[email protected]>
Signed-off-by: Tejun Heo <[email protected]>
Jiri Kosina authored and htejun committed Oct 28, 2009
1 parent 1a0c329, commit 403a91b
Showing 1 changed file with 17 additions and 13 deletions.

--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -153,7 +153,10 @@ static int pcpu_reserved_chunk_limit;
  *
  * During allocation, pcpu_alloc_mutex is kept locked all the time and
  * pcpu_lock is grabbed and released as necessary. All actual memory
- * allocations are done using GFP_KERNEL with pcpu_lock released.
+ * allocations are done using GFP_KERNEL with pcpu_lock released. In
+ * general, percpu memory can't be allocated with irq off but
+ * irqsave/restore are still used in alloc path so that it can be used
+ * from early init path - sched_init() specifically.
  *
  * Free path accesses and alters only the index data structures, so it
  * can be safely called from atomic context. When memory needs to be
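
To make the rule in this comment concrete, here is a hedged usage
sketch (identifiers are illustrative, not from the patch): the free
path may be called from atomic context, while allocation still wants
a blockable context, early init being the tolerated exception:

#include <linux/percpu.h>

static void *demo_pcpu_ptr;     /* hypothetical example pointer */

/* Alloc path: normally process context, since the allocator may block
 * with GFP_KERNEL once pcpu_lock is dropped. With this patch it also
 * works with IRQs off during early init (sched_init()), where the
 * statically set up first chunk satisfies the request without blocking. */
static void demo_alloc(void)
{
        demo_pcpu_ptr = __alloc_percpu(sizeof(unsigned long),
                                       __alignof__(unsigned long));
}

/* Free path: touches only index data under pcpu_lock, so it is safe
 * to call from atomic context. */
static void demo_free(void)
{
        free_percpu(demo_pcpu_ptr);
}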
@@ -366,7 +369,7 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
  * RETURNS:
  * 0 if noop, 1 if successfully extended, -errno on failure.
  */
-static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
+static int pcpu_extend_area_map(struct pcpu_chunk *chunk, unsigned long *flags)
 {
        int new_alloc;
        int *new;
@@ -376,15 +379,15 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
        if (chunk->map_alloc >= chunk->map_used + 2)
                return 0;

-       spin_unlock_irq(&pcpu_lock);
+       spin_unlock_irqrestore(&pcpu_lock, *flags);

        new_alloc = PCPU_DFL_MAP_ALLOC;
        while (new_alloc < chunk->map_used + 2)
                new_alloc *= 2;

        new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
        if (!new) {
-               spin_lock_irq(&pcpu_lock);
+               spin_lock_irqsave(&pcpu_lock, *flags);
                return -ENOMEM;
        }

@@ -393,7 +396,7 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
         * could have happened inbetween, so map_used couldn't have
         * grown.
         */
-       spin_lock_irq(&pcpu_lock);
+       spin_lock_irqsave(&pcpu_lock, *flags);
        BUG_ON(new_alloc < chunk->map_used + 2);

        size = chunk->map_alloc * sizeof(chunk->map[0]);
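
Worth noting about the two hunks above: flags is passed by pointer
because pcpu_extend_area_map() drops and retakes pcpu_lock around a
blocking allocation, and the caller's saved IRQ state has to survive
that round trip. A minimal sketch of the pattern, with hypothetical
names (demo_lock, demo_extend):

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Called with demo_lock held and the caller's IRQ flags in *flags.
 * Drops the lock to allocate with GFP_KERNEL (which may sleep), then
 * retakes it, re-saving the IRQ state through the same pointer so the
 * caller's eventual unlock restores the right flags. */
static int demo_extend(size_t size, void **out, unsigned long *flags)
{
        void *new;

        spin_unlock_irqrestore(&demo_lock, *flags);
        new = kmalloc(size, GFP_KERNEL);        /* may block */
        spin_lock_irqsave(&demo_lock, *flags);
        if (!new)
                return -ENOMEM;
        *out = new;
        return 0;
}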
@@ -1047,6 +1050,7 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
        struct pcpu_chunk *chunk;
        const char *err;
        int slot, off;
+       unsigned long flags;

        if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
                WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -1055,13 +1059,13 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
        }

        mutex_lock(&pcpu_alloc_mutex);
-       spin_lock_irq(&pcpu_lock);
+       spin_lock_irqsave(&pcpu_lock, flags);

        /* serve reserved allocations from the reserved chunk if available */
        if (reserved && pcpu_reserved_chunk) {
                chunk = pcpu_reserved_chunk;
                if (size > chunk->contig_hint ||
-                   pcpu_extend_area_map(chunk) < 0) {
+                   pcpu_extend_area_map(chunk, &flags) < 0) {
                        err = "failed to extend area map of reserved chunk";
                        goto fail_unlock;
                }
@@ -1079,7 +1083,7 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
                if (size > chunk->contig_hint)
                        continue;

-               switch (pcpu_extend_area_map(chunk)) {
+               switch (pcpu_extend_area_map(chunk, &flags)) {
                case 0:
                        break;
                case 1:
@@ -1096,24 +1100,24 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
        }

        /* hmmm... no space left, create a new chunk */
-       spin_unlock_irq(&pcpu_lock);
+       spin_unlock_irqrestore(&pcpu_lock, flags);

        chunk = alloc_pcpu_chunk();
        if (!chunk) {
                err = "failed to allocate new chunk";
                goto fail_unlock_mutex;
        }

-       spin_lock_irq(&pcpu_lock);
+       spin_lock_irqsave(&pcpu_lock, flags);
        pcpu_chunk_relocate(chunk, -1);
        goto restart;

area_found:
-       spin_unlock_irq(&pcpu_lock);
+       spin_unlock_irqrestore(&pcpu_lock, flags);

        /* populate, map and clear the area */
        if (pcpu_populate_chunk(chunk, off, size)) {
-               spin_lock_irq(&pcpu_lock);
+               spin_lock_irqsave(&pcpu_lock, flags);
                pcpu_free_area(chunk, off);
                err = "failed to populate";
                goto fail_unlock;
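
The hunks above follow one discipline throughout pcpu_alloc(): whenever
blocking work is needed (a new chunk, page population), release
pcpu_lock with irqrestore, do the work, relock with irqsave, and jump
back to restart since the chunk lists may have changed while the lock
was dropped. A hedged sketch of that retry shape, with invented
helpers demo_find_free() and demo_make_room():

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Hypothetical blocking helpers standing in for the free-area search
 * and alloc_pcpu_chunk(); error handling elided for brevity. */
void *demo_find_free(size_t size);
void demo_make_room(size_t size);

static void *demo_alloc_area(size_t size)
{
        unsigned long flags;
        void *area;

        spin_lock_irqsave(&demo_lock, flags);
restart:
        area = demo_find_free(size);
        if (!area) {
                spin_unlock_irqrestore(&demo_lock, flags);
                demo_make_room(size);           /* may sleep */
                spin_lock_irqsave(&demo_lock, flags);
                goto restart;                   /* state may have changed */
        }
        spin_unlock_irqrestore(&demo_lock, flags);
        return area;
}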
@@ -1125,7 +1129,7 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
        return __addr_to_pcpu_ptr(chunk->base_addr + off);

fail_unlock:
-       spin_unlock_irq(&pcpu_lock);
+       spin_unlock_irqrestore(&pcpu_lock, flags);
fail_unlock_mutex:
        mutex_unlock(&pcpu_alloc_mutex);
        if (warn_limit) {
