Skip to content

Commit

Permalink
x86, bts, mm: clean up buffer allocation
Browse files Browse the repository at this point in the history
The current mm interface is asymmetric: one function allocates a locked
buffer, while another function only refunds the memory.

Change this to have two functions for accounting and refunding locked
memory, respectively; and do the actual buffer allocation in ptrace.

[ Impact: refactor BTS buffer allocation code ]

Signed-off-by: Markus Metzger <[email protected]>
Acked-by: Andrew Morton <[email protected]>
Cc: Peter Zijlstra <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
  • Loading branch information
markus-metzger authored and Ingo Molnar committed Apr 24, 2009
1 parent 7e0bfad commit 1cb81b1
Show file tree
Hide file tree
Showing 3 changed files with 47 additions and 34 deletions.
39 changes: 26 additions & 13 deletions arch/x86/kernel/ptrace.c
Original file line number Diff line number Diff line change
Expand Up @@ -617,17 +617,28 @@ struct bts_context {
struct work_struct work;
};

static inline void alloc_bts_buffer(struct bts_context *context,
unsigned int size)
static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
{
void *buffer;
void *buffer = NULL;
int err = -ENOMEM;

buffer = alloc_locked_buffer(size);
if (buffer) {
context->buffer = buffer;
context->size = size;
context->mm = get_task_mm(current);
}
err = account_locked_memory(current->mm, current->signal->rlim, size);
if (err < 0)
return err;

buffer = kzalloc(size, GFP_KERNEL);
if (!buffer)
goto out_refund;

context->buffer = buffer;
context->size = size;
context->mm = get_task_mm(current);

return 0;

out_refund:
refund_locked_memory(current->mm, size);
return err;
}

static inline void free_bts_buffer(struct bts_context *context)
Expand All @@ -638,7 +649,7 @@ static inline void free_bts_buffer(struct bts_context *context)
kfree(context->buffer);
context->buffer = NULL;

refund_locked_buffer_memory(context->mm, context->size);
refund_locked_memory(context->mm, context->size);
context->size = 0;

mmput(context->mm);
Expand Down Expand Up @@ -786,13 +797,15 @@ static int ptrace_bts_config(struct task_struct *child,
context->tracer = NULL;

if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
int err;

free_bts_buffer(context);
if (!cfg.size)
return 0;

alloc_bts_buffer(context, cfg.size);
if (!context->buffer)
return -ENOMEM;
err = alloc_bts_buffer(context, cfg.size);
if (err < 0)
return err;
}

if (cfg.flags & PTRACE_BTS_O_TRACE)
Expand Down
6 changes: 4 additions & 2 deletions include/linux/mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ struct anon_vma;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct rlimit;

#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
Expand Down Expand Up @@ -1319,7 +1320,8 @@ int vmemmap_populate_basepages(struct page *start_page,
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
void vmemmap_populate_print_last(void);

extern void *alloc_locked_buffer(size_t size);
extern void refund_locked_buffer_memory(struct mm_struct *mm, size_t size);
extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
size_t size);
extern void refund_locked_memory(struct mm_struct *mm, size_t size);
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
36 changes: 17 additions & 19 deletions mm/mlock.c
Original file line number Diff line number Diff line change
Expand Up @@ -629,38 +629,36 @@ void user_shm_unlock(size_t size, struct user_struct *user)
free_uid(user);
}

void *alloc_locked_buffer(size_t size)
int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
size_t size)
{
unsigned long rlim, vm, pgsz;
void *buffer = NULL;
unsigned long lim, vm, pgsz;
int error = -ENOMEM;

pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

down_write(&current->mm->mmap_sem);

rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
vm = current->mm->total_vm + pgsz;
if (rlim < vm)
goto out;
down_write(&mm->mmap_sem);

rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
vm = current->mm->locked_vm + pgsz;
if (rlim < vm)
lim = rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
vm = mm->total_vm + pgsz;
if (lim < vm)
goto out;

buffer = kzalloc(size, GFP_KERNEL);
if (!buffer)
lim = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
vm = mm->locked_vm + pgsz;
if (lim < vm)
goto out;

current->mm->total_vm += pgsz;
current->mm->locked_vm += pgsz;
mm->total_vm += pgsz;
mm->locked_vm += pgsz;

error = 0;
out:
up_write(&current->mm->mmap_sem);
return buffer;
up_write(&mm->mmap_sem);
return error;
}

void refund_locked_buffer_memory(struct mm_struct *mm, size_t size)
void refund_locked_memory(struct mm_struct *mm, size_t size)
{
unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

Expand Down

0 comments on commit 1cb81b1

Please sign in to comment.