mm: memcg: move vmstats structs definition above flushing code
The following patch will make use of those structs in the flushing code,
so move their definitions (and a few other dependencies) up in the file
to reduce the diff noise in that patch.

No functional change intended.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Yosry Ahmed <[email protected]>
Tested-by: Domenico Cerasuolo <[email protected]>
Acked-by: Shakeel Butt <[email protected]>
Cc: Chris Li <[email protected]>
Cc: Greg Thelen <[email protected]>
Cc: Ivan Babrou <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Michal Koutny <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Waiman Long <[email protected]>
Cc: Wei Xu <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
yosrym93 authored and akpm00 committed Dec 20, 2023
1 parent 508bed8 commit e0bf1dc
148 changes: 74 additions & 74 deletions mm/memcontrol.c
@@ -573,6 +573,80 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
return mz;
}

+/* Subset of vm_event_item to report for memcg event stats */
+static const unsigned int memcg_vm_event_stat[] = {
+        PGPGIN,
+        PGPGOUT,
+        PGSCAN_KSWAPD,
+        PGSCAN_DIRECT,
+        PGSCAN_KHUGEPAGED,
+        PGSTEAL_KSWAPD,
+        PGSTEAL_DIRECT,
+        PGSTEAL_KHUGEPAGED,
+        PGFAULT,
+        PGMAJFAULT,
+        PGREFILL,
+        PGACTIVATE,
+        PGDEACTIVATE,
+        PGLAZYFREE,
+        PGLAZYFREED,
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+        ZSWPIN,
+        ZSWPOUT,
+        ZSWPWB,
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+        THP_FAULT_ALLOC,
+        THP_COLLAPSE_ALLOC,
+        THP_SWPOUT,
+        THP_SWPOUT_FALLBACK,
+#endif
+};
+
+#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
+static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
+
+static void init_memcg_events(void)
+{
+        int i;
+
+        for (i = 0; i < NR_MEMCG_EVENTS; ++i)
+                mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
+}
+
+static inline int memcg_events_index(enum vm_event_item idx)
+{
+        return mem_cgroup_events_index[idx] - 1;
+}
+
+struct memcg_vmstats_percpu {
+        /* Local (CPU and cgroup) page state & events */
+        long state[MEMCG_NR_STAT];
+        unsigned long events[NR_MEMCG_EVENTS];
+
+        /* Delta calculation for lockless upward propagation */
+        long state_prev[MEMCG_NR_STAT];
+        unsigned long events_prev[NR_MEMCG_EVENTS];
+
+        /* Cgroup1: threshold notifications & softlimit tree updates */
+        unsigned long nr_page_events;
+        unsigned long targets[MEM_CGROUP_NTARGETS];
+};
+
+struct memcg_vmstats {
+        /* Aggregated (CPU and subtree) page state & events */
+        long state[MEMCG_NR_STAT];
+        unsigned long events[NR_MEMCG_EVENTS];
+
+        /* Non-hierarchical (CPU aggregated) page state & events */
+        long state_local[MEMCG_NR_STAT];
+        unsigned long events_local[NR_MEMCG_EVENTS];
+
+        /* Pending child counts during tree propagation */
+        long state_pending[MEMCG_NR_STAT];
+        unsigned long events_pending[NR_MEMCG_EVENTS];
+};
+
/*
* memcg and lruvec stats flushing
*
@@ -684,80 +758,6 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
}

-/* Subset of vm_event_item to report for memcg event stats */
-static const unsigned int memcg_vm_event_stat[] = {
-        PGPGIN,
-        PGPGOUT,
-        PGSCAN_KSWAPD,
-        PGSCAN_DIRECT,
-        PGSCAN_KHUGEPAGED,
-        PGSTEAL_KSWAPD,
-        PGSTEAL_DIRECT,
-        PGSTEAL_KHUGEPAGED,
-        PGFAULT,
-        PGMAJFAULT,
-        PGREFILL,
-        PGACTIVATE,
-        PGDEACTIVATE,
-        PGLAZYFREE,
-        PGLAZYFREED,
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
-        ZSWPIN,
-        ZSWPOUT,
-        ZSWPWB,
-#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-        THP_FAULT_ALLOC,
-        THP_COLLAPSE_ALLOC,
-        THP_SWPOUT,
-        THP_SWPOUT_FALLBACK,
-#endif
-};
-
-#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
-static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
-
-static void init_memcg_events(void)
-{
-        int i;
-
-        for (i = 0; i < NR_MEMCG_EVENTS; ++i)
-                mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
-}
-
-static inline int memcg_events_index(enum vm_event_item idx)
-{
-        return mem_cgroup_events_index[idx] - 1;
-}
-
-struct memcg_vmstats_percpu {
-        /* Local (CPU and cgroup) page state & events */
-        long state[MEMCG_NR_STAT];
-        unsigned long events[NR_MEMCG_EVENTS];
-
-        /* Delta calculation for lockless upward propagation */
-        long state_prev[MEMCG_NR_STAT];
-        unsigned long events_prev[NR_MEMCG_EVENTS];
-
-        /* Cgroup1: threshold notifications & softlimit tree updates */
-        unsigned long nr_page_events;
-        unsigned long targets[MEM_CGROUP_NTARGETS];
-};
-
-struct memcg_vmstats {
-        /* Aggregated (CPU and subtree) page state & events */
-        long state[MEMCG_NR_STAT];
-        unsigned long events[NR_MEMCG_EVENTS];
-
-        /* Non-hierarchical (CPU aggregated) page state & events */
-        long state_local[MEMCG_NR_STAT];
-        unsigned long events_local[NR_MEMCG_EVENTS];
-
-        /* Pending child counts during tree propagation */
-        long state_pending[MEMCG_NR_STAT];
-        unsigned long events_pending[NR_MEMCG_EVENTS];
-};
-
unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
long x = READ_ONCE(memcg->vmstats->state[idx]);
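A note on the indexing scheme in the moved block: init_memcg_events() stores i + 1 in mem_cgroup_events_index[], so the zero left in untouched slots by static initialization means "event not tracked by memcg", and memcg_events_index() subtracts one so callers get -1 for those. A minimal standalone C sketch of the same trick, with hypothetical event names standing in for the real vm_event_item values:

#include <stdio.h>

/* Hypothetical events standing in for vm_event_item */
enum vm_event { EV_A, EV_B, EV_C, EV_D, NR_EVENTS };

/* The tracked subset, analogous to memcg_vm_event_stat[] */
static const unsigned int tracked[] = { EV_B, EV_D };
#define NR_TRACKED (sizeof(tracked) / sizeof(tracked[0]))

/* Zero-initialized, so untracked entries stay 0 */
static int event_index[NR_EVENTS];

static void init_events(void)
{
        unsigned int i;

        for (i = 0; i < NR_TRACKED; i++)
                event_index[tracked[i]] = i + 1;        /* 0 keeps meaning "untracked" */
}

static int events_index(enum vm_event ev)
{
        return event_index[ev] - 1;                     /* -1 for untracked events */
}

int main(void)
{
        init_events();
        printf("EV_B -> %d\n", events_index(EV_B));     /* 0: first tracked slot */
        printf("EV_A -> %d\n", events_index(EV_A));     /* -1: not tracked */
        return 0;
}

This keeps the per-memcg event arrays at NR_MEMCG_EVENTS entries rather than NR_VM_EVENT_ITEMS, while still allowing O(1) lookup by vm_event_item.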

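The paired arrays in the two structs encode a snapshot-and-delta flush: hot paths only bump the per-CPU state/events counters, while the flusher computes each counter's movement since the previous flush via state_prev/events_prev, folds it into the aggregated totals, and queues it in the parent's state_pending/events_pending for propagation up the tree. A rough self-contained C sketch of that flow; the types, names, and the driver in main() are simplified stand-ins, not taken from this diff (the kernel runs this from the rstat flush path with READ_ONCE/WRITE_ONCE annotations, all omitted here):

#define NSTATS 4

struct percpu_stats {
        long state[NSTATS];             /* bumped locklessly on the hot path */
        long state_prev[NSTATS];        /* snapshot as of the previous flush */
};

struct group_stats {
        long state[NSTATS];             /* subtree-aggregated totals */
        long state_pending[NSTATS];     /* deltas queued by children */
};

static void flush_group(struct group_stats *grp, struct group_stats *parent,
                        struct percpu_stats *pcpu, int ncpus)
{
        int i, cpu;

        for (i = 0; i < NSTATS; i++) {
                /* start from whatever the children already queued for us */
                long delta = grp->state_pending[i];

                grp->state_pending[i] = 0;

                /* add each CPU's movement since the last flush */
                for (cpu = 0; cpu < ncpus; cpu++) {
                        long v = pcpu[cpu].state[i];

                        delta += v - pcpu[cpu].state_prev[i];
                        pcpu[cpu].state_prev[i] = v;
                }

                if (delta) {
                        grp->state[i] += delta;
                        if (parent)     /* pass the delta one level up */
                                parent->state_pending[i] += delta;
                }
        }
}

int main(void)
{
        struct percpu_stats pcpu[2] = { 0 };
        struct group_stats child = { 0 }, root = { 0 };

        pcpu[0].state[0] = 5;                   /* pretend CPU0 saw five events */
        flush_group(&child, &root, pcpu, 2);    /* child totals 5, queues 5 for root */
        flush_group(&root, NULL, NULL, 0);      /* root applies its pending delta */
        return 0;
}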
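Finally, nr_page_events and targets[] serve the cgroup1 interfaces named in their comment through a target-crossing check: a monotonically increasing per-CPU event count is compared against a saved target, and once the count passes it, the expensive work (threshold notification, soft-limit tree update) runs and the target is advanced by a fixed stride. A hedged sketch of that pattern; the stride value and function name below are illustrative, not from this diff:

#include <stdio.h>

#define SOFTLIMIT_EVENTS_TARGET 1024    /* illustrative stride */

/* Return 1 when the event count has passed the saved target. */
static int event_ratelimit(unsigned long nr_page_events, unsigned long *target)
{
        /* signed subtraction, so counter wraparound is handled (time_after() idiom) */
        if ((long)(*target - nr_page_events) >= 0)
                return 0;

        *target = nr_page_events + SOFTLIMIT_EVENTS_TARGET;
        return 1;
}

int main(void)
{
        unsigned long target = 0;

        printf("%d\n", event_ratelimit(1, &target));    /* 1: fires, target -> 1025 */
        printf("%d\n", event_ratelimit(2, &target));    /* 0: target not reached yet */
        return 0;
}

Amortizing the work this way keeps the per-event cost down to a counter increment and a comparison on the common path.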