Merge tag 'mm-hotfixes-stable-2024-07-10-13-19' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "21 hotfixes, 15 of which are cc:stable.

  No identifiable theme here - all are singleton patches, 19 are for MM"

* tag 'mm-hotfixes-stable-2024-07-10-13-19' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (21 commits)
  mm/hugetlb: fix kernel NULL pointer dereference when migrating hugetlb folio
  mm/hugetlb: fix potential race in __update_and_free_hugetlb_folio()
  filemap: replace pte_offset_map() with pte_offset_map_nolock()
  arch/xtensa: always_inline get_current() and current_thread_info()
  sched.h: always_inline alloc_tag_{save|restore} to fix modpost warnings
  MAINTAINERS: mailmap: update Lorenzo Stoakes's email address
  mm: fix crashes from deferred split racing folio migration
  lib/build_OID_registry: avoid non-destructive substitution for Perl < 5.13.2 compat
  mm: gup: stop abusing try_grab_folio
  nilfs2: fix kernel bug on rename operation of broken directory
  mm/hugetlb_vmemmap: fix race with speculative PFN walkers
  cachestat: do not flush stats in recency check
  mm/shmem: disable PMD-sized page cache if needed
  mm/filemap: skip to create PMD-sized page cache if needed
  mm/readahead: limit page cache size in page_cache_ra_order()
  mm/filemap: make MAX_PAGECACHE_ORDER acceptable to xarray
  mm/damon/core: merge regions aggressively when max_nr_regions is unmet
  Fix userfaultfd_api to return EINVAL as expected
  mm: vmalloc: check if a hash-index is in cpu_possible_mask
  mm: prevent derefencing NULL ptr in pfn_section_valid()
  ...
torvalds committed Jul 10, 2024
2 parents ef2b7eb + f708f69 commit 9d9a2f2
Showing 25 changed files with 339 additions and 286 deletions.
1 change: 1 addition & 0 deletions .mailmap
@@ -384,6 +384,7 @@ Li Yang <[email protected]> <[email protected]>
Li Yang <[email protected]> <[email protected]>
Lior David <[email protected]> <[email protected]>
Lorenzo Pieralisi <[email protected]> <[email protected]>
Lorenzo Stoakes <[email protected]> <[email protected]>
Luca Ceresoli <[email protected]> <[email protected]>
Lukasz Luba <[email protected]> <[email protected]>
Luo Jie <[email protected]> <[email protected]>
2 changes: 1 addition & 1 deletion MAINTAINERS
@@ -14472,7 +14472,7 @@ MEMORY MAPPING
M: Andrew Morton <[email protected]>
R: Liam R. Howlett <[email protected]>
R: Vlastimil Babka <[email protected]>
R: Lorenzo Stoakes <lstoakes@gmail.com>
R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
L: [email protected]
S: Maintained
W: http://www.linux-mm.org
2 changes: 1 addition & 1 deletion arch/xtensa/include/asm/current.h
@@ -19,7 +19,7 @@

struct task_struct;

static inline struct task_struct *get_current(void)
static __always_inline struct task_struct *get_current(void)
{
return current_thread_info()->task;
}
2 changes: 1 addition & 1 deletion arch/xtensa/include/asm/thread_info.h
@@ -91,7 +91,7 @@ struct thread_info {
}

/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
static __always_inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
__asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t"
32 changes: 30 additions & 2 deletions fs/nilfs2/dir.c
@@ -383,11 +383,39 @@ struct nilfs_dir_entry *nilfs_find_entry(struct inode *dir,

struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct folio **foliop)
{
struct nilfs_dir_entry *de = nilfs_get_folio(dir, 0, foliop);
struct folio *folio;
struct nilfs_dir_entry *de, *next_de;
size_t limit;
char *msg;

de = nilfs_get_folio(dir, 0, &folio);
if (IS_ERR(de))
return NULL;
return nilfs_next_entry(de);

limit = nilfs_last_byte(dir, 0); /* is a multiple of chunk size */
if (unlikely(!limit || le64_to_cpu(de->inode) != dir->i_ino ||
!nilfs_match(1, ".", de))) {
msg = "missing '.'";
goto fail;
}

next_de = nilfs_next_entry(de);
/*
* If "next_de" has not reached the end of the chunk, there is
* at least one more record. Check whether it matches "..".
*/
if (unlikely((char *)next_de == (char *)de + nilfs_chunk_size(dir) ||
!nilfs_match(2, "..", next_de))) {
msg = "missing '..'";
goto fail;
}
*foliop = folio;
return next_de;

fail:
nilfs_error(dir->i_sb, "directory #%lu %s", dir->i_ino, msg);
folio_release_kmap(folio, de);
return NULL;
}

ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
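Note: the rewritten nilfs_dotdot() now refuses to trust a corrupted first directory chunk: it must open with a "." entry whose inode points back at the directory itself, immediately followed by a ".." entry, otherwise nilfs_error() is reported and NULL is returned. The standalone sketch below mirrors just that check; the fixed-size record layout is a deliberate simplification for illustration, not the real nilfs2 on-disk format.

/* Simplified sketch of the validation added above; the entry layout here is
   a hypothetical stand-in, much simpler than the real nilfs2 directory format. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dir_entry {
	uint64_t inode;
	char name[8];
};

static bool first_chunk_is_sane(const struct dir_entry *chunk, size_t nr,
				uint64_t dir_ino)
{
	if (nr < 2 || chunk[0].inode != dir_ino || strcmp(chunk[0].name, "."))
		return false;				/* "missing '.'" */
	if (strcmp(chunk[1].name, ".."))
		return false;				/* "missing '..'" */
	return true;
}

int main(void)
{
	struct dir_entry chunk[] = {
		{ .inode = 42, .name = "." },
		{ .inode = 2,  .name = ".." },
	};

	printf("sane: %d\n", first_chunk_is_sane(chunk, 2, 42));
	return 0;
}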
7 changes: 6 additions & 1 deletion fs/userfaultfd.c
@@ -2057,7 +2057,7 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
goto out;
features = uffdio_api.features;
ret = -EINVAL;
if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
if (uffdio_api.api != UFFD_API)
goto err_out;
ret = -EPERM;
if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
@@ -2081,6 +2081,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED;
uffdio_api.features &= ~UFFD_FEATURE_WP_ASYNC;
#endif

ret = -EINVAL;
if (features & ~uffdio_api.features)
goto err_out;

uffdio_api.ioctls = UFFD_API_IOCTLS;
ret = -EFAULT;
if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
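Note: with the feature check moved below the #ifdef blocks that mask off config-disabled bits, a UFFDIO_API call that asks for a feature this particular kernel cannot provide now fails with EINVAL instead of silently dropping the bit. The userspace sketch below is illustrative only and assumes a uapi header new enough to define UFFD_FEATURE_WP_ASYNC.

/* Illustrative userspace sketch of the UFFDIO_API handshake affected above;
   assumes <linux/userfaultfd.h> defines UFFD_FEATURE_WP_ASYNC. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
	int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd < 0) {
		perror("userfaultfd");
		return 1;
	}

	struct uffdio_api api = {
		.api = UFFD_API,
		/* ask for a feature the running kernel may have compiled out */
		.features = UFFD_FEATURE_WP_ASYNC,
	};

	if (ioctl(uffd, UFFDIO_API, &api) < 0)
		/* with this fix, an unsupported feature shows up as EINVAL here */
		fprintf(stderr, "UFFDIO_API: %s\n", strerror(errno));
	else
		printf("features granted: 0x%llx\n",
		       (unsigned long long)api.features);

	close(uffd);
	return 0;
}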
3 changes: 2 additions & 1 deletion include/linux/mmzone.h
@@ -1979,8 +1979,9 @@ static inline int subsection_map_index(unsigned long pfn)
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
int idx = subsection_map_index(pfn);
struct mem_section_usage *usage = READ_ONCE(ms->usage);

return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
return usage ? test_bit(idx, usage->subsection_map) : 0;
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
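Note: the fix is the usual load-once-then-test pattern: READ_ONCE() snapshots ms->usage into a local, so a concurrent memory hot-remove clearing the pointer cannot slip in between the NULL check and the dereference; a cleared pointer simply makes the pfn report as invalid. Below is a standalone sketch of the same idea using C11 atomics; it is not kernel code, and the struct layout is an assumption for illustration.

/* Standalone sketch of the snapshot-then-test pattern (an atomic load stands
   in for READ_ONCE); struct layout and bit numbering are illustrative only. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct usage {
	unsigned long subsection_map;
};

static _Atomic(struct usage *) shared_usage;	/* may be cleared concurrently */

static bool subsection_valid(int idx)
{
	/* read the shared pointer exactly once, then only touch the local copy */
	struct usage *usage = atomic_load(&shared_usage);

	return usage ? (usage->subsection_map >> idx) & 1 : false;
}

int main(void)
{
	static struct usage u = { .subsection_map = 0x5 };

	atomic_store(&shared_usage, &u);
	printf("idx 2 valid: %d\n", subsection_valid(2));

	atomic_store(&shared_usage, NULL);	/* simulate the hot-remove race */
	printf("idx 2 valid: %d\n", subsection_valid(2));
	return 0;
}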
57 changes: 9 additions & 48 deletions include/linux/page_ref.h
@@ -230,7 +230,13 @@ static inline int folio_ref_dec_return(struct folio *folio)

static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
bool ret = atomic_add_unless(&page->_refcount, nr, u);
bool ret = false;

rcu_read_lock();
/* avoid writing to the vmemmap area being remapped */
if (!page_is_fake_head(page) && page_ref_count(page) != u)
ret = atomic_add_unless(&page->_refcount, nr, u);
rcu_read_unlock();

if (page_ref_tracepoint_active(page_ref_mod_unless))
__page_ref_mod_unless(page, nr, ret);
@@ -258,54 +264,9 @@ static inline bool folio_try_get(struct folio *folio)
return folio_ref_add_unless(folio, 1, 0);
}

static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
{
#ifdef CONFIG_TINY_RCU
/*
* The caller guarantees the folio will not be freed from interrupt
* context, so (on !SMP) we only need preemption to be disabled
* and TINY_RCU does that for us.
*/
# ifdef CONFIG_PREEMPT_COUNT
VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
folio_ref_add(folio, count);
#else
if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
/* Either the folio has been freed, or will be freed. */
return false;
}
#endif
return true;
}

/**
* folio_try_get_rcu - Attempt to increase the refcount on a folio.
* @folio: The folio.
*
* This is a version of folio_try_get() optimised for non-SMP kernels.
* If you are still holding the rcu_read_lock() after looking up the
* page and know that the page cannot have its refcount decreased to
* zero in interrupt context, you can use this instead of folio_try_get().
*
* Example users include get_user_pages_fast() (as pages are not unmapped
* from interrupt context) and the page cache lookups (as pages are not
* truncated from interrupt context). We also know that pages are not
* frozen in interrupt context for the purposes of splitting or migration.
*
* You can also use this function if you're holding a lock that prevents
* pages being frozen & removed; eg the i_pages lock for the page cache
* or the mmap_lock or page table lock for page tables. In this case,
* it will always succeed, and you could have used a plain folio_get(),
* but it's sometimes more convenient to have a common function called
* from both locked and RCU-protected contexts.
*
* Return: True if the reference count was successfully incremented.
*/
static inline bool folio_try_get_rcu(struct folio *folio)
static inline bool folio_ref_try_add(struct folio *folio, int count)
{
return folio_ref_try_add_rcu(folio, 1);
return folio_ref_add_unless(folio, count, 0);
}

static inline int page_ref_freeze(struct page *page, int count)
11 changes: 9 additions & 2 deletions include/linux/pagemap.h
@@ -354,11 +354,18 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
* a good order (that's 1MB if you're using 4kB pages)
*/
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
#define PREFERRED_MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
#else
#define MAX_PAGECACHE_ORDER 8
#define PREFERRED_MAX_PAGECACHE_ORDER 8
#endif

/*
* xas_split_alloc() does not support arbitrary orders. This implies no
* 512MB THP on ARM64 with 64KB base page size.
*/
#define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1)
#define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)

/**
* mapping_set_large_folios() - Indicate the file supports large folios.
* @mapping: The file.
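Note: a quick worked example of the new clamp, assuming the common XA_CHUNK_SHIFT of 6: MAX_XAS_ORDER = 6 * 2 - 1 = 11, while an ARM64 kernel built with a 64KB base page has HPAGE_PMD_ORDER = 13 (a PMD maps 2^13 base pages, i.e. 512MB), so MAX_PAGECACHE_ORDER drops from 13 to 11 there and 512MB page cache folios are no longer attempted. The standalone snippet below just replays that arithmetic outside the kernel.

/* Standalone replay of the clamping arithmetic; XA_CHUNK_SHIFT = 6 and
   HPAGE_PMD_ORDER = 13 (ARM64, 64KB pages) are assumed example values. */
#include <stdio.h>

#define XA_CHUNK_SHIFT			6
#define MAX_XAS_ORDER			(XA_CHUNK_SHIFT * 2 - 1)	/* 11 */
#define PREFERRED_MAX_PAGECACHE_ORDER	13				/* HPAGE_PMD_ORDER */
#define MIN(a, b)			((a) < (b) ? (a) : (b))

int main(void)
{
	int max_pagecache_order = MIN(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER);

	/* prints 11: an order-13 (512MB) page cache folio is no longer allowed */
	printf("MAX_PAGECACHE_ORDER = %d\n", max_pagecache_order);
	return 0;
}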
4 changes: 2 additions & 2 deletions include/linux/sched.h
@@ -2192,13 +2192,13 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
extern void sched_set_stop_task(int cpu, struct task_struct *stop);

#ifdef CONFIG_MEM_ALLOC_PROFILING
static inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
static __always_inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
{
swap(current->alloc_tag, tag);
return tag;
}

static inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
{
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n");
3 changes: 2 additions & 1 deletion include/linux/swap.h
@@ -354,7 +354,8 @@ static inline swp_entry_t page_swap_entry(struct page *page)
}

/* linux/mm/workingset.c */
bool workingset_test_recent(void *shadow, bool file, bool *workingset);
bool workingset_test_recent(void *shadow, bool file, bool *workingset,
bool flush);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
4 changes: 3 additions & 1 deletion lib/build_OID_registry
@@ -38,7 +38,9 @@ close IN_FILE || die;
#
open C_FILE, ">$ARGV[1]" or die;
print C_FILE "/*\n";
print C_FILE " * Automatically generated by ", $0 =~ s#^\Q$abs_srctree/\E##r, ". Do not edit\n";
my $scriptname = $0;
$scriptname =~ s#^\Q$abs_srctree/\E##;
print C_FILE " * Automatically generated by ", $scriptname, ". Do not edit\n";
print C_FILE " */\n";

#
23 changes: 20 additions & 3 deletions mm/damon/core.c
@@ -1358,14 +1358,31 @@ static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
* access frequencies are similar. This is for minimizing the monitoring
* overhead under the dynamically changeable access pattern. If a merge was
* unnecessarily made, later 'kdamond_split_regions()' will revert it.
*
* The total number of regions could exceed the user-defined limit,
* max_nr_regions, in some cases. For example, the user can update
* max_nr_regions to a number lower than the current number of regions
* while DAMON is running. For such a case, repeat merging until the limit is
* met while increasing @threshold up to the possible maximum level.
*/
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
unsigned long sz_limit)
{
struct damon_target *t;

damon_for_each_target(t, c)
damon_merge_regions_of(t, threshold, sz_limit);
unsigned int nr_regions;
unsigned int max_thres;

max_thres = c->attrs.aggr_interval /
(c->attrs.sample_interval ? c->attrs.sample_interval : 1);
do {
nr_regions = 0;
damon_for_each_target(t, c) {
damon_merge_regions_of(t, threshold, sz_limit);
nr_regions += damon_nr_regions(t);
}
threshold = max(1, threshold * 2);
} while (nr_regions > c->attrs.max_nr_regions &&
threshold / 2 < max_thres);
}

/*
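Note: the loop above re-runs the merge passes with a doubled threshold each iteration and stops once threshold / 2 reaches aggr_interval / sample_interval, even if max_nr_regions is still not met. The userspace sketch below models a merge pass with a made-up formula, purely to show how the threshold escalates; it is not the kernel algorithm.

/* Toy userspace model of the escalating-threshold loop; merge_pass() is an
   invented stand-in for damon_merge_regions_of(), not the real logic. */
#include <stdio.h>

static unsigned int merge_pass(unsigned int nr_regions, unsigned int threshold)
{
	/* pretend a higher threshold merges away fewer regions */
	return nr_regions - nr_regions / (threshold + 1);
}

int main(void)
{
	unsigned int aggr_interval = 100000, sample_interval = 5000;	/* usecs */
	unsigned int max_nr_regions = 10, nr_regions = 1000;
	unsigned int threshold = 1;
	unsigned int max_thres = aggr_interval /
				 (sample_interval ? sample_interval : 1);

	do {
		nr_regions = merge_pass(nr_regions, threshold);
		printf("threshold=%u -> nr_regions=%u\n", threshold, nr_regions);
		threshold = threshold * 2 ? threshold * 2 : 1;	/* max(1, threshold * 2) */
	} while (nr_regions > max_nr_regions && threshold / 2 < max_thres);

	/* as in the kernel, the limit may still be unmet once the cap is hit */
	return 0;
}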
20 changes: 12 additions & 8 deletions mm/filemap.c
@@ -1847,7 +1847,7 @@ void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
if (!folio || xa_is_value(folio))
goto out;

if (!folio_try_get_rcu(folio))
if (!folio_try_get(folio))
goto repeat;

if (unlikely(folio != xas_reload(&xas))) {
@@ -2001,7 +2001,7 @@ static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
if (!folio || xa_is_value(folio))
return folio;

if (!folio_try_get_rcu(folio))
if (!folio_try_get(folio))
goto reset;

if (unlikely(folio != xas_reload(xas))) {
@@ -2181,7 +2181,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
if (xa_is_value(folio))
goto update_start;

if (!folio_try_get_rcu(folio))
if (!folio_try_get(folio))
goto retry;

if (unlikely(folio != xas_reload(&xas)))
@@ -2313,7 +2313,7 @@ static void filemap_get_read_batch(struct address_space *mapping,
break;
if (xa_is_sibling(folio))
break;
if (!folio_try_get_rcu(folio))
if (!folio_try_get(folio))
goto retry;

if (unlikely(folio != xas_reload(&xas)))
@@ -3124,7 +3124,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Use the readahead code, even if readahead is disabled */
if (vm_flags & VM_HUGEPAGE) {
if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) {
fpin = maybe_unlock_mmap_for_io(vmf, fpin);
ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
ra->size = HPAGE_PMD_NR;
@@ -3231,7 +3231,8 @@ static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf)
if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
return 0;

ptep = pte_offset_map(vmf->pmd, vmf->address);
ptep = pte_offset_map_nolock(vma->vm_mm, vmf->pmd, vmf->address,
&vmf->ptl);
if (unlikely(!ptep))
return VM_FAULT_NOPAGE;

@@ -3472,7 +3473,7 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
continue;
if (folio_test_locked(folio))
continue;
if (!folio_try_get_rcu(folio))
if (!folio_try_get(folio))
continue;
/* Has the page moved or been split? */
if (unlikely(folio != xas_reload(xas)))
@@ -4248,6 +4249,9 @@ static void filemap_cachestat(struct address_space *mapping,
XA_STATE(xas, &mapping->i_pages, first_index);
struct folio *folio;

/* Flush stats (and potentially sleep) outside the RCU read section. */
mem_cgroup_flush_stats_ratelimited(NULL);

rcu_read_lock();
xas_for_each(&xas, folio, last_index) {
int order;
@@ -4311,7 +4315,7 @@ static void filemap_cachestat(struct address_space *mapping,
goto resched;
}
#endif
if (workingset_test_recent(shadow, true, &workingset))
if (workingset_test_recent(shadow, true, &workingset, false))
cs->nr_recently_evicted += nr_pages;

goto resched;
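Note: every folio_try_get_rcu() to folio_try_get() conversion above keeps the same lockless-lookup shape: speculatively take a reference on whatever the slot currently holds, then re-read the slot (xas_reload()) and, if it changed, drop the reference and retry. The standalone sketch below shows that shape with C11 atomics and a single mock slot; it is not the kernel's xarray code.

/* Standalone sketch of the speculative get-then-recheck lookup pattern;
   the mock slot and folio types are assumptions for illustration only. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct folio_mock {
	atomic_int refcount;
};

static _Atomic(struct folio_mock *) slot;	/* stands in for one xarray slot */

/* add-unless-zero, the core of folio_try_get(): fail if already being freed */
static bool try_get(struct folio_mock *f)
{
	int old = atomic_load(&f->refcount);

	do {
		if (old == 0)
			return false;
	} while (!atomic_compare_exchange_weak(&f->refcount, &old, old + 1));

	return true;
}

static struct folio_mock *lookup(void)
{
	struct folio_mock *f;

repeat:
	f = atomic_load(&slot);
	if (!f)
		return NULL;
	if (!try_get(f))
		goto repeat;			/* raced with a free; look again */

	if (f != atomic_load(&slot)) {		/* the "xas_reload()" recheck */
		atomic_fetch_sub(&f->refcount, 1);
		goto repeat;
	}
	return f;
}

int main(void)
{
	static struct folio_mock folio = { .refcount = 1 };

	atomic_store(&slot, &folio);
	return lookup() ? 0 : 1;
}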