Skip to content

Commit

Permalink
tracing: incorrect gfp_t conversion
Browse files Browse the repository at this point in the history
Fixes the following sparse warnings:

include/trace/events/*: sparse: cast to restricted gfp_t
include/trace/events/*: sparse: restricted gfp_t degrades to integer

The gfp_t type is declared __bitwise, so sparse treats it as a restricted type: any cast to or from it must carry the __force attribute, otherwise sparse warns.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Vasily Averin <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Ingo Molnar <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
  • Loading branch information
Vasily Averin authored and akpm00 committed May 13, 2022
1 parent e7be8d1 commit fe57332
Show file tree
Hide file tree
Showing 7 changed files with 59 additions and 59 deletions.
2 changes: 1 addition & 1 deletion include/linux/gfp.h
Original file line number Diff line number Diff line change
Expand Up @@ -367,7 +367,7 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
return MIGRATE_UNMOVABLE;

/* Group based on mobility */
return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
Expand Down
4 changes: 2 additions & 2 deletions include/trace/events/btrfs.h
Original file line number Diff line number Diff line change
Expand Up @@ -1344,13 +1344,13 @@ TRACE_EVENT(alloc_extent_state,

TP_STRUCT__entry(
__field(const struct extent_state *, state)
__field(gfp_t, mask)
__field(unsigned long, mask)
__field(const void*, ip)
),

TP_fast_assign(
__entry->state = state,
__entry->mask = mask,
__entry->mask = (__force unsigned long)mask,
__entry->ip = (const void *)IP
),

Expand Down
4 changes: 2 additions & 2 deletions include/trace/events/compaction.h
Original file line number Diff line number Diff line change
Expand Up @@ -162,13 +162,13 @@ TRACE_EVENT(mm_compaction_try_to_compact_pages,

TP_STRUCT__entry(
__field(int, order)
__field(gfp_t, gfp_mask)
__field(unsigned long, gfp_mask)
__field(int, prio)
),

TP_fast_assign(
__entry->order = order;
__entry->gfp_mask = gfp_mask;
__entry->gfp_mask = (__force unsigned long)gfp_mask;
__entry->prio = prio;
),

Expand Down
12 changes: 6 additions & 6 deletions include/trace/events/kmem.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,15 +24,15 @@ DECLARE_EVENT_CLASS(kmem_alloc,
__field( const void *, ptr )
__field( size_t, bytes_req )
__field( size_t, bytes_alloc )
__field( gfp_t, gfp_flags )
__field( unsigned long, gfp_flags )
),

TP_fast_assign(
__entry->call_site = call_site;
__entry->ptr = ptr;
__entry->bytes_req = bytes_req;
__entry->bytes_alloc = bytes_alloc;
__entry->gfp_flags = gfp_flags;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
),

TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
Expand Down Expand Up @@ -75,7 +75,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__field( const void *, ptr )
__field( size_t, bytes_req )
__field( size_t, bytes_alloc )
__field( gfp_t, gfp_flags )
__field( unsigned long, gfp_flags )
__field( int, node )
),

Expand All @@ -84,7 +84,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__entry->ptr = ptr;
__entry->bytes_req = bytes_req;
__entry->bytes_alloc = bytes_alloc;
__entry->gfp_flags = gfp_flags;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
__entry->node = node;
),

Expand Down Expand Up @@ -208,14 +208,14 @@ TRACE_EVENT(mm_page_alloc,
TP_STRUCT__entry(
__field( unsigned long, pfn )
__field( unsigned int, order )
__field( gfp_t, gfp_flags )
__field( unsigned long, gfp_flags )
__field( int, migratetype )
),

TP_fast_assign(
__entry->pfn = page ? page_to_pfn(page) : -1UL;
__entry->order = order;
__entry->gfp_flags = gfp_flags;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
__entry->migratetype = migratetype;
),

Expand Down
78 changes: 39 additions & 39 deletions include/trace/events/mmflags.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,48 +14,48 @@
*/

/*
 * Symbolic names for the GFP flag combinations printed by trace events
 * (consumed via __print_flags()).  gfp_t is a sparse __bitwise type, so
 * each value is cast to plain unsigned long with __force to avoid
 * "cast to restricted gfp_t" / "restricted gfp_t degrades to integer"
 * warnings.  Composite masks (GFP_TRANSHUGE, GFP_KERNEL, ...) are listed
 * before the individual __GFP_* bits so __print_flags() matches the
 * widest combination first.
 */
#define __def_gfpflag_names						\
	{(__force unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"}, \
	{(__force unsigned long)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \
	{(__force unsigned long)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"}, \
	{(__force unsigned long)GFP_HIGHUSER,		"GFP_HIGHUSER"}, \
	{(__force unsigned long)GFP_USER,		"GFP_USER"}, \
	{(__force unsigned long)GFP_KERNEL_ACCOUNT,	"GFP_KERNEL_ACCOUNT"}, \
	{(__force unsigned long)GFP_KERNEL,		"GFP_KERNEL"}, \
	{(__force unsigned long)GFP_NOFS,		"GFP_NOFS"}, \
	{(__force unsigned long)GFP_ATOMIC,		"GFP_ATOMIC"}, \
	{(__force unsigned long)GFP_NOIO,		"GFP_NOIO"}, \
	{(__force unsigned long)GFP_NOWAIT,		"GFP_NOWAIT"}, \
	{(__force unsigned long)GFP_DMA,		"GFP_DMA"}, \
	{(__force unsigned long)__GFP_HIGHMEM,		"__GFP_HIGHMEM"}, \
	{(__force unsigned long)GFP_DMA32,		"GFP_DMA32"}, \
	{(__force unsigned long)__GFP_HIGH,		"__GFP_HIGH"}, \
	{(__force unsigned long)__GFP_ATOMIC,		"__GFP_ATOMIC"}, \
	{(__force unsigned long)__GFP_IO,		"__GFP_IO"}, \
	{(__force unsigned long)__GFP_FS,		"__GFP_FS"}, \
	{(__force unsigned long)__GFP_NOWARN,		"__GFP_NOWARN"}, \
	{(__force unsigned long)__GFP_RETRY_MAYFAIL,	"__GFP_RETRY_MAYFAIL"}, \
	{(__force unsigned long)__GFP_NOFAIL,		"__GFP_NOFAIL"}, \
	{(__force unsigned long)__GFP_NORETRY,		"__GFP_NORETRY"}, \
	{(__force unsigned long)__GFP_COMP,		"__GFP_COMP"}, \
	{(__force unsigned long)__GFP_ZERO,		"__GFP_ZERO"}, \
	{(__force unsigned long)__GFP_NOMEMALLOC,	"__GFP_NOMEMALLOC"}, \
	{(__force unsigned long)__GFP_MEMALLOC,		"__GFP_MEMALLOC"}, \
	{(__force unsigned long)__GFP_HARDWALL,		"__GFP_HARDWALL"}, \
	{(__force unsigned long)__GFP_THISNODE,		"__GFP_THISNODE"}, \
	{(__force unsigned long)__GFP_RECLAIMABLE,	"__GFP_RECLAIMABLE"}, \
	{(__force unsigned long)__GFP_MOVABLE,		"__GFP_MOVABLE"}, \
	{(__force unsigned long)__GFP_ACCOUNT,		"__GFP_ACCOUNT"}, \
	{(__force unsigned long)__GFP_WRITE,		"__GFP_WRITE"}, \
	{(__force unsigned long)__GFP_RECLAIM,		"__GFP_RECLAIM"}, \
	{(__force unsigned long)__GFP_DIRECT_RECLAIM,	"__GFP_DIRECT_RECLAIM"}, \
	{(__force unsigned long)__GFP_KSWAPD_RECLAIM,	"__GFP_KSWAPD_RECLAIM"}, \
	{(__force unsigned long)__GFP_ZEROTAGS,		"__GFP_ZEROTAGS"}

/*
 * Extra flag names that only exist when CONFIG_KASAN_HW_TAGS is set.
 * Expands to a leading comma plus the entries (so it can be appended
 * directly after __def_gfpflag_names), or to nothing otherwise.
 * Same __force cast as above: gfp_t is a sparse __bitwise type.
 */
#ifdef CONFIG_KASAN_HW_TAGS
#define __def_gfpflag_names_kasan ,					\
	{(__force unsigned long)__GFP_SKIP_ZERO, "__GFP_SKIP_ZERO"}, \
	{(__force unsigned long)__GFP_SKIP_KASAN_POISON, "__GFP_SKIP_KASAN_POISON"}, \
	{(__force unsigned long)__GFP_SKIP_KASAN_UNPOISON, "__GFP_SKIP_KASAN_UNPOISON"}
#else
#define __def_gfpflag_names_kasan
#endif
Expand Down
16 changes: 8 additions & 8 deletions include/trace/events/vmscan.h
Original file line number Diff line number Diff line change
Expand Up @@ -96,14 +96,14 @@ TRACE_EVENT(mm_vmscan_wakeup_kswapd,
__field( int, nid )
__field( int, zid )
__field( int, order )
__field( gfp_t, gfp_flags )
__field( unsigned long, gfp_flags )
),

TP_fast_assign(
__entry->nid = nid;
__entry->zid = zid;
__entry->order = order;
__entry->gfp_flags = gfp_flags;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
),

TP_printk("nid=%d order=%d gfp_flags=%s",
Expand All @@ -120,12 +120,12 @@ DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,

TP_STRUCT__entry(
__field( int, order )
__field( gfp_t, gfp_flags )
__field( unsigned long, gfp_flags )
),

TP_fast_assign(
__entry->order = order;
__entry->gfp_flags = gfp_flags;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
),

TP_printk("order=%d gfp_flags=%s",
Expand Down Expand Up @@ -210,7 +210,7 @@ TRACE_EVENT(mm_shrink_slab_start,
__field(void *, shrink)
__field(int, nid)
__field(long, nr_objects_to_shrink)
__field(gfp_t, gfp_flags)
__field(unsigned long, gfp_flags)
__field(unsigned long, cache_items)
__field(unsigned long long, delta)
__field(unsigned long, total_scan)
Expand All @@ -222,7 +222,7 @@ TRACE_EVENT(mm_shrink_slab_start,
__entry->shrink = shr->scan_objects;
__entry->nid = sc->nid;
__entry->nr_objects_to_shrink = nr_objects_to_shrink;
__entry->gfp_flags = sc->gfp_mask;
__entry->gfp_flags = (__force unsigned long)sc->gfp_mask;
__entry->cache_items = cache_items;
__entry->delta = delta;
__entry->total_scan = total_scan;
Expand Down Expand Up @@ -446,13 +446,13 @@ TRACE_EVENT(mm_vmscan_node_reclaim_begin,
TP_STRUCT__entry(
__field(int, nid)
__field(int, order)
__field(gfp_t, gfp_flags)
__field(unsigned long, gfp_flags)
),

TP_fast_assign(
__entry->nid = nid;
__entry->order = order;
__entry->gfp_flags = gfp_flags;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
),

TP_printk("nid=%d order=%d gfp_flags=%s",
Expand Down
2 changes: 1 addition & 1 deletion mm/compaction.c
Original file line number Diff line number Diff line change
Expand Up @@ -2569,7 +2569,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
unsigned int alloc_flags, const struct alloc_context *ac,
enum compact_priority prio, struct page **capture)
{
int may_perform_io = gfp_mask & __GFP_IO;
int may_perform_io = (__force int)(gfp_mask & __GFP_IO);
struct zoneref *z;
struct zone *zone;
enum compact_result rc = COMPACT_SKIPPED;
Expand Down

0 comments on commit fe57332

Please sign in to comment.