Merge branch 'for-next/mte' into for-next/core
* for-next/mte:
  kasan: Extend KASAN mode kernel parameter
  arm64: mte: Add asymmetric mode support
  arm64: mte: CPU feature detection for Asymm MTE
  arm64: mte: Bitfield definitions for Asymm MTE
  kasan: Remove duplicate of kasan_flag_async
  arm64: kasan: mte: move GCR_EL1 switch to task switch when KASAN disabled
willdeacon committed Oct 29, 2021
2 parents dc6bab1 + 2d27e58 commit 7066248
Showing 15 changed files with 151 additions and 39 deletions.
7 changes: 5 additions & 2 deletions Documentation/dev-tools/kasan.rst
@@ -194,14 +194,17 @@ additional boot parameters that allow disabling KASAN or controlling features:

- ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).

- ``kasan.mode=sync`` or ``=async`` controls whether KASAN is configured in
synchronous or asynchronous mode of execution (default: ``sync``).
- ``kasan.mode=sync``, ``=async`` or ``=asymm`` controls whether KASAN
is configured in synchronous, asynchronous or asymmetric mode of
execution (default: ``sync``).
Synchronous mode: a bad access is detected immediately when a tag
check fault occurs.
Asynchronous mode: a bad access detection is delayed. When a tag check
fault occurs, the information is stored in hardware (in the TFSR_EL1
register for arm64). The kernel periodically checks the hardware and
only reports tag faults during these checks.
Asymmetric mode: a bad access is detected synchronously on reads and
asynchronously on writes.

- ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
traces collection (default: ``on``).
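As a usage illustration (not part of this diff), a CONFIG_KASAN_HW_TAGS kernel would select the new mode through the boot parameters documented above, for example:

    kasan=on kasan.mode=asymm

``kasan=on`` is the default and is shown only for clarity; on hardware without asymmetric MTE support the arm64 code below falls back to synchronous mode.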
1 change: 1 addition & 0 deletions arch/arm64/include/asm/memory.h
@@ -243,6 +243,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tagging_sync() mte_enable_kernel_sync()
#define arch_enable_tagging_async() mte_enable_kernel_async()
#define arch_enable_tagging_asymm() mte_enable_kernel_asymm()
#define arch_force_async_tag_fault() mte_check_tfsr_exit()
#define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)
5 changes: 5 additions & 0 deletions arch/arm64/include/asm/mte-kasan.h
@@ -130,6 +130,7 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,

void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);
void mte_enable_kernel_asymm(void);

#else /* CONFIG_ARM64_MTE */

@@ -161,6 +162,10 @@ static inline void mte_enable_kernel_async(void)
{
}

static inline void mte_enable_kernel_asymm(void)
{
}

#endif /* CONFIG_ARM64_MTE */

#endif /* __ASSEMBLY__ */
8 changes: 4 additions & 4 deletions arch/arm64/include/asm/mte.h
@@ -88,11 +88,11 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,

#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
DECLARE_STATIC_KEY_FALSE(mte_async_mode);
DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);

static inline bool system_uses_mte_async_mode(void)
static inline bool system_uses_mte_async_or_asymm_mode(void)
{
return static_branch_unlikely(&mte_async_mode);
return static_branch_unlikely(&mte_async_or_asymm_mode);
}

void mte_check_tfsr_el1(void);
@@ -121,7 +121,7 @@ static inline void mte_check_tfsr_exit(void)
mte_check_tfsr_el1();
}
#else
static inline bool system_uses_mte_async_mode(void)
static inline bool system_uses_mte_async_or_asymm_mode(void)
{
return false;
}
3 changes: 3 additions & 0 deletions arch/arm64/include/asm/sysreg.h
@@ -626,6 +626,7 @@
#define SCTLR_ELx_TCF_NONE (UL(0x0) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_SYNC (UL(0x1) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_ASYMM (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT)

#define SCTLR_ELx_ENIA_SHIFT 31
@@ -671,6 +672,7 @@
#define SCTLR_EL1_TCF0_NONE (UL(0x0) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_SYNC (UL(0x1) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_ASYNC (UL(0x2) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_ASYMM (UL(0x3) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT)

#define SCTLR_EL1_BT1 (BIT(36))
@@ -812,6 +814,7 @@
#define ID_AA64PFR1_MTE_NI 0x0
#define ID_AA64PFR1_MTE_EL0 0x1
#define ID_AA64PFR1_MTE 0x2
#define ID_AA64PFR1_MTE_ASYMM 0x3

/* id_aa64zfr0 */
#define ID_AA64ZFR0_F64MM_SHIFT 56
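For orientation, TCF is a two-bit field in SCTLR_EL1 (with a TCF0 twin controlling EL0 accesses), so switching modes is a read-modify-write under SCTLR_ELx_TCF_MASK. A minimal sketch using the definitions above, illustrative only and not the body of __mte_enable_kernel() or of any other helper in this commit:

	u64 sctlr = read_sysreg(sctlr_el1);

	sctlr &= ~SCTLR_ELx_TCF_MASK;	/* clear the current tag check fault mode */
	sctlr |= SCTLR_ELx_TCF_ASYMM;	/* 0b11: reads checked synchronously, writes asynchronously */
	write_sysreg(sctlr, sctlr_el1);
	isb();				/* make the new checking mode take effect */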
4 changes: 2 additions & 2 deletions arch/arm64/include/asm/uaccess.h
@@ -191,13 +191,13 @@ static inline void __uaccess_enable_tco(void)
*/
static inline void __uaccess_disable_tco_async(void)
{
if (system_uses_mte_async_mode())
if (system_uses_mte_async_or_asymm_mode())
__uaccess_disable_tco();
}

static inline void __uaccess_enable_tco_async(void)
{
if (system_uses_mte_async_mode())
if (system_uses_mte_async_or_asymm_mode())
__uaccess_enable_tco();
}

10 changes: 10 additions & 0 deletions arch/arm64/kernel/cpufeature.c
@@ -2331,6 +2331,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sign = FTR_UNSIGNED,
.cpu_enable = cpu_enable_mte,
},
{
.desc = "Asymmetric MTE Tag Check Fault",
.capability = ARM64_MTE_ASYMM,
.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR1_EL1,
.field_pos = ID_AA64PFR1_MTE_SHIFT,
.min_field_value = ID_AA64PFR1_MTE_ASYMM,
.sign = FTR_UNSIGNED,
},
#endif /* CONFIG_ARM64_MTE */
{
.desc = "RCpc load-acquire (LDAPR)",
10 changes: 5 additions & 5 deletions arch/arm64/kernel/entry.S
@@ -168,20 +168,20 @@ alternative_else_nop_endif

.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_if_not ARM64_MTE
alternative_cb kasan_hw_tags_enable
b 1f
alternative_else_nop_endif
alternative_cb_end
mov \tmp, KERNEL_GCR_EL1
msr_s SYS_GCR_EL1, \tmp
1:
#endif
.endm

.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_ARM64_MTE
alternative_if_not ARM64_MTE
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb kasan_hw_tags_enable
b 1f
alternative_else_nop_endif
alternative_cb_end
ldr \tmp, [\tsk, #THREAD_MTE_CTRL]

mte_set_gcr \tmp, \tmp2
67 changes: 61 additions & 6 deletions arch/arm64/kernel/mte.c
@@ -26,9 +26,12 @@
static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred);

#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
DEFINE_STATIC_KEY_FALSE(mte_async_mode);
EXPORT_SYMBOL_GPL(mte_async_mode);
/*
* The asynchronous and asymmetric MTE modes have the same behavior for
* store operations. This flag is set when either of these modes is enabled.
*/
DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
#endif

static void mte_sync_page_tags(struct page *page, pte_t old_pte,
@@ -116,7 +119,7 @@ void mte_enable_kernel_sync(void)
* Make sure we enter this function when no PE has set
* async mode previously.
*/
WARN_ONCE(system_uses_mte_async_mode(),
WARN_ONCE(system_uses_mte_async_or_asymm_mode(),
"MTE async mode enabled system wide!");

__mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
@@ -134,8 +137,34 @@ void mte_enable_kernel_async(void)
* mode in between sync and async, this strategy needs
* to be reviewed.
*/
if (!system_uses_mte_async_mode())
static_branch_enable(&mte_async_mode);
if (!system_uses_mte_async_or_asymm_mode())
static_branch_enable(&mte_async_or_asymm_mode);
}

void mte_enable_kernel_asymm(void)
{
if (cpus_have_cap(ARM64_MTE_ASYMM)) {
__mte_enable_kernel("asymmetric", SCTLR_ELx_TCF_ASYMM);

/*
* MTE asymm mode behaves as async mode for store
* operations. The mode is set system wide by the
* first PE that executes this function.
*
* Note: If in future KASAN acquires a runtime switching
* mode in between sync and async, this strategy needs
* to be reviewed.
*/
if (!system_uses_mte_async_or_asymm_mode())
static_branch_enable(&mte_async_or_asymm_mode);
} else {
/*
* If the CPU does not support MTE asymmetric mode the
* kernel falls back on synchronous mode which is the
* default for kasan=on.
*/
mte_enable_kernel_sync();
}
}
#endif

@@ -179,6 +208,30 @@ static void mte_update_sctlr_user(struct task_struct *task)
task->thread.sctlr_user = sctlr;
}

static void mte_update_gcr_excl(struct task_struct *task)
{
/*
* SYS_GCR_EL1 will be set to current->thread.mte_ctrl value by
* mte_set_user_gcr() in kernel_exit, but only if KASAN is enabled.
*/
if (kasan_hw_tags_enabled())
return;

write_sysreg_s(
((task->thread.mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
SYS_GCR_EL1_EXCL_MASK) | SYS_GCR_EL1_RRND,
SYS_GCR_EL1);
}

void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
__le32 *updptr, int nr_inst)
{
BUG_ON(nr_inst != 1); /* Branch -> NOP */

if (kasan_hw_tags_enabled())
*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

void mte_thread_init_user(void)
{
if (!system_supports_mte())
@@ -198,6 +251,7 @@ void mte_thread_switch(struct task_struct *next)
return;

mte_update_sctlr_user(next);
mte_update_gcr_excl(next);

/*
* Check if an async tag exception occurred at EL1.
@@ -243,6 +297,7 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
if (task == current) {
preempt_disable();
mte_update_sctlr_user(task);
mte_update_gcr_excl(task);
update_sctlr_el1(task->thread.sctlr_user);
preempt_enable();
}
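Taken together, the entry.S and mte.c hunks split ownership of GCR_EL1 between the two configurations. A condensed, comment-only sketch of the resulting behaviour, inferred from the hunks above rather than copied from the kernel sources:

	/* Inferred summary; not literal kernel code. */
	if (kasan_hw_tags_enabled()) {
		/*
		 * entry.S owns GCR_EL1: mte_set_kernel_gcr installs
		 * KERNEL_GCR_EL1 on kernel entry and mte_set_user_gcr
		 * restores the user value from THREAD_MTE_CTRL on exit,
		 * so mte_update_gcr_excl() returns early.
		 */
	} else {
		/*
		 * kasan_hw_tags_enable() leaves the 'b 1f' branches in
		 * place, the entry.S switches are skipped, and GCR_EL1
		 * is written only at task switch and from set_mte_ctrl()
		 * via mte_update_gcr_excl().
		 */
	}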
1 change: 1 addition & 0 deletions arch/arm64/tools/cpucaps
@@ -40,6 +40,7 @@ HW_DBM
KVM_PROTECTED_MODE
MISMATCHED_CACHE_TYPE
MTE
MTE_ASYMM
SPECTRE_V2
SPECTRE_V3A
SPECTRE_V4
9 changes: 7 additions & 2 deletions include/linux/kasan.h
@@ -89,7 +89,7 @@ static __always_inline bool kasan_enabled(void)
return static_branch_likely(&kasan_flag_enabled);
}

static inline bool kasan_has_integrated_init(void)
static inline bool kasan_hw_tags_enabled(void)
{
return kasan_enabled();
}
@@ -104,7 +104,7 @@ static inline bool kasan_enabled(void)
return IS_ENABLED(CONFIG_KASAN);
}

static inline bool kasan_has_integrated_init(void)
static inline bool kasan_hw_tags_enabled(void)
{
return false;
}
@@ -125,6 +125,11 @@ static __always_inline void kasan_free_pages(struct page *page,

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN

struct kasan_cache {
2 changes: 1 addition & 1 deletion lib/test_kasan.c
@@ -88,7 +88,7 @@ static void kasan_test_exit(struct kunit *test)
*/
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
!kasan_async_mode_enabled()) \
kasan_sync_fault_possible()) \
migrate_disable(); \
KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found)); \
barrier(); \
29 changes: 19 additions & 10 deletions mm/kasan/hw_tags.c
@@ -29,6 +29,7 @@ enum kasan_arg_mode {
KASAN_ARG_MODE_DEFAULT,
KASAN_ARG_MODE_SYNC,
KASAN_ARG_MODE_ASYNC,
KASAN_ARG_MODE_ASYMM,
};

enum kasan_arg_stacktrace {
@@ -45,9 +46,9 @@ static enum kasan_arg_stacktrace kasan_arg_stacktrace __ro_after_init;
DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
EXPORT_SYMBOL(kasan_flag_enabled);

/* Whether the asynchronous mode is enabled. */
bool kasan_flag_async __ro_after_init;
EXPORT_SYMBOL_GPL(kasan_flag_async);
/* Whether the selected mode is synchronous/asynchronous/asymmetric.*/
enum kasan_mode kasan_mode __ro_after_init;
EXPORT_SYMBOL_GPL(kasan_mode);

/* Whether to collect alloc/free stack traces. */
DEFINE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
@@ -69,7 +70,7 @@ static int __init early_kasan_flag(char *arg)
}
early_param("kasan", early_kasan_flag);

/* kasan.mode=sync/async */
/* kasan.mode=sync/async/asymm */
static int __init early_kasan_mode(char *arg)
{
if (!arg)
@@ -79,6 +80,8 @@
kasan_arg_mode = KASAN_ARG_MODE_SYNC;
else if (!strcmp(arg, "async"))
kasan_arg_mode = KASAN_ARG_MODE_ASYNC;
else if (!strcmp(arg, "asymm"))
kasan_arg_mode = KASAN_ARG_MODE_ASYMM;
else
return -EINVAL;

@@ -116,11 +119,13 @@ void kasan_init_hw_tags_cpu(void)
return;

/*
* Enable async mode only when explicitly requested through
* the command line.
* Enable async or asymm modes only when explicitly requested
* through the command line.
*/
if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
hw_enable_tagging_async();
else if (kasan_arg_mode == KASAN_ARG_MODE_ASYMM)
hw_enable_tagging_asymm();
else
hw_enable_tagging_sync();
}
@@ -143,15 +148,19 @@ void __init kasan_init_hw_tags(void)
case KASAN_ARG_MODE_DEFAULT:
/*
* Default to sync mode.
* Do nothing, kasan_flag_async keeps its default value.
*/
break;
fallthrough;
case KASAN_ARG_MODE_SYNC:
/* Do nothing, kasan_flag_async keeps its default value. */
/* Sync mode enabled. */
kasan_mode = KASAN_MODE_SYNC;
break;
case KASAN_ARG_MODE_ASYNC:
/* Async mode enabled. */
kasan_flag_async = true;
kasan_mode = KASAN_MODE_ASYNC;
break;
case KASAN_ARG_MODE_ASYMM:
/* Asymm mode enabled. */
kasan_mode = KASAN_MODE_ASYMM;
break;
}

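Two of the fifteen changed files are not rendered above. The helpers the visible hunks rely on (the enum kasan_mode type behind the kasan_mode variable defined in mm/kasan/hw_tags.c, the kasan_sync_fault_possible() helper used in lib/test_kasan.c, and the hw_enable_tagging_asymm() hook called from hw_tags.c) presumably live there, most likely in mm/kasan/kasan.h. A sketch of the assumed shape of those definitions, not the commit's actual contents:

	/* Assumed shape only; the real definitions are in files not shown here. */
	enum kasan_mode {
		KASAN_MODE_SYNC,
		KASAN_MODE_ASYNC,
		KASAN_MODE_ASYMM,
	};

	extern enum kasan_mode kasan_mode;

	/* Per the documentation above, asymmetric mode checks reads synchronously. */
	static inline bool kasan_sync_fault_possible(void)
	{
		return kasan_mode == KASAN_MODE_SYNC || kasan_mode == KASAN_MODE_ASYMM;
	}

	/* Generic hook expected to map onto arch_enable_tagging_asymm() from memory.h. */
	#define hw_enable_tagging_asymm() arch_enable_tagging_asymm()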