Merge branch 'for-next/stage1-lpa2' into for-next/core
* for-next/stage1-lpa2: (48 commits)
  : Add support for LPA2 and WXN at stage 1
  arm64/mm: Avoid ID mapping of kpti flag if it is no longer needed
  arm64/mm: Use generic __pud_free() helper in pud_free() implementation
  arm64: gitignore: ignore relacheck
  arm64: Use Signed/Unsigned enums for TGRAN{4,16,64} and VARange
  arm64: mm: Make PUD folding check in set_pud() a runtime check
  arm64: mm: add support for WXN memory translation attribute
  mm: add arch hook to validate mmap() prot flags
  arm64: defconfig: Enable LPA2 support
  arm64: Enable 52-bit virtual addressing for 4k and 16k granule configs
  arm64: kvm: avoid CONFIG_PGTABLE_LEVELS for runtime levels
  arm64: ptdump: Deal with translation levels folded at runtime
  arm64: ptdump: Disregard unaddressable VA space
  arm64: mm: Add support for folding PUDs at runtime
  arm64: kasan: Reduce minimum shadow alignment and enable 5 level paging
  arm64: mm: Add 5 level paging support to fixmap and swapper handling
  arm64: Enable LPA2 at boot if supported by the system
  arm64: mm: add LPA2 and 5 level paging support to G-to-nG conversion
  arm64: mm: Add definitions to support 5 levels of paging
  arm64: mm: Add LPA2 support to phys<->pte conversion routines
  arm64: mm: Wire up TCR.DS bit to PTE shareability fields
  ...
ctmarinas committed Mar 7, 2024
2 parents 0c5ade7 + 27f2b9f commit 88f0912
Showing 55 changed files with 1,949 additions and 1,124 deletions.
38 changes: 27 additions & 11 deletions arch/arm64/Kconfig
@@ -165,7 +165,7 @@ config ARM64
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
select HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
@@ -370,7 +370,9 @@ config PGTABLE_LEVELS
default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_VA_BITS_52)
default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
default 4 if ARM64_16K_PAGES && (ARM64_VA_BITS_48 || ARM64_VA_BITS_52)
default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
default 5 if ARM64_4K_PAGES && ARM64_VA_BITS_52
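For reference, the new 5-level default follows the usual arm64 relation between VA width and page size: the page itself covers PAGE_SHIFT bits and each table level resolves a further PAGE_SHIFT - 3 bits. A minimal sketch of that arithmetic (mirroring what ARM64_HW_PGTABLE_LEVELS computes; not part of this diff):

#include <stdio.h>

/* levels = ceil((va_bits - page_shift) / (page_shift - 3)) */
static int pgtable_levels(int va_bits, int page_shift)
{
	return (va_bits - page_shift + (page_shift - 3) - 1) / (page_shift - 3);
}

int main(void)
{
	printf("4K/52-bit : %d\n", pgtable_levels(52, 12));	/* 5 */
	printf("16K/52-bit: %d\n", pgtable_levels(52, 14));	/* 4 */
	printf("64K/52-bit: %d\n", pgtable_levels(52, 16));	/* 3 */
	return 0;
}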

config ARCH_SUPPORTS_UPROBES
def_bool y
@@ -398,13 +400,13 @@ config BUILTIN_RETURN_ADDRESS_STRIPS_PAC
config KASAN_SHADOW_OFFSET
hex
depends on KASAN_GENERIC || KASAN_SW_TAGS
default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
default 0xdfff800000000000 if (ARM64_VA_BITS_48 || (ARM64_VA_BITS_52 && !ARM64_16K_PAGES)) && !KASAN_SW_TAGS
default 0xdfffc00000000000 if (ARM64_VA_BITS_47 || ARM64_VA_BITS_52) && ARM64_16K_PAGES && !KASAN_SW_TAGS
default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
default 0xdfffffc000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS
default 0xdffffff800000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS
default 0xefff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS
default 0xefffc00000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS
default 0xefff800000000000 if (ARM64_VA_BITS_48 || (ARM64_VA_BITS_52 && !ARM64_16K_PAGES)) && KASAN_SW_TAGS
default 0xefffc00000000000 if (ARM64_VA_BITS_47 || ARM64_VA_BITS_52) && ARM64_16K_PAGES && KASAN_SW_TAGS
default 0xeffffe0000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS
default 0xefffffc000000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS
default 0xeffffff800000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS
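Both new 52-bit entries reuse the 48-bit (4K/64K) and 47-bit (16K) offsets: the shadow layout stays anchored to the compile-time minimum VA width, since 52-bit addressing is only enabled at runtime when the hardware supports it. A rough cross-check of the values, assuming the usual arm64 mapping shadow(va) = (va >> SCALE) + offset, with SCALE 3 for generic KASAN and 4 for software tags (a sketch, not part of the diff):

#include <stdio.h>

/*
 * Pick the offset so that the shadow of the very top of the 64-bit space
 * lands at the end of the shadow region, -(1 << (va_bits_min - 1)).
 */
static unsigned long long kasan_offset(int va_bits_min, int scale)
{
	return 0ULL - (1ULL << (va_bits_min - 1)) - (1ULL << (64 - scale));
}

int main(void)
{
	printf("0x%llx\n", kasan_offset(48, 3));	/* 0xdfff800000000000 */
	printf("0x%llx\n", kasan_offset(47, 3));	/* 0xdfffc00000000000 */
	printf("0x%llx\n", kasan_offset(48, 4));	/* 0xefff800000000000 */
	return 0;
}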
@@ -1280,9 +1282,7 @@ endchoice

choice
prompt "Virtual address space size"
default ARM64_VA_BITS_39 if ARM64_4K_PAGES
default ARM64_VA_BITS_47 if ARM64_16K_PAGES
default ARM64_VA_BITS_42 if ARM64_64K_PAGES
default ARM64_VA_BITS_52
help
Allows choosing one of multiple possible virtual address
space sizes. The level of translation table is determined by
@@ -1309,7 +1309,7 @@ config ARM64_VA_BITS_48

config ARM64_VA_BITS_52
bool "52-bit"
depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
help
Enable 52-bit virtual addressing for userspace when explicitly
requested via a hint to mmap(). The kernel will also use 52-bit
@@ -1356,10 +1356,11 @@ choice

config ARM64_PA_BITS_48
bool "48-bit"
depends on ARM64_64K_PAGES || !ARM64_VA_BITS_52

config ARM64_PA_BITS_52
bool "52-bit (ARMv8.2)"
depends on ARM64_64K_PAGES
bool "52-bit"
depends on ARM64_64K_PAGES || ARM64_VA_BITS_52
depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
help
Enable support for a 52-bit physical address space, introduced as
@@ -1376,6 +1377,10 @@ config ARM64_PA_BITS
default 48 if ARM64_PA_BITS_48
default 52 if ARM64_PA_BITS_52

config ARM64_LPA2
def_bool y
depends on ARM64_PA_BITS_52 && !ARM64_64K_PAGES

choice
prompt "Endianness"
default CPU_LITTLE_ENDIAN
@@ -1602,6 +1607,17 @@ config RODATA_FULL_DEFAULT_ENABLED
This requires the linear region to be mapped down to pages,
which may adversely affect performance in some cases.

config ARM64_WXN
bool "Enable WXN attribute so all writable mappings are non-exec"
help
Set the WXN bit in the SCTLR system register so that all writable
mappings are treated as if the PXN/UXN bit is set as well.
If this is set to Y, it can still be disabled at runtime by
passing 'arm64.nowxn' on the kernel command line.

This should only be set if no software needs to be supported that
relies on being able to execute from writable mappings.
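As a rough illustration of the intended effect on userspace (an assumption based on the mmap() prot-validation hook added earlier in this series, not something this help text guarantees), a writable and executable anonymous mapping would be refused while WXN is enforced:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		puts("W+X mapping refused (consistent with WXN being enforced)");
	else
		puts("W+X mapping granted (WXN disabled or arm64.nowxn set)");
	return 0;
}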

config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
help
1 change: 0 additions & 1 deletion arch/arm64/configs/defconfig
@@ -76,7 +76,6 @@ CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_VISCONTI=y
CONFIG_ARCH_XGENE=y
CONFIG_ARCH_ZYNQMP=y
CONFIG_ARM64_VA_BITS_48=y
CONFIG_SCHED_MC=y
CONFIG_SCHED_SMT=y
CONFIG_NUMA=y
2 changes: 0 additions & 2 deletions arch/arm64/include/asm/archrandom.h
@@ -129,6 +129,4 @@ static inline bool __init __early_cpu_has_rndr(void)
return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
}

u64 kaslr_early_init(void *fdt);

#endif /* _ASM_ARCHRANDOM_H */
55 changes: 19 additions & 36 deletions arch/arm64/include/asm/assembler.h
@@ -341,20 +341,6 @@ alternative_cb_end
bfi \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
.endm

/*
* idmap_get_t0sz - get the T0SZ value needed to cover the ID map
*
* Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
* entire ID map region can be mapped. As T0SZ == (64 - #bits used),
* this number conveniently equals the number of leading zeroes in
* the physical address of _end.
*/
.macro idmap_get_t0sz, reg
adrp \reg, _end
orr \reg, \reg, #(1 << VA_BITS_MIN) - 1
clz \reg, \reg
.endm

/*
* tcr_compute_pa_size - set TCR.(I)PS to the highest supported
* ID_AA64MMFR0_EL1.PARange value
@@ -586,18 +572,27 @@ alternative_endif
.endm

/*
* Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
* If the kernel is built for 52-bit virtual addressing but the hardware only
* supports 48 bits, we cannot program the pgdir address into TTBR1 directly,
* but we have to add an offset so that the TTBR1 address corresponds with the
* pgdir entry that covers the lowest 48-bit addressable VA.
*
* Note that this trick is only used for LVA/64k pages - LPA2/4k pages use an
* additional paging level, and on LPA2/16k pages, we would end up with a root
* level table with only 2 entries, which is suboptimal in terms of TLB
* utilization, so there we fall back to 47 bits of translation if LPA2 is not
* supported.
*
* orr is used as it can cover the immediate value (and is idempotent).
* In future this may be nop'ed out when dealing with 52-bit kernel VAs.
* ttbr: Value of ttbr to set, modified.
*/
.macro offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_VA_BITS_52
mrs_s \tmp, SYS_ID_AA64MMFR2_EL1
and \tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
cbnz \tmp, .Lskipoffs_\@
orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
.Lskipoffs_\@ :
#if defined(CONFIG_ARM64_VA_BITS_52) && !defined(CONFIG_ARM64_LPA2)
mrs \tmp, tcr_el1
and \tmp, \tmp, #TCR_T1SZ_MASK
cmp \tmp, #TCR_T1SZ(VA_BITS_MIN)
orr \tmp, \ttbr, #TTBR1_BADDR_4852_OFFSET
csel \ttbr, \tmp, \ttbr, eq
#endif
.endm
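Back-of-envelope check of the offset applied above, assuming the 64K-page geometry this path is limited to (PGDIR_SHIFT = 42, 8-byte pgd entries): a 52-bit pgd has 1024 entries, of which a 48-bit VA space uses only the last 64, so TTBR1 must be advanced past the 960 unused ones.

#define PGDIR_SHIFT	42	/* 64K pages, 3 levels */

/* (1024 - 64) pgd entries of 8 bytes each == 0x1e00 */
#define TTBR1_4852_OFFSET \
	(((1UL << (52 - PGDIR_SHIFT)) - (1UL << (48 - PGDIR_SHIFT))) * 8)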

@@ -619,25 +614,13 @@

.macro phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
/*
* We assume \phys is 64K aligned and this is guaranteed by only
* supporting this configuration with 64K pages.
*/
orr \pte, \phys, \phys, lsr #36
and \pte, \pte, #PTE_ADDR_MASK
orr \pte, \phys, \phys, lsr #PTE_ADDR_HIGH_SHIFT
and \pte, \pte, #PHYS_TO_PTE_ADDR_MASK
#else
mov \pte, \phys
#endif
.endm

.macro pte_to_phys, phys, pte
and \phys, \pte, #PTE_ADDR_MASK
#ifdef CONFIG_ARM64_PA_BITS_52
orr \phys, \phys, \phys, lsl #PTE_ADDR_HIGH_SHIFT
and \phys, \phys, GENMASK_ULL(PHYS_MASK_SHIFT - 1, PAGE_SHIFT)
#endif
.endm
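The same conversions sketched in C, assuming the two descriptor layouts involved: with 64K pages (FEAT_LPA) PA[51:48] are kept in PTE bits [15:12], so the shift is 36; with 4K/16K pages (FEAT_LPA2) PA[51:50] are kept in PTE bits [9:8], so the shift is 42. The shift and mask names below are illustrative (64K case shown), not the kernel's definitions:

typedef unsigned long long u64;

#define GENMASK_ULL(h, l)	((~0ULL >> (63 - (h))) & (~0ULL << (l)))

#define HIGH_SHIFT	36			/* 64K pages, FEAT_LPA   */
#define ADDR_LOW	GENMASK_ULL(47, 16)	/* PA[47:16] in place    */
#define ADDR_HIGH	GENMASK_ULL(15, 12)	/* PA[51:48] folded down */

static u64 phys_to_pte(u64 phys)
{
	/* fold the high PA bits down into their descriptor position */
	return (phys | (phys >> HIGH_SHIFT)) & (ADDR_LOW | ADDR_HIGH);
}

static u64 pte_to_phys(u64 pte)
{
	u64 phys = pte & (ADDR_LOW | ADDR_HIGH);

	/* expand the folded bits back up, then keep only PA[51:16] */
	return (phys | (phys << HIGH_SHIFT)) & GENMASK_ULL(51, 16);
}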

/*
* tcr_clear_errata_bits - Clear TCR bits that trigger an errata on this CPU.
*/
116 changes: 116 additions & 0 deletions arch/arm64/include/asm/cpufeature.h
@@ -17,6 +17,8 @@

#define ARM64_SW_FEATURE_OVERRIDE_NOKASLR 0
#define ARM64_SW_FEATURE_OVERRIDE_HVHE 4
#define ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF 8
#define ARM64_SW_FEATURE_OVERRIDE_NOWXN 12

#ifndef __ASSEMBLY__

@@ -910,7 +912,9 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);

extern struct arm64_ftr_override id_aa64mmfr0_override;
extern struct arm64_ftr_override id_aa64mmfr1_override;
extern struct arm64_ftr_override id_aa64mmfr2_override;
extern struct arm64_ftr_override id_aa64pfr0_override;
extern struct arm64_ftr_override id_aa64pfr1_override;
extern struct arm64_ftr_override id_aa64zfr0_override;
@@ -920,9 +924,121 @@ extern struct arm64_ftr_override id_aa64isar2_override;

extern struct arm64_ftr_override arm64_sw_feature_override;

static inline
u64 arm64_apply_feature_override(u64 val, int feat, int width,
const struct arm64_ftr_override *override)
{
u64 oval = override->val;

/*
* When it encounters an invalid override (e.g., an override that
* cannot be honoured due to a missing CPU feature), the early idreg
* override code will set the mask to 0x0 and the value to non-zero for
* the field in question. In order to determine whether the override is
* valid or not for the field we are interested in, we first need to
* disregard bits belonging to other fields.
*/
oval &= GENMASK_ULL(feat + width - 1, feat);

/*
* The override is valid if all value bits are accounted for in the
* mask. If so, replace the masked bits with the override value.
*/
if (oval == (oval & override->mask)) {
val &= ~override->mask;
val |= oval;
}

/* Extract the field from the updated value */
return cpuid_feature_extract_unsigned_field(val, feat);
}
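A hypothetical usage sketch (not in the patch) covering both cases described in the comment above, for a 4-bit field at bit 20:

static inline u64 example_override_use(u64 reg)
{
	/* valid override: the mask covers the field, so value 1 replaces it */
	struct arm64_ftr_override valid = {
		.val  = 0x1UL << 20,
		.mask = 0xfUL << 20,
	};
	/*
	 * invalid override, as left behind by the early idreg code: mask
	 * cleared but value non-zero, so the override is ignored and the
	 * field comes straight from 'reg'
	 */
	struct arm64_ftr_override broken = {
		.val  = 0xfUL << 20,
		.mask = 0x0,
	};

	return arm64_apply_feature_override(reg, 20, 4, &valid) +	/* 1 */
	       arm64_apply_feature_override(reg, 20, 4, &broken);	/* reg's field */
}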

static inline bool arm64_test_sw_feature_override(int feat)
{
/*
* Software features are pseudo CPU features that have no underlying
* CPUID system register value to apply the override to.
*/
return arm64_apply_feature_override(0, feat, 4,
&arm64_sw_feature_override);
}

static inline bool kaslr_disabled_cmdline(void)
{
return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOKASLR);
}

static inline bool arm64_wxn_enabled(void)
{
if (!IS_ENABLED(CONFIG_ARM64_WXN))
return false;
return !arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOWXN);
}

u32 get_kvm_ipa_limit(void);
void dump_cpu_features(void);

static inline bool cpu_has_bti(void)
{
if (!IS_ENABLED(CONFIG_ARM64_BTI))
return false;

return arm64_apply_feature_override(read_cpuid(ID_AA64PFR1_EL1),
ID_AA64PFR1_EL1_BT_SHIFT, 4,
&id_aa64pfr1_override);
}

static inline bool cpu_has_pac(void)
{
u64 isar1, isar2;

if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
return false;

isar1 = read_cpuid(ID_AA64ISAR1_EL1);
isar2 = read_cpuid(ID_AA64ISAR2_EL1);

if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_APA_SHIFT, 4,
&id_aa64isar1_override))
return true;

if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_API_SHIFT, 4,
&id_aa64isar1_override))
return true;

return arm64_apply_feature_override(isar2, ID_AA64ISAR2_EL1_APA3_SHIFT, 4,
&id_aa64isar2_override);
}

static inline bool cpu_has_lva(void)
{
u64 mmfr2;

mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
mmfr2 &= ~id_aa64mmfr2_override.mask;
mmfr2 |= id_aa64mmfr2_override.val;
return cpuid_feature_extract_unsigned_field(mmfr2,
ID_AA64MMFR2_EL1_VARange_SHIFT);
}

static inline bool cpu_has_lpa2(void)
{
#ifdef CONFIG_ARM64_LPA2
u64 mmfr0;
int feat;

mmfr0 = read_sysreg(id_aa64mmfr0_el1);
mmfr0 &= ~id_aa64mmfr0_override.mask;
mmfr0 |= id_aa64mmfr0_override.val;
feat = cpuid_feature_extract_signed_field(mmfr0,
ID_AA64MMFR0_EL1_TGRAN_SHIFT);

return feat >= ID_AA64MMFR0_EL1_TGRAN_LPA2;
#else
return false;
#endif
}
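The signed extraction above matters because of how the TGRAN fields encode absence; a small illustration of that assumption for the 4K granule field (0b0001 means LPA2-capable, 0b1111 means the granule is not implemented at all):

static inline int tgran4_supports_lpa2(unsigned int field)
{
	int feat = (int)(field << 28) >> 28;	/* sign-extend the 4-bit field */

	/* 0xf becomes -1 and is rejected; an unsigned read would see 15 and pass */
	return feat >= 1;			/* 1 == 4K granule with LPA2 */
}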

#endif /* __ASSEMBLY__ */

#endif
13 changes: 5 additions & 8 deletions arch/arm64/include/asm/esr.h
@@ -117,15 +117,9 @@
#define ESR_ELx_FSC_ACCESS (0x08)
#define ESR_ELx_FSC_FAULT (0x04)
#define ESR_ELx_FSC_PERM (0x0C)
#define ESR_ELx_FSC_SEA_TTW0 (0x14)
#define ESR_ELx_FSC_SEA_TTW1 (0x15)
#define ESR_ELx_FSC_SEA_TTW2 (0x16)
#define ESR_ELx_FSC_SEA_TTW3 (0x17)
#define ESR_ELx_FSC_SEA_TTW(n) (0x14 + (n))
#define ESR_ELx_FSC_SECC (0x18)
#define ESR_ELx_FSC_SECC_TTW0 (0x1c)
#define ESR_ELx_FSC_SECC_TTW1 (0x1d)
#define ESR_ELx_FSC_SECC_TTW2 (0x1e)
#define ESR_ELx_FSC_SECC_TTW3 (0x1f)
#define ESR_ELx_FSC_SECC_TTW(n) (0x1c + (n))

/* ISS field definitions for Data Aborts */
#define ESR_ELx_ISV_SHIFT (24)
@@ -394,6 +388,9 @@ static inline bool esr_is_data_abort(unsigned long esr)

static inline bool esr_fsc_is_translation_fault(unsigned long esr)
{
/* Translation fault, level -1 */
if ((esr & ESR_ELx_FSC) == 0b101011)
return true;
return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
}
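A standalone sketch of why the extra test is needed (FSC values copied here for illustration): translation faults at levels 0..3 follow the 0b0001LL pattern that the type mask catches, while the level -1 fault introduced with FEAT_LPA2 uses the out-of-pattern encoding 0b101011 (0x2b):

#define FSC_MASK	0x3f
#define FSC_TYPE_MASK	0x3c
#define FSC_FAULT_L0	0x04	/* translation fault, level 0  */
#define FSC_FAULT_Lm1	0x2b	/* translation fault, level -1 */

static inline int fsc_is_translation_fault(unsigned long esr)
{
	if ((esr & FSC_MASK) == FSC_FAULT_Lm1)
		return 1;
	return (esr & FSC_TYPE_MASK) == FSC_FAULT_L0;
}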

2 changes: 1 addition & 1 deletion arch/arm64/include/asm/fixmap.h
@@ -87,6 +87,7 @@ enum fixed_addresses {
FIX_PTE,
FIX_PMD,
FIX_PUD,
FIX_P4D,
FIX_PGD,

__end_of_fixed_addresses
@@ -100,7 +101,6 @@ enum fixed_addresses {
#define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE)

void __init early_fixmap_init(void);
void __init fixmap_copy(pgd_t *pgdir);

#define __early_set_fixmap __set_fixmap

2 changes: 0 additions & 2 deletions arch/arm64/include/asm/kasan.h
@@ -17,11 +17,9 @@

asmlinkage void kasan_early_init(void);
void kasan_init(void);
void kasan_copy_shadow(pgd_t *pgdir);

#else
static inline void kasan_init(void) { }
static inline void kasan_copy_shadow(pgd_t *pgdir) { }
#endif

#endif
(diff truncated: remaining changed files not shown)
