Merge tag 'arm-smmu-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into iommu/fixes

Arm SMMU fixes for 5.18

- Fix off-by-one in SMMUv3 SVA TLB invalidation

- Disable large mappings to work around an NVIDIA erratum
joergroedel committed Apr 28, 2022
2 parents (da8669f, 4a25f2e), commit e6f48be
Showing 2 changed files with 38 additions and 1 deletion.
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c (8 additions, 1 deletion)

@@ -183,7 +183,14 @@ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
 {
 	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
 	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
-	size_t size = end - start + 1;
+	size_t size;
+
+	/*
+	 * The mm_types defines vm_end as the first byte after the end address,
+	 * different from IOMMU subsystem using the last address of an address
+	 * range. So do a simple translation here by calculating size correctly.
+	 */
+	size = end - start;
 
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
 		arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
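
For context on this fix: mm passes half-open ranges, [start, end), where end is the first byte after the last address, while the IOMMU helpers take a byte size, so the size is end - start. The old end - start + 1 made the size one byte too large, which can round the invalidation into an extra page. A standalone sketch with made-up addresses (not kernel code) showing the difference:

#include <stdio.h>

int main(void)
{
	/* hypothetical half-open range [start, end) covering two 4 KiB pages */
	unsigned long start = 0x1000, end = 0x3000;

	printf("old: size = 0x%lx\n", end - start + 1); /* 0x2001: spills into a third page */
	printf("new: size = 0x%lx\n", end - start);     /* 0x2000: exactly two pages */
	return 0;
}
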
drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c (30 additions, 0 deletions)

@@ -258,6 +258,34 @@ static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct device *dev)
 			dev_name(dev), err);
 }
 
+static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+				    struct io_pgtable_cfg *pgtbl_cfg,
+				    struct device *dev)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	const struct device_node *np = smmu->dev->of_node;
+
+	/*
+	 * Tegra194 and Tegra234 SoCs have the erratum that causes walk cache
+	 * entries to not be invalidated correctly. The problem is that the walk
+	 * cache index generated for IOVA is not same across translation and
+	 * invalidation requests. This is leading to page faults when PMD entry
+	 * is released during unmap and populated with new PTE table during
+	 * subsequent map request. Disabling large page mappings avoids the
+	 * release of PMD entry and avoid translations seeing stale PMD entry in
+	 * walk cache.
+	 * Fix this by limiting the page mappings to PAGE_SIZE on Tegra194 and
+	 * Tegra234.
+	 */
+	if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
+	    of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
+		smmu->pgsize_bitmap = PAGE_SIZE;
+		pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
+	}
+
+	return 0;
+}
+
 static const struct arm_smmu_impl nvidia_smmu_impl = {
 	.read_reg = nvidia_smmu_read_reg,
 	.write_reg = nvidia_smmu_write_reg,
@@ -268,10 +296,12 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
 	.global_fault = nvidia_smmu_global_fault,
 	.context_fault = nvidia_smmu_context_fault,
 	.probe_finalize = nvidia_smmu_probe_finalize,
+	.init_context = nvidia_smmu_init_context,
 };
 
 static const struct arm_smmu_impl nvidia_smmu_single_impl = {
 	.probe_finalize = nvidia_smmu_probe_finalize,
+	.init_context = nvidia_smmu_init_context,
 };
 
 struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
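
For context on this workaround: restricting smmu->pgsize_bitmap to PAGE_SIZE means the IOMMU core can only install leaf PTE mappings, so a 2 MiB region that would otherwise be covered by one PMD-level block entry is built from 512 individual PTEs; unmap then never releases a PMD entry, and the stale walk-cache entry described in the comment cannot be hit. A standalone sketch (not kernel code; pick_pgsize is a hypothetical stand-in that only mimics the spirit of the core's page-size selection):

#include <stdio.h>

#define SZ_4K 0x1000UL
#define SZ_2M 0x200000UL
#define SZ_1G 0x40000000UL

/*
 * Toy stand-in for the IOMMU core's page-size selection: pick the
 * largest supported size that fits the remaining length and keeps
 * the IOVA aligned. Not the kernel's actual helper.
 */
static unsigned long pick_pgsize(unsigned long iova, unsigned long len,
				 unsigned long pgsize_bitmap)
{
	unsigned long pg, best = 0;

	for (pg = 1; pg && pg <= len; pg <<= 1)
		if ((pgsize_bitmap & pg) && !(iova & (pg - 1)))
			best = pg;
	return best;
}

static void map_region(const char *tag, unsigned long pgsize_bitmap)
{
	unsigned long iova = 0, left = SZ_2M, entries = 0;

	while (left) {
		unsigned long pg = pick_pgsize(iova, left, pgsize_bitmap);

		iova += pg;
		left -= pg;
		entries++;
	}
	printf("%s: 2 MiB mapped with %lu entries\n", tag, entries);
}

int main(void)
{
	map_region("full bitmap (4K|2M|1G)", SZ_4K | SZ_2M | SZ_1G); /* 1 block entry */
	map_region("PAGE_SIZE only        ", SZ_4K);                 /* 512 PTEs */
	return 0;
}

The trade-off is more TLB pressure and slower map/unmap for large buffers, which is presumably why the driver gates the restriction to the two affected compatibles rather than applying it everywhere.
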
