Merge tag 'dma-mapping-5.8' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - enhance the dma pool to allow atomic allocation on x86 with AMD SEV
   (David Rientjes)

 - two small cleanups (Jason Yan and Peter Collingbourne)

* tag 'dma-mapping-5.8' of git://git.infradead.org/users/hch/dma-mapping:
  dma-contiguous: fix comment for dma_release_from_contiguous
  dma-pool: scale the default DMA coherent pool size with memory capacity
  x86/mm: unencrypted non-blocking DMA allocations use coherent pools
  dma-pool: add pool sizes to debugfs
  dma-direct: atomic allocations must come from atomic coherent pools
  dma-pool: dynamically expanding atomic pools
  dma-pool: add additional coherent pools to map to gfp mask
  dma-remap: separate DMA atomic pools from direct remap code
  dma-debug: make __dma_entry_alloc_check_leak() static
torvalds committed on Jun 6, 2020
Parents e542e0d and 298f3db; merge commit 1ee18de
Showing 11 changed files with 327 additions and 141 deletions.
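Before the per-file diffs, a note on the driver-visible effect of the series: a non-blocking coherent allocation for a device that requires unencrypted memory (for example in an AMD SEV guest) is now served from an atomic pool whose pages were decrypted up front, since pages cannot be decrypted in atomic context. A minimal driver-side sketch under that assumption; the helpers and the surrounding driver are hypothetical, only dma_alloc_coherent()/dma_free_coherent() are real APIs:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical IRQ-time buffer allocation: it cannot sleep, so it passes
 * GFP_ATOMIC.  With this series, on an SEV guest the backing memory comes
 * from an unencrypted atomic coherent pool set up in advance. */
static void *example_alloc_rx_buf(struct device *dev, size_t len,
				  dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, len, dma, GFP_ATOMIC);
}

static void example_free_rx_buf(struct device *dev, size_t len,
				void *cpu_addr, dma_addr_t dma)
{
	dma_free_coherent(dev, len, cpu_addr, dma);
}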
arch/x86/Kconfig: 1 addition, 0 deletions
@@ -1524,6 +1524,7 @@ config X86_CPA_STATISTICS
config AMD_MEM_ENCRYPT
bool "AMD Secure Memory Encryption (SME) support"
depends on X86_64 && CPU_SUP_AMD
+	select DMA_COHERENT_POOL
select DYNAMIC_PHYSICAL_MASK
select ARCH_USE_MEMREMAP_PROT
select ARCH_HAS_FORCE_DMA_UNENCRYPTED
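Selecting DMA_COHERENT_POOL here ties into the existing x86 force_dma_unencrypted() policy: on an SEV guest every DMA buffer must be shared (unencrypted), and decrypting pages can block, so atomic allocations must come from pre-decrypted pools. A simplified, illustrative sketch of that policy with a made-up name (the real arch/x86 function also handles the SME host case):

#include <linux/device.h>
#include <linux/mem_encrypt.h>

static bool example_force_dma_unencrypted(struct device *dev)
{
	if (sev_active())	/* SEV guest: all DMA buffers must be unencrypted */
		return true;
	return false;		/* SME host-side checks omitted in this sketch */
}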
drivers/iommu/dma-iommu.c: 3 additions, 2 deletions
@@ -952,7 +952,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)

/* Non-coherent atomic allocation? Easy */
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_free_from_pool(cpu_addr, alloc_size))
+	    dma_free_from_pool(dev, cpu_addr, alloc_size))
return;

if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
@@ -1035,7 +1035,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,

if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
!gfpflags_allow_blocking(gfp) && !coherent)
-		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+		cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page,
+				gfp);
else
cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
if (!cpu_addr)
include/linux/dma-direct.h: 2 additions, 0 deletions
@@ -67,6 +67,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
}

u64 dma_direct_get_required_mask(struct device *dev);
+gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
+		u64 *phys_mask);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
include/linux/dma-mapping.h: 3 additions, 3 deletions
@@ -630,9 +630,9 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

-bool dma_in_atomic_pool(void *start, size_t size);
-void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
-bool dma_free_from_pool(void *start, size_t size);
+void *dma_alloc_from_pool(struct device *dev, size_t size,
+			struct page **ret_page, gfp_t flags);
+bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
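The struct device argument added to these prototypes exists because the series replaces the single atomic pool with one pool per GFP zone ("dma-pool: add additional coherent pools to map to gfp mask"), and the right pool depends on what the device can address. A rough sketch of that selection; the helper and pool names are illustrative, not the exact code in the new kernel/dma/pool.c:

#include <linux/dma-direct.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>

static struct gen_pool *example_pool_dma, *example_pool_dma32, *example_pool_kernel;

/* Pick the atomic pool whose pages the device can actually address, reusing
 * the zone-selection helper that this series exports from dma-direct. */
static struct gen_pool *example_dev_to_pool(struct device *dev)
{
	u64 phys_limit;
	gfp_t gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
						&phys_limit);

	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
		return example_pool_dma;
	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
		return example_pool_dma32;
	return example_pool_kernel;
}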
kernel/dma/Kconfig: 5 additions, 1 deletion
@@ -79,10 +79,14 @@ config DMA_REMAP
select DMA_NONCOHERENT_MMAP
bool

-config DMA_DIRECT_REMAP
+config DMA_COHERENT_POOL
bool
select DMA_REMAP

+config DMA_DIRECT_REMAP
+	bool
+	select DMA_COHERENT_POOL

config DMA_CMA
bool "DMA Contiguous Memory Allocator"
depends on HAVE_DMA_CONTIGUOUS && CMA
kernel/dma/Makefile: 1 addition, 0 deletions
@@ -6,4 +6,5 @@ obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o
obj-$(CONFIG_DMA_VIRT_OPS) += virt.o
obj-$(CONFIG_DMA_API_DEBUG) += debug.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
+obj-$(CONFIG_DMA_COHERENT_POOL) += pool.o
obj-$(CONFIG_DMA_REMAP) += remap.o
kernel/dma/contiguous.c: 2 additions, 2 deletions
@@ -222,8 +222,8 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
* @gfp: Allocation flags.
*
* This function allocates contiguous memory buffer for specified device. It
- * first tries to use device specific contiguous memory area if available or
- * the default global one, then tries a fallback allocation of normal pages.
+ * tries to use device specific contiguous memory area if available, or the
+ * default global one.
*
Note that it bypasses one-page size of allocations from the global area as
* the addresses within one page are always contiguous, so there is no need
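The corrected comment makes clear that dma_alloc_contiguous() only considers the device's CMA area (or the default global one); falling back to ordinary pages is the caller's responsibility, as kernel/dma/direct.c does below. A hedged caller-side sketch of that split (the wrapper name is made up):

#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *example_alloc_backing_pages(struct device *dev,
						size_t size, gfp_t gfp)
{
	size_t alloc_size = PAGE_ALIGN(size);
	struct page *page;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)	/* no usable CMA area: fall back to the page allocator */
		page = alloc_pages_node(dev_to_node(dev), gfp,
					get_order(alloc_size));
	return page;
}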
kernel/dma/debug.c: 1 addition, 1 deletion
@@ -656,7 +656,7 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
return entry;
}

-void __dma_entry_alloc_check_leak(void)
+static void __dma_entry_alloc_check_leak(void)
{
u32 tmp = nr_total_entries % nr_prealloc_entries;

kernel/dma/direct.c: 44 additions, 12 deletions
@@ -45,8 +45,8 @@ u64 dma_direct_get_required_mask(struct device *dev)
return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

-static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
-		u64 *phys_limit)
+gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
+		u64 *phys_limit)
{
u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

@@ -76,6 +76,39 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

+/*
+ * Decrypting memory is allowed to block, so if this device requires
+ * unencrypted memory it must come from atomic pools.
+ */
+static inline bool dma_should_alloc_from_pool(struct device *dev, gfp_t gfp,
+					      unsigned long attrs)
+{
+	if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
+		return false;
+	if (gfpflags_allow_blocking(gfp))
+		return false;
+	if (force_dma_unencrypted(dev))
+		return true;
+	if (!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
+		return false;
+	if (dma_alloc_need_uncached(dev, attrs))
+		return true;
+	return false;
+}
+
+static inline bool dma_should_free_from_pool(struct device *dev,
+					     unsigned long attrs)
+{
+	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
+		return true;
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+	    !force_dma_unencrypted(dev))
+		return false;
+	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
+		return true;
+	return false;
+}
+
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
gfp_t gfp, unsigned long attrs)
{
@@ -89,8 +122,8 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,

/* we always manually zero the memory once we are done: */
gfp &= ~__GFP_ZERO;
-	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
-			&phys_limit);
+	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
+			&phys_limit);
page = dma_alloc_contiguous(dev, alloc_size, gfp);
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
dma_free_contiguous(dev, page, alloc_size);
@@ -125,10 +158,8 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
struct page *page;
void *ret;

-	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_alloc_need_uncached(dev, attrs) &&
-	    !gfpflags_allow_blocking(gfp)) {
-		ret = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+	if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
+		ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
if (!ret)
return NULL;
goto done;
@@ -204,17 +235,18 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
{
unsigned int page_order = get_order(size);

+	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
+	if (dma_should_free_from_pool(dev, attrs) &&
+	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
+		return;

if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
!force_dma_unencrypted(dev)) {
/* cpu_addr is a struct page cookie, not a kernel address */
dma_free_contiguous(dev, cpu_addr, size);
return;
}

-	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_free_from_pool(cpu_addr, PAGE_ALIGN(size)))
-		return;

if (force_dma_unencrypted(dev))
set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

(diffs for the remaining two changed files were not loaded)
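The new kernel/dma/pool.c introduced by this series (its diff is among those not loaded above) holds the pools themselves. Two ideas from the shortlog are easy to sketch: the default pool size scales with memory capacity, and a deferred work item grows a pool from process context rather than failing atomic allocations. The snippet below illustrates those ideas with made-up names and is not the upstream implementation:

#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/workqueue.h>

/* Roughly 128 KiB of atomic pool per 1 GiB of RAM, with a 128 KiB floor and
 * a cap at the largest buddy allocation (illustrative sizing policy). */
static unsigned long example_default_pool_size(void)
{
	unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);

	pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
	return max_t(unsigned long, pages << PAGE_SHIFT, SZ_128K);
}

/* When an atomic allocation finds a pool running low, growth is deferred to
 * process context, where blocking (and decrypting fresh pages) is allowed. */
static void example_pool_expand(struct work_struct *work)
{
	/* allocate pages, set_memory_decrypted() if required, add them to the pool */
}
static DECLARE_WORK(example_pool_work, example_pool_expand);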
