dma-mapping: add support for dma-coherent-hint-cached

When a client configures its device as dma-coherent-hint-cached, the
memory framework will only try to DMA map buffers as IO-coherent if it
is confident that the buffers are mapped as cached on the CPU.
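
For example, a hypothetical device node would carry the property like
this (the node name, compatible string and addresses are made up for
illustration):

	example-dev@f9800000 {
		compatible = "vendor,example-dev";
		reg = <0xf9800000 0x1000>;
		/* hint: only map IO-coherent for CPU-cached buffers */
		dma-coherent-hint-cached;
	};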

Clients should configure their device as dma-coherent-hint-cached
instead of dma-coherent when they need to DMA map both buffers that
have cached CPU mappings and buffers that have uncached CPU mappings.
With dma-coherent-hint-cached, the framework ensures that the client's
buffers with uncached CPU mappings are not DMA mapped as IO-coherent.
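
As a rough sketch of the resulting behaviour, reusing the checks this
patch adds to the ION dma-buf path (ION_FLAG_CACHED marks a buffer as
having a cached CPU mapping; "dev" stands for a device configured with
dma-coherent-hint-cached):

	unsigned long map_attrs = 0;

	/* Cached buffer: the hint applies, force an IO-coherent mapping. */
	if ((buffer->flags & ION_FLAG_CACHED) &&
	    dev_is_dma_coherent_hint_cached(dev))
		map_attrs |= DMA_ATTR_FORCE_COHERENT;
	/*
	 * Uncached buffer: map_attrs stays clear, so the buffer is DMA
	 * mapped non-coherent instead of being mapped IO-coherent the way
	 * a plain dma-coherent device would map it.
	 */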

Change-Id: I990184b54d4148bf952cc672ec267b51efd81473
Signed-off-by: Liam Mark <lmark@codeaurora.org>

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 #include <linux/dma-noncoherent.h>
@@ -251,7 +251,8 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		dma_sync_sg_for_device(dev, iommu_map->sgl,
 				       iommu_map->nents, iommu_map->dir);

-		if (dev_is_dma_coherent(dev))
+		if (dev_is_dma_coherent(dev) ||
+		    (attrs & DMA_ATTR_FORCE_COHERENT))
 			/*
 			 * Ensure all outstanding changes for coherent
 			 * buffers are applied to the cache before any

@@ -1015,3 +1015,28 @@ bool of_dma_is_coherent(struct device_node *np)
 	return false;
 }
 EXPORT_SYMBOL_GPL(of_dma_is_coherent);
+
+#if defined(CONFIG_DMA_COHERENT_HINT_CACHED)
+/**
+ * of_dma_is_coherent_hint_cached - Check if device is coherent hint cached
+ * @np:	device node
+ *
+ * It returns true if "dma-coherent-hint-cached" property was found
+ * for this device in DT.
+ */
+bool of_dma_is_coherent_hint_cached(struct device_node *np)
+{
+	struct device_node *node = of_node_get(np);
+
+	while (node) {
+		if (of_property_read_bool(node, "dma-coherent-hint-cached")) {
+			of_node_put(node);
+			return true;
+		}
+		node = of_get_next_parent(node);
+	}
+	of_node_put(node);
+	return false;
+}
+EXPORT_SYMBOL(of_dma_is_coherent_hint_cached);
+#endif /* CONFIG_DMA_COHERENT_HINT_CACHED */

@@ -90,7 +90,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
 {
 	u64 dma_addr, paddr, size = 0;
 	int ret;
-	bool coherent;
+	bool coherent, coherent_hint_cached;
 	unsigned long offset;
 	const struct iommu_ops *iommu;
 	u64 mask;
@@ -159,6 +159,13 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
 	dev_dbg(dev, "device is%sdma coherent\n",
 		coherent ? " " : " not ");

+	coherent_hint_cached = of_dma_is_coherent_hint_cached(np);
+	dev_dbg(dev, "device is%sdma coherent_hint_cached\n",
+		coherent_hint_cached ? " " : " not ");
+	dma_set_coherent_hint_cached(dev, coherent_hint_cached);
+	WARN(coherent && coherent_hint_cached,
+	     "Should not set both dma-coherent and dma-coherent-hint-cached on the same device");
+
 	iommu = of_iommu_configure(dev, np);
 	if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;

@@ -1,11 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2020, The Linux Foundation. All rights reserved.
  */

 #include <linux/device.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/err.h>
 #include <linux/export.h>
 #include <linux/file.h>
@@ -163,6 +164,20 @@ static struct sg_table
 	    !hlos_accessible_buffer(buffer))
 		map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

+	if ((buffer->flags & ION_FLAG_CACHED) &&
+	    hlos_accessible_buffer(buffer) &&
+	    dev_is_dma_coherent_hint_cached(attachment->dev))
+		map_attrs |= DMA_ATTR_FORCE_COHERENT;
+
+	if (((dev_is_dma_coherent(attachment->dev) &&
+	      !(map_attrs & DMA_ATTR_FORCE_NON_COHERENT)) ||
+	     (map_attrs & DMA_ATTR_FORCE_COHERENT)) &&
+	    !(buffer->flags & ION_FLAG_CACHED)) {
+		pr_warn_ratelimited("dev:%s Cannot DMA map uncached buffer as IO-coherent attrs:0x%lx\n",
+				    dev_name(attachment->dev), map_attrs);
+		return ERR_PTR(-EINVAL);
+	}
+
 	mutex_lock(&buffer->lock);
 	if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC)
 		trace_ion_dma_map_cmo_skip(attachment->dev,
@@ -233,6 +248,11 @@ static void msm_ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
 	    !hlos_accessible_buffer(buffer))
 		map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

+	if ((buffer->flags & ION_FLAG_CACHED) &&
+	    hlos_accessible_buffer(buffer) &&
+	    dev_is_dma_coherent_hint_cached(attachment->dev))
+		map_attrs |= DMA_ATTR_FORCE_COHERENT;
+
 	mutex_lock(&buffer->lock);
 	if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC)
 		trace_ion_dma_unmap_cmo_skip(attachment->dev,

@@ -1245,7 +1245,9 @@ struct dev_links_info {
  *		sync_state() callback.
  * @dma_coherent: this particular device is dma coherent, even if the
  *		architecture supports non-coherent devices.
- *
+ * @dma_coherent_hint_cached: Tell the framework to try and treat the device
+ *		as DMA coherent when working with CPU cached
+ *		buffers.
  *
 * At the lowest level, every device in a Linux system is represented by an
 * instance of struct device. The device structure contains the information
 * that the device model core needs to model the system. Most subsystems,
@@ -1345,6 +1347,10 @@ struct device {
 	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
 	bool			dma_coherent:1;
 #endif
+
+#if defined(CONFIG_DMA_COHERENT_HINT_CACHED)
+	bool			dma_coherent_hint_cached:1;
+#endif
 };

 static inline struct device *kobj_to_dev(struct kobject *kobj)

@@ -305,6 +305,19 @@ static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
 size_t dma_direct_max_mapping_size(struct device *dev);

+#ifdef CONFIG_DMA_COHERENT_HINT_CACHED
+static inline void dma_set_coherent_hint_cached(struct device *dev,
+						bool hint_cached)
+{
+	dev->dma_coherent_hint_cached = hint_cached;
+}
+#else
+static inline void dma_set_coherent_hint_cached(struct device *dev,
+						bool hint_cached)
+{
+}
+#endif
+
 #ifdef CONFIG_HAS_DMA
 #include <asm/dma-mapping.h>

@@ -21,6 +21,18 @@ static inline bool dev_is_dma_coherent(struct device *dev)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */

+#ifdef CONFIG_DMA_COHERENT_HINT_CACHED
+static inline bool dev_is_dma_coherent_hint_cached(struct device *dev)
+{
+	return dev->dma_coherent_hint_cached;
+}
+#else
+static inline bool dev_is_dma_coherent_hint_cached(struct device *dev)
+{
+	return false;
+}
+#endif
+
 /*
  * Check if an allocation needs to be marked uncached to be coherent.
  */

@@ -58,6 +58,16 @@ extern struct of_pci_range *of_pci_range_parser_one(
 extern int of_dma_get_range(struct device_node *np, u64 *dma_addr,
 			    u64 *paddr, u64 *size);
 extern bool of_dma_is_coherent(struct device_node *np);
+
+#if defined(CONFIG_DMA_COHERENT_HINT_CACHED)
+extern bool of_dma_is_coherent_hint_cached(struct device_node *np);
+#else /* CONFIG_DMA_COHERENT_HINT_CACHED */
+static inline bool of_dma_is_coherent_hint_cached(struct device_node *np)
+{
+	return false;
+}
+#endif /* CONFIG_DMA_COHERENT_HINT_CACHED */
+
 #else /* CONFIG_OF_ADDRESS */
 static inline void __iomem *of_io_request_and_map(struct device_node *device,
 						  int index, const char *name)
@@ -114,6 +124,12 @@ static inline bool of_dma_is_coherent(struct device_node *np)
 {
 	return false;
 }
+
+static inline bool of_dma_is_coherent_hint_cached(struct device_node *np)
+{
+	return false;
+}
+
 #endif /* CONFIG_OF_ADDRESS */

 #ifdef CONFIG_OF

@@ -77,6 +77,16 @@ config DMA_DIRECT_REMAP
 	bool
 	select DMA_REMAP

+config DMA_COHERENT_HINT_CACHED
+	bool "DMA coherent hint cached"
+	help
+	  Enabling this feature allows clients to configure their device so
+	  that the memory framework will only try to DMA map buffers to the
+	  device as IO-coherent if the framework is confident that the
+	  buffers are mapped to the CPU as cached.
+
+	  If unsure, say "n".
+
 config DMA_CMA
 	bool "DMA Contiguous Memory Allocator"
 	depends on HAVE_DMA_CONTIGUOUS && CMA
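
A build opts in to the hint with the new option above, e.g. via a
defconfig fragment (sketch):

	CONFIG_DMA_COHERENT_HINT_CACHED=y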

@@ -46,6 +46,18 @@ static int dmam_match(struct device *dev, void *res, void *match_data)
 	return 0;
 }

+static bool is_dma_coherent(struct device *dev, unsigned long attrs)
+{
+	if (attrs & DMA_ATTR_FORCE_COHERENT)
+		return true;
+	else if (attrs & DMA_ATTR_FORCE_NON_COHERENT)
+		return false;
+	else if (dev_is_dma_coherent(dev))
+		return true;
+	else
+		return false;
+}
+
 /**
  * dmam_free_coherent - Managed dma_free_coherent()
  * @dev: Device to free coherent memory for
@@ -116,7 +128,7 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
 	struct page *page;
 	int ret;

-	if (!dev_is_dma_coherent(dev)) {
+	if (!is_dma_coherent(dev, attrs)) {
 		unsigned long pfn;

 		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
@@ -154,6 +166,9 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);

+	if (dev_is_dma_coherent_hint_cached(dev))
+		attrs |= DMA_ATTR_FORCE_COHERENT;
+
 	if (dma_is_direct(ops))
 		return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr,
 				size, attrs);
@@ -164,18 +179,6 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 EXPORT_SYMBOL(dma_get_sgtable_attrs);

 #ifdef CONFIG_MMU
-static bool is_dma_coherent(struct device *dev, unsigned long attrs)
-{
-	if (attrs & DMA_ATTR_FORCE_COHERENT)
-		return true;
-	else if (attrs & DMA_ATTR_FORCE_NON_COHERENT)
-		return false;
-	else if (dev_is_dma_coherent(dev))
-		return true;
-	else
-		return false;
-}
-
 /*
  * Return the page attributes used for mapping dma_alloc_* memory, either in
  * kernel space if remapping is needed, or to userspace through dma_mmap_*.
@@ -216,7 +219,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	if (off >= count || user_count > count - off)
 		return -ENXIO;

-	if (!dev_is_dma_coherent(dev)) {
+	if (!is_dma_coherent(dev, attrs)) {
 		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
 			return -ENXIO;
@@ -275,6 +278,9 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);

+	if (dev_is_dma_coherent_hint_cached(dev))
+		attrs |= DMA_ATTR_FORCE_COHERENT;
+
 	if (dma_is_direct(ops))
 		return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size,
 				attrs);
@@ -315,6 +321,9 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	WARN_ON(!of_reserved_mem_device_is_init(dev));

+	if (dev_is_dma_coherent_hint_cached(dev))
+		attrs |= DMA_ATTR_FORCE_COHERENT;
+
 	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 		return cpu_addr;
@@ -338,6 +347,9 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);

+	if (dev_is_dma_coherent_hint_cached(dev))
+		attrs |= DMA_ATTR_FORCE_COHERENT;
+
 	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
 		return;

 	/*