iommu/dma: Fix cache maintenance in iommu_dma_map_sg()

iommu_dma_map_sg() attempts to perform cache maintenance with
iommu_dma_sync_sg_for_device() prior to mapping the scatter-gather
list to an IOVA. iommu_dma_sync_sg_for_device() uses the DMA address
in the scatterlist structure to determine whether the buffer is
mapped as cached or uncached by invoking iommu_is_iova_coherent().

Since the buffer has not been mapped yet, the DMA address in the
scatterlist structure will be 0. If IOVA 0 is not mapped in the
client's IOMMU page tables, iommu_is_iova_coherent() will return
false, and cache maintenance will be performed on the buffer even for
io-coherent clients. This is incorrect, as io-coherent clients require
no cache maintenance.
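
For illustration, a simplified sketch of the problematic lookup (not
the actual downstream code; the helper name is hypothetical, and it
assumes iommu_is_iova_coherent() takes the domain and the IOVA):

	/*
	 * Hypothetical helper showing the pre-fix behaviour: before
	 * iommu_map_sg() has run, sg_dma_address() still reads 0, so
	 * the coherence lookup is performed on IOVA 0.
	 */
	static bool sg_coherent_before_mapping(struct iommu_domain *domain,
					       struct scatterlist *sg)
	{
		dma_addr_t iova = sg_dma_address(sg); /* 0: not mapped yet */

		/*
		 * If IOVA 0 is unmapped in the domain's page tables, this
		 * returns false even for an io-coherent client, and the
		 * caller then performs needless cache maintenance.
		 */
		return iommu_is_iova_coherent(domain, iova);
	}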

Ensure cache maintenance happens only after the buffer has been
mapped, so that iommu_dma_sync_sg_for_device() can correctly determine
whether cache maintenance is required.
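
Callers that manage caches themselves are unaffected by this
reordering, since they can opt out of the sync entirely; a hedged
usage sketch using only the standard DMA API (function and parameter
names here are generic, not from the patched file):

	#include <linux/dma-mapping.h>

	/* Map a scatterlist while skipping all CPU cache maintenance. */
	static int map_sg_no_sync(struct device *dev, struct scatterlist *sgl,
				  int nents, enum dma_data_direction dir)
	{
		/* DMA_ATTR_SKIP_CPU_SYNC bypasses the sync shown below. */
		return dma_map_sg_attrs(dev, sgl, nents, dir,
					DMA_ATTR_SKIP_CPU_SYNC);
	}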

Change-Id: Ie1cec69077eaeada00b5a2709b6d1680afdaaeba
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>

@@ -959,6 +959,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	struct iommu_dma_cookie *cookie;
 	struct iova_domain *iovad;
 	int prot = dma_info_to_prot(dir, is_dma_coherent(dev, attrs), attrs);
+	int ret;
 	dma_addr_t iova;
 	size_t iova_len;
 
@@ -968,9 +969,6 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	cookie = domain->iova_cookie;
 	iovad = &cookie->iovad;
 
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
-
 	iova_len = iommu_dma_prepare_map_sg(dev, iovad, sg, nents);
 
 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
@@ -984,7 +982,12 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
 		goto out_free_iova;
 
-	return iommu_dma_finalise_sg(dev, sg, nents, iova);
+	ret = iommu_dma_finalise_sg(dev, sg, nents, iova);
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
+
+	return ret;
 
 out_free_iova:
 	iommu_dma_free_iova(cookie, iova, iova_len);