dma-mapping-fast: Fix iova address leak with non-zero scatterlist offset

The underlying arm-smmu hardware only supports mapping addresses
aligned to PAGE_SIZE. Thus the region actually mapped may be larger
than the range that iommu_map_sg() reports back through the
scatterlist:

[sg_dma_address(), sg_dma_address() + sg->length)

When unmapping, apply the same alignment so that the full page-aligned
region is freed and no iova addresses are leaked.
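For illustration only, the arithmetic below sketches why a non-zero
intra-page offset leaks iova space on the old unmap path. The page
macros, addresses and names (EX_PAGE_SIZE, EX_ALIGN, ...) are made up
for this example and are not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed 4K granule, standing in for PAGE_SIZE / FAST_PAGE_SIZE. */
#define EX_PAGE_SIZE	0x1000ULL
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))
#define EX_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Hypothetical mapping: dma address 0x200 bytes into a page. */
	uint64_t dma_addr = 0x10000200ULL;
	uint64_t dma_len  = 0x1000ULL;

	/* Pages the hardware actually occupies for this mapping. */
	uint64_t mapped = EX_ALIGN(dma_addr + dma_len, EX_PAGE_SIZE) -
			  (dma_addr & EX_PAGE_MASK);

	/* Length freed before the fix (raw span rounded up) vs. after it. */
	uint64_t offset  = dma_addr & ~EX_PAGE_MASK;
	uint64_t old_len = EX_ALIGN(dma_len, EX_PAGE_SIZE);
	uint64_t new_len = EX_ALIGN(dma_addr + dma_len - (dma_addr - offset),
				    EX_PAGE_SIZE);

	/* Prints: mapped=0x2000 old=0x1000 new=0x2000 leaked=0x1000 */
	printf("mapped=0x%llx old=0x%llx new=0x%llx leaked=0x%llx\n",
	       (unsigned long long)mapped, (unsigned long long)old_len,
	       (unsigned long long)new_len,
	       (unsigned long long)(mapped - old_len));
	return 0;
}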

Change-Id: I1f5d5185d003cfe104b4a67efc1fe88f105f015f
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 #include <linux/dma-contiguous.h>
@@ -364,7 +364,7 @@ static void fast_smmu_unmap_sg(struct device *dev,
 	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
 	unsigned long flags;
 	dma_addr_t start;
-	size_t len;
+	size_t len, offset;
 	struct scatterlist *tmp;
 	int i;
@@ -376,12 +376,13 @@ static void fast_smmu_unmap_sg(struct device *dev,
 	 * contiguous IOVA allocation, so this is incredibly easy.
 	 */
 	start = sg_dma_address(sg);
+	offset = start & ~FAST_PAGE_MASK;
 	for_each_sg(sg_next(sg), tmp, nelems - 1, i) {
 		if (sg_dma_len(tmp) == 0)
 			break;
 		sg = tmp;
 	}
-	len = ALIGN(sg_dma_address(sg) + sg_dma_len(sg) - start,
+	len = ALIGN(sg_dma_address(sg) + sg_dma_len(sg) - (start - offset),
 		    FAST_PAGE_SIZE);
 	av8l_fast_unmap_public(mapping->pgtbl_ops, start, len);
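
As a stand-alone sanity check of the hunk above, here is a minimal
sketch of the corrected length computation over a fake scatterlist.
struct ex_sg, ex_unmap_len() and the page macros are assumptions made
for the example, not the driver's types or helpers.

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE	0x1000ULL
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))
#define EX_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Simplified stand-in for a dma-mapped scatterlist entry. */
struct ex_sg {
	uint64_t dma_address;
	uint64_t dma_length;
};

/* Mirrors the patched logic: walk to the last entry with a non-zero
 * dma length, then round the span up from the start of the first page
 * rather than from the (possibly unaligned) first dma address.
 */
static uint64_t ex_unmap_len(const struct ex_sg *sg, int nelems)
{
	uint64_t start  = sg[0].dma_address;
	uint64_t offset = start & ~EX_PAGE_MASK;
	int i, last = 0;

	for (i = 1; i < nelems; i++) {
		if (sg[i].dma_length == 0)
			break;
		last = i;
	}
	return EX_ALIGN(sg[last].dma_address + sg[last].dma_length -
			(start - offset), EX_PAGE_SIZE);
}

int main(void)
{
	/* Two contiguous entries, the first starting 0x200 into a page. */
	struct ex_sg sgl[2] = {
		{ 0x10000200ULL, 0x0e00ULL },
		{ 0x10001000ULL, 0x1000ULL },
	};

	/* Prints: unmap len = 0x2000 (two full pages, nothing leaked) */
	printf("unmap len = 0x%llx\n",
	       (unsigned long long)ex_unmap_len(sgl, 2));
	return 0;
}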