dma-mapping: drop the dev argument to arch_sync_dma_for_*
[ Upstream commit 56e35f9c5b87ec1ae93e483284e189c84388de16 ]

These are pure cache maintenance routines, so drop the unused struct device
argument.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Suggested-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Stable-dep-of: ab327f8acdf8 ("mips: bmips: BCM6358: disable RAC flush for TP1")
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent f6e2d76aa3
commit 9690e34f22
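Every hunk below makes the same mechanical change: the unused struct device *
parameter is dropped from arch_sync_dma_for_device(), arch_sync_dma_for_cpu(),
arch_sync_dma_for_cpu_all() and the Xen sync helpers, and each caller loses its
first argument. As a rough, self-contained sketch of that shape (the kernel
types are stubbed here so the snippet compiles on its own, and the
sync_buffer_for_device() caller is a hypothetical stand-in for the call sites
updated below):

#include <stddef.h>

typedef unsigned long long phys_addr_t;	/* stand-in for the kernel typedef */
enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_NONE };

/*
 * Old shape:  void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 *                                           size_t size, enum dma_data_direction dir);
 * New shape:  the device pointer is gone; the helper is pure cache maintenance
 * keyed only on the physical range and the transfer direction.
 */
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
			      enum dma_data_direction dir)
{
	/* an architecture would write back / invalidate [paddr, paddr + size) here */
	(void)paddr;
	(void)size;
	(void)dir;
}

/* Callers simply drop the first argument. */
static void sync_buffer_for_device(phys_addr_t phys, size_t len)
{
	arch_sync_dma_for_device(phys, len, DMA_TO_DEVICE);
}

int main(void)
{
	sync_buffer_for_device(0x1000, 64);
	return 0;
}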
@@ -48,8 +48,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
  * upper layer functions (in include/linux/dma-mapping.h)
  */
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:

@@ -69,8 +69,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 	}
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:

@@ -2332,15 +2332,15 @@ void arch_teardown_dma_ops(struct device *dev)
 }
 
 #ifdef CONFIG_SWIOTLB
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
 			      size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
 			      size, dir);

@@ -70,20 +70,20 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
  * pfn_valid returns true the pages is local and we can use the native
  * dma-direct functions, otherwise we call the Xen specific version.
  */
-void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
-		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	if (pfn_valid(PFN_DOWN(handle)))
-		arch_sync_dma_for_cpu(dev, paddr, size, dir);
+		arch_sync_dma_for_cpu(paddr, size, dir);
 	else if (dir != DMA_TO_DEVICE)
 		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
 }
 
-void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
-		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	if (pfn_valid(PFN_DOWN(handle)))
-		arch_sync_dma_for_device(dev, paddr, size, dir);
+		arch_sync_dma_for_device(paddr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
 	else

@@ -13,14 +13,14 @@
 
 #include <asm/cacheflush.h>
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	__dma_map_area(phys_to_virt(paddr), size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	__dma_unmap_area(phys_to_virt(paddr), size, dir);
 }

@@ -140,7 +140,7 @@ void __init coherent_mem_init(phys_addr_t start, u32 size)
 					     sizeof(long));
 }
 
-static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+static void c6x_dma_sync(phys_addr_t paddr, size_t size,
 		enum dma_data_direction dir)
 {
 	BUG_ON(!valid_dma_direction(dir));

@@ -160,14 +160,14 @@ static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
 	}
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	return c6x_dma_sync(dev, paddr, size, dir);
+	return c6x_dma_sync(paddr, size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	return c6x_dma_sync(dev, paddr, size, dir);
+	return c6x_dma_sync(paddr, size, dir);
 }

@@ -58,8 +58,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 	cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:

@@ -74,8 +74,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 	}
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:

@@ -55,8 +55,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 	gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	void *addr = phys_to_virt(paddr);
 

@@ -73,8 +73,8 @@ __ia64_sync_icache_dcache (pte_t pte)
  * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
  * flush them when they get mapped into an executable vm-area.
  */
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	unsigned long pfn = PHYS_PFN(paddr);
 

@@ -61,8 +61,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 
 #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t handle, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:

@@ -15,7 +15,7 @@
 #include <linux/bug.h>
 #include <asm/cacheflush.h>
 
-static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+static void __dma_sync(phys_addr_t paddr, size_t size,
 		enum dma_data_direction direction)
 {
 	switch (direction) {

@@ -31,14 +31,14 @@ static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
 	}
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	__dma_sync(dev, paddr, size, dir);
+	__dma_sync(paddr, size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	__dma_sync(dev, paddr, size, dir);
+	__dma_sync(paddr, size, dir);
 }

@@ -64,7 +64,7 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
 	return dma_addr;
 }
 
-void arch_sync_dma_for_cpu_all(struct device *dev)
+void arch_sync_dma_for_cpu_all(void)
 {
 	void __iomem *cbr = BMIPS_GET_CBR();
 	u32 cfg;

@@ -592,7 +592,7 @@ static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
 	phys_addr_t phys = page_to_phys(page) + offset;
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_device(dev, phys, size, dir);
+		arch_sync_dma_for_device(phys, size, dir);
 	return vdma_alloc(phys, size);
 }
 

@@ -600,7 +600,7 @@ static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_cpu(dev, vdma_log2phys(dma_addr), size, dir);
+		arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
 	vdma_free(dma_addr);
 }
 

@@ -612,7 +612,7 @@ static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	for_each_sg(sglist, sg, nents, i) {
 		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+			arch_sync_dma_for_device(sg_phys(sg), sg->length,
 					dir);
 		sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
 		if (sg->dma_address == DMA_MAPPING_ERROR)

@@ -631,8 +631,7 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
 	for_each_sg(sglist, sg, nents, i) {
 		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length,
-					dir);
+			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 		vdma_free(sg->dma_address);
 	}
 }

@@ -640,13 +639,13 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 static void jazz_dma_sync_single_for_device(struct device *dev,
 		dma_addr_t addr, size_t size, enum dma_data_direction dir)
 {
-	arch_sync_dma_for_device(dev, vdma_log2phys(addr), size, dir);
+	arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
 }
 
 static void jazz_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t addr, size_t size, enum dma_data_direction dir)
 {
-	arch_sync_dma_for_cpu(dev, vdma_log2phys(addr), size, dir);
+	arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
 }
 
 static void jazz_dma_sync_sg_for_device(struct device *dev,

@@ -656,7 +655,7 @@ static void jazz_dma_sync_sg_for_device(struct device *dev,
 	int i;
 
 	for_each_sg(sgl, sg, nents, i)
-		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
 }
 
 static void jazz_dma_sync_sg_for_cpu(struct device *dev,

@@ -666,7 +665,7 @@ static void jazz_dma_sync_sg_for_cpu(struct device *dev,
 	int i;
 
 	for_each_sg(sgl, sg, nents, i)
-		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 }
 
 const struct dma_map_ops jazz_dma_ops = {

@@ -27,7 +27,7 @@
  * R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp.
  * SGI IP32 aka O2.
  */
-static inline bool cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(void)
 {
 	switch (boot_cpu_type()) {
 	case CPU_R10000:

@@ -118,17 +118,17 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
 	} while (left);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	dma_sync_phys(paddr, size, dir);
 }
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	if (cpu_needs_post_dma_flush(dev))
+	if (cpu_needs_post_dma_flush())
 		dma_sync_phys(paddr, size, dir);
 }
 #endif

@@ -46,8 +46,8 @@ static inline void cache_op(phys_addr_t paddr, size_t size,
 	} while (left);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_FROM_DEVICE:

@@ -61,8 +61,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 	}
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:

@@ -18,8 +18,8 @@
 #include <linux/cache.h>
 #include <asm/cacheflush.h>
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	void *vaddr = phys_to_virt(paddr);
 

@@ -42,8 +42,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 	}
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	void *vaddr = phys_to_virt(paddr);
 

@@ -125,7 +125,7 @@ arch_dma_free(struct device *dev, size_t size, void *vaddr,
 	free_pages_exact(vaddr, size);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
+void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
 		enum dma_data_direction dir)
 {
 	unsigned long cl;

@@ -439,14 +439,14 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 	free_pages((unsigned long)__va(dma_handle), order);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }

@@ -104,14 +104,14 @@ static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
 #endif
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	__dma_sync_page(paddr, size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	__dma_sync_page(paddr, size, dir);
 }

@@ -25,7 +25,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	 * Pages from the page allocator may have data present in
 	 * cache. So flush the cache before using uncached memory.
 	 */
-	arch_sync_dma_for_device(dev, virt_to_phys(ret), size,
+	arch_sync_dma_for_device(virt_to_phys(ret), size,
 			DMA_BIDIRECTIONAL);
 
 	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);

@@ -59,8 +59,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 	iounmap(vaddr);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));
 

@@ -368,8 +368,8 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
 /* IIep is write-through, not flushing on cpu to device transfer. */
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	if (dir != PCI_DMA_TODEVICE)
 		dma_make_coherent(paddr, PAGE_ALIGN(size));

@@ -44,8 +44,8 @@ static void do_cache_op(phys_addr_t paddr, size_t size,
 	}
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:

@@ -62,8 +62,8 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 	}
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:

@@ -660,7 +660,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-	arch_sync_dma_for_cpu(dev, phys, size, dir);
+	arch_sync_dma_for_cpu(phys, size, dir);
 }
 
 static void iommu_dma_sync_single_for_device(struct device *dev,

@@ -672,7 +672,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-	arch_sync_dma_for_device(dev, phys, size, dir);
+	arch_sync_dma_for_device(phys, size, dir);
 }
 
 static void iommu_dma_sync_sg_for_cpu(struct device *dev,

@@ -686,7 +686,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
 		return;
 
 	for_each_sg(sgl, sg, nelems, i)
-		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 }
 
 static void iommu_dma_sync_sg_for_device(struct device *dev,

@@ -700,7 +700,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 		return;
 
 	for_each_sg(sgl, sg, nelems, i)
-		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
 }
 
 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,

@@ -715,7 +715,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	dma_handle =__iommu_dma_map(dev, phys, size, prot);
 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    dma_handle != DMA_MAPPING_ERROR)
-		arch_sync_dma_for_device(dev, phys, size, dir);
+		arch_sync_dma_for_device(phys, size, dir);
 	return dma_handle;
 }
 

@@ -411,7 +411,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 
 done:
 	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
+		xen_dma_sync_for_device(dev_addr, phys, size, dir);
 	return dev_addr;
 }
 

@@ -431,7 +431,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 	BUG_ON(dir == DMA_NONE);
 
 	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);
+		xen_dma_sync_for_cpu(dev_addr, paddr, size, dir);
 
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr))

@@ -445,7 +445,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
 	phys_addr_t paddr = xen_bus_to_phys(dma_addr);
 
 	if (!dev_is_dma_coherent(dev))
-		xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
+		xen_dma_sync_for_cpu(dma_addr, paddr, size, dir);
 
 	if (is_xen_swiotlb_buffer(dma_addr))
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);

@@ -461,7 +461,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
 
 	if (!dev_is_dma_coherent(dev))
-		xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
+		xen_dma_sync_for_device(dma_addr, paddr, size, dir);
 }
 
 /*

@@ -75,29 +75,29 @@ static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
 #endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir);
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir);
 #else
-static inline void arch_sync_dma_for_device(struct device *dev,
-		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 }
 #endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir);
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir);
 #else
-static inline void arch_sync_dma_for_cpu(struct device *dev,
-		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 }
 #endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
-void arch_sync_dma_for_cpu_all(struct device *dev);
+void arch_sync_dma_for_cpu_all(void);
 #else
-static inline void arch_sync_dma_for_cpu_all(struct device *dev)
+static inline void arch_sync_dma_for_cpu_all(void)
 {
 }
 #endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

@@ -4,10 +4,10 @@
 
 #include <linux/swiotlb.h>
 
-void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
-		phys_addr_t paddr, size_t size, enum dma_data_direction dir);
-void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
-		phys_addr_t paddr, size_t size, enum dma_data_direction dir);
+void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir);
+void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir);
 
 extern int xen_swiotlb_init(int verbose, bool early);
 extern const struct dma_map_ops xen_swiotlb_dma_ops;

@@ -232,7 +232,7 @@ void dma_direct_sync_single_for_device(struct device *dev,
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
 
 	if (!dev_is_dma_coherent(dev))
-		arch_sync_dma_for_device(dev, paddr, size, dir);
+		arch_sync_dma_for_device(paddr, size, dir);
 }
 EXPORT_SYMBOL(dma_direct_sync_single_for_device);
 

@@ -250,7 +250,7 @@ void dma_direct_sync_sg_for_device(struct device *dev,
 					dir, SYNC_FOR_DEVICE);
 
 		if (!dev_is_dma_coherent(dev))
-			arch_sync_dma_for_device(dev, paddr, sg->length,
+			arch_sync_dma_for_device(paddr, sg->length,
 					dir);
 	}
 }

@@ -266,8 +266,8 @@ void dma_direct_sync_single_for_cpu(struct device *dev,
 	phys_addr_t paddr = dma_to_phys(dev, addr);
 
 	if (!dev_is_dma_coherent(dev)) {
-		arch_sync_dma_for_cpu(dev, paddr, size, dir);
-		arch_sync_dma_for_cpu_all(dev);
+		arch_sync_dma_for_cpu(paddr, size, dir);
+		arch_sync_dma_for_cpu_all();
 	}
 
 	if (unlikely(is_swiotlb_buffer(paddr)))

@@ -285,7 +285,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
 		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
 
 		if (!dev_is_dma_coherent(dev))
-			arch_sync_dma_for_cpu(dev, paddr, sg->length, dir);
+			arch_sync_dma_for_cpu(paddr, sg->length, dir);
 
 		if (unlikely(is_swiotlb_buffer(paddr)))
 			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,

@@ -293,7 +293,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
 	}
 
 	if (!dev_is_dma_coherent(dev))
-		arch_sync_dma_for_cpu_all(dev);
+		arch_sync_dma_for_cpu_all();
 }
 EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);
 

@@ -345,7 +345,7 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 	}
 
 	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_device(dev, phys, size, dir);
+		arch_sync_dma_for_device(phys, size, dir);
 	return dma_addr;
 }
 EXPORT_SYMBOL(dma_direct_map_page);