From 03b0d29e0369054e638328b3994b8cfbb9d34a35 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 9 May 2014 15:58:16 +0100
Subject: [PATCH] arm64: Fix DMA range invalidation for cache line unaligned
 buffers

If the buffer needing cache invalidation for inbound DMA does not start
or end on a cache line aligned address, we need to use the
non-destructive clean&invalidate operation. This issue was introduced
by commit 7363590d2c46 (arm64: Implement coherent DMA API based on
swiotlb).

Signed-off-by: Catalin Marinas
Reported-by: Jon Medhurst (Tixy)
(cherry picked from commit ebf81a938dade3b450eb11c57fa744cfac4b523f)
Signed-off-by: Ryan Harkin
Signed-off-by: Mark Brown
---
 arch/arm64/mm/cache.S | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 4726b8209d37..39b6542b138e 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -175,12 +175,19 @@ ENDPROC(__flush_dcache_area)
 __dma_inv_range:
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
-	bic	x0, x0, x3
+	tst	x1, x3				// end cache line aligned?
 	bic	x1, x1, x3
-1:	dc	ivac, x0			// invalidate D / U line
-	add	x0, x0, x2
+	b.eq	1f
+	dc	civac, x1			// clean & invalidate D / U line
+1:	tst	x0, x3				// start cache line aligned?
+	bic	x0, x0, x3
+	b.eq	2f
+	dc	civac, x0			// clean & invalidate D / U line
+	b	3f
+2:	dc	ivac, x0			// invalidate D / U line
+3:	add	x0, x0, x2
 	cmp	x0, x1
-	b.lo	1b
+	b.lo	2b
 	dsb	sy
 	ret
 ENDPROC(__dma_inv_range)
-- 
2.34.1
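
Background on the fix, for readers less familiar with the DC instructions
involved: DC IVAC discards a cache line outright, while DC CIVAC writes the
line back to memory before invalidating it, so data that shares a partial
line with the DMA buffer is preserved. Below is a minimal C sketch of the
alignment logic the assembly above implements; dc_ivac(), dc_civac() and
dsb() are hypothetical helpers standing in for the DC IVAC, DC CIVAC and
DSB SY instructions, not a real kernel API. The authoritative code is the
assembly in the patch.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-ins for the cache maintenance instructions. */
void dc_ivac(uintptr_t addr);	/* invalidate line (destructive) */
void dc_civac(uintptr_t addr);	/* clean, then invalidate line */
void dsb(void);			/* barrier: wait for completion */

void dma_inv_range(uintptr_t start, uintptr_t end, size_t line)
{
	uintptr_t mask = line - 1;

	/* Buffer ends mid-line: clean & invalidate that line so bytes
	 * beyond `end` that share it are written back, not lost. */
	if (end & mask)
		dc_civac(end & ~mask);
	end &= ~mask;

	/* Buffer starts mid-line: same non-destructive treatment,
	 * then skip past that line. */
	if (start & mask) {
		start &= ~mask;
		dc_civac(start);
		start += line;
	}

	/* Lines wholly inside the buffer may be discarded outright. */
	for (; start < end; start += line)
		dc_ivac(start);

	dsb();
}

Note the asymmetry the original code missed: a plain invalidate on a
partially covered line throws away whatever else lives in that line, which
is exactly the corruption this patch avoids by switching to clean&invalidate
at the unaligned edges.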