arm64: Fix DMA range invalidation for cache line unaligned buffers
author     Catalin Marinas <catalin.marinas@arm.com>
           Fri, 9 May 2014 14:58:16 +0000 (15:58 +0100)
committer  Mark Brown <broonie@linaro.org>
           Mon, 12 May 2014 17:10:22 +0000 (18:10 +0100)
If the buffer needing cache invalidation for inbound DMA does not start
or end on a cache line aligned address, we need to use the
non-destructive clean&invalidate operation for the partial lines at the
boundaries. This issue was introduced by commit 7363590d2c46 (arm64:
Implement coherent DMA API based on swiotlb).
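A plain "dc ivac" on a partially covered cache line would also discard
any dirty data that neighbouring buffers hold in that line, whereas
"dc civac" writes it back first. Below is a minimal C model of the fixed
logic, for illustration only (not kernel code; clean_and_inval_line()
and inval_line() are hypothetical stand-ins for the two instructions):

/*
 * Illustrative sketch of the fixed range invalidation.
 */
#include <stdint.h>
#include <stdio.h>

static void clean_and_inval_line(uintptr_t addr)
{
	printf("dc civac, %#lx\n", (unsigned long)addr);  /* non-destructive */
}

static void inval_line(uintptr_t addr)
{
	printf("dc ivac,  %#lx\n", (unsigned long)addr);  /* discards the line */
}

static void dma_inv_range(uintptr_t start, uintptr_t end, uintptr_t line)
{
	uintptr_t mask = line - 1;

	/* End not line aligned: the last line also holds data outside the
	 * buffer, so write it back while invalidating. */
	if (end & mask)
		clean_and_inval_line(end & ~mask);
	end &= ~mask;

	/* Same reasoning for an unaligned start address. */
	if (start & mask) {
		clean_and_inval_line(start & ~mask);
		start = (start & ~mask) + line;
	}

	/* Lines fully covered by the buffer can be invalidated outright. */
	while (start < end) {
		inval_line(start);
		start += line;
	}
}

int main(void)
{
	/* 64-byte cache lines; both ends of the buffer are unaligned, so
	 * the first and last lines are shared with neighbouring data. */
	dma_inv_range(0x1010, 0x10f8, 64);
	return 0;
}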

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Jon Medhurst (Tixy) <tixy@linaro.org>
(cherry picked from commit ebf81a938dade3b450eb11c57fa744cfac4b523f)
Signed-off-by: Ryan Harkin <ryan.harkin@linaro.org>
Signed-off-by: Mark Brown <broonie@linaro.org>
arch/arm64/mm/cache.S

index 4726b8209d379df2690db45aa85f4accf5732c71..39b6542b138ed25265cac0a2c1c1d1d700382322 100644
@@ -175,12 +175,19 @@ ENDPROC(__flush_dcache_area)
 __dma_inv_range:
        dcache_line_size x2, x3
        sub     x3, x2, #1
-       bic     x0, x0, x3
+       tst     x1, x3                          // end cache line aligned?
        bic     x1, x1, x3
-1:     dc      ivac, x0                        // invalidate D / U line
-       add     x0, x0, x2
+       b.eq    1f
+       dc      civac, x1                       // clean & invalidate D / U line
+1:     tst     x0, x3                          // start cache line aligned?
+       bic     x0, x0, x3
+       b.eq    2f
+       dc      civac, x0                       // clean & invalidate D / U line
+       b       3f
+2:     dc      ivac, x0                        // invalidate D / U line
+3:     add     x0, x0, x2
        cmp     x0, x1
-       b.lo    1b
+       b.lo    2b
        dsb     sy
        ret
 ENDPROC(__dma_inv_range)