powerpc/mm: Use the required number of VSID bits in slbmte
author: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Mon, 10 Sep 2012 02:52:53 +0000 (02:52 +0000)
committer: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Mon, 17 Sep 2012 06:31:50 +0000 (16:31 +1000)
ASM_VSID_SCRAMBLE can leave non-zero bits in the high 28 bits of the result
for a 256MB segment (the high 40 bits for a 1T segment). Properly mask them
before using the value in slbmte.
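
A minimal C sketch of what the corrected rldimi boundary computes (illustrative
only, not part of this patch or the kernel tree; combine_vsid_and_flags, shift
and vsid_bits are hypothetical names standing in for the rldimi operands):

	/*
	 * Illustrative sketch only: rldimi rA,rS,SH,MB with
	 * MB = 64 - (SH + vsid_bits) inserts exactly the low vsid_bits of
	 * rS above the SH flag bits of rA, discarding anything the
	 * scramble left above vsid_bits.
	 */
	static unsigned long combine_vsid_and_flags(unsigned long flags,
						    unsigned long scrambled_vsid,
						    unsigned int shift,
						    unsigned int vsid_bits)
	{
		unsigned long field_mask = ((1UL << vsid_bits) - 1) << shift;

		/* clear the destination field, then insert the masked VSID */
		return (flags & ~field_mask) |
		       ((scrambled_vsid << shift) & field_mask);
	}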

Reviewed-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
arch/powerpc/mm/slb_low.S

index e132dc6ed1a9a753716be14756dea09b3993ec8b..3b75f19aaa224b67921060b5600dbedeb713b87d 100644
@@ -223,7 +223,11 @@ _GLOBAL(slb_allocate_user)
  */
 slb_finish_load:
        ASM_VSID_SCRAMBLE(r10,r9,256M)
-       rldimi  r11,r10,SLB_VSID_SHIFT,16       /* combine VSID and flags */
+       /*
+        * Bits above VSID_BITS_256M in r10 must be masked off when
+        * combining the VSID and flags.
+        */
+       rldimi  r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))
 
        /* r3 = EA, r11 = VSID data */
        /*
@@ -287,7 +291,11 @@ _GLOBAL(slb_compare_rr_to_size)
 slb_finish_load_1T:
        srdi    r10,r10,40-28           /* get 1T ESID */
        ASM_VSID_SCRAMBLE(r10,r9,1T)
-       rldimi  r11,r10,SLB_VSID_SHIFT_1T,16    /* combine VSID and flags */
+       /*
+        * Bits above VSID_BITS_1T in r10 must be masked off when
+        * combining the VSID and flags.
+        */
+       rldimi  r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
        li      r10,MMU_SEGSIZE_1T
        rldimi  r11,r10,SLB_VSID_SSIZE_SHIFT,0  /* insert segment size */