x86: coding style fixes to arch/x86/kernel/cpu/amd.c
author Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>
Fri, 22 Feb 2008 22:10:33 +0000 (23:10 +0100)
committer Ingo Molnar <mingo@elte.hu>
Thu, 17 Apr 2008 15:40:50 +0000 (17:40 +0200)
Before:
   total: 42 errors, 26 warnings, 350 lines checked
After:
   total: 0 errors, 26 warnings, 352 lines checked
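
(These are checkpatch.pl summary lines. A minimal way to reproduce the before/after run, assuming a kernel tree with the usual in-tree script, is:

   # report style errors/warnings for a source file (not a patch)
   ./scripts/checkpatch.pl -f arch/x86/kernel/cpu/amd.c

run once before and once after the cleanup.)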

No code changed:

arch/x86/kernel/cpu/amd.o:

   text    data     bss     dec     hex filename
   1936     328       0    2264     8d8 amd.o.before
   1936     328       0    2264     8d8 amd.o.after

md5:
   873430a88faaf31bb4bbfe3a2a691e45  amd.o.before.asm
   873430a88faaf31bb4bbfe3a2a691e45  amd.o.after.asm
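
The "no code changed" check above can be reproduced with standard binutils tools; this is only a sketch, assuming the .asm files are objdump disassemblies of the saved before/after objects:

   # section sizes must be identical (text/data/bss/dec/hex)
   size amd.o.before amd.o.after
   # disassemble both objects and compare checksums
   objdump -d amd.o.before > amd.o.before.asm
   objdump -d amd.o.after  > amd.o.after.asm
   md5sum amd.o.before.asm amd.o.after.asm

Matching sizes and identical md5 sums of the disassembly confirm the patch is whitespace/style only.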

Signed-off-by: Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/amd.c

index cab4e562b5cbda0e7f4a4b67b670a7538fa5cc34..1a3e1bb4d7580a3f64d15fb51066346585a9c060 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -20,7 +20,7 @@
  *     the chip setting when fixing the bug but they also tweaked some
  *     performance at the same time..
  */
+
 extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
@@ -81,7 +81,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SMP
        unsigned long long value;
 
-       /* Disable TLB flush filter by setting HWCR.FFDIS on K8
+       /*
+        * Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
@@ -102,15 +103,16 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
         *      no bus pipeline)
         */
 
-       /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-          3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
+       /*
+        * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+        * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+        */
        clear_bit(0*32+31, c->x86_capability);
-       
+
        r = get_model_name(c);
 
-       switch(c->x86)
-       {
-               case 4:
+       switch (c->x86) {
+       case 4:
                /*
                 * General Systems BIOSen alias the cpu frequency registers
                 * of the Elan at 0x000df000. Unfortuantly, one of the Linux
@@ -120,61 +122,60 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 #define CBAR           (0xfffc) /* Configuration Base Address  (32-bit) */
 #define CBAR_ENB       (0x80000000)
 #define CBAR_KEY       (0X000000CB)
-                       if (c->x86_model==9 || c->x86_model == 10) {
+                       if (c->x86_model == 9 || c->x86_model == 10) {
                                if (inl (CBAR) & CBAR_ENB)
                                        outl (0 | CBAR_KEY, CBAR);
                        }
                        break;
-               case 5:
-                       if( c->x86_model < 6 )
-                       {
+       case 5:
+                       if (c->x86_model < 6) {
                                /* Based on AMD doc 20734R - June 2000 */
-                               if ( c->x86_model == 0 ) {
+                               if (c->x86_model == 0) {
                                        clear_bit(X86_FEATURE_APIC, c->x86_capability);
                                        set_bit(X86_FEATURE_PGE, c->x86_capability);
                                }
                                break;
                        }
-                       
-                       if ( c->x86_model == 6 && c->x86_mask == 1 ) {
+
+                       if (c->x86_model == 6 && c->x86_mask == 1) {
                                const int K6_BUG_LOOP = 1000000;
                                int n;
                                void (*f_vide)(void);
                                unsigned long d, d2;
-                               
+
                                printk(KERN_INFO "AMD K6 stepping B detected - ");
-                               
+
                                /*
-                                * It looks like AMD fixed the 2.6.2 bug and improved indirect 
+                                * It looks like AMD fixed the 2.6.2 bug and improved indirect
                                 * calls at the same time.
                                 */
 
                                n = K6_BUG_LOOP;
                                f_vide = vide;
                                rdtscl(d);
-                               while (n--) 
+                               while (n--)
                                        f_vide();
                                rdtscl(d2);
                                d = d2-d;
 
-                               if (d > 20*K6_BUG_LOOP) 
+                               if (d > 20*K6_BUG_LOOP)
                                        printk("system stability may be impaired when more than 32 MB are used.\n");
-                               else 
+                               else
                                        printk("probably OK (after B9730xxxx).\n");
                                printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
                        }
 
                        /* K6 with old style WHCR */
                        if (c->x86_model < 8 ||
-                          (c->x86_model== 8 && c->x86_mask < 8)) {
+                          (c->x86_model == 8 && c->x86_mask < 8)) {
                                /* We can only write allocate on the low 508Mb */
-                               if(mbytes>508)
-                                       mbytes=508;
+                               if (mbytes > 508)
+                                       mbytes = 508;
 
                                rdmsr(MSR_K6_WHCR, l, h);
-                               if ((l&0x0000FFFF)==0) {
+                               if ((l&0x0000FFFF) == 0) {
                                        unsigned long flags;
-                                       l=(1<<0)|((mbytes/4)<<1);
+                                       l = (1<<0)|((mbytes/4)<<1);
                                        local_irq_save(flags);
                                        wbinvd();
                                        wrmsr(MSR_K6_WHCR, l, h);
@@ -185,17 +186,17 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                                break;
                        }
 
-                       if ((c->x86_model == 8 && c->x86_mask >7) ||
+                       if ((c->x86_model == 8 && c->x86_mask > 7) ||
                             c->x86_model == 9 || c->x86_model == 13) {
                                /* The more serious chips .. */
 
-                               if(mbytes>4092)
-                                       mbytes=4092;
+                               if (mbytes > 4092)
+                                       mbytes = 4092;
 
                                rdmsr(MSR_K6_WHCR, l, h);
-                               if ((l&0xFFFF0000)==0) {
+                               if ((l&0xFFFF0000) == 0) {
                                        unsigned long flags;
-                                       l=((mbytes>>2)<<22)|(1<<16);
+                                       l = ((mbytes>>2)<<22)|(1<<16);
                                        local_irq_save(flags);
                                        wbinvd();
                                        wrmsr(MSR_K6_WHCR, l, h);
@@ -217,10 +218,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                                break;
                        }
                        break;
-               case 6: /* An Athlon/Duron */
-                       /* Bit 15 of Athlon specific MSR 15, needs to be 0
-                        * to enable SSE on Palomino/Morgan/Barton CPU's.
+       case 6: /* An Athlon/Duron */
+
+                       /*
+                        * Bit 15 of Athlon specific MSR 15, needs to be 0
+                        * to enable SSE on Palomino/Morgan/Barton CPU's.
                         * If the BIOS didn't enable it already, enable it here.
                         */
                        if (c->x86_model >= 6 && c->x86_model <= 10) {
@@ -233,11 +235,12 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                                }
                        }
 
-                       /* It's been determined by AMD that Athlons since model 8 stepping 1
+                       /*
+                        * It's been determined by AMD that Athlons since model 8 stepping 1
                         * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
                         * As per AMD technical note 27212 0.2
                         */
-                       if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) {
+                       if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
                                rdmsr(MSR_K7_CLK_CTL, l, h);
                                if ((l & 0xfff00000) != 0x20000000) {
                                        printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
@@ -256,7 +259,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                set_bit(X86_FEATURE_K8, c->x86_capability);
                break;
        case 6:
-               set_bit(X86_FEATURE_K7, c->x86_capability); 
+               set_bit(X86_FEATURE_K7, c->x86_capability);
                break;
        }
        if (c->x86 >= 6)
@@ -264,9 +267,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 
        display_cacheinfo(c);
 
-       if (cpuid_eax(0x80000000) >= 0x80000008) {
+       if (cpuid_eax(0x80000000) >= 0x80000008)
                c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-       }
 
 #ifdef CONFIG_X86_HT
        /*
@@ -308,14 +310,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
 }
 
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
        /* AMD errata T13 (order #21922) */
        if ((c->x86 == 6)) {
                if (c->x86_model == 3 && c->x86_mask == 0)      /* Duron Rev A0 */
                        size = 64;
                if (c->x86_model == 4 &&
-                   (c->x86_mask==0 || c->x86_mask==1)) /* Tbird rev A1/A2 */
+                   (c->x86_mask == 0 || c->x86_mask == 1))     /* Tbird rev A1/A2 */
                        size = 256;
        }
        return size;
@@ -323,16 +325,16 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned in
 
 static struct cpu_dev amd_cpu_dev __cpuinitdata = {
        .c_vendor       = "AMD",
-       .c_ident        = { "AuthenticAMD" },
+       .c_ident        = { "AuthenticAMD" },
        .c_models = {
                { .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
                  {
                          [3] = "486 DX/2",
                          [7] = "486 DX/2-WB",
-                         [8] = "486 DX/4", 
-                         [9] = "486 DX/4-WB", 
+                         [8] = "486 DX/4",
+                         [9] = "486 DX/4-WB",
                          [14] = "Am5x86-WT",
-                         [15] = "Am5x86-WB" 
+                         [15] = "Am5x86-WB"
                  }
                },
        },