From: Ingo Molnar <mingo@elte.hu>
Date: Mon, 9 Feb 2009 13:58:11 +0000 (+0100)
Subject: Merge commit 'v2.6.29-rc4' into core/percpu

Merge commit 'v2.6.29-rc4' into core/percpu

Conflicts:
	arch/x86/mach-voyager/voyager_smp.c
	arch/x86/mm/fault.c
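
Resolution notes: voyager_smp.c keeps the switch_to_new_gdt(cpu) rework
from core/percpu together with upstream's cpumask handling; fault.c keeps
the core/percpu reorganization of do_page_fault() while adopting
upstream's placement of the kprobes notify_page_fault() hook.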
---

249d51b53aea1b7cdb1be65a1a9a0c59d9e06f3e
diff --cc arch/arm/kernel/irq.c
index 4bb723eadad1,363db186cb93..45eacb5a2ecd
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@@ -101,14 -101,9 +101,14 @@@ unlock
  /* Handle bad interrupts */
  static struct irq_desc bad_irq_desc = {
  	.handle_irq = handle_bad_irq,
- 	.lock = SPIN_LOCK_UNLOCKED
+ 	.lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
  };
  
 +#ifdef CONFIG_CPUMASK_OFFSTACK
 +/* We are not allocating bad_irq_desc.affinity or .pending_mask */
 +#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
 +#endif
 +
  /*
   * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
   * come via this function.  Instead, they should provide their
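
The irq.c resolution takes upstream's switch from the deprecated
SPIN_LOCK_UNLOCKED initializer to __SPIN_LOCK_UNLOCKED(), alongside the
CONFIG_CPUMASK_OFFSTACK guard from core/percpu (bad_irq_desc's affinity
and pending_mask are never allocated, so off-stack cpumasks cannot work
here). The named initializer matters for lockdep: SPIN_LOCK_UNLOCKED gave
every lock initialized with it the same static lock class, while
__SPIN_LOCK_UNLOCKED(name) keys the class to the individual lock. A
minimal sketch of the pattern (identifiers made up for illustration):

	/* old style: all such locks share one lockdep class */
	static spinlock_t old_lock = SPIN_LOCK_UNLOCKED;

	/* new style: the initializer names the lock, so lockdep can
	 * give it a class of its own; DEFINE_SPINLOCK() wraps this. */
	static spinlock_t new_lock = __SPIN_LOCK_UNLOCKED(new_lock);
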
diff --cc arch/x86/kernel/entry_64.S
index e4c9710cae52,a1346217e43c..586bed677557
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@@ -342,11 -341,12 +342,12 @@@ ENTRY(save_args
  	 * a little cheaper to use a separate counter in the PDA (short of
  	 * moving irq_enter into assembly, which would be too much work)
  	 */
 -1:	incl %gs:pda_irqcount
 +1:	incl PER_CPU_VAR(irq_count)
  	jne 2f
  	popq_cfi %rax			/* move return address... */
 -	mov %gs:pda_irqstackptr,%rsp
 +	mov PER_CPU_VAR(irq_stack_ptr),%rsp
  	EMPTY_FRAME 0
+ 	pushq_cfi %rbp			/* backlink for unwinder */
  	pushq_cfi %rax			/* ... to the new stack */
  	/*
  	 * We entered an interrupt context - irqs are off:
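
The entry_64.S hunk is the PDA-to-percpu conversion from core/percpu:
pda_irqcount and pda_irqstackptr become ordinary per-CPU variables, and
PER_CPU_VAR() still emits a %gs-relative operand, so the access pattern
in the interrupt-entry path is essentially unchanged. The pushq_cfi %rbp
line is upstream's addition of a frame-pointer backlink so the unwinder
can walk across the IRQ-stack switch. Roughly, the C-side definitions
backing the new symbols look like this (a sketch, initializer of
irq_stack_ptr omitted, not the verbatim declarations):

	/* per-CPU replacements for the old PDA fields (sketch) */
	DEFINE_PER_CPU(unsigned int, irq_count) = -1;	/* -1 = not on the IRQ stack */
	DEFINE_PER_CPU(char *, irq_stack_ptr);		/* top of this CPU's IRQ stack */
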
diff --cc arch/x86/mach-voyager/voyager_smp.c
index 58c7cac3440d,7ffcdeec4631..6f5a38c7f900
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@@ -1746,13 -1745,14 +1744,13 @@@ static void __init voyager_smp_prepare_
  
  static void __cpuinit voyager_smp_prepare_boot_cpu(void)
  {
 -	init_gdt(smp_processor_id());
 -	switch_to_new_gdt();
 +	int cpu = smp_processor_id();
 +	switch_to_new_gdt(cpu);
  
- 	cpu_set(cpu, cpu_online_map);
- 	cpu_set(cpu, cpu_callout_map);
- 	cpu_set(cpu, cpu_possible_map);
- 	cpu_set(cpu, cpu_present_map);
+ 	cpu_online_map = cpumask_of_cpu(smp_processor_id());
+ 	cpu_callout_map = cpumask_of_cpu(smp_processor_id());
+ 	cpu_callin_map = CPU_MASK_NONE;
+ 	cpu_present_map = cpumask_of_cpu(smp_processor_id());
 -
  }
  
  static int __cpuinit voyager_cpu_up(unsigned int cpu)
@@@ -1779,11 -1779,12 +1777,11 @@@ static void __init voyager_smp_cpus_don
  void __init smp_setup_processor_id(void)
  {
  	current_thread_info()->cpu = hard_smp_processor_id();
 -	x86_write_percpu(cpu_number, hard_smp_processor_id());
  }
  
- static void voyager_send_call_func(cpumask_t callmask)
+ static void voyager_send_call_func(const struct cpumask *callmask)
  {
- 	__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+ 	__u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
  	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
  }
  
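Voyager is the trickier of the two listed conflicts: the result keeps
core/percpu's boot-CPU change (no more init_gdt(); switch_to_new_gdt()
now takes the CPU number) and upstream's cpumask updates, including
passing masks as const struct cpumask * instead of by value. The
by-pointer convention exists because a cpumask_t is a full NR_CPUS-bit
bitmap, which becomes too large to copy around as NR_CPUS grows;
roughly:

	/* why masks are passed by pointer: a sketch of the type */
	typedef struct cpumask {
		DECLARE_BITMAP(bits, NR_CPUS);	/* NR_CPUS bits, not one word */
	} cpumask_t;
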
diff --cc arch/x86/mm/fault.c
index 65709a6aa6ee,c76ef1d701c9..8c3f3113a6ec
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@@ -807,8 -601,8 +807,6 @@@ void __kprobes do_page_fault(struct pt_
  	/* get the address */
  	address = read_cr2();
  
- 	if (unlikely(notify_page_fault(regs)))
- 		return;
 -	si_code = SEGV_MAPERR;
 -
  	if (unlikely(kmmio_fault(regs, address)))
  		return;
  
@@@ -835,17 -629,23 +833,22 @@@
  			return;
  
  		/* Can handle a stale RO->RW TLB */
 -		if (spurious_fault(address, error_code))
 +		if (spurious_fault(error_code, address))
  			return;
  
+ 		/* kprobes don't want to hook the spurious faults. */
+ 		if (notify_page_fault(regs))
+ 			return;
  		/*
  		 * Don't take the mm semaphore here. If we fixup a prefetch
  		 * fault we could otherwise deadlock.
  		 */
 -		goto bad_area_nosemaphore;
 +		bad_area_nosemaphore(regs, error_code, address);
 +		return;
  	}
  
 -	/* kprobes don't want to hook the spurious faults. */
 -	if (notify_page_fault(regs))
++	if (unlikely(notify_page_fault(regs)))
+ 		return;
 -
  	/*
  	 * It's safe to allow irq's after cr2 has been saved and the
  	 * vmalloc fault has been handled.
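
The fault.c resolution layers upstream's reordering on top of the
core/percpu rewrite (which replaced the goto labels with helpers such as
bad_area_nosemaphore()). Net effect: for faults on kernel addresses, the
kprobes hook runs only after the vmalloc and spurious-fault fast paths,
and the user-address path keeps its unlikely() annotation. The resulting
control flow, as a sketch of the surrounding logic:

	if (fault_in_kernel_space(address)) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (spurious_fault(error_code, address))
			return;
		if (notify_page_fault(regs))		/* kprobes, kernel side */
			return;
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}
	if (unlikely(notify_page_fault(regs)))		/* kprobes, user side */
		return;
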
diff --cc kernel/irq/numa_migrate.c
index 666260e4c065,acd88356ac76..7f9b80434e32
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@@ -95,8 -81,10 +95,9 @@@ static struct irq_desc *__real_move_irq
  		desc = old_desc;
  		goto out_unlock;
  	}
 -	init_copy_one_irq_desc(irq, old_desc, desc, cpu);
  
  	irq_desc_ptrs[irq] = desc;
+ 	spin_unlock_irqrestore(&sparse_irq_lock, flags);
  
  	/* free the old one */
  	free_one_irq_desc(old_desc, desc);
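
Finally, numa_migrate.c picks up upstream's locking fix: the new
descriptor is published in irq_desc_ptrs[] while sparse_irq_lock is
held, but the lock is dropped before the old descriptor is torn down,
keeping the free path outside the critical section. The
init_copy_one_irq_desc() call disappears at this spot because the
core/percpu side had already restructured the copy step. The locking
pattern, in brief:

	spin_lock_irqsave(&sparse_irq_lock, flags);
	irq_desc_ptrs[irq] = desc;			/* publish under the lock */
	spin_unlock_irqrestore(&sparse_irq_lock, flags);
	free_one_irq_desc(old_desc, desc);		/* tear down outside it */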