#define HPTE_LOCK_BIT 3
-static DEFINE_SPINLOCK(native_tlbie_lock);
+static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
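/*
 * Note: on PREEMPT_RT a spinlock_t becomes a sleeping lock, while a
 * raw_spinlock_t always spins. This lock is taken in TLB-invalidate
 * paths that run with interrupts disabled, where sleeping is not
 * allowed, hence the conversion (the usual rationale for such changes).
 */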
static inline void __tlbie(unsigned long va, int psize, int ssize)
{
@@ ... @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
if (use_local)
use_local = mmu_psize_defs[psize].tlbiel;
if (lock_tlbie && !use_local)
- spin_lock(&native_tlbie_lock);
+ raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync": : :"memory");
if (use_local) {
__tlbiel(va, psize, ssize);
asm volatile("ptesync": : :"memory");
} else {
__tlbie(va, psize, ssize);
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
if (lock_tlbie && !use_local)
- spin_unlock(&native_tlbie_lock);
+ raw_spin_unlock(&native_tlbie_lock);
}
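/*
 * The rule above: tlbiel is CPU-local and never needs the lock; the
 * broadcast tlbie takes native_tlbie_lock only when the CPU lacks
 * CPU_FTR_LOCKLESS_TLBIE, i.e. when concurrent tlbie can deadlock
 * the hardware.
 */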
static inline void native_lock_hpte(struct hash_pte *hptep)
@@ ... @@ static void native_hpte_clear(void)
/* we take the tlbie lock and hold it. Some hardware will
* deadlock if we try to tlbie from two processors at once.
*/
- spin_lock(&native_tlbie_lock);
+ raw_spin_lock(&native_tlbie_lock);
slots = pteg_count * HPTES_PER_GROUP;
for (slot = 0; slot < slots; slot++, hptep++) {
/* ... __tlbie() each valid HPTE; the tlbie lock is already held ... */
}
asm volatile("eieio; tlbsync; ptesync":::"memory");
- spin_unlock(&native_tlbie_lock);
+ raw_spin_unlock(&native_tlbie_lock);
local_irq_restore(flags);
}
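/*
 * native_hpte_clear() holds the lock across the whole clear with
 * interrupts off, so the loop can use the bare __tlbie() without
 * re-taking native_tlbie_lock.
 */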
@@ ... @@ static void native_flush_hash_range(unsigned long number, int local)
int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
- spin_lock(&native_tlbie_lock);
+ raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
/* ... __tlbie() each VA in the batch ... */
}
asm volatile("eieio; tlbsync; ptesync":::"memory");
if (lock_tlbie)
- spin_unlock(&native_tlbie_lock);
+ raw_spin_unlock(&native_tlbie_lock);
}
local_irq_restore(flags);
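For reference, a minimal userspace sketch of the locking rule this patch preserves: lock only for the broadcast invalidate, and only on hardware that serializes it. flush_one(), has_lockless_tlbie, and the pthread spinlock are hypothetical stand-ins for the kernel's tlbie()/cpu_has_feature()/raw_spinlock_t; this is an illustration, not the kernel implementation.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for native_tlbie_lock (hypothetical, userspace only). */
static pthread_spinlock_t tlbie_lock;

/* Hypothetical flag mirroring cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE). */
static bool has_lockless_tlbie = false;

static void flush_one(unsigned long va, bool use_local)
{
	/* Lock only for the broadcast form on serializing hardware. */
	bool lock_tlbie = !has_lockless_tlbie;

	if (lock_tlbie && !use_local)
		pthread_spin_lock(&tlbie_lock);

	if (use_local)
		printf("tlbiel %#lx (CPU-local, no lock)\n", va);
	else
		printf("tlbie  %#lx (broadcast, serialized)\n", va);

	if (lock_tlbie && !use_local)
		pthread_spin_unlock(&tlbie_lock);
}

int main(void)
{
	pthread_spin_init(&tlbie_lock, PTHREAD_PROCESS_PRIVATE);
	flush_one(0x1000, true);   /* local: skips the lock */
	flush_one(0x2000, false);  /* broadcast: takes the lock */
	pthread_spin_destroy(&tlbie_lock);
	return 0;
}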