unsigned long flags;
unsigned long size;
- spinlock_t lock;
+ raw_spinlock_t lock;
/*
 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
 * PMB_NO_ENTRY to search for a free one
 */
memset(pmbe, 0, sizeof(struct pmb_entry));
- spin_lock_init(&pmbe->lock);
+ raw_spin_lock_init(&pmbe->lock);
pmbe->vpn = vpn;
pmbe->ppn = ppn;
{
unsigned long flags;
- spin_lock_irqsave(&pmbe->lock, flags);
+ raw_spin_lock_irqsave(&pmbe->lock, flags);
__set_pmb_entry(pmbe);
- spin_unlock_irqrestore(&pmbe->lock, flags);
+ raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */
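The reason the entry lock has to change type: on PREEMPT_RT a plain
spinlock_t becomes a sleeping lock, while raw_spinlock_t always spins
with interrupts disabled, which is what code that reprograms MMU/PMB
entries in atomic context needs. A minimal sketch of the resulting
pattern, with the lock and function names invented for illustration:

#include <linux/spinlock.h>

/* Illustrative only: a lock that must remain a real spinning lock
 * even on PREEMPT_RT, because its critical section runs with
 * interrupts off and cannot sleep.
 */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_atomic_update(void)
{
	unsigned long flags;

	/* Disables local interrupts and spins; never sleeps. */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... rewrite hardware mapping registers ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}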
return PTR_ERR(pmbe);
}
- spin_lock_irqsave(&pmbe->lock, flags);
+ raw_spin_lock_irqsave(&pmbe->lock, flags);
pmbe->size = pmb_sizes[i].size;
/*
 * Link adjacent entries that span multiple PMB
 * entries for easier tear-down.
 */
if (likely(pmbp)) {
- spin_lock(&pmbp->lock);
+ raw_spin_lock_nested(&pmbp->lock,
+ SINGLE_DEPTH_NESTING);
pmbp->link = pmbe;
- spin_unlock(&pmbp->lock);
+ raw_spin_unlock(&pmbp->lock);
}
pmbp = pmbe;
i--;
mapped++;
- spin_unlock_irqrestore(&pmbe->lock, flags);
+ raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
} while (size >= SZ_16M);
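The switch from spin_lock() to raw_spin_lock_nested() above is not
just a type change: the previous entry's lock is taken while the
current entry's lock, which belongs to the same lock class, is
already held, and without the SINGLE_DEPTH_NESTING annotation
lockdep would report a false self-deadlock. A hedged sketch of that
pattern, with struct and function names invented for illustration:

#include <linux/spinlock.h>

struct node {
	raw_spinlock_t lock;
	struct node *link;
};

static void link_nodes(struct node *prev, struct node *curr)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&curr->lock, flags);
	/* Second lock of the same class: tell lockdep this nesting
	 * is intentional and bounded to a single level.
	 */
	raw_spin_lock_nested(&prev->lock, SINGLE_DEPTH_NESTING);
	prev->link = curr;
	raw_spin_unlock(&prev->lock);
	raw_spin_unlock_irqrestore(&curr->lock, flags);
}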
continue;
}
- spin_lock_irqsave(&pmbe->lock, irqflags);
+ raw_spin_lock_irqsave(&pmbe->lock, irqflags);
for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
if (pmb_sizes[j].flag == size)
pmbe->size = pmb_sizes[j].size;
if (pmbp) {
- spin_lock(&pmbp->lock);
-
+ raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
/*
 * Compare the previous entry against the current one to
 * see if the entries span a contiguous mapping. If so,
 * set up the entry links accordingly. Compound mappings
 * are later coalesced.
 */
if (pmb_can_merge(pmbp, pmbe))
pmbp->link = pmbe;
-
- spin_unlock(&pmbp->lock);
+ raw_spin_unlock(&pmbp->lock);
}
pmbp = pmbe;
- spin_unlock_irqrestore(&pmbe->lock, irqflags);
+ raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
}
}
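For context, pmb_can_merge() only links entries whose mappings are
exactly adjacent. A sketch of such a contiguity check, trimmed to
the fields it needs (a sketch, not the patch's own code):

#include <linux/types.h>

struct map_entry {
	unsigned long vpn, ppn, size, flags;
};

/* b can merge onto a only if it starts exactly where a ends, both
 * virtually and physically, with identical flags.
 */
static bool can_merge(struct map_entry *a, struct map_entry *b)
{
	return b->vpn == (a->vpn + a->size) &&
	       b->ppn == (a->ppn + a->size) &&
	       b->flags == a->flags;
}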
/*
* Found it, now resize it.
*/
- spin_lock_irqsave(&pmbe->lock, flags);
+ raw_spin_lock_irqsave(&pmbe->lock, flags);
pmbe->size = SZ_16M;
pmbe->flags &= ~PMB_SZ_MASK;
__set_pmb_entry(pmbe);
- spin_unlock_irqrestore(&pmbe->lock, flags);
+ raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
read_unlock(&pmb_rwlock);
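The resize path also shows the section's overall lock hierarchy:
pmb_rwlock is read-held to pin the entry table while the per-entry
raw spinlock serializes the actual update. A sketch of that
two-level pattern under the same assumptions, names again invented
for illustration:

#include <linux/spinlock.h>

struct entry {
	raw_spinlock_t lock;
	unsigned long size, flags;
};

static DEFINE_RWLOCK(table_rwlock);

static void resize_entry(struct entry *e)
{
	unsigned long flags;

	read_lock(&table_rwlock);	/* entry cannot be torn down */
	raw_spin_lock_irqsave(&e->lock, flags);
	/* ... update size/flags and rewrite the hardware entry ... */
	raw_spin_unlock_irqrestore(&e->lock, flags);
	read_unlock(&table_rwlock);
}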