return mm->cpu_vm_mask_var;
}
-
+/* Return the name for an anonymous mapping or NULL for a file-backed mapping */
+static inline const char __user *vma_get_anon_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_file)
+		return NULL;
+
+	return vma->shared.anon_name;
+}
+
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+/*
+ * Memory barriers to keep this state in sync are graciously provided by
+ * the page table locks, outside of which no page table modifications happen.
+ * The barriers below prevent the compiler from re-ordering the instructions
+ * around the memory barriers that are already present in the code.
+ */
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	return mm->tlb_flush_pending;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+	mm->tlb_flush_pending = true;
+
+	/*
+	 * Guarantee that the tlb_flush_pending store does not leak into the
+	 * critical section updating the page tables
+	 */
+	smp_mb__before_spinlock();
+}
+/* Clearing is done after a TLB flush, which also provides a barrier. */
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	mm->tlb_flush_pending = false;
+}
+#else
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	return false;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+#endif
+
#endif /* _LINUX_MM_TYPES_H */
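
For context, the contract vma_get_anon_name() exports is: file-backed VMAs
return NULL, anonymous VMAs return the user-space name pointer. The sketch
below is a minimal, compilable userspace mock of that contract, not kernel
code: the trimmed-down struct definitions, the plain const char * standing in
for the __user pointer, and the example name "java_heap" are all
illustration-only assumptions.

#include <stdio.h>

struct file;				/* opaque stand-in, as in the kernel */

struct vm_area_struct {
	struct file *vm_file;		/* non-NULL for file-backed VMAs */
	struct {
		const char *anon_name;	/* stands in for the __user pointer */
	} shared;
};

static inline const char *vma_get_anon_name(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		return NULL;

	return vma->shared.anon_name;
}

int main(void)
{
	struct vm_area_struct anon = {
		.vm_file = NULL,
		.shared = { .anon_name = "java_heap" },
	};
	const char *name = vma_get_anon_name(&anon);

	/* A /proc/<pid>/maps-style consumer would print "[anon:<name>]". */
	printf("[anon:%s]\n", name ? name : "?");
	return 0;
}

Because the helper returns NULL whenever vm_file is set, existing file-backed
paths are unaffected; only consumers that want to label anonymous mappings
need to check the return value.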
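The ordering contract of the tlb_flush_pending helpers can also be
illustrated outside the kernel. The following is an assumption-laden
userspace sketch, not the kernel implementation: C11 release/acquire atomics
stand in for barrier() and smp_mb__before_spinlock(), and the *_mock names
are hypothetical. It demonstrates only the protocol the comments above
describe: the pending flag is published before the page-table update begins
and is cleared only after the TLB flush has completed.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mm_struct {
	atomic_bool tlb_flush_pending;	/* mock of the patched field */
};

/* Writer side: publish the pending flag before touching page tables. */
static void set_tlb_flush_pending_mock(struct mm_struct *mm)
{
	atomic_store_explicit(&mm->tlb_flush_pending, true,
			      memory_order_release);
}

/* Reader side: decide whether a racing flush may still be outstanding. */
static bool mm_tlb_flush_pending_mock(struct mm_struct *mm)
{
	return atomic_load_explicit(&mm->tlb_flush_pending,
				    memory_order_acquire);
}

/* Cleared only after the TLB flush itself has completed. */
static void clear_tlb_flush_pending_mock(struct mm_struct *mm)
{
	atomic_store_explicit(&mm->tlb_flush_pending, false,
			      memory_order_release);
}

int main(void)
{
	struct mm_struct mm;

	atomic_init(&mm.tlb_flush_pending, false);

	set_tlb_flush_pending_mock(&mm);
	/* ...update page tables under the PTL, then flush the TLB... */
	printf("pending=%d\n", mm_tlb_flush_pending_mock(&mm));
	clear_tlb_flush_pending_mock(&mm);
	printf("pending=%d\n", mm_tlb_flush_pending_mock(&mm));
	return 0;
}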