sched/preempt/x86: Fix voluntary preempt for x86
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index a3d9dc8c2c006a02cd7677a6e4a6ac10dac69910..de83b4eb164287db363328f87c0f8af216497a91 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -64,7 +64,11 @@ do { \
 } while (0)
 
 #else
-#define preempt_enable() preempt_enable_no_resched()
+#define preempt_enable() \
+do { \
+       barrier(); \
+       preempt_count_dec(); \
+} while (0)
 #define preempt_check_resched() do { } while (0)
 #endif
 
@@ -93,7 +97,11 @@ do { \
                __preempt_schedule_context(); \
 } while (0)
 #else
-#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
+#define preempt_enable_notrace() \
+do { \
+       barrier(); \
+       __preempt_count_dec(); \
+} while (0)
 #endif
 
 #else /* !CONFIG_PREEMPT_COUNT */
@@ -116,6 +124,26 @@ do { \
 
 #endif /* CONFIG_PREEMPT_COUNT */
 
+#ifdef MODULE
+/*
+ * Modules have no business playing preemption tricks.
+ */
+#undef sched_preempt_enable_no_resched
+#undef preempt_enable_no_resched
+#undef preempt_enable_no_resched_notrace
+#undef preempt_check_resched
+#endif
+
+#define preempt_set_need_resched() \
+do { \
+       set_preempt_need_resched(); \
+} while (0)
+#define preempt_fold_need_resched() \
+do { \
+       if (tif_need_resched()) \
+               set_preempt_need_resched(); \
+} while (0)
+
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 
 struct preempt_notifier;
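
For context, below is a minimal userspace sketch of the NEED_RESCHED-folding idea that the new preempt_set_need_resched()/preempt_fold_need_resched() helpers expose: the resched hint is kept as an inverted bit inside the preempt count, so a single decrement-and-test in preempt_enable() notices both "last disable level dropped" and "reschedule pending". This is a toy model only, not kernel code; all toy_* names are hypothetical, and the inverted 0x80000000 bit merely mirrors how the x86 per-cpu preempt count is laid out.

/*
 * Toy model of folding TIF_NEED_RESCHED into the preempt count.
 * Hypothetical names; build with any C compiler and run as a normal program.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_NEED_RESCHED 0x80000000u   /* stored inverted: bit clear => resched needed */

static unsigned int toy_preempt_count = TOY_NEED_RESCHED; /* bit set => nothing pending */
static bool toy_tif_need_resched;                         /* models TIF_NEED_RESCHED */

static void toy_set_preempt_need_resched(void)
{
	toy_preempt_count &= ~TOY_NEED_RESCHED;   /* clear the inverted bit */
}

static void toy_preempt_fold_need_resched(void)
{
	if (toy_tif_need_resched)                 /* mirrors preempt_fold_need_resched() */
		toy_set_preempt_need_resched();
}

static void toy_preempt_disable(void)
{
	toy_preempt_count++;
}

static void toy_preempt_enable(void)
{
	/* One decrement-and-test covers both conditions at once. */
	if (--toy_preempt_count == 0)
		printf("would call preempt_schedule() here\n");
}

int main(void)
{
	toy_preempt_disable();
	toy_tif_need_resched = true;     /* e.g. an interrupt marked the task for resched */
	toy_preempt_fold_need_resched(); /* fold the flag into the count */
	toy_preempt_enable();            /* count reaches 0 => reschedule detected */
	return 0;
}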