#include <linux/smp.h>
#include <linux/sem.h>
+#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
extern unsigned long nr_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
-extern unsigned long this_cpu_load(void);
-
+extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);
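/*
 * Annotation (not part of the patch): a minimal usage sketch of the new
 * helper declared above. The wrapper function is hypothetical; the point
 * is that one call fetches both values from a single runqueue lookup,
 * replacing separate nr_iowait_cpu() + this_cpu_load() calls.
 */
static void sample_iowait_load_sketch(void)
{
	unsigned long nr_iowaiters, cpu_load;

	get_iowait_load(&nr_iowaiters, &cpu_load);
	/* ... feed nr_iowaiters/cpu_load into an idle-time heuristic ... */
}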
* associated with the operation is added to XXX_delay.
* XXX_delay contains the accumulated delay time in nanoseconds.
*/
- struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */
+ u64 blkio_start; /* Shared by blkio, swapin */
u64 blkio_delay; /* wait for sync block io completion */
u64 swapin_delay; /* wait for swapin block io completion */
u32 blkio_count; /* total count of the number of sync block */
/* io operations performed */
u32 swapin_count; /* total count of the number of swapin block */
/* io operations performed */
- struct timespec freepages_start, freepages_end;
+ u64 freepages_start;
u64 freepages_delay; /* wait for memory reclaim */
u32 freepages_count; /* total count of memory reclaim */
};
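/*
 * Annotation (not part of the patch): with start stamps stored as plain
 * u64 nanoseconds, ending a delay interval becomes a subtraction rather
 * than timespec arithmetic. A sketch, assuming the accounting code stamps
 * *start with a nanosecond clock such as local_clock():
 */
static void delayacct_end_sketch(u64 *start, u64 *total, u32 *count)
{
	u64 now = local_clock();

	if (now > *start)
		*total += now - *start;	/* accumulate delay in ns */
	(*count)++;
}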
#ifdef CONFIG_TREE_PREEMPT_RCU
struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#ifdef CONFIG_RCU_BOOST
- struct rt_mutex *rcu_boost_mutex;
-#endif /* #ifdef CONFIG_RCU_BOOST */
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info sched_info;
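/*
 * Annotation: the per-task rcu_boost_mutex pointer can go away because
 * the boosting rt_mutex now lives in the rcu_node structure itself,
 * roughly:
 *
 *	struct rcu_node {
 *		...
 *		struct rt_mutex boost_mtx;	// deboost via rt_mutex_unlock()
 *	};
 */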
* execve */
unsigned in_iowait:1;
- /* task may not gain privileges */
- unsigned no_new_privs:1;
-
/* Revert to default priority/policy when forking */
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
+ unsigned long atomic_flags; /* Flags needing atomic access. */
+
pid_t pid;
pid_t tgid;
} vtime_snap_whence;
#endif
unsigned long nvcsw, nivcsw; /* context switch counts */
- struct timespec start_time; /* monotonic time */
- struct timespec real_start_time; /* boot based time */
+ u64 start_time; /* monotonic time in nsec */
+ u64 real_start_time; /* boot based time in nsec */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt;
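/*
 * Annotation (not part of the patch): a sketch of the matching fork-side
 * change; copy_process() can now stamp both fields directly from the
 * nanosecond clock helpers:
 */
static void stamp_start_times_sketch(struct task_struct *p)
{
	p->start_time = ktime_get_ns();			/* CLOCK_MONOTONIC, ns */
	p->real_start_time = ktime_get_boot_ns();	/* CLOCK_BOOTTIME, ns */
}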
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
struct sysv_sem sysvsem;
+ struct sysv_shm sysvshm;
#endif
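/*
 * Annotation: the new member pairs with the list head that <linux/shm.h>
 * now carries, roughly:
 *
 *	struct sysv_shm {
 *		struct list_head shm_clist;	// segments created by this task
 *	};
 *
 * which lets exit_shm() walk only the segments this task created instead
 * of scanning the whole namespace.
 */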
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
struct rb_node *pi_waiters_leftmost;
/* Deadlock detection and priority inheritance handling */
struct rt_mutex_waiter *pi_blocked_on;
- /* Top pi_waiters task */
- struct task_struct *pi_top_task;
#endif
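/*
 * Annotation: the explicit top-waiter pointer is redundant once pi_waiters
 * is an rbtree with a cached leftmost node; the top waiter can be derived
 * instead, as in this sketch (the helper name is illustrative):
 */
static inline struct rt_mutex_waiter *
top_pi_waiter_sketch(struct task_struct *p)
{
	return rb_entry(p->pi_waiters_leftmost,
			struct rt_mutex_waiter, pi_tree_entry);
}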
#ifdef CONFIG_DEBUG_MUTEXES
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
- struct memcg_batch_info {
- int do_batch; /* incremented when batch uncharge started */
- struct mem_cgroup *memcg; /* target memcg of uncharge */
- unsigned long nr_pages; /* uncharged usage */
- unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
- } memcg_batch;
unsigned int memcg_kmem_skip_account;
struct memcg_oom_info {
struct mem_cgroup *memcg;
current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
+/* Per-process atomic flags. */
+#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges (bit number, used with test_bit/set_bit). */
+
+static inline bool task_no_new_privs(struct task_struct *p)
+{
+ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
+}
+
+static inline void task_set_no_new_privs(struct task_struct *p)
+{
+ set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
+}
+
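/*
 * Annotation (not part of the patch): usage sketch. Because set_bit() and
 * test_bit() are atomic, the prctl() handler can flip the flag without
 * taking task_lock(), roughly:
 *
 *	case PR_SET_NO_NEW_PRIVS:
 *		if (arg2 != 1 || arg3 || arg4 || arg5)
 *			return -EINVAL;
 *		task_set_no_new_privs(current);
 *		break;
 *	case PR_GET_NO_NEW_PRIVS:
 *		if (arg2 || arg3 || arg4 || arg5)
 *			return -EINVAL;
 *		return task_no_new_privs(current);
 */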
/*
* task->jobctl flags
*/
#ifdef CONFIG_TREE_PREEMPT_RCU
p->rcu_blocked_node = NULL;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#ifdef CONFIG_RCU_BOOST
- p->rcu_boost_mutex = NULL;
-#endif /* #ifdef CONFIG_RCU_BOOST */
INIT_LIST_HEAD(&p->rcu_node_entry);
}
static inline int sas_ss_flags(unsigned long sp)
{
- return (current->sas_ss_size == 0 ? SS_DISABLE
- : on_sig_stack(sp) ? SS_ONSTACK : 0);
+ if (!current->sas_ss_size)
+ return SS_DISABLE;
+
+ return on_sig_stack(sp) ? SS_ONSTACK : 0;
}
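/*
 * Annotation: behavior is unchanged, only the control flow is flattened.
 * The three possible results, e.g. as reported back through
 * do_sigaltstack()'s oss.ss_flags = sas_ss_flags(sp), are roughly:
 *
 *	no altstack installed			-> SS_DISABLE
 *	altstack installed, sp inside it	-> SS_ONSTACK
 *	altstack installed, sp outside it	-> 0
 */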
static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
/*
* Polling state must be visible before we test NEED_RESCHED,
- * paired by resched_task()
+ * paired by resched_curr()
*/
smp_mb__after_atomic();
/*
* Polling state must be visible before we test NEED_RESCHED,
- * paired by resched_task()
+ * paired by resched_curr()
*/
smp_mb__after_atomic();
* TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
* fold.
*/
- smp_mb(); /* paired with resched_task() */
+ smp_mb(); /* paired with resched_curr() */
preempt_fold_need_resched();
}
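/*
 * Annotation: a simplified sketch of the pairing these comments describe
 * (resched_curr() lives in kernel/sched/core.c):
 *
 *	polling/idle side			resched_curr() side
 *	-----------------			-------------------
 *	set TIF_POLLING_NRFLAG			set TIF_NEED_RESCHED
 *	smp_mb__after_atomic()			smp_mb() (or atomic RmW)
 *	test TIF_NEED_RESCHED			test TIF_POLLING_NRFLAG
 *
 * The barriers ensure at least one side observes the other's store, so a
 * cross-CPU reschedule either skips the IPI (the poller will notice
 * TIF_NEED_RESCHED itself) or sends it.
 */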
#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
-extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
-
-static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
-{
-}
#endif /* CONFIG_MEMCG */
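/*
 * Annotation: the extern/stub pair leaves the header; the remaining logic
 * becomes a static helper private to kernel/fork.c, roughly:
 */
static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}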
static inline unsigned long task_rlimit(const struct task_struct *tsk,