Merge branch 'linus' into sched/urgent
diff --git a/kernel/sched.c b/kernel/sched.c
index bfb8ad8ed1717bf95f82ddf7ea8b5b40bb7fbe7b..3aaa5c8cb4214586bd283eedc2375baa3dc739c0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -312,12 +312,15 @@ static DEFINE_SPINLOCK(task_group_lock);
 #endif
 
 /*
- * A weight of 0, 1 or ULONG_MAX can cause arithmetics problems.
+ * A weight of 0 or 1 can cause arithmetics problems.
+ * A weight of a cfs_rq is the sum of weights of which entities
+ * are queued on this cfs_rq, so a weight of a entity should not be
+ * too large, so as the shares value of a task group.
  * (The default weight is 1024 - so there's no practical
  *  limitation from this.)
  */
 #define MIN_SHARES	2
-#define MAX_SHARES	(ULONG_MAX - 1)
+#define MAX_SHARES	(1UL << 18)
 
 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 #endif
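
Context for the MAX_SHARES change: calc_delta_mine() (see the hunk further below) begins by computing delta_exec * weight in a u64, so an entity weight anywhere near ULONG_MAX overflows on 64-bit before the scaled division even starts; capping shares at 2^18 leaves ample headroom. A toy userspace demonstration, not kernel code (the 1 ms delta_exec value is an arbitrary assumption for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t delta_exec = 1000000;     /* 1ms of runtime, in ns */
        uint64_t huge   = UINT64_MAX - 1;  /* old MAX_SHARES-scale weight */
        uint64_t capped = 1UL << 18;       /* new MAX_SHARES */

        /* delta_exec * weight is the first step of calc_delta_mine();
         * a weight near ULONG_MAX wraps the u64 immediately, while
         * 2^18 leaves ~45 bits of headroom for delta_exec. */
        printf("huge:   %llu (wrapped)\n",
               (unsigned long long)(delta_exec * huge));
        printf("capped: %llu\n",
               (unsigned long long)(delta_exec * capped));
        return 0;
}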
@@ -1124,6 +1127,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+#ifdef CONFIG_SMP
 static void hotplug_hrtick_disable(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -1179,6 +1183,7 @@ static void init_hrtick(void)
 {
 	hotcpu_notifier(hotplug_hrtick, 0);
 }
+#endif /* CONFIG_SMP */
 
 static void init_rq_hrtick(struct rq *rq)
 {
@@ -1337,8 +1342,13 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 {
 	u64 tmp;
 
-	if (!lw->inv_weight)
-		lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)/(lw->weight+1);
+	if (!lw->inv_weight) {
+		if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
+			lw->inv_weight = 1;
+		else
+			lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
+				/ (lw->weight+1);
+	}
 
 	tmp = (u64)delta_exec * weight;
 	/*
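
For readers unfamiliar with the load-weight arithmetic: lw->inv_weight caches a fixed-point reciprocal, roughly 2^32 / lw->weight, so the hot path can replace a division by lw->weight with a multiply and a shift; the new branch pins inv_weight to 1 when a 64-bit weight already exceeds WMULT_CONST, where the reciprocal formula would degenerate. A minimal standalone sketch of the multiply-and-shift idea (the kernel's overflow handling of the intermediate product is omitted, and WMULT_CONST below assumes the 64-bit definition of 1UL << 32; the 32-bit build uses ~0UL):

#include <stdint.h>
#include <stdio.h>

#define WMULT_CONST (1ULL << 32)        /* fixed-point scale, i.e. "1.0" */
#define WMULT_SHIFT 32

/* Approximate delta * weight / lw_weight using a cached reciprocal. */
static uint64_t calc_delta(uint64_t delta, unsigned long weight,
                           unsigned long lw_weight, uint64_t *inv_weight)
{
        if (!*inv_weight)       /* cache ~2^32 / lw_weight once */
                *inv_weight = 1 + (WMULT_CONST - lw_weight / 2)
                        / (lw_weight + 1);

        return (delta * weight * *inv_weight) >> WMULT_SHIFT;
}

int main(void)
{
        uint64_t inv = 0;

        /* 1000 ns at weight 1024 on a queue of total weight 2048:
         * the exact answer is 500; the fixed-point path yields 499. */
        printf("%llu\n",
               (unsigned long long)calc_delta(1000, 1024, 2048, &inv));
        return 0;
}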
@@ -4159,12 +4169,10 @@ need_resched_nonpreemptible:
 	clear_tsk_need_resched(prev);
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
-				signal_pending(prev))) {
+		if (unlikely(signal_pending_state(prev->state, prev)))
 			prev->state = TASK_RUNNING;
-		} else {
+		else
 			deactivate_task(rq, prev, 1);
-		}
 		switch_count = &prev->nvcsw;
 	}
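
signal_pending_state() folds the open-coded TASK_INTERRUPTIBLE test and the TASK_WAKEKILL case into one helper, closing the window where a killable sleeper could be deactivated despite a pending SIGKILL. Its shape in include/linux/sched.h is roughly the following (quoted from memory, so treat it as a sketch rather than the authoritative source):

static inline int signal_pending_state(long state, struct task_struct *p)
{
        /* Only interruptible or killable sleeps can be punctured. */
        if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
                return 0;
        if (!signal_pending(p))
                return 0;

        /* Interruptible: any signal; killable: only a fatal one. */
        return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}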
@@ -4390,22 +4398,20 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 				     signal_pending(current)) ||
 				    (state == TASK_KILLABLE &&
 				     fatal_signal_pending(current))) {
-				__remove_wait_queue(&x->wait, &wait);
-				return -ERESTARTSYS;
+				timeout = -ERESTARTSYS;
+				break;
 			}
 			__set_current_state(state);
 			spin_unlock_irq(&x->wait.lock);
 			timeout = schedule_timeout(timeout);
 			spin_lock_irq(&x->wait.lock);
-			if (!timeout) {
-				__remove_wait_queue(&x->wait, &wait);
-				return timeout;
-			}
-		} while (!x->done);
+		} while (!x->done && timeout);
 		__remove_wait_queue(&x->wait, &wait);
+		if (!x->done)
+			return timeout;
 	}
 	x->done--;
-	return timeout;
+	return timeout ?: 1;
 }
 
 static long __sched
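
The restructured loop exits in one place, so __remove_wait_queue() no longer has to be duplicated on three exit paths, and the final `timeout ?: 1` (a GNU C extension: `a ?: b` means `a ? a : b` with `a` evaluated once) guarantees that a wait which completes just as the timeout expires still reports success. A trivial userspace illustration of the operator (builds with gcc or clang, both of which accept the extension):

#include <stdio.h>

int main(void)
{
        long timeout = 0;

        /* x ?: y yields x when x is nonzero, otherwise y */
        printf("%ld\n", timeout ?: 1L);         /* prints 1 */
        timeout = 25;
        printf("%ld\n", timeout ?: 1L);         /* prints 25 */
        return 0;
}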
@@ -6871,7 +6877,12 @@ static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
 {
-	default_relax_domain_level = simple_strtoul(str, NULL, 0);
+	unsigned long val;
+
+	val = simple_strtoul(str, NULL, 0);
+	if (val < SD_LV_MAX)
+		default_relax_domain_level = val;
+
 	return 1;
 }
 __setup("relax_domain_level=", setup_relax_domain_level);
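
This is the standard validate-before-commit pattern for boot parameters: parse into a temporary and only store values inside the legal range, so an out-of-range relax_domain_level= no longer corrupts the default. A userspace analogue of the same pattern (the SD_LV_MAX value of 9 below is an arbitrary stand-in, not the kernel's enum value):

#include <stdio.h>
#include <stdlib.h>

#define SD_LV_MAX 9     /* stand-in upper bound for the example */

static int default_relax_domain_level = -1;

static void setup_relax_domain_level(const char *str)
{
        unsigned long val = strtoul(str, NULL, 0);

        if (val < SD_LV_MAX)    /* silently ignore out-of-range input */
                default_relax_domain_level = val;
}

int main(void)
{
        setup_relax_domain_level("3");
        printf("%d\n", default_relax_domain_level);     /* 3 */
        setup_relax_domain_level("9999");
        printf("%d\n", default_relax_domain_level);     /* still 3 */
        return 0;
}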
@@ -7229,6 +7240,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 {
 }
 
+/*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+	ndoms_cur = 0;
+	if (doms_cur != &fallback_doms)
+		kfree(doms_cur);
+	doms_cur = &fallback_doms;
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
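
free_sched_domains() relies on the invariant that doms_cur points either at the static fallback_doms or at an array kmalloc'd by the domain-partitioning code; the comparison against &fallback_doms is what keeps kfree() away from static storage. A compressed userspace model of that ownership rule (types simplified to int for illustration):

#include <stdlib.h>

static int fallback_doms;                  /* static fallback, never freed */
static int *doms_cur = &fallback_doms;     /* current set: static or heap */
static int ndoms_cur;

static void free_sched_domains(void)
{
        ndoms_cur = 0;
        if (doms_cur != &fallback_doms) /* only heap allocations are freed */
                free(doms_cur);
        doms_cur = &fallback_doms;
}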
@@ -7376,6 +7399,7 @@ int arch_reinit_sched_domains(void)
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
 	detach_destroy_domains(&cpu_online_map);
+	free_sched_domains();
 	err = arch_init_sched_domains(&cpu_online_map);
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
@@ -7461,6 +7485,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		detach_destroy_domains(&cpu_online_map);
+		free_sched_domains();
 		return NOTIFY_OK;
 
 	case CPU_UP_CANCELED:
@@ -7479,8 +7504,16 @@ static int update_sched_domains(struct notifier_block *nfb,
 		return NOTIFY_DONE;
 	}
 
+#ifndef CONFIG_CPUSETS
+	/*
+	 * Create default domain partitioning if cpusets are disabled.
+	 * Otherwise we let cpusets rebuild the domains based on the
+	 * current setup.
+	 */
+
 	/* The hotplug lock is already held by cpu_up/cpu_down */
 	arch_init_sched_domains(&cpu_online_map);
+#endif
 
 	return NOTIFY_OK;
 }
@@ -7620,7 +7653,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 	else
 		rt_se->rt_rq = parent->my_q;
 
-	rt_se->rt_rq = &rq->rt;
 	rt_se->my_q = rt_rq;
 	rt_se->parent = parent;
 	INIT_LIST_HEAD(&rt_se->run_list);
@@ -8342,7 +8374,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-	struct task_group *tgi, *parent = tg->parent;
+	struct task_group *tgi, *parent = tg ? tg->parent : NULL;
 	unsigned long total = 0;
 
 	if (!parent) {