sched: Accumulate per-cfs_rq cpu usage and charge against bandwidth
author    Paul Turner <pjt@google.com>
          Thu, 21 Jul 2011 16:43:30 +0000 (09:43 -0700)
committer Ingo Molnar <mingo@elte.hu>
          Sun, 14 Aug 2011 10:03:26 +0000 (12:03 +0200)
Account bandwidth usage at the cfs_rq level rather than against the task_group
to which each cfs_rq belongs.  Whether we are tracking bandwidth on a given
cfs_rq is maintained under cfs_rq->runtime_enabled.

cfs_rqs which belong to a bandwidth-constrained task_group have their runtime
accounted via the update_curr() path, which withdraws bandwidth from the global
pool as needed.  Updates involving the global pool are protected by
cfs_bandwidth->lock; local runtime is protected by rq->lock.

This patch only assigns and tracks quota; no action is taken yet when a cfs_rq
exhausts its local runtime (i.e. when cfs_rq->runtime_remaining drops to or
below zero).

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Nikhil Rao <ncrao@google.com>
Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184757.179386821@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/sched.h
kernel/sched.c
kernel/sched_fair.c
kernel/sysctl.c
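
Before the per-file diff, a minimal userspace sketch of the two-level pool the
commit message describes: a global (per-task_group) quota pool that refills
local (per-cfs_rq) pools one slice at a time.  All names in the sketch
(global_pool, local_pool, assign_runtime, account_runtime) are illustrative
stand-ins, not kernel API; the authoritative code is in the kernel/sched_fair.c
hunks below.

/*
 * Userspace model of the per-cfs_rq bandwidth accounting described above.
 * Illustrative only; mirrors the shape of the kernel code, not its API.
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ULL
#define RUNTIME_INF   (~0ULL)

struct global_pool {            /* models struct cfs_bandwidth */
	uint64_t quota;         /* runtime per period, ns (RUNTIME_INF = unconstrained) */
	uint64_t runtime;       /* runtime left in the current period, ns */
};

struct local_pool {             /* models the per-cfs_rq side */
	int64_t runtime_remaining;  /* may go <= 0 between refills */
};

static const uint64_t slice_ns = 5000 * NSEC_PER_USEC;  /* default 5 ms slice */

/* Mirrors assign_cfs_rq_runtime(): pull up to one slice plus the current
 * deficit from the global pool (the kernel does this under cfs_b->lock). */
static void assign_runtime(struct global_pool *g, struct local_pool *l)
{
	/* positive: runtime_remaining <= 0 here, and the unsigned wraparound
	 * of the subtraction yields slice + deficit, as in the kernel */
	uint64_t min_amount = slice_ns - l->runtime_remaining;
	uint64_t amount = 0;

	if (g->quota == RUNTIME_INF)
		amount = min_amount;
	else if (g->runtime > 0) {
		amount = min_amount < g->runtime ? min_amount : g->runtime;
		g->runtime -= amount;
	}

	l->runtime_remaining += amount;
}

/* Mirrors __account_cfs_rq_runtime(): charge executed time against the
 * local pool and refill it once it runs dry. */
static void account_runtime(struct global_pool *g, struct local_pool *l,
			    uint64_t delta_exec)
{
	l->runtime_remaining -= delta_exec;
	if (l->runtime_remaining > 0)
		return;

	assign_runtime(g, l);
}

int main(void)
{
	/* 20 ms of quota for the period; the local pool starts empty */
	struct global_pool g = { 20000 * NSEC_PER_USEC, 20000 * NSEC_PER_USEC };
	struct local_pool l = { 0 };

	account_runtime(&g, &l, 0);     /* initial refill, as in account_cfs_rq_runtime(cfs_rq, 0) */

	for (int tick = 0; tick < 8; tick++) {
		account_runtime(&g, &l, 3000 * NSEC_PER_USEC);  /* ran for 3 ms */
		printf("tick %d: local %6lld us, global %6llu us\n", tick,
		       (long long)(l.runtime_remaining / 1000),
		       (unsigned long long)(g.runtime / 1000));
	}
	/* once the global pool hits zero, local runtime goes (and stays)
	 * negative -- in this patch nothing is throttled yet */
	return 0;
}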

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4ac2c0578e0ff9133c4c761fcf3291879ae646d4..bc6f5f2e24fa8108527f49ce5240d81a636c70cb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2040,6 +2040,10 @@ static inline void sched_autogroup_fork(struct signal_struct *sig) { }
 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
 #endif
 
+#ifdef CONFIG_CFS_BANDWIDTH
+extern unsigned int sysctl_sched_cfs_bandwidth_slice;
+#endif
+
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
diff --git a/kernel/sched.c b/kernel/sched.c
index ea6850d93b2a1c71998c3bcb24b7ef8250ebfac0..35561c63a49079af3c7abe4e3450131e1edcb8aa 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -251,7 +251,7 @@ struct cfs_bandwidth {
 #ifdef CONFIG_CFS_BANDWIDTH
        raw_spinlock_t lock;
        ktime_t period;
-       u64 quota;
+       u64 quota, runtime;
        s64 hierarchal_quota;
 #endif
 };
@@ -407,6 +407,7 @@ static inline u64 default_cfs_period(void);
 static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
        raw_spin_lock_init(&cfs_b->lock);
+       cfs_b->runtime = 0;
        cfs_b->quota = RUNTIME_INF;
        cfs_b->period = ns_to_ktime(default_cfs_period());
 }
@@ -9107,6 +9108,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
        raw_spin_lock_irq(&cfs_b->lock);
        cfs_b->period = ns_to_ktime(period);
        cfs_b->quota = quota;
+       cfs_b->runtime = quota;
        raw_spin_unlock_irq(&cfs_b->lock);
 
        for_each_possible_cpu(i) {
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f24f4171019d61267cfaafb474b7cf059243b88e..9502aa899f735e3d205185a05919fcf7c5ca04b8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -89,6 +89,20 @@ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
  */
 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
 
+#ifdef CONFIG_CFS_BANDWIDTH
+/*
+ * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
+ * each time a cfs_rq requests quota.
+ *
+ * Note: if the slice exceeds the runtime remaining (either because it has
+ * been consumed or because the quota is smaller than the slice), only the
+ * remaining available time is issued.
+ *
+ * default: 5 msec, units: microseconds
+ */
+unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
+#endif
+
 static const struct sched_class fair_sched_class;
 
 /**************************************************************
@@ -292,6 +306,8 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
+static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
+                                  unsigned long delta_exec);
 
 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
@@ -583,6 +599,8 @@ static void update_curr(struct cfs_rq *cfs_rq)
                cpuacct_charge(curtask, delta_exec);
                account_group_exec_runtime(curtask, delta_exec);
        }
+
+       account_cfs_rq_runtime(cfs_rq, delta_exec);
 }
 
 static inline void
@@ -1248,6 +1266,58 @@ static inline u64 default_cfs_period(void)
 {
        return 100000000ULL;
 }
+
+static inline u64 sched_cfs_bandwidth_slice(void)
+{
+       return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
+}
+
+static void assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+{
+       struct task_group *tg = cfs_rq->tg;
+       struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+       u64 amount = 0, min_amount;
+
+       /* note: this is a positive sum as runtime_remaining <= 0 */
+       min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
+
+       raw_spin_lock(&cfs_b->lock);
+       if (cfs_b->quota == RUNTIME_INF)
+               amount = min_amount;
+       else if (cfs_b->runtime > 0) {
+               amount = min(cfs_b->runtime, min_amount);
+               cfs_b->runtime -= amount;
+       }
+       raw_spin_unlock(&cfs_b->lock);
+
+       cfs_rq->runtime_remaining += amount;
+}
+
+static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
+                                    unsigned long delta_exec)
+{
+       if (!cfs_rq->runtime_enabled)
+               return;
+
+       cfs_rq->runtime_remaining -= delta_exec;
+       if (cfs_rq->runtime_remaining > 0)
+               return;
+
+       assign_cfs_rq_runtime(cfs_rq);
+}
+
+static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
+                                                  unsigned long delta_exec)
+{
+       if (!cfs_rq->runtime_enabled)
+               return;
+
+       __account_cfs_rq_runtime(cfs_rq, delta_exec);
+}
+
+#else
+static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
+                                    unsigned long delta_exec) {}
 #endif
 
 /**************************************************
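
As a hedged worked example of the refill arithmetic in assign_cfs_rq_runtime()
above (values illustrative, not from the patch):

#include <stdio.h>

/* Illustrative only: the min_amount computation for a cfs_rq that has
 * overrun its local pool by 1.5 ms. */
int main(void)
{
	long long slice_ns = 5000LL * 1000;              /* default 5 ms slice, in ns */
	long long runtime_remaining_ns = -1500LL * 1000; /* 1.5 ms overrun */

	/* positive, since runtime_remaining <= 0 on this path */
	long long min_amount = slice_ns - runtime_remaining_ns;

	/* 6,500,000 ns: one refill clears the deficit and leaves a full
	 * slice, so cfs_b->lock is taken roughly once per slice consumed. */
	printf("min_amount = %lld ns\n", min_amount);
	return 0;
}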
@@ -4266,8 +4336,13 @@ static void set_curr_task_fair(struct rq *rq)
 {
        struct sched_entity *se = &rq->curr->se;
 
-       for_each_sched_entity(se)
-               set_next_entity(cfs_rq_of(se), se);
+       for_each_sched_entity(se) {
+               struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+               set_next_entity(cfs_rq, se);
+               /* ensure bandwidth has been allocated on our new cfs_rq */
+               account_cfs_rq_runtime(cfs_rq, 0);
+       }
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 11d65b531e507a8b98d8411123bde6aff76bda97..2d2ecdcc8cdbb070999d46ae79b158dbbb28c8a3 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -379,6 +379,16 @@ static struct ctl_table kern_table[] = {
                .extra2         = &one,
        },
 #endif
+#ifdef CONFIG_CFS_BANDWIDTH
+       {
+               .procname       = "sched_cfs_bandwidth_slice_us",
+               .data           = &sysctl_sched_cfs_bandwidth_slice,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
+       },
+#endif
 #ifdef CONFIG_PROVE_LOCKING
        {
                .procname       = "prove_locking",
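
As a usage note, the table entry above surfaces the knob as
/proc/sys/kernel/sched_cfs_bandwidth_slice_us on kernels built with
CONFIG_CFS_BANDWIDTH.  A minimal sketch to read it back:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_cfs_bandwidth_slice_us", "r");
	unsigned int slice_us;

	if (!f) {
		perror("sched_cfs_bandwidth_slice_us");
		return 1;
	}
	if (fscanf(f, "%u", &slice_us) == 1)
		printf("bandwidth slice: %u us\n", slice_us);  /* default: 5000 */
	fclose(f);
	return 0;
}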