DEBUG: sched/fair: Fix sched_load_avg_cpu events for task_groups
[firefly-linux-kernel-4.4.55.git]
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ad1507e420e807d6f34809d9889a5cc3cc5226e4..3331f453a17f00716ddb20366d535f2fafcb192c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2757,7 +2757,9 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
        cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-       trace_sched_load_avg_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
+       /* Trace CPU load, unless cfs_rq belongs to a non-root task_group */
+       if (cfs_rq == &rq_of(cfs_rq)->cfs)
+               trace_sched_load_avg_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
 
        return decayed || removed;
 }
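
The guard works because each CPU's root cfs_rq is embedded in its struct rq as rq->cfs, whereas the cfs_rqs backing task_groups are allocated separately; a pointer comparison against &rq_of(cfs_rq)->cfs therefore identifies the root runqueue. Below is a minimal sketch of how that test could be factored into a helper, assuming only the existing rq_of() accessor in fair.c; the name cfs_rq_is_root() is hypothetical and not part of this patch:

/* Hypothetical helper, illustration only: true for the per-CPU root cfs_rq. */
static inline bool cfs_rq_is_root(struct cfs_rq *cfs_rq)
{
	/* The root cfs_rq is embedded directly in its runqueue (rq->cfs). */
	return cfs_rq == &rq_of(cfs_rq)->cfs;
}

With such a helper the hunk above would read "if (cfs_rq_is_root(cfs_rq))"; the patch simply open-codes the comparison, so the sched_load_avg_cpu tracepoint fires only for each CPU's root cfs_rq and no longer for every task_group cfs_rq on that CPU.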