sched: s/do_each_thread/for_each_process_thread/ in core.c
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index c38d06c047757281b03992522723af980b54412b..f83c08c0dd87caaa97a8c0b3958320b65a2ed2ef 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -10,6 +10,7 @@
 #include "util/header.h"
 #include "util/session.h"
 #include "util/tool.h"
+#include "util/cloexec.h"
 
 #include "util/parse-options.h"
 #include "util/trace-event.h"
@@ -434,7 +435,8 @@ static int self_open_counters(void)
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
 
-       fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+       fd = sys_perf_event_open(&attr, 0, -1, -1,
+                                perf_event_open_cloexec_flag());
 
        if (fd < 0)
                pr_err("Error: sys_perf_event_open() syscall returned "
@@ -935,8 +937,8 @@ static int latency_switch_event(struct perf_sched *sched,
                return -1;
        }
 
-       sched_out = machine__findnew_thread(machine, 0, prev_pid);
-       sched_in = machine__findnew_thread(machine, 0, next_pid);
+       sched_out = machine__findnew_thread(machine, -1, prev_pid);
+       sched_in = machine__findnew_thread(machine, -1, next_pid);
 
        out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
        if (!out_events) {
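For reference: machine__findnew_thread() takes (machine, pid, tid), where pid
is the thread-group leader. The sched tracepoints only carry a tid, and
passing 0 as the pid filed every traced task under pid 0 (the idle/swapper
task); -1 means "leader unknown" and lets the thread map resolve it later.
The remaining hunks below make the same substitution. A hedged calling sketch
in the same spirit (lookup_sched_thread is a made-up helper name;
machine__findnew_thread and pr_err are the perf-tools symbols already used in
this file):

/* Resolve a traced task purely by its tid.  The second argument is the
 * thread-group leader; -1 says "unknown" rather than wrongly claiming the
 * task belongs to pid 0. */
static struct thread *lookup_sched_thread(struct machine *machine, pid_t tid)
{
	struct thread *thread = machine__findnew_thread(machine, -1, tid);

	if (thread == NULL)
		pr_err("failed to find or create thread %d\n", tid);

	return thread;
}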
@@ -979,7 +981,7 @@ static int latency_runtime_event(struct perf_sched *sched,
 {
        const u32 pid      = perf_evsel__intval(evsel, sample, "pid");
        const u64 runtime  = perf_evsel__intval(evsel, sample, "runtime");
-       struct thread *thread = machine__findnew_thread(machine, 0, pid);
+       struct thread *thread = machine__findnew_thread(machine, -1, pid);
        struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
        u64 timestamp = sample->time;
        int cpu = sample->cpu;
@@ -1012,7 +1014,7 @@ static int latency_wakeup_event(struct perf_sched *sched,
        struct thread *wakee;
        u64 timestamp = sample->time;
 
-       wakee = machine__findnew_thread(machine, 0, pid);
+       wakee = machine__findnew_thread(machine, -1, pid);
        atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, wakee))
@@ -1072,7 +1074,7 @@ static int latency_migrate_task_event(struct perf_sched *sched,
        if (sched->profile_cpu == -1)
                return 0;
 
-       migrant = machine__findnew_thread(machine, 0, pid);
+       migrant = machine__findnew_thread(machine, -1, pid);
        atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, migrant))
@@ -1290,7 +1292,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
                return -1;
        }
 
-       sched_in = machine__findnew_thread(machine, 0, next_pid);
+       sched_in = machine__findnew_thread(machine, -1, next_pid);
 
        sched->curr_thread[this_cpu] = sched_in;
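To exercise the touched paths after a rebuild, a run such as
"perf sched record -- sleep 1" followed by "perf sched latency" and
"perf sched map" goes through the latency_*_event() handlers and
map_switch_event() respectively; exact output will vary with the workload.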