perf machine: Fix the value used for unknown pids
Author: Adrian Hunter <adrian.hunter@intel.com>
Mon, 14 Jul 2014 10:02:25 +0000 (13:02 +0300)
Committer: Arnaldo Carvalho de Melo <acme@redhat.com>
Wed, 16 Jul 2014 20:57:33 +0000 (17:57 -0300)
The value used for unknown pids cannot be zero because that is used by
the "idle" task.

Use -1 instead.  Also handle the unknown pid case when creating map
groups.

Note that threads with an unknown pid should not occur, because fork (or
synthesized) events precede the thread's existence.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1405332185-4050-2-git-send-email-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/builtin-sched.c
tools/perf/util/machine.c
tools/perf/util/session.c
tools/perf/util/thread.c

index c38d06c047757281b03992522723af980b54412b..b7f555add0c8d21664d5dae6a62d90b0e14fe5d6 100644 (file)
@@ -935,8 +935,8 @@ static int latency_switch_event(struct perf_sched *sched,
                return -1;
        }
 
-       sched_out = machine__findnew_thread(machine, 0, prev_pid);
-       sched_in = machine__findnew_thread(machine, 0, next_pid);
+       sched_out = machine__findnew_thread(machine, -1, prev_pid);
+       sched_in = machine__findnew_thread(machine, -1, next_pid);
 
        out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
        if (!out_events) {
@@ -979,7 +979,7 @@ static int latency_runtime_event(struct perf_sched *sched,
 {
        const u32 pid      = perf_evsel__intval(evsel, sample, "pid");
        const u64 runtime  = perf_evsel__intval(evsel, sample, "runtime");
-       struct thread *thread = machine__findnew_thread(machine, 0, pid);
+       struct thread *thread = machine__findnew_thread(machine, -1, pid);
        struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
        u64 timestamp = sample->time;
        int cpu = sample->cpu;
@@ -1012,7 +1012,7 @@ static int latency_wakeup_event(struct perf_sched *sched,
        struct thread *wakee;
        u64 timestamp = sample->time;
 
-       wakee = machine__findnew_thread(machine, 0, pid);
+       wakee = machine__findnew_thread(machine, -1, pid);
        atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, wakee))
@@ -1072,7 +1072,7 @@ static int latency_migrate_task_event(struct perf_sched *sched,
        if (sched->profile_cpu == -1)
                return 0;
 
-       migrant = machine__findnew_thread(machine, 0, pid);
+       migrant = machine__findnew_thread(machine, -1, pid);
        atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, migrant))
@@ -1290,7 +1290,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
                return -1;
        }
 
-       sched_in = machine__findnew_thread(machine, 0, next_pid);
+       sched_in = machine__findnew_thread(machine, -1, next_pid);
 
        sched->curr_thread[this_cpu] = sched_in;
 
index e9b943acaa5e872a3adbec4e328859a950063fce..5b8087728f285b3001cb01fc19cc9bcf9a498f63 100644 (file)
@@ -34,7 +34,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
                return -ENOMEM;
 
        if (pid != HOST_KERNEL_ID) {
-               struct thread *thread = machine__findnew_thread(machine, 0,
+               struct thread *thread = machine__findnew_thread(machine, -1,
                                                                pid);
                char comm[64];
 
@@ -286,7 +286,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
         * the full rbtree:
         */
        if (machine->last_match && machine->last_match->tid == tid) {
-               if (pid && pid != machine->last_match->pid_)
+               if (pid != -1 && pid != machine->last_match->pid_)
                        machine->last_match->pid_ = pid;
                return machine->last_match;
        }
@@ -297,7 +297,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
 
                if (th->tid == tid) {
                        machine->last_match = th;
-                       if (pid && pid != th->pid_)
+                       if (pid != -1 && pid != th->pid_)
                                th->pid_ = pid;
                        return th;
                }
index 64a186edc7be82e006f22de95e2d40b51bd4e29d..c2f4ca91746903d0b0f094d69c548ddefac6d4f1 100644 (file)
@@ -1083,13 +1083,14 @@ void perf_event_header__bswap(struct perf_event_header *hdr)
 
 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
 {
-       return machine__findnew_thread(&session->machines.host, 0, pid);
+       return machine__findnew_thread(&session->machines.host, -1, pid);
 }
 
 static struct thread *perf_session__register_idle_thread(struct perf_session *session)
 {
-       struct thread *thread = perf_session__findnew(session, 0);
+       struct thread *thread;
 
+       thread = machine__findnew_thread(&session->machines.host, 0, 0);
        if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
                pr_err("problem inserting idle task.\n");
                thread = NULL;
index 2fde0d5e40b5f4ef409b1be27197043bbf740a3f..7a32f447a8e78937863ff66da6f4e0f0e1fd6b3f 100644 (file)
@@ -13,7 +13,7 @@ int thread__init_map_groups(struct thread *thread, struct machine *machine)
        struct thread *leader;
        pid_t pid = thread->pid_;
 
-       if (pid == thread->tid) {
+       if (pid == thread->tid || pid == -1) {
                thread->mg = map_groups__new();
        } else {
                leader = machine__findnew_thread(machine, pid, pid);