10 #include "parse-events.h"
11 #include "hists_common.h"
/*
 * NOTE(review): fragment of a struct definition -- the enclosing
 * "struct sample {" line and its other members (pid, ip, map, sym,
 * all referenced by the code below) are not visible in this chunk.
 */
16 struct thread *thread;
21 /* For the numbers, see hists_common.c */
/*
 * Samples fed to every evsel (see add_hist_entries() below).  The hist
 * entries built from these are the ones expected to be paired up by
 * hists__match()/hists__link().  The thread/map/sym fields are written
 * back by add_hist_entries() once each sample has been resolved, so the
 * validators can recognize the corresponding hist entries.
 * NOTE(review): the closing "};" of this initializer is not visible in
 * this chunk.
 */
22 static struct sample fake_common_samples[] = {
23 /* perf [kernel] schedule() */
24 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
25 /* perf [perf] main() */
26 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
27 /* perf [perf] cmd_record() */
28 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
29 /* bash [bash] xmalloc() */
30 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
31 /* bash [libc] malloc() */
32 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, },
/*
 * Per-evsel distinct samples: each evsel gets one five-element row,
 * selected by an index `i` in add_hist_entries() (the declaration and
 * stepping of `i` are in lines not visible here).  thread/map/sym are
 * filled in by add_hist_entries() after resolution, like the common
 * samples above.
 * NOTE(review): the inner "{ ... }, { ... }" braces that separate the
 * two rows, and the final "};", are not visible in this chunk.
 */
35 static struct sample fake_samples[][5] = {
37 /* perf [perf] run_command() */
38 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
39 /* perf [libc] malloc() */
40 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
41 /* perf [kernel] page_fault() */
42 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
43 /* perf [kernel] sys_perf_event_open() */
44 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
45 /* bash [libc] free() */
46 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_FREE, },
49 /* perf [libc] free() */
50 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
51 /* bash [libc] malloc() */
52 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
53 /* bash [bash] xfree() */
54 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XFREE, },
55 /* bash [libc] realloc() */
56 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_REALLOC, },
57 /* bash [kernel] page_fault() */
58 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
62 static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
/*
 * Feed the fake samples into each evsel's hists: every evsel receives
 * all of fake_common_samples plus one row of fake_samples.  The
 * resolved thread/map/sym of every sample are written back into the
 * arrays so the validators below can recognize the resulting entries.
 *
 * NOTE(review): this chunk omits several lines of the function -- the
 * opening brace, the declarations of the loop indices `i`/`k`, the
 * remainder of the two `event` initializers, the error checks after
 * perf_event__preprocess_sample()/__hists__add_entry() (the pr_debug
 * at the bottom presumably sits on that error path -- confirm against
 * the full file), the closing braces, and the return statements.
 */
64 struct perf_evsel *evsel;
65 struct addr_location al;
66 struct hist_entry *he;
67 struct perf_sample sample = { .period = 1, };
71 * each evsel will have 10 samples - 5 common and 5 distinct.
72 * However the second evsel also has a collapsed entry for
73 * "bash [libc] malloc" so total 9 entries will be in the tree.
75 evlist__for_each(evlist, evsel) {
/* first pass: the samples shared by all evsels */
76 for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
77 const union perf_event event = {
79 .misc = PERF_RECORD_MISC_USER,
83 sample.pid = fake_common_samples[k].pid;
/* single-threaded fakes: tid mirrors pid */
84 sample.tid = fake_common_samples[k].pid;
85 sample.ip = fake_common_samples[k].ip;
86 if (perf_event__preprocess_sample(&event, machine, &al,
90 he = __hists__add_entry(&evsel->hists, &al, NULL,
91 NULL, NULL, 1, 1, 0, true);
/* remember the resolved location for the validators below */
95 fake_common_samples[k].thread = al.thread;
96 fake_common_samples[k].map = al.map;
97 fake_common_samples[k].sym = al.sym;
/* second pass: this evsel's own row of distinct samples */
100 for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
101 const union perf_event event = {
103 .misc = PERF_RECORD_MISC_USER,
107 sample.pid = fake_samples[i][k].pid;
108 sample.tid = fake_samples[i][k].pid;
109 sample.ip = fake_samples[i][k].ip;
110 if (perf_event__preprocess_sample(&event, machine, &al,
114 he = __hists__add_entry(&evsel->hists, &al, NULL,
115 NULL, NULL, 1, 1, 0, true);
119 fake_samples[i][k].thread = al.thread;
120 fake_samples[i][k].map = al.map;
121 fake_samples[i][k].sym = al.sym;
129 pr_debug("Not enough memory for adding a hist entry\n");
/*
 * Linear scan of samples[0..nr_samples) for an entry whose resolved
 * thread/map (and, presumably, symbol `s` -- the condition is cut
 * mid-expression in this chunk) match the given triple.
 * NOTE(review): the tail of the comparison, the pointer advance, and
 * the return statements are not visible here; callers below treat a
 * non-zero result as "found".
 */
133 static int find_sample(struct sample *samples, size_t nr_samples,
134 struct thread *t, struct map *m, struct symbol *s)
136 while (nr_samples--) {
137 if (samples->thread == t && samples->map == m &&
145 static int __validate_match(struct hists *hists)
/*
 * Verify the outcome of hists__match() on one hists tree: walk the
 * entries and check that the paired ones all originate from
 * fake_common_samples, and that their total equals the array size.
 *
 * NOTE(review): this chunk omits the opening brace, the declaration of
 * `count`, the loop construct around the rb_next() walk, the lines
 * between the find_sample() test and the "Can't find" pr_debug (in
 * context that message presumably sits on the not-found branch --
 * confirm against the full file), and the return statements.
 */
148 struct rb_root *root;
149 struct rb_node *node;
152 * Only entries from fake_common_samples should have a pair.
154 if (sort__need_collapse)
155 root = &hists->entries_collapsed;
/* (else arm) entries_in is already a pointer, hence no '&' here */
157 root = hists->entries_in;
159 node = rb_first(root);
161 struct hist_entry *he;
163 he = rb_entry(node, struct hist_entry, rb_node_in);
165 if (hist_entry__has_pairs(he)) {
166 if (find_sample(fake_common_samples,
167 ARRAY_SIZE(fake_common_samples),
168 he->thread, he->ms.map, he->ms.sym)) {
171 pr_debug("Can't find the matched entry\n");
176 node = rb_next(node);
/* every common sample, and nothing else, must have been paired */
179 if (count != ARRAY_SIZE(fake_common_samples)) {
180 pr_debug("Invalid count for matched entries: %zd of %zd\n",
181 count, ARRAY_SIZE(fake_common_samples));
/*
 * Run the match check on both the leader and the other hists;
 * short-circuits on the first failing side.
 * NOTE(review): the function's braces are not visible in this chunk.
 */
188 static int validate_match(struct hists *leader, struct hists *other)
190 return __validate_match(leader) || __validate_match(other);
193 static int __validate_link(struct hists *hists, int idx)
/*
 * Verify the outcome of hists__link() on one hists tree.  `idx` says
 * which side this is -- 0 for the leader, 1 for the other -- and also
 * selects the fake_samples row these entries were built from.
 *
 * NOTE(review): this chunk omits the opening brace, the declaration of
 * `count`, the loop construct around the rb_next() walk, the
 * count/count_pair/count_dummy increments, the lines between the
 * find_sample() test and the "should have pair" pr_debug (presumably
 * its else/error branch -- confirm against the full file), the
 * branching that applies the leader checks vs. the other-hists checks
 * (the last three `if`s below look mutually exclusive by idx), and the
 * return statements.
 */
196 size_t count_pair = 0;
197 size_t count_dummy = 0;
198 struct rb_root *root;
199 struct rb_node *node;
202 * Leader hists (idx = 0) will have dummy entries from other,
203 * and some entries will have no pair. However every entry
204 * in other hists should have (dummy) pair.
206 if (sort__need_collapse)
207 root = &hists->entries_collapsed;
/* (else arm) entries_in is already a pointer, hence no '&' here */
209 root = hists->entries_in;
211 node = rb_first(root);
213 struct hist_entry *he;
215 he = rb_entry(node, struct hist_entry, rb_node_in);
217 if (hist_entry__has_pairs(he)) {
/* a paired entry must trace back to one of the fake sample arrays */
218 if (!find_sample(fake_common_samples,
219 ARRAY_SIZE(fake_common_samples),
220 he->thread, he->ms.map, he->ms.sym) &&
221 !find_sample(fake_samples[idx],
222 ARRAY_SIZE(fake_samples[idx]),
223 he->thread, he->ms.map, he->ms.sym)) {
228 pr_debug("A entry from the other hists should have pair\n");
233 node = rb_next(node);
237 * Note that we have a entry collapsed in the other (idx = 1) hists.
/* leader side: one dummy per distinct other-entry, minus the merged one */
240 if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
241 pr_debug("Invalid count of dummy entries: %zd of %zd\n",
242 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
245 if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
246 pr_debug("Invalid count of total leader entries: %zd of %zd\n",
247 count, count_pair + ARRAY_SIZE(fake_samples[0]));
/* other side: every entry must be paired, and no dummies allowed */
251 if (count != count_pair) {
252 pr_debug("Invalid count of total other entries: %zd of %zd\n",
256 if (count_dummy > 0) {
257 pr_debug("Other hists should not have dummy entries: %zd\n",
/*
 * Run the link check on both sides: leader hists with idx 0, other
 * hists with idx 1; short-circuits on the first failing side.
 * NOTE(review): the function's braces are not visible in this chunk.
 */
266 static int validate_link(struct hists *leader, struct hists *other)
268 return __validate_link(leader, 0) || __validate_link(other, 1);
271 int test__hists_link(void)
/*
 * Test entry point: build an evlist with two events (cpu-clock and
 * task-clock) over a fake machine, feed in the fake samples, then
 * exercise hists__match() and hists__link() and validate the pairing
 * with the helpers above.
 *
 * NOTE(review): this chunk omits the opening brace, the declaration of
 * `err` (and its initial value), the error checks/goto-cleanup after
 * each step, any verbosity guards around the fprintf/print calls, the
 * cleanup label, the closing brace, and the final return.
 */
274 struct machines machines;
275 struct machine *machine = NULL;
276 struct perf_evsel *evsel, *first;
277 struct perf_evlist *evlist = perf_evlist__new();
/* two evsels so there is a leader and an "other" hists to link */
282 err = parse_events(evlist, "cpu-clock");
285 err = parse_events(evlist, "task-clock");
289 /* default sort order (comm,dso,sym) will be used */
290 if (setup_sorting() < 0)
293 machines__init(&machines);
295 /* setup threads/dso/map/symbols also */
296 machine = setup_fake_machine(&machines);
301 machine__fprintf(machine, stderr);
303 /* process sample events */
304 err = add_hist_entries(evlist, machine);
/* collapse first so matching operates on the merged entries */
308 evlist__for_each(evlist, evsel) {
309 hists__collapse_resort(&evsel->hists, NULL);
312 print_hists_in(&evsel->hists);
315 first = perf_evlist__first(evlist);
316 evsel = perf_evlist__last(evlist);
318 /* match common entries */
319 hists__match(&first->hists, &evsel->hists);
320 err = validate_match(&first->hists, &evsel->hists);
324 /* link common and/or dummy entries */
325 hists__link(&first->hists, &evsel->hists);
326 err = validate_link(&first->hists, &evsel->hists);
333 /* tear down everything */
334 perf_evlist__delete(evlist);
335 reset_output_field();
336 machines__exit(&machines);