/*
 * Hypervisor supplied "24x7" performance counter support
 *
 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
 * Copyright 2014 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "hv-24x7: " fmt

#include <linux/perf_event.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/firmware.h>
#include <asm/hvcall.h>

#include <linux/byteorder/generic.h>

#include "hv-24x7-catalog.h"
#include "hv-common.h"

static const char *event_domain_suffix(unsigned domain)
{
	switch (domain) {
#define DOMAIN(n, v, x, c)	case HV_PERF_DOMAIN_##n: return "__" #n;
#include "hv-24x7-domains.h"
#undef DOMAIN
	default:
		WARN(1, "unknown domain %d\n", domain);
		return "__UNKNOWN_DOMAIN_SUFFIX";
	}
}

static bool domain_is_valid(unsigned domain)
{
	switch (domain) {
#define DOMAIN(n, v, x, c)	case HV_PERF_DOMAIN_##n: return true;
#include "hv-24x7-domains.h"
#undef DOMAIN
	default:
		return false;
	}
}

static bool is_physical_domain(unsigned domain)
{
	switch (domain) {
#define DOMAIN(n, v, x, c)	case HV_PERF_DOMAIN_##n: return c;
#include "hv-24x7-domains.h"
#undef DOMAIN
	default:
		return false;
	}
}

static bool catalog_entry_domain_is_valid(unsigned domain)
{
	return is_physical_domain(domain);
}

/*
 * TODO: Merging events:
 * - Think of the hcall as an interface to a 4d array of counters:
 *   - x = domains
 *   - y = indexes in the domain (core, chip, vcpu, node, etc)
 *   - z = offset into the counter space
 *   - w = lpars (guest vms, "logical partitions")
 * - A single request is: x,y,y_last,z,z_last,w,w_last
 *   - this means we can retrieve a rectangle of counters in y,z for a single x
 *
 * - Things to consider (ignoring w):
 *   - input cost_per_request = 16
 *   - output cost_per_result(ys, zs) = 8 + 8 * ys + ys * zs
 *   - limited number of requests per hcall (must fit into 4K bytes)
 *     - 4096 - 16 [buffer header] = 16 [request size] * request_count
 *     - so at most 255 requests per hcall
 *   - sometimes it will be more efficient to read extra data and discard
 */
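
/*
 * A minimal sketch of that cost model (illustrative only; this helper is
 * hypothetical and not used elsewhere in this file):
 */
static inline size_t cost_per_result(size_t ys, size_t zs)
{
	/* 8 byte result header, 8 bytes per y, plus the ys * zs counter bytes */
	return 8 + 8 * ys + ys * zs;
}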

/*
 * Example usage:
 *  perf stat -e 'hv_24x7/domain=2,offset=8,vcpu=0,lpar=0xffffffff/'
 */

/* u3 0-6, one of HV_24X7_PERF_DOMAIN */
EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(core, config, 16, 31);
EVENT_DEFINE_RANGE_FORMAT(vcpu, config, 16, 31);
/* u32, see "data_offset" */
EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(lpar, config1, 0, 15);

EVENT_DEFINE_RANGE(reserved1, config, 4, 15);
EVENT_DEFINE_RANGE(reserved2, config1, 16, 63);
EVENT_DEFINE_RANGE(reserved3, config2, 0, 63);
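
/*
 * For illustration, a sketch of how the example event above packs into the
 * perf config words, assuming the usual sysfs format convention that
 * "config:0-3" names the low four bits (this helper is hypothetical):
 */
static inline void pack_example_event(u64 *config, u64 *config1)
{
	*config  = 2;		/* domain=2 -> config:0-3 */
	*config |= 0ULL << 16;	/* vcpu=0   -> config:16-31 */
	*config |= 8ULL << 32;	/* offset=8 -> config:32-63 */
	*config1 = 0xffff;	/* lpar fills the 16-bit config1:0-15 field */
}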

static struct attribute *format_attrs[] = {
	&format_attr_domain.attr,
	&format_attr_offset.attr,
	&format_attr_core.attr,
	&format_attr_vcpu.attr,
	&format_attr_lpar.attr,
	NULL,
};

static struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};

static struct attribute_group event_group = {
	.name = "events",
	/* .attrs is set in init */
};

static struct attribute_group event_desc_group = {
	.name = "event_descs",
	/* .attrs is set in init */
};

static struct attribute_group event_long_desc_group = {
	.name = "event_long_descs",
	/* .attrs is set in init */
};

static struct kmem_cache *hv_page_cache;

/*
 * request_buffer and result_buffer are not required to be 4k aligned,
 * but are not allowed to cross any 4k boundary. Aligning them to 4k is
 * the simplest way to ensure that.
 */
#define H24x7_DATA_BUFFER_SIZE	4096
DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);

static char *event_name(struct hv_24x7_event_data *ev, int *len)
{
	*len = be16_to_cpu(ev->event_name_len) - 2;
	return (char *)ev->remainder;
}

static char *event_desc(struct hv_24x7_event_data *ev, int *len)
{
	unsigned nl = be16_to_cpu(ev->event_name_len);
	__be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);

	*len = be16_to_cpu(*desc_len) - 2;
	return (char *)ev->remainder + nl;
}

static char *event_long_desc(struct hv_24x7_event_data *ev, int *len)
{
	unsigned nl = be16_to_cpu(ev->event_name_len);
	__be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
	unsigned desc_len = be16_to_cpu(*desc_len_);
	__be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);

	*len = be16_to_cpu(*long_desc_len) - 2;
	return (char *)ev->remainder + nl + desc_len;
}
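
/*
 * Summarizing the layout the three helpers above decode (each *_len value
 * counts its own data plus the 2-byte length word that terminates it,
 * hence the "- 2" adjustments):
 *
 *   ev->remainder: | name (nl-2) | desc_len (2) | desc (dl-2) |
 *                  | long_desc_len (2) | long desc (ldl-2) |
 */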

static bool event_fixed_portion_is_within(struct hv_24x7_event_data *ev,
					  void *end)
{
	void *start = ev;

	return (start + offsetof(struct hv_24x7_event_data, remainder)) < end;
}

/*
 * Things we don't check:
 *  - padding for desc, name, and long/detailed desc is required to be '\0'
 *    bytes.
 *
 * Return NULL if we pass end,
 * otherwise return the address of the byte just following the event.
 */
static void *event_end(struct hv_24x7_event_data *ev, void *end)
{
	void *start = ev;
	__be16 *dl_, *ldl_;
	unsigned dl, ldl;
	unsigned nl = be16_to_cpu(ev->event_name_len);

	if (nl < 2) {
		pr_debug("%s: name length too short: %d", __func__, nl);
		return NULL;
	}
	if (start + nl > end) {
		pr_debug("%s: start=%p + nl=%u > end=%p",
			 __func__, start, nl, end);
		return NULL;
	}

	dl_ = (__be16 *)(ev->remainder + nl - 2);
	if (!IS_ALIGNED((uintptr_t)dl_, 2))
		pr_warn("desc len not aligned %p", dl_);
	dl = be16_to_cpu(*dl_);
	if (dl < 2) {
		pr_debug("%s: desc len too short: %d", __func__, dl);
		return NULL;
	}
	if (start + nl + dl > end) {
		pr_debug("%s: (start=%p + nl=%u + dl=%u)=%p > end=%p",
			 __func__, start, nl, dl, start + nl + dl, end);
		return NULL;
	}

	ldl_ = (__be16 *)(ev->remainder + nl + dl - 2);
	if (!IS_ALIGNED((uintptr_t)ldl_, 2))
		pr_warn("long desc len not aligned %p", ldl_);
	ldl = be16_to_cpu(*ldl_);
	if (ldl < 2) {
		pr_debug("%s: long desc len too short (ldl=%u)",
			 __func__, ldl);
		return NULL;
	}
	if (start + nl + dl + ldl > end) {
		pr_debug("%s: start=%p + nl=%u + dl=%u + ldl=%u > end=%p",
			 __func__, start, nl, dl, ldl, end);
		return NULL;
	}

	return start + nl + dl + ldl;
}

static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
		unsigned long version, unsigned long index)
{
	pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
		 phys_4096, version, index);
	WARN_ON(!IS_ALIGNED(phys_4096, 4096));
	return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
				  phys_4096, version, index);
}

static unsigned long h_get_24x7_catalog_page(char page[],
					     u64 version, u32 index)
{
	return h_get_24x7_catalog_page_(virt_to_phys(page), version, index);
}

static unsigned core_domains[] = {
	HV_PERF_DOMAIN_PHYS_CORE,
	HV_PERF_DOMAIN_VCPU_HOME_CORE,
	HV_PERF_DOMAIN_VCPU_HOME_CHIP,
	HV_PERF_DOMAIN_VCPU_HOME_NODE,
	HV_PERF_DOMAIN_VCPU_REMOTE_NODE,
};

/* chip event data always yields a single event, core yields multiple */
#define MAX_EVENTS_PER_EVENT_DATA ARRAY_SIZE(core_domains)

static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain)
{
	const char *sindex, *lpar;

	if (is_physical_domain(domain)) {
		lpar = "0x0";
		sindex = "core";
	} else {
		lpar = "?";
		sindex = "vcpu";
	}

	return kasprintf(GFP_KERNEL,
			 "domain=0x%x,offset=0x%x,%s=?,lpar=%s", domain,
			 be16_to_cpu(event->event_counter_offs) +
			 be16_to_cpu(event->event_group_record_offs),
			 sindex, lpar);
}

/* Avoid trusting fw to NUL terminate strings */
static char *memdup_to_str(char *maybe_str, int max_len, gfp_t gfp)
{
	return kasprintf(gfp, "%.*s", max_len, maybe_str);
}
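
/*
 * e.g. a 4-byte catalog field holding "ab\0x" comes back as the 2-char
 * string "ab": "%.*s" stops at max_len bytes or the first NUL, whichever
 * is sooner, and kasprintf() always NUL terminates its result.
 */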

static ssize_t device_show_string(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *d;

	d = container_of(attr, struct dev_ext_attribute, attr);
	return sprintf(buf, "%s\n", (char *)d->var);
}

static struct attribute *device_str_attr_create_(char *name, char *str)
{
	struct dev_ext_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL);

	if (!attr)
		return NULL;

	sysfs_attr_init(&attr->attr.attr);
	attr->var = str;
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = device_show_string;
	return &attr->attr.attr;
}

static struct attribute *device_str_attr_create(char *name, int name_max,
						int name_nonce,
						char *str, size_t str_max)
{
	struct attribute *a;
	char *n, *s = memdup_to_str(str, str_max, GFP_KERNEL);

	if (!s)
		return NULL;
	if (!name_nonce)
		n = kasprintf(GFP_KERNEL, "%.*s", name_max, name);
	else
		n = kasprintf(GFP_KERNEL, "%.*s__%d", name_max, name,
			      name_nonce);
	if (!n) {
		kfree(s);
		return NULL;
	}

	a = device_str_attr_create_(n, s);
	if (!a) {
		kfree(n);
		kfree(s);
	}
	return a;
}

static void device_str_attr_destroy(struct attribute *attr)
{
	struct dev_ext_attribute *d;

	d = container_of(attr, struct dev_ext_attribute, attr.attr);
	kfree(d->var);
	kfree(d->attr.attr.name);
	kfree(d);
}

static struct attribute *event_to_attr(unsigned ix,
				       struct hv_24x7_event_data *event,
				       unsigned domain, int nonce)
{
	int event_name_len;
	char *ev_name, *a_ev_name, *val;
	const char *ev_suffix;
	struct attribute *attr;

	if (!domain_is_valid(domain)) {
		pr_warn("catalog event %u has invalid domain %u\n",
			ix, domain);
		return NULL;
	}

	val = event_fmt(event, domain);
	if (!val)
		return NULL;

	ev_suffix = event_domain_suffix(domain);
	ev_name = event_name(event, &event_name_len);
	if (!nonce)
		a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s",
				      (int)event_name_len, ev_name, ev_suffix);
	else
		a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s__%d",
				      (int)event_name_len, ev_name, ev_suffix,
				      nonce);
	if (!a_ev_name)
		goto out_val;

	attr = device_str_attr_create_(a_ev_name, val);
	if (!attr)
		goto out_name;
	return attr;

out_name:
	kfree(a_ev_name);
out_val:
	kfree(val);
	return NULL;
}

static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event,
					    int nonce)
{
	int nl, dl;
	char *name = event_name(event, &nl);
	char *desc = event_desc(event, &dl);

	/* If there isn't a description, don't create the sysfs file */
	if (!dl)
		return NULL;

	return device_str_attr_create(name, nl, nonce, desc, dl);
}

static struct attribute *
event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce)
{
	int nl, dl;
	char *name = event_name(event, &nl);
	char *desc = event_long_desc(event, &dl);

	/* If there isn't a description, don't create the sysfs file */
	if (!dl)
		return NULL;

	return device_str_attr_create(name, nl, nonce, desc, dl);
}

static ssize_t event_data_to_attrs(unsigned ix, struct attribute **attrs,
				   struct hv_24x7_event_data *event, int nonce)
{
	unsigned i;

	switch (event->domain) {
	case HV_PERF_DOMAIN_PHYS_CHIP:
		*attrs = event_to_attr(ix, event, event->domain, nonce);
		return 1;
	case HV_PERF_DOMAIN_PHYS_CORE:
		for (i = 0; i < ARRAY_SIZE(core_domains); i++) {
			attrs[i] = event_to_attr(ix, event, core_domains[i],
						 nonce);
			if (!attrs[i]) {
				pr_warn("catalog event %u: individual attr %u creation failure\n",
					ix, i);
				for (; i; i--)
					device_str_attr_destroy(attrs[i - 1]);
				return -1;
			}
		}
		return i;
	default:
		pr_warn("catalog event %u: domain %u is not allowed in the catalog\n",
			ix, event->domain);
		return -1;
	}
}

static size_t event_to_attr_ct(struct hv_24x7_event_data *event)
{
	switch (event->domain) {
	case HV_PERF_DOMAIN_PHYS_CHIP:
		return 1;
	case HV_PERF_DOMAIN_PHYS_CORE:
		return ARRAY_SIZE(core_domains);
	default:
		return 0;
	}
}

static unsigned long vmalloc_to_phys(void *v)
{
	struct page *p = vmalloc_to_page(v);

	BUG_ON(!p);
	return page_to_phys(p) + offset_in_page(v);
}
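
/*
 * Used below when fetching catalog pages into vmalloc()ed memory: the
 * hcall wants real addresses, and translating page by page like this is
 * only safe because PAGE_SIZE is a multiple of 4096 (see the BUILD_BUG_ON
 * in create_events_from_catalog()).
 */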

static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
{
	if (s1 < s2)
		return 1;
	if (s1 > s2)
		return -1;

	return memcmp(d1, d2, s1);
}
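
/*
 * Note that memord() orders by size before content, so two names only reach
 * the memcmp() when their lengths match; ev_uniq_ord() below extends this
 * ordering with the domain as a tie-breaker.
 */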

static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1,
		       const void *v2, size_t s2, unsigned d2)
{
	int r = memord(v1, s1, v2, s2);

	if (r)
		return r;
	if (d1 > d2)
		return 1;
	if (d2 > d1)
		return -1;
	return 0;
}

struct event_uniq {
	struct rb_node node;
	const char *name;
	int nl;
	unsigned ct;
	unsigned domain;
};

static int event_uniq_add(struct rb_root *root, const char *name, int nl,
			  unsigned domain)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct event_uniq *data;

	/* Figure out where to put the new node */
	while (*new) {
		struct event_uniq *it;
		int result;

		it = container_of(*new, struct event_uniq, node);
		result = ev_uniq_ord(name, nl, domain, it->name, it->nl,
				     it->domain);

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else if (result > 0)
			new = &((*new)->rb_right);
		else {
			it->ct++;
			pr_info("found a duplicate event %.*s, ct=%u\n", nl,
				name, it->ct);
			return it->ct;
		}
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	*data = (struct event_uniq) {
		.name = name,
		.nl = nl,
		.ct = 0,
		.domain = domain,
	};

	/* Add new node and rebalance tree. */
	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return 0;
}
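
/*
 * The value returned above feeds the "nonce" used when naming sysfs
 * attributes: the first occurrence of an event keeps its bare name, while
 * later duplicates are suffixed "__1", "__2", ... (see event_to_attr()).
 */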

static void event_uniq_destroy(struct rb_root *root)
{
	/*
	 * the strings we point to are in the giant block of memory filled by
	 * the catalog, and are freed separately.
	 */
	struct event_uniq *pos, *n;

	rbtree_postorder_for_each_entry_safe(pos, n, root, node)
		kfree(pos);
}

/*
 * Ensure the event structure's sizes are self consistent and don't cause us
 * to read outside of the event.
 *
 * On success, return the event length in bytes.
 * Otherwise, return -1 (and print as appropriate).
 */
static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
					  size_t event_idx,
					  size_t event_data_bytes,
					  size_t event_entry_count,
					  size_t offset, void *end)
{
	ssize_t ev_len;
	void *ev_end, *calc_ev_end;

	if (offset >= event_data_bytes)
		return -1;

	if (event_idx >= event_entry_count) {
		pr_devel("catalog event data has %zu bytes of padding after last event\n",
			 event_data_bytes - offset);
		return -1;
	}

	if (!event_fixed_portion_is_within(event, end)) {
		pr_warn("event %zu fixed portion is not within range\n",
			event_idx);
		return -1;
	}

	ev_len = be16_to_cpu(event->length);
	if (ev_len % 16)
		pr_info("event %zu has length %zu not divisible by 16: event=%pK\n",
			event_idx, ev_len, event);

	ev_end = (__u8 *)event + ev_len;
	if (ev_end > end) {
		pr_warn("event %zu has .length=%zu, ends after buffer end: ev_end=%pK > end=%pK, offset=%zu\n",
			event_idx, ev_len, ev_end, end, offset);
		return -1;
	}

	calc_ev_end = event_end(event, end);
	if (!calc_ev_end) {
		pr_warn("event %zu has a calculated length which exceeds buffer length %zu: event=%pK end=%pK, offset=%zu\n",
			event_idx, event_data_bytes, event, end, offset);
		return -1;
	}

	if (calc_ev_end > ev_end) {
		pr_warn("event %zu exceeds its own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n",
			event_idx, event, ev_end, offset, calc_ev_end);
		return -1;
	}

	return ev_len;
}

#define MAX_4K (SIZE_MAX / 4096)

static void create_events_from_catalog(struct attribute ***events_,
				       struct attribute ***event_descs_,
				       struct attribute ***event_long_descs_)
{
	unsigned long hret;
	size_t catalog_len, catalog_page_len, event_entry_count,
	       event_data_len, event_data_offs,
	       event_data_bytes, junk_events, event_idx, event_attr_ct, i,
	       attr_max, event_idx_last, desc_ct, long_desc_ct;
	ssize_t ct, ev_len;
	uint32_t catalog_version_num;
	struct attribute **events, **event_descs, **event_long_descs;
	struct hv_24x7_catalog_page_0 *page_0 =
		kmem_cache_alloc(hv_page_cache, GFP_KERNEL);
	void *page = page_0;
	void *event_data, *end;
	struct hv_24x7_event_data *event;
	struct rb_root ev_uniq = RB_ROOT;

	if (!page_0)
		goto e_out;

	hret = h_get_24x7_catalog_page(page, 0, 0);
	if (hret)
		goto e_free;

	catalog_version_num = be64_to_cpu(page_0->version);
	catalog_page_len = be32_to_cpu(page_0->length);

	if (MAX_4K < catalog_page_len) {
		pr_err("invalid page count: %zu\n", catalog_page_len);
		goto e_free;
	}

	catalog_len = catalog_page_len * 4096;

	event_entry_count = be16_to_cpu(page_0->event_entry_count);
	event_data_offs = be16_to_cpu(page_0->event_data_offs);
	event_data_len = be16_to_cpu(page_0->event_data_len);

	pr_devel("cv %zu cl %zu eec %zu edo %zu edl %zu\n",
		 (size_t)catalog_version_num, catalog_len,
		 event_entry_count, event_data_offs, event_data_len);

	if ((MAX_4K < event_data_len)
			|| (MAX_4K < event_data_offs)
			|| (MAX_4K - event_data_offs < event_data_len)) {
		pr_err("invalid event data offs %zu and/or len %zu\n",
		       event_data_offs, event_data_len);
		goto e_free;
	}

	if ((event_data_offs + event_data_len) > catalog_page_len) {
		pr_err("event data %zu-%zu does not fit inside catalog 0-%zu\n",
		       event_data_offs,
		       event_data_offs + event_data_len,
		       catalog_page_len);
		goto e_free;
	}

	if (SIZE_MAX / MAX_EVENTS_PER_EVENT_DATA - 1 < event_entry_count) {
		pr_err("event_entry_count %zu is invalid\n",
		       event_entry_count);
		goto e_free;
	}

	event_data_bytes = event_data_len * 4096;

	/*
	 * event data can span several pages, events can cross between these
	 * pages. Use vmalloc to make this easier.
	 */
	event_data = vmalloc(event_data_bytes);
	if (!event_data) {
		pr_err("could not allocate event data\n");
		goto e_free;
	}

	end = event_data + event_data_bytes;

	/*
	 * using vmalloc_to_phys() like this only works if PAGE_SIZE is
	 * divisible by 4096
	 */
	BUILD_BUG_ON(PAGE_SIZE % 4096);

	for (i = 0; i < event_data_len; i++) {
		hret = h_get_24x7_catalog_page_(
				vmalloc_to_phys(event_data + i * 4096),
				catalog_version_num,
				i + event_data_offs);
		if (hret) {
			pr_err("failed to get event data in page %zu\n",
			       i + event_data_offs);
			goto e_event_data;
		}
	}

	/*
	 * scan the catalog to determine the number of attributes we need, and
	 * verify it at the same time.
	 */
	for (junk_events = 0, event = event_data, event_idx = 0, attr_max = 0;
	     ;
	     event_idx++, event = (void *)event + ev_len) {
		size_t offset = (void *)event - (void *)event_data;
		char *name;
		int nl;

		ev_len = catalog_event_len_validate(event, event_idx,
						    event_data_bytes,
						    event_entry_count,
						    offset, end);
		if (ev_len < 0)
			break;

		name = event_name(event, &nl);

		if (event->event_group_record_len == 0) {
			pr_devel("invalid event %zu (%.*s): group_record_len == 0, skipping\n",
				 event_idx, nl, name);
			junk_events++;
			continue;
		}

		if (!catalog_entry_domain_is_valid(event->domain)) {
			pr_info("event %zu (%.*s) has invalid domain %d\n",
				event_idx, nl, name, event->domain);
			junk_events++;
			continue;
		}

		attr_max += event_to_attr_ct(event);
	}

	event_idx_last = event_idx;
	if (event_idx_last != event_entry_count)
		pr_warn("event buffer ended before listed # of events were parsed (got %zu, wanted %zu, junk %zu)\n",
			event_idx_last, event_entry_count, junk_events);

	events = kmalloc_array(attr_max + 1, sizeof(*events), GFP_KERNEL);
	if (!events)
		goto e_event_data;

	event_descs = kmalloc_array(event_idx + 1, sizeof(*event_descs),
				    GFP_KERNEL);
	if (!event_descs)
		goto e_event_attrs;

	event_long_descs = kmalloc_array(event_idx + 1,
					 sizeof(*event_long_descs), GFP_KERNEL);
	if (!event_long_descs)
		goto e_event_descs;

	/* Iterate over the catalog filling in the attribute vector */
	for (junk_events = 0, event_attr_ct = 0, desc_ct = 0, long_desc_ct = 0,
			event = event_data, event_idx = 0;
	     event_idx < event_idx_last;
	     event_idx++, ev_len = be16_to_cpu(event->length),
			event = (void *)event + ev_len) {
		char *name;
		int nl;
		int nonce;

		/*
		 * these are the only "bad" events that are intermixed and that
		 * we can ignore without issue. make sure to skip them here
		 */
		if (event->event_group_record_len == 0)
			continue;
		if (!catalog_entry_domain_is_valid(event->domain))
			continue;

		name = event_name(event, &nl);
		nonce = event_uniq_add(&ev_uniq, name, nl, event->domain);
		ct = event_data_to_attrs(event_idx, events + event_attr_ct,
					 event, nonce);
		if (ct <= 0) {
			pr_warn("event %zu (%.*s) creation failure, skipping\n",
				event_idx, nl, name);
			junk_events++;
		} else {
			event_attr_ct += ct;

			event_descs[desc_ct] = event_to_desc_attr(event, nonce);
			if (event_descs[desc_ct])
				desc_ct++;

			event_long_descs[long_desc_ct] =
					event_to_long_desc_attr(event, nonce);
			if (event_long_descs[long_desc_ct])
				long_desc_ct++;
		}
	}

	pr_info("read %zu catalog entries, created %zu event attrs (%zu failures), %zu descs\n",
		event_idx, event_attr_ct, junk_events, desc_ct);

	events[event_attr_ct] = NULL;
	event_descs[desc_ct] = NULL;
	event_long_descs[long_desc_ct] = NULL;

	event_uniq_destroy(&ev_uniq);
	vfree(event_data);
	kmem_cache_free(hv_page_cache, page);

	*events_ = events;
	*event_descs_ = event_descs;
	*event_long_descs_ = event_long_descs;
	return;

e_event_descs:
	kfree(event_descs);
e_event_attrs:
	kfree(events);
e_event_data:
	vfree(event_data);
e_free:
	kmem_cache_free(hv_page_cache, page);
e_out:
	*events_ = NULL;
	*event_descs_ = NULL;
	*event_long_descs_ = NULL;
}

static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t offset, size_t count)
{
	unsigned long hret;
	ssize_t ret = 0;
	size_t catalog_len = 0, catalog_page_len = 0;
	loff_t page_offset = 0;
	loff_t offset_in_page;
	size_t copy_len;
	uint64_t catalog_version_num = 0;
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
	struct hv_24x7_catalog_page_0 *page_0 = page;

	if (!page)
		return -ENOMEM;

	hret = h_get_24x7_catalog_page(page, 0, 0);
	if (hret) {
		ret = -EIO;
		goto e_free;
	}

	catalog_version_num = be64_to_cpu(page_0->version);
	catalog_page_len = be32_to_cpu(page_0->length);
	catalog_len = catalog_page_len * 4096;

	page_offset = offset / 4096;
	offset_in_page = offset % 4096;

	if (page_offset >= catalog_page_len)
		goto e_free;

	if (page_offset != 0) {
		hret = h_get_24x7_catalog_page(page, catalog_version_num,
					       page_offset);
		if (hret) {
			ret = -EIO;
			goto e_free;
		}
	}

	copy_len = 4096 - offset_in_page;
	if (copy_len > count)
		copy_len = count;

	memcpy(buf, page + offset_in_page, copy_len);
	ret = copy_len;

e_free:
	if (hret)
		pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed: rc=%ld\n",
		       catalog_version_num, page_offset, hret);
	kmem_cache_free(hv_page_cache, page);

	pr_devel("catalog_read: offset=%lld(%lld) count=%zu catalog_len=%zu(%zu) => %zd\n",
		 offset, page_offset, count, catalog_len, catalog_page_len,
		 ret);
	return ret;
}

#define PAGE_0_ATTR(_name, _fmt, _expr)					\
static ssize_t _name##_show(struct device *dev,				\
			    struct device_attribute *dev_attr,		\
			    char *buf)					\
{									\
	unsigned long hret;						\
	ssize_t ret = 0;						\
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);		\
	struct hv_24x7_catalog_page_0 *page_0 = page;			\
	if (!page)							\
		return -ENOMEM;						\
	hret = h_get_24x7_catalog_page(page, 0, 0);			\
	if (hret) {							\
		ret = -EIO;						\
		goto e_free;						\
	}								\
	ret = sprintf(buf, _fmt, _expr);				\
e_free:									\
	kmem_cache_free(hv_page_cache, page);				\
	return ret;							\
}									\
static DEVICE_ATTR_RO(_name)

PAGE_0_ATTR(catalog_version, "%lld\n",
	    (unsigned long long)be64_to_cpu(page_0->version));
PAGE_0_ATTR(catalog_len, "%lld\n",
	    (unsigned long long)be32_to_cpu(page_0->length) * 4096);
static BIN_ATTR_RO(catalog, 0/* real length varies */);

static struct bin_attribute *if_bin_attrs[] = {
	&bin_attr_catalog,
	NULL,
};

static struct attribute *if_attrs[] = {
	&dev_attr_catalog_len.attr,
	&dev_attr_catalog_version.attr,
	NULL,
};

static struct attribute_group if_group = {
	.name = "interface",
	.bin_attrs = if_bin_attrs,
	.attrs = if_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&format_group,
	&event_group,
	&event_desc_group,
	&event_long_desc_group,
	&if_group,
	NULL,
};

static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
					 u16 lpar, u64 *count)
{
	unsigned long ret;
	struct hv_24x7_request_buffer *request_buffer;
	struct hv_24x7_data_result_buffer *result_buffer;
	struct hv_24x7_result *resb;
	struct hv_24x7_request *req;

	BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
	BUILD_BUG_ON(sizeof(*result_buffer) > 4096);

	request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
	result_buffer = (void *)get_cpu_var(hv_24x7_resb);

	memset(request_buffer, 0, 4096);
	memset(result_buffer, 0, 4096);

	request_buffer->interface_version = HV_24X7_IF_VERSION_CURRENT;
	request_buffer->num_requests = 1;

	req = &request_buffer->requests[0];

	req->performance_domain = domain;
	req->data_size = cpu_to_be16(8);
	req->data_offset = cpu_to_be32(offset);
	req->starting_lpar_ix = cpu_to_be16(lpar);
	req->max_num_lpars = cpu_to_be16(1);
	req->starting_ix = cpu_to_be16(ix);
	req->max_ix = cpu_to_be16(1);

	/*
	 * NOTE: Due to variable number of array elements in request and
	 *	 result buffer(s), sizeof() is not reliable. Use the actual
	 *	 allocated buffer size, H24x7_DATA_BUFFER_SIZE.
	 */
	ret = plpar_hcall_norets(H_GET_24X7_DATA,
			virt_to_phys(request_buffer), H24x7_DATA_BUFFER_SIZE,
			virt_to_phys(result_buffer), H24x7_DATA_BUFFER_SIZE);

	if (ret) {
		pr_devel_ratelimited("hcall failed: %d %#x %#x %d => 0x%lx (%ld) detail=0x%x failing ix=%x\n",
				domain, offset, ix, lpar, ret, ret,
				result_buffer->detailed_rc,
				result_buffer->failing_request_ix);
		goto out;
	}

	resb = &result_buffer->results[0];
	*count = be64_to_cpu(resb->elements[0].element_data[0]);
out:
	put_cpu_var(hv_24x7_reqb);
	put_cpu_var(hv_24x7_resb);
	return ret;
}
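
/*
 * For example (illustrative): the sysfs event described near the top,
 * 'hv_24x7/domain=2,offset=8,vcpu=0,lpar=0xffffffff/', reaches the function
 * above via event_24x7_request() below as a 1x1 "rectangle": one lpar, one
 * index, and a single 8-byte counter read at data_offset 8.
 */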

static unsigned long event_24x7_request(struct perf_event *event, u64 *res)
{
	u16 idx;
	unsigned domain = event_get_domain(event);

	if (is_physical_domain(domain))
		idx = event_get_core(event);
	else
		idx = event_get_vcpu(event);

	return single_24x7_request(event_get_domain(event),
				   event_get_offset(event),
				   idx,
				   event_get_lpar(event),
				   res);
}

static int h_24x7_event_init(struct perf_event *event)
{
	struct hv_perf_caps caps;
	unsigned domain;
	unsigned long hret;
	u64 ct;

	/* Not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Unused areas must be 0 */
	if (event_get_reserved1(event) ||
	    event_get_reserved2(event) ||
	    event_get_reserved3(event)) {
		pr_devel("reserved set when forbidden 0x%llx(0x%llx) 0x%llx(0x%llx) 0x%llx(0x%llx)\n",
			 event->attr.config,
			 event_get_reserved1(event),
			 event->attr.config1,
			 event_get_reserved2(event),
			 event->attr.config2,
			 event_get_reserved3(event));
		return -EINVAL;
	}

	/* unsupported modes and filters */
	if (event->attr.exclude_user ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv ||
	    event->attr.exclude_idle ||
	    event->attr.exclude_host ||
	    event->attr.exclude_guest)
		return -EINVAL;

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	/* offset must be 8 byte aligned */
	if (event_get_offset(event) % 8) {
		pr_devel("bad alignment\n");
		return -EINVAL;
	}

	/* Domains above 6 are invalid */
	domain = event_get_domain(event);
	if (domain > 6) {
		pr_devel("invalid domain %d\n", domain);
		return -EINVAL;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_devel("could not get capabilities: rc=%ld\n", hret);
		return -EIO;
	}

	/* Physical domains & other lpars require extra capabilities */
	if (!caps.collect_privileged && (is_physical_domain(domain) ||
	    (event_get_lpar(event) != event_get_lpar_max()))) {
		pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n",
			 is_physical_domain(domain),
			 event_get_lpar(event));
		return -EACCES;
	}

	/* see if the event complains */
	if (event_24x7_request(event, &ct)) {
		pr_devel("test hcall failed\n");
		return -EIO;
	}

	return 0;
}

static u64 h_24x7_get_value(struct perf_event *event)
{
	unsigned long ret;
	u64 ct;

	ret = event_24x7_request(event, &ct);
	if (ret)
		/* We checked this in event init, shouldn't fail here... */
		return 0;

	return ct;
}

static void h_24x7_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now;

	now = h_24x7_get_value(event);
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void h_24x7_event_start(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_RELOAD)
		local64_set(&event->hw.prev_count, h_24x7_get_value(event));
}

static void h_24x7_event_stop(struct perf_event *event, int flags)
{
	h_24x7_event_update(event);
}

static int h_24x7_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		h_24x7_event_start(event, flags);

	return 0;
}

static struct pmu h_24x7_pmu = {
	.task_ctx_nr = perf_invalid_context,

	.name = "hv_24x7",
	.attr_groups = attr_groups,
	.event_init  = h_24x7_event_init,
	.add         = h_24x7_event_add,
	.del         = h_24x7_event_stop,
	.start       = h_24x7_event_start,
	.stop        = h_24x7_event_stop,
	.read        = h_24x7_event_update,
};

static int hv_24x7_init(void)
{
	int r;
	unsigned long hret;
	struct hv_perf_caps caps;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		pr_debug("not a virtualized system, not enabling\n");
		return -ENODEV;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
			 hret);
		return -ENODEV;
	}

	hv_page_cache = kmem_cache_create("hv-page-4096", 4096, 4096, 0, NULL);
	if (!hv_page_cache)
		return -ENOMEM;

	/* sampling not supported */
	h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	create_events_from_catalog(&event_group.attrs,
				   &event_desc_group.attrs,
				   &event_long_desc_group.attrs);

	r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
	if (r)
		return r;

	return 0;
}

device_initcall(hv_24x7_init);
1226 device_initcall(hv_24x7_init);