/*
 * Copyright IBM Corp. 2010
 * Author: Heinz Graalfs <graalfs@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/semaphore.h>
#include <linux/oom.h>
#include <linux/oprofile.h>

#include <asm/facility.h>
#include <asm/cpu_mf.h>
#include <asm/irq.h>

#include "hwsampler.h"
#include "op_counter.h"

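/*
 * Sampler memory is organized as 4K sample-data-block-tables (SDBTs) whose
 * entries point to 4K sample-data-blocks (SDBs); see allocate_sdbt() below
 * for how the per-CPU chain is built.
 */
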
#define MAX_NUM_SDB 511
#define MIN_NUM_SDB 1

DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);

struct hws_execute_parms {
	void *buffer;
	signed int rc;
};

DEFINE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
EXPORT_PER_CPU_SYMBOL(sampler_cpu_buffer);

static DEFINE_MUTEX(hws_sem);
static DEFINE_MUTEX(hws_sem_oom);

static unsigned char hws_flush_all;
static unsigned int hws_oom;
static unsigned int hws_alert;
static struct workqueue_struct *hws_wq;

static unsigned int hws_state;
enum {
	HWS_INIT = 1,
	HWS_DEALLOCATED,
	HWS_STOPPED,
	HWS_STARTED,
	HWS_STOPPING
};

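/*
 * State transitions, as driven by the entry points below:
 *   0 -> HWS_INIT                    hwsampler_setup() entered
 *   HWS_INIT -> HWS_DEALLOCATED      setup done, no sampler memory yet
 *   HWS_DEALLOCATED -> HWS_STOPPED   hwsampler_allocate()
 *   HWS_STOPPED -> HWS_STARTED       hwsampler_start_all()
 *   HWS_STARTED -> HWS_STOPPING -> HWS_STOPPED   hwsampler_stop_all()
 *   HWS_STOPPED -> HWS_DEALLOCATED   hwsampler_deallocate()
 *   HWS_DEALLOCATED/HWS_STOPPED -> HWS_INIT      hwsampler_shutdown()
 */
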
/* set to 1 if the OOM notifier fires while sampler memory is allocated */
static unsigned char oom_killer_was_active;
/* number of SDBTs, and of SDBs per SDBT, as set via the allocate API */
static unsigned long num_sdbt = 100;
static unsigned long num_sdb = 511;

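/*
 * With these defaults each online CPU gets 100 SDBT pages plus
 * 100 * 511 = 51,100 SDB pages, i.e. about 51,200 4K pages
 * (roughly 200 MiB) of sampler memory.
 */
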
/* sampling interval (machine cycles) */
static unsigned long interval;

static unsigned long min_sampler_rate;
static unsigned long max_sampler_rate;

static void execute_qsi(void *parms)
{
	struct hws_execute_parms *ep = parms;

	ep->rc = qsi(ep->buffer);
}

static void execute_ssctl(void *parms)
{
	struct hws_execute_parms *ep = parms;

	ep->rc = lsctl(ep->buffer);
}

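/*
 * qsi() and lsctl() act on the sampling facility of the CPU they execute
 * on, so the smp_ctl_* helpers below run them on the target CPU via
 * smp_call_function_single() and return the result from hws_execute_parms.
 */
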
static int smp_ctl_ssctl_stop(int cpu)
{
	int rc;
	struct hws_execute_parms ep;
	struct hws_cpu_buffer *cb;

	cb = &per_cpu(sampler_cpu_buffer, cpu);
	/* clear both the enable and the activation controls */
	cb->ssctl.es = 0;
	cb->ssctl.cs = 0;

	ep.buffer = &cb->ssctl;
	smp_call_function_single(cpu, execute_ssctl, &ep, 1);
	rc = ep.rc;
	if (rc)
		printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);

	ep.buffer = &cb->qsi;
	smp_call_function_single(cpu, execute_qsi, &ep, 1);
	if (cb->qsi.es || cb->qsi.cs)
		printk(KERN_EMERG "CPUMF sampling did not stop properly.\n");

	return rc;
}

static int smp_ctl_ssctl_deactivate(int cpu)
{
	int rc;
	struct hws_execute_parms ep;
	struct hws_cpu_buffer *cb;

	cb = &per_cpu(sampler_cpu_buffer, cpu);
	/* keep sampling enabled but clear the activation control */
	cb->ssctl.es = 1;
	cb->ssctl.cs = 0;

	ep.buffer = &cb->ssctl;
	smp_call_function_single(cpu, execute_ssctl, &ep, 1);
	rc = ep.rc;
	if (rc)
		printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);

	ep.buffer = &cb->qsi;
	smp_call_function_single(cpu, execute_qsi, &ep, 1);
	if (cb->qsi.cs)
		printk(KERN_EMERG "CPUMF sampling was not set inactive.\n");

	return rc;
}

static int smp_ctl_ssctl_enable_activate(int cpu, unsigned long interval)
{
	int rc;
	struct hws_execute_parms ep;
	struct hws_cpu_buffer *cb;

	cb = &per_cpu(sampler_cpu_buffer, cpu);
	cb->ssctl.h = 1;
	/* TEAR/DEAR: start at the first SDBT and its first SDB */
	cb->ssctl.tear = cb->first_sdbt;
	cb->ssctl.dear = *(unsigned long *) cb->first_sdbt;
	cb->ssctl.interval = interval;
	cb->ssctl.es = 1;
	cb->ssctl.cs = 1;

	ep.buffer = &cb->ssctl;
	smp_call_function_single(cpu, execute_ssctl, &ep, 1);
	rc = ep.rc;
	if (rc)
		printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);

	ep.buffer = &cb->qsi;
	smp_call_function_single(cpu, execute_qsi, &ep, 1);
	if (ep.rc)
		printk(KERN_ERR "hwsampler: CPU %d CPUMF QSI failed.\n", cpu);

	return rc;
}

static int smp_ctl_qsi(int cpu)
{
	struct hws_execute_parms ep;
	struct hws_cpu_buffer *cb;

	cb = &per_cpu(sampler_cpu_buffer, cpu);
	ep.buffer = &cb->qsi;
	smp_call_function_single(cpu, execute_qsi, &ep, 1);

	return ep.rc;
}

static void hws_ext_handler(struct ext_code ext_code,
			    unsigned int param32, unsigned long param64)
{
	struct hws_cpu_buffer *cb = &__get_cpu_var(sampler_cpu_buffer);

	if (!(param32 & CPU_MF_INT_SF_MASK))
		return;

	if (!hws_alert)
		return;

	inc_irq_stat(IRQEXT_CMS);
	atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);

	if (hws_wq)
		queue_work(hws_wq, &cb->worker);
}

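/*
 * The interrupt handler above only accumulates the alert bits in
 * cb->ext_params (atomic read-modify-write) and kicks the per-CPU work
 * item; all buffer processing happens in worker(), which consumes the
 * accumulated bits with atomic_xchg(&cb->ext_params, 0).
 */
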
static void worker(struct work_struct *work);

static void add_samples_to_oprofile(unsigned cpu, unsigned long *,
				    unsigned long *dear);

static void init_all_cpu_buffers(void)
{
	int cpu;
	struct hws_cpu_buffer *cb;

	for_each_online_cpu(cpu) {
		cb = &per_cpu(sampler_cpu_buffer, cpu);
		memset(cb, 0, sizeof(struct hws_cpu_buffer));
	}
}

static void prepare_cpu_buffers(void)
{
	struct hws_cpu_buffer *cb;
	int cpu;

	for_each_online_cpu(cpu) {
		cb = &per_cpu(sampler_cpu_buffer, cpu);
		atomic_set(&cb->ext_params, 0);
		cb->worker_entry = 0;
		cb->sample_overflow = 0;
		cb->req_alert = 0;
		cb->incorrect_sdbt_entry = 0;
		cb->invalid_entry_address = 0;
		cb->loss_of_sample_data = 0;
		cb->sample_auth_change_alert = 0;
		cb->finish = 0;
		cb->oom = 0;
		cb->stop_mode = 0;
	}
}

/*
 * allocate_sdbt() - allocate sampler memory
 * @cpu: the cpu for which sampler memory is allocated
 *
 * A 4K page is allocated for each requested SDBT.
 * A maximum of 511 4K pages are allocated for the SDBs in each of the SDBTs.
 * Set the ALERT_REQ mask in each SDB's trailer.
 * Returns zero if successful, <0 otherwise.
 */
static int allocate_sdbt(int cpu)
{
	int j, k, rc;
	unsigned long *sdbt;
	unsigned long sdb;
	unsigned long *tail;
	unsigned long *trailer;
	struct hws_cpu_buffer *cb;

	cb = &per_cpu(sampler_cpu_buffer, cpu);

	if (cb->first_sdbt)
		return -EINVAL;

	sdbt = NULL;
	tail = sdbt;

	for (j = 0; j < num_sdbt; j++) {
		sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);

		mutex_lock(&hws_sem_oom);
		/* OOM killer might have been activated */
		barrier();
		if (oom_killer_was_active || !sdbt) {
			if (sdbt)
				free_page((unsigned long)sdbt);

			goto allocate_sdbt_error;
		}
		if (cb->first_sdbt == 0)
			cb->first_sdbt = (unsigned long)sdbt;

		/* link current page to tail of chain */
		if (tail)
			*tail = (unsigned long)(void *)sdbt + 1;

		mutex_unlock(&hws_sem_oom);

		for (k = 0; k < num_sdb; k++) {
			/* get and set SDB page */
			sdb = get_zeroed_page(GFP_KERNEL);

			mutex_lock(&hws_sem_oom);
			/* OOM killer might have been activated */
			barrier();
			if (oom_killer_was_active || !sdb) {
				if (sdb)
					free_page(sdb);

				goto allocate_sdbt_error;
			}
			*sdbt = sdb;
			trailer = trailer_entry_ptr(*sdbt);
			*trailer = SDB_TE_ALERT_REQ_MASK;
			sdbt++;
			mutex_unlock(&hws_sem_oom);
		}
		tail = sdbt;
	}
	mutex_lock(&hws_sem_oom);
	if (oom_killer_was_active)
		goto allocate_sdbt_error;

	rc = 0;
	/* close the chain: the last link entry points back to the first SDBT */
	if (tail)
		*tail = (unsigned long)
			((void *)cb->first_sdbt) + 1;

allocate_sdbt_exit:
	mutex_unlock(&hws_sem_oom);
	return rc;

allocate_sdbt_error:
	rc = -ENOMEM;
	goto allocate_sdbt_exit;
}

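/*
 * The result is, per CPU, a circular chain of SDBTs: each SDBT holds
 * num_sdb pointers to SDB pages followed by one link entry (low-order
 * bit set) to the next SDBT, and the last SDBT links back to first_sdbt.
 */
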
/*
 * deallocate_sdbt() - deallocate all sampler memory
 *
 * For each online CPU all SDBT trees are deallocated.
 * Returns the number of freed pages.
 */
static int deallocate_sdbt(void)
{
	int cpu;
	int counter;

	counter = 0;

	for_each_online_cpu(cpu) {
		unsigned long start;
		unsigned long sdbt;
		unsigned long *curr;
		struct hws_cpu_buffer *cb;

		cb = &per_cpu(sampler_cpu_buffer, cpu);

		if (!cb->first_sdbt)
			continue;

		sdbt = cb->first_sdbt;
		curr = (unsigned long *) sdbt;
		start = sdbt;

		/* we'll free the SDBT after all SDBs are processed... */
		while (1) {
			if (!*curr || !sdbt)
				break;

			/* watch for link entry reset if found */
			if (is_link_entry(curr)) {
				curr = get_next_sdbt(curr);
				if (sdbt)
					free_page(sdbt);

				/* we are done if we reach the start */
				if ((unsigned long) curr == start)
					break;
				else
					sdbt = (unsigned long) curr;
			} else {
				/* process SDB pointer */
				if (*curr) {
					free_page(*curr);
					curr++;
				}
			}
			counter++;
		}
		cb->first_sdbt = 0;
	}
	return counter;
}

static int start_sampling(int cpu)
{
	int rc;
	struct hws_cpu_buffer *cb;

	cb = &per_cpu(sampler_cpu_buffer, cpu);
	rc = smp_ctl_ssctl_enable_activate(cpu, interval);
	if (rc) {
		printk(KERN_INFO "hwsampler: CPU %d ssctl failed.\n", cpu);
		goto start_exit;
	}

	rc = -EINVAL;
	if (!cb->qsi.es) {
		printk(KERN_INFO "hwsampler: CPU %d ssctl not enabled.\n", cpu);
		goto start_exit;
	}

	if (!cb->qsi.cs) {
		printk(KERN_INFO "hwsampler: CPU %d ssctl not active.\n", cpu);
		goto start_exit;
	}

	printk(KERN_INFO
		"hwsampler: CPU %d, CPUMF Sampling started, interval %lu.\n",
		cpu, interval);

	rc = 0;

start_exit:
	return rc;
}

static int stop_sampling(int cpu)
{
	unsigned long v;
	int rc;
	struct hws_cpu_buffer *cb;

	rc = smp_ctl_qsi(cpu);
	WARN_ON(rc);

	cb = &per_cpu(sampler_cpu_buffer, cpu);
	if (!rc && !cb->qsi.es)
		printk(KERN_INFO "hwsampler: CPU %d, already stopped.\n", cpu);

	rc = smp_ctl_ssctl_stop(cpu);
	if (rc) {
		printk(KERN_INFO "hwsampler: CPU %d, ssctl stop error %d.\n",
				cpu, rc);
		goto stop_exit;
	}

	printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling stopped.\n", cpu);

stop_exit:
	/* report any alert conditions recorded by the worker */
	v = cb->req_alert;
	if (v)
		printk(KERN_ERR "hwsampler: CPU %d CPUMF Request alert,"
				" count=%lu.\n", cpu, v);

	v = cb->loss_of_sample_data;
	if (v)
		printk(KERN_ERR "hwsampler: CPU %d CPUMF Loss of sample data,"
				" count=%lu.\n", cpu, v);

	v = cb->invalid_entry_address;
	if (v)
		printk(KERN_ERR "hwsampler: CPU %d CPUMF Invalid entry address,"
				" count=%lu.\n", cpu, v);

	v = cb->incorrect_sdbt_entry;
	if (v)
		printk(KERN_ERR
			"hwsampler: CPU %d CPUMF Incorrect SDBT address,"
			" count=%lu.\n", cpu, v);

	v = cb->sample_auth_change_alert;
	if (v)
		printk(KERN_ERR
			"hwsampler: CPU %d CPUMF Sample authorization change,"
			" count=%lu.\n", cpu, v);

	return rc;
}

static int check_hardware_prerequisites(void)
{
	/* facility 68 is the CPU-measurement sampling facility */
	if (!test_facility(68))
		return -EOPNOTSUPP;
	return 0;
}

/*
 * hws_oom_callback() - the OOM callback function
 *
 * In case the callback is invoked during memory allocation for the
 * hw sampler, all obtained memory is deallocated and a flag is set
 * so main sampler memory allocation can exit with a failure code.
 * In case the callback is invoked during sampling the hw sampler
 * is deactivated for all CPUs.
 */
static int hws_oom_callback(struct notifier_block *nfb,
			    unsigned long dummy, void *parm)
{
	unsigned long *freed;
	int cpu;
	struct hws_cpu_buffer *cb;

	freed = parm;

	mutex_lock(&hws_sem_oom);

	if (hws_state == HWS_DEALLOCATED) {
		/* during memory allocation */
		if (oom_killer_was_active == 0) {
			oom_killer_was_active = 1;
			*freed += deallocate_sdbt();
		}
	} else {
		int i;
		cpu = get_cpu();
		cb = &per_cpu(sampler_cpu_buffer, cpu);

		if (!cb->oom) {
			for_each_online_cpu(i) {
				smp_ctl_ssctl_deactivate(i);
				cb->oom = 1;
			}
			cb->finish = 1;

			printk(KERN_INFO
				"hwsampler: CPU %d, OOM notify during CPUMF Sampling.\n",
				cpu);
		}
		put_cpu();
	}

	mutex_unlock(&hws_sem_oom);

	return NOTIFY_OK;
}

static struct notifier_block hws_oom_notifier = {
	.notifier_call = hws_oom_callback
};

static int hws_cpu_callback(struct notifier_block *nfb,
			    unsigned long action, void *hcpu)
{
	/* We do not have sampler space available for all possible CPUs.
	   All CPUs should be online when hw sampling is activated. */
	return (hws_state <= HWS_DEALLOCATED) ? NOTIFY_OK : NOTIFY_BAD;
}

static struct notifier_block hws_cpu_notifier = {
	.notifier_call = hws_cpu_callback
};

/**
 * hwsampler_deactivate() - set hardware sampling temporarily inactive
 * @cpu: specifies the CPU to be set inactive.
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_deactivate(unsigned int cpu)
{
	/*
	 * Deactivate hw sampling temporarily and flush the buffer
	 * by pushing all the pending samples to oprofile buffer.
	 *
	 * This function can be called under one of the following conditions:
	 *     Memory unmap, task is exiting.
	 */
	int rc;
	struct hws_cpu_buffer *cb;

	rc = 0;
	mutex_lock(&hws_sem);

	cb = &per_cpu(sampler_cpu_buffer, cpu);
	if (hws_state == HWS_STARTED) {
		rc = smp_ctl_qsi(cpu);
		WARN_ON(rc);
		if (cb->qsi.cs) {
			rc = smp_ctl_ssctl_deactivate(cpu);
			if (rc) {
				printk(KERN_INFO
					"hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu);
				cb->finish = 1;
				hws_state = HWS_STOPPING;
			} else {
				hws_flush_all = 1;
				/* Add work to queue to read pending samples.*/
				queue_work_on(cpu, hws_wq, &cb->worker);
			}
		}
	}
	mutex_unlock(&hws_sem);

	if (hws_wq)
		flush_workqueue(hws_wq);

	return rc;
}

/**
 * hwsampler_activate() - activate/resume hardware sampling which was deactivated
 * @cpu: specifies the CPU to be set active.
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_activate(unsigned int cpu)
{
	/*
	 * Re-activate hw sampling. This should be called in pair with
	 * hwsampler_deactivate().
	 */
	int rc;
	struct hws_cpu_buffer *cb;

	rc = 0;
	mutex_lock(&hws_sem);

	cb = &per_cpu(sampler_cpu_buffer, cpu);
	if (hws_state == HWS_STARTED) {
		rc = smp_ctl_qsi(cpu);
		WARN_ON(rc);
		if (!cb->qsi.cs) {
			hws_flush_all = 0;
			rc = smp_ctl_ssctl_enable_activate(cpu, interval);
			if (rc)
				printk(KERN_ERR
					"CPU %d, CPUMF activate sampling failed.\n",
					cpu);
		}
	}

	mutex_unlock(&hws_sem);

	return rc;
}

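/*
 * A minimal usage sketch (hypothetical caller shown for illustration):
 * the pair brackets a window in which no samples may be taken on @cpu,
 * e.g. while a task's memory is unmapped on exit:
 *
 *	hwsampler_deactivate(cpu);  // stop sampling, flush pending samples
 *	// ... critical window ...
 *	hwsampler_activate(cpu);    // resume with the configured interval
 */
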
static int check_qsi_on_setup(void)
{
	int rc;
	unsigned int cpu;
	struct hws_cpu_buffer *cb;

	for_each_online_cpu(cpu) {
		cb = &per_cpu(sampler_cpu_buffer, cpu);
		rc = smp_ctl_qsi(cpu);
		WARN_ON(rc);
		if (rc)
			return -EOPNOTSUPP;

		if (!cb->qsi.as) {
			printk(KERN_INFO "hwsampler: CPUMF sampling is not authorized.\n");
			return -EINVAL;
		}

		if (cb->qsi.es) {
			printk(KERN_WARNING "hwsampler: CPUMF is still enabled.\n");
			rc = smp_ctl_ssctl_stop(cpu);
			if (rc)
				return -EINVAL;

			printk(KERN_INFO
				"CPU %d, CPUMF Sampling stopped now.\n", cpu);
		}
	}
	return 0;
}

static int check_qsi_on_start(void)
{
	unsigned int cpu;
	int rc;
	struct hws_cpu_buffer *cb;

	for_each_online_cpu(cpu) {
		cb = &per_cpu(sampler_cpu_buffer, cpu);
		rc = smp_ctl_qsi(cpu);
		WARN_ON(rc);

		/* sampling must be authorized and neither enabled nor active */
		if (!cb->qsi.as || cb->qsi.es || cb->qsi.cs)
			return -EINVAL;
	}
	return 0;
}

static void worker_on_start(unsigned int cpu)
{
	struct hws_cpu_buffer *cb;

	cb = &per_cpu(sampler_cpu_buffer, cpu);
	cb->worker_entry = cb->first_sdbt;
}

static int worker_check_error(unsigned int cpu, int ext_params)
{
	int rc;
	unsigned long *sdbt;
	struct hws_cpu_buffer *cb;

	rc = 0;
	cb = &per_cpu(sampler_cpu_buffer, cpu);
	sdbt = (unsigned long *) cb->worker_entry;

	if (!sdbt || !*sdbt)
		return -EINVAL;

	if (ext_params & CPU_MF_INT_SF_PRA)
		cb->req_alert++;

	if (ext_params & CPU_MF_INT_SF_LSDA)
		cb->loss_of_sample_data++;

	if (ext_params & CPU_MF_INT_SF_IAE) {
		cb->invalid_entry_address++;
		rc = -EINVAL;
	}

	if (ext_params & CPU_MF_INT_SF_ISE) {
		cb->incorrect_sdbt_entry++;
		rc = -EINVAL;
	}

	if (ext_params & CPU_MF_INT_SF_SACA) {
		cb->sample_auth_change_alert++;
		rc = -EINVAL;
	}

	return rc;
}

static void worker_on_finish(unsigned int cpu)
{
	int rc, i;
	struct hws_cpu_buffer *cb;

	cb = &per_cpu(sampler_cpu_buffer, cpu);

	if (cb->finish) {
		rc = smp_ctl_qsi(cpu);
		WARN_ON(rc);
		if (cb->qsi.es) {
			printk(KERN_INFO
				"hwsampler: CPU %d, CPUMF Stop/Deactivate sampling.\n",
				cpu);
			rc = smp_ctl_ssctl_stop(cpu);
			if (rc)
				printk(KERN_INFO
					"hwsampler: CPU %d, CPUMF Deactivation failed.\n",
					cpu);

			/* tell the other online CPUs to finish as well */
			for_each_online_cpu(i) {
				if (i == cpu)
					continue;
				cb = &per_cpu(sampler_cpu_buffer, i);
				cb->finish = 1;
				queue_work_on(i, hws_wq,
					      &cb->worker);
			}
		}
	}
}

static void worker_on_interrupt(unsigned int cpu)
{
	unsigned long *sdbt;
	unsigned char done;
	struct hws_cpu_buffer *cb;

	cb = &per_cpu(sampler_cpu_buffer, cpu);

	sdbt = (unsigned long *) cb->worker_entry;

	done = 0;
	/* do not proceed if stop was entered,
	 * forget the buffers not yet processed */
	while (!done && !cb->stop_mode) {
		unsigned long *trailer;
		struct hws_trailer_entry *te;
		unsigned long *dear = 0;

		trailer = trailer_entry_ptr(*sdbt);
		/* leave loop if no more work to do */
		if (!(*trailer & SDB_TE_BUFFER_FULL_MASK)) {
			done = 1;
			if (!hws_flush_all)
				continue;
		}

		te = (struct hws_trailer_entry *)trailer;
		cb->sample_overflow += te->overflow;

		add_samples_to_oprofile(cpu, sdbt, dear);

		/* reset the trailer flags: clear "buffer full", keep
		 * "alert request" (0x40 is the ALERT_REQ flag byte) */
		xchg((unsigned char *) te, 0x40);

		/* advance to next sdb slot in current sdbt */
		sdbt++;
		/* in case link bit is set use address w/o link bit */
		if (is_link_entry(sdbt))
			sdbt = get_next_sdbt(sdbt);

		cb->worker_entry = (unsigned long)sdbt;
	}
}

static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
				    unsigned long *dear)
{
	struct hws_basic_entry *sample_data_ptr;
	unsigned long *trailer;

	trailer = trailer_entry_ptr(*sdbt);
	if (dear) {
		if (dear > trailer)
			return;
		trailer = dear;
	}

	sample_data_ptr = (struct hws_basic_entry *)(*sdbt);

	while ((unsigned long *)sample_data_ptr < trailer) {
		struct pt_regs *regs = NULL;
		struct task_struct *tsk = NULL;

		/*
		 * Check sampling mode, 1 indicates basic (=customer) sampling
		 * mode.
		 */
		if (sample_data_ptr->def != 1) {
			/* sample slot is not yet written */
			break;
		} else {
			/* make sure we don't use it twice,
			 * the next time the sampler will set it again */
			sample_data_ptr->def = 0;
		}

		/* Get pt_regs. */
		if (sample_data_ptr->P == 1) {
			/* userspace sample */
			unsigned int pid = sample_data_ptr->prim_asn;
			if (!counter_config.user)
				goto skip_sample;
			rcu_read_lock();
			tsk = pid_task(find_vpid(pid), PIDTYPE_PID);
			if (tsk)
				regs = task_pt_regs(tsk);
			rcu_read_unlock();
		} else {
			/* kernelspace sample */
			if (!counter_config.kernel)
				goto skip_sample;
			regs = task_pt_regs(current);
		}

		mutex_lock(&hws_sem);
		oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0,
				!sample_data_ptr->P, tsk);
		mutex_unlock(&hws_sem);
	skip_sample:
		sample_data_ptr++;
	}
}

static void worker(struct work_struct *work)
{
	unsigned int cpu;
	int ext_params;
	struct hws_cpu_buffer *cb;

	cb = container_of(work, struct hws_cpu_buffer, worker);
	cpu = smp_processor_id();
	ext_params = atomic_xchg(&cb->ext_params, 0);

	if (!cb->worker_entry)
		worker_on_start(cpu);

	if (worker_check_error(cpu, ext_params))
		return;

	if (!cb->finish)
		worker_on_interrupt(cpu);

	if (cb->finish)
		worker_on_finish(cpu);
}

/**
 * hwsampler_allocate() - allocate memory for the hardware sampler
 * @sdbt: number of SDBTs per online CPU (must be > 0)
 * @sdb:  number of SDBs per SDBT (minimum 1, maximum 511)
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_allocate(unsigned long sdbt, unsigned long sdb)
{
	int cpu, rc;

	mutex_lock(&hws_sem);

	rc = -EINVAL;
	if (hws_state != HWS_DEALLOCATED)
		goto allocate_exit;

	if (sdbt < 1)
		goto allocate_exit;

	if (sdb > MAX_NUM_SDB || sdb < MIN_NUM_SDB)
		goto allocate_exit;

	num_sdbt = sdbt;
	num_sdb = sdb;

	oom_killer_was_active = 0;
	register_oom_notifier(&hws_oom_notifier);

	for_each_online_cpu(cpu) {
		if (allocate_sdbt(cpu)) {
			unregister_oom_notifier(&hws_oom_notifier);
			goto allocate_error;
		}
	}
	unregister_oom_notifier(&hws_oom_notifier);
	if (oom_killer_was_active)
		goto allocate_error;

	hws_state = HWS_STOPPED;
	rc = 0;

allocate_exit:
	mutex_unlock(&hws_sem);
	return rc;

allocate_error:
	rc = -ENOMEM;
	printk(KERN_ERR "hwsampler: CPUMF Memory allocation failed.\n");
	goto allocate_exit;
}

/**
 * hwsampler_deallocate() - deallocate hardware sampler memory
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_deallocate(void)
{
	int rc;

	mutex_lock(&hws_sem);

	rc = -EINVAL;
	if (hws_state != HWS_STOPPED)
		goto deallocate_exit;

	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
	hws_alert = 0;
	deallocate_sdbt();

	hws_state = HWS_DEALLOCATED;
	rc = 0;

deallocate_exit:
	mutex_unlock(&hws_sem);
	return rc;
}

unsigned long hwsampler_query_min_interval(void)
{
	return min_sampler_rate;
}

unsigned long hwsampler_query_max_interval(void)
{
	return max_sampler_rate;
}

unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
{
	struct hws_cpu_buffer *cb;

	cb = &per_cpu(sampler_cpu_buffer, cpu);

	return cb->sample_overflow;
}

int hwsampler_setup(void)
{
	int rc;
	int cpu;
	struct hws_cpu_buffer *cb;

	mutex_lock(&hws_sem);

	rc = -EINVAL;
	if (hws_state)
		goto setup_exit;

	hws_state = HWS_INIT;

	init_all_cpu_buffers();

	rc = check_hardware_prerequisites();
	if (rc)
		goto setup_exit;

	rc = check_qsi_on_setup();
	if (rc)
		goto setup_exit;

	rc = -EINVAL;
	hws_wq = create_workqueue("hwsampler");
	if (!hws_wq)
		goto setup_exit;

	register_cpu_notifier(&hws_cpu_notifier);

	for_each_online_cpu(cpu) {
		cb = &per_cpu(sampler_cpu_buffer, cpu);
		INIT_WORK(&cb->worker, worker);
		rc = smp_ctl_qsi(cpu);
		WARN_ON(rc);
		/* take the most restrictive rate limits across all CPUs */
		if (min_sampler_rate != cb->qsi.min_sampl_rate) {
			if (min_sampler_rate) {
				printk(KERN_WARNING
					"hwsampler: different min sampler rate values.\n");
				if (min_sampler_rate < cb->qsi.min_sampl_rate)
					min_sampler_rate =
						cb->qsi.min_sampl_rate;
			} else
				min_sampler_rate = cb->qsi.min_sampl_rate;
		}
		if (max_sampler_rate != cb->qsi.max_sampl_rate) {
			if (max_sampler_rate) {
				printk(KERN_WARNING
					"hwsampler: different max sampler rate values.\n");
				if (max_sampler_rate > cb->qsi.max_sampl_rate)
					max_sampler_rate =
						cb->qsi.max_sampl_rate;
			} else
				max_sampler_rate = cb->qsi.max_sampl_rate;
		}
	}
	register_external_irq(EXT_IRQ_MEASURE_ALERT, hws_ext_handler);

	hws_state = HWS_DEALLOCATED;
	rc = 0;

setup_exit:
	mutex_unlock(&hws_sem);
	return rc;
}

int hwsampler_shutdown(void)
{
	int rc;

	mutex_lock(&hws_sem);

	rc = -EINVAL;
	if (hws_state == HWS_DEALLOCATED || hws_state == HWS_STOPPED) {
		mutex_unlock(&hws_sem);

		if (hws_wq)
			flush_workqueue(hws_wq);

		mutex_lock(&hws_sem);

		if (hws_state == HWS_STOPPED) {
			irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
			hws_alert = 0;
			deallocate_sdbt();
		}
		if (hws_wq) {
			destroy_workqueue(hws_wq);
			hws_wq = NULL;
		}

		unregister_external_irq(EXT_IRQ_MEASURE_ALERT, hws_ext_handler);
		hws_state = HWS_INIT;
		rc = 0;
	}
	mutex_unlock(&hws_sem);

	unregister_cpu_notifier(&hws_cpu_notifier);

	return rc;
}

/**
 * hwsampler_start_all() - start hardware sampling on all online CPUs
 * @rate: the sampling interval, in machine cycles
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_start_all(unsigned long rate)
{
	int rc, cpu;

	mutex_lock(&hws_sem);

	hws_oom = 0;

	rc = -EINVAL;
	if (hws_state != HWS_STOPPED)
		goto start_all_exit;

	interval = rate;

	/* fail if rate is not valid */
	if (interval < min_sampler_rate || interval > max_sampler_rate)
		goto start_all_exit;

	rc = check_qsi_on_start();
	if (rc)
		goto start_all_exit;

	prepare_cpu_buffers();

	for_each_online_cpu(cpu) {
		rc = start_sampling(cpu);
		if (rc)
			break;
	}
	if (rc) {
		for_each_online_cpu(cpu) {
			stop_sampling(cpu);
		}
		goto start_all_exit;
	}
	hws_state = HWS_STARTED;
	rc = 0;

start_all_exit:
	mutex_unlock(&hws_sem);

	if (rc)
		return rc;

	register_oom_notifier(&hws_oom_notifier);
	hws_oom = 1;
	hws_flush_all = 0;
	/* now let them in, 1407 CPUMF external interrupts */
	hws_alert = 1;
	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);

	return 0;
}

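/*
 * A minimal sketch of the intended call sequence; the actual caller is
 * the oprofile glue code outside this file:
 *
 *	hwsampler_setup();                  // probe facility, query rates
 *	hwsampler_allocate(n_sdbt, n_sdb);  // per-CPU sampler memory
 *	hwsampler_start_all(interval);      // begin sampling
 *	...                                 // samples flow to oprofile
 *	hwsampler_stop_all();
 *	hwsampler_deallocate();
 *	hwsampler_shutdown();
 */
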
/**
 * hwsampler_stop_all() - stop hardware sampling on all online CPUs
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_stop_all(void)
{
	int tmp_rc, rc, cpu;
	struct hws_cpu_buffer *cb;

	mutex_lock(&hws_sem);

	rc = 0;
	if (hws_state == HWS_INIT) {
		mutex_unlock(&hws_sem);
		return rc;
	}
	hws_state = HWS_STOPPING;
	mutex_unlock(&hws_sem);

	for_each_online_cpu(cpu) {
		cb = &per_cpu(sampler_cpu_buffer, cpu);
		cb->stop_mode = 1;
		tmp_rc = stop_sampling(cpu);
		if (tmp_rc)
			rc = tmp_rc;
	}

	if (hws_wq)
		flush_workqueue(hws_wq);

	mutex_lock(&hws_sem);
	if (hws_oom) {
		unregister_oom_notifier(&hws_oom_notifier);
		hws_oom = 0;
	}
	hws_state = HWS_STOPPED;
	mutex_unlock(&hws_sem);

	return rc;
}