arch/x86/oprofile/nmi_int.c  (+9 −91)

@@ -24,8 +24,6 @@
 #include "op_counter.h"
 #include "op_x86_model.h"
 
-DEFINE_PER_CPU(int, switch_index);
-
 static struct op_x86_model_spec const *model;
 static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
 static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
@@ -34,8 +32,6 @@ static int nmi_start(void);
 static void nmi_stop(void);
 static void nmi_cpu_start(void *dummy);
 static void nmi_cpu_stop(void *dummy);
-static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs);
-static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs);
 
 /* 0 == registered but off, 1 == registered and on */
 static int nmi_enabled = 0;
@@ -111,47 +107,6 @@ static void exit_sysfs(void)
 #define exit_sysfs() do { } while (0)
 #endif /* CONFIG_PM */
 
-static void nmi_cpu_switch(void *dummy)
-{
-	int cpu = smp_processor_id();
-	int si = per_cpu(switch_index, cpu);
-	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
-
-	nmi_cpu_stop(NULL);
-	nmi_cpu_save_mpx_registers(msrs);
-
-	/* move to next set */
-	si += model->num_hardware_counters;
-	if ((si > model->num_counters) || (counter_config[si].count == 0))
-		per_cpu(switch_index, smp_processor_id()) = 0;
-	else
-		per_cpu(switch_index, smp_processor_id()) = si;
-
-	nmi_cpu_restore_mpx_registers(msrs);
-	model->setup_ctrs(msrs);
-	nmi_cpu_start(NULL);
-}
-
-/*
- * Quick check to see if multiplexing is necessary.
- * The check should be sufficient since counters are used
- * in ordre.
- */
-static int nmi_multiplex_on(void)
-{
-	return counter_config[model->num_hardware_counters].count ? 0 : -EINVAL;
-}
-
-static int nmi_switch_event(void)
-{
-	if (nmi_multiplex_on() < 0)
-		return -EINVAL;
-
-	on_each_cpu(nmi_cpu_switch, NULL, 1);
-
-	return 0;
-}
-
 static int profile_exceptions_notify(struct notifier_block *self,
 				     unsigned long val, void *data)
 {
@@ -215,10 +170,11 @@ static void free_msrs(void)
 
 static int allocate_msrs(void)
 {
-	int i, success = 1;
+	int success = 1;
 	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
 	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
 
+	int i;
 	for_each_possible_cpu(i) {
 		per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
 							GFP_KERNEL);
@@ -226,8 +182,8 @@ static int allocate_msrs(void)
 			success = 0;
 			break;
 		}
-		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
-							GFP_KERNEL);
+		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
+								GFP_KERNEL);
 		if (!per_cpu(cpu_msrs, i).controls) {
 			success = 0;
 			break;
@@ -271,8 +227,7 @@ static int nmi_setup(void)
 		return err;
 	}
 
-	/*
-	 * We need to serialize save and setup for HT because the subset
+	/* We need to serialize save and setup for HT because the subset
 	 * of msrs are distinct for save and setup operations
 	 */
 
@@ -288,6 +243,7 @@ static int nmi_setup(void)
 				per_cpu(cpu_msrs, 0).controls,
 				sizeof(struct op_msr) * model->num_controls);
 		}
 	}
+
 	on_each_cpu(nmi_save_registers, NULL, 1);
 	on_each_cpu(nmi_cpu_setup, NULL, 1);
@@ -295,41 +251,7 @@ static int nmi_setup(void)
 	return 0;
 }
 
-static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
-{
-	unsigned int si = __get_cpu_var(switch_index);
-	unsigned int const nr_ctrs = model->num_hardware_counters;
-	struct op_msr *counters = &msrs->counters[si];
-	unsigned int i;
-
-	for (i = 0; i < nr_ctrs; ++i) {
-		int offset = i + si;
-		if (counters[offset].addr) {
-			rdmsr(counters[offset].addr,
-				counters[offset].multiplex.low,
-				counters[offset].multiplex.high);
-		}
-	}
-}
-
-static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
-{
-	unsigned int si = __get_cpu_var(switch_index);
-	unsigned int const nr_ctrs = model->num_hardware_counters;
-	struct op_msr *counters = &msrs->counters[si];
-	unsigned int i;
-
-	for (i = 0; i < nr_ctrs; ++i) {
-		int offset = i + si;
-		if (counters[offset].addr) {
-			wrmsr(counters[offset].addr,
-				counters[offset].multiplex.low,
-				counters[offset].multiplex.high);
-		}
-	}
-}
-
-static void nmi_cpu_restore_registers(struct op_msrs *msrs)
+static void nmi_restore_registers(struct op_msrs *msrs)
 {
 	unsigned int const nr_ctrs = model->num_counters;
 	unsigned int const nr_ctrls = model->num_controls;
@@ -369,8 +291,7 @@ static void nmi_cpu_shutdown(void *dummy)
 	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
 	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
 	apic_write(APIC_LVTERR, v);
-	nmi_cpu_restore_registers(msrs);
-	__get_cpu_var(switch_index) = 0;
+	nmi_restore_registers(msrs);
 }
 
 static void nmi_shutdown(void)
@@ -435,7 +356,6 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
 		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
 		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
 		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
-		counter_config[i].save_count_low = 0;
 	}
 
 	return 0;
@@ -580,14 +500,12 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	register_cpu_notifier(&oprofile_cpu_nb);
 #endif
 	/* default values, can be overwritten by model */
-	__raw_get_cpu_var(switch_index) = 0;
 	ops->create_files = nmi_create_files;
 	ops->setup = nmi_setup;
 	ops->shutdown = nmi_shutdown;
 	ops->start = nmi_start;
 	ops->stop = nmi_stop;
 	ops->cpu_type = cpu_type;
-	ops->switch_events = nmi_switch_event;
 
 	if (model->init)
 		ret = model->init(ops);
@@ -607,7 +525,7 @@ void op_nmi_exit(void)
 #ifdef CONFIG_SMP
 	unregister_cpu_notifier(&oprofile_cpu_nb);
 #endif
+	}
 	if (model->exit)
 		model->exit();
-	}
 }
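Note (context, not part of the patch): the removed nmi_cpu_switch() rotated each CPU through the virtual counter sets by advancing a per-CPU switch_index in steps of num_hardware_counters, wrapping back to set 0 once the next set had no configured event. A standalone sketch of just that rotation, with NUM_HW, NUM_VIRT and event_count as hypothetical stand-ins for model->num_hardware_counters, model->num_counters and counter_config[i].count:

	#define NUM_HW		4	/* hardware counters per set */
	#define NUM_VIRT	32	/* virtual counters (old OP_MAX_COUNTER) */

	static unsigned long event_count[NUM_VIRT];	/* stand-in for counter_config[i].count */

	static int next_switch_index(int si)
	{
		si += NUM_HW;	/* move to the next set */
		/* wrap to set 0 past the end or at the first idle set
		 * (using >= here to stay inside the array bounds) */
		if (si >= NUM_VIRT || event_count[si] == 0)
			return 0;
		return si;
	}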
arch/x86/oprofile/op_counter.h  (+1 −2)

@@ -10,14 +10,13 @@
 #ifndef OP_COUNTER_H
 #define OP_COUNTER_H
 
-#define OP_MAX_COUNTER 32
+#define OP_MAX_COUNTER 8
 
 /* Per-perfctr configuration as set via
  * oprofilefs.
  */
 struct op_counter_config {
 	unsigned long count;
-	unsigned long save_count_low;
 	unsigned long enabled;
 	unsigned long event;
 	unsigned long kernel;
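Note (context, not part of the patch): OP_MAX_COUNTER sizes the counter_config[] array behind the per-event oprofilefs directories, so dropping it from 32 back to 8 also shrinks the directory range from 0–31 to 0–7. A minimal sketch of the header's shape after the revert, assuming the user and unit_mask members referenced by nmi_create_files() follow the fields shown in the hunk above:

	#define OP_MAX_COUNTER 8	/* back to the hardware maximum */

	struct op_counter_config {
		unsigned long count;
		unsigned long enabled;
		unsigned long event;
		unsigned long kernel;
		unsigned long user;
		unsigned long unit_mask;
	};

	/* one slot per oprofilefs directory "0".."7" */
	static struct op_counter_config counter_config[OP_MAX_COUNTER];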
arch/x86/oprofile/op_model_amd.c  (+30 −46)

@@ -15,7 +15,6 @@
 #include <linux/oprofile.h>
 #include <linux/device.h>
 #include <linux/pci.h>
-#include <linux/percpu.h>
 
 #include <asm/ptrace.h>
 #include <asm/msr.h>
@@ -24,10 +23,8 @@
 #include "op_x86_model.h"
 #include "op_counter.h"
 
-#define NUM_COUNTERS 32
-#define NUM_HARDWARE_COUNTERS 4
-#define NUM_CONTROLS 32
-#define NUM_HARDWARE_CONTROLS 4
+#define NUM_COUNTERS 4
+#define NUM_CONTROLS 4
 
 #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
 #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
@@ -51,7 +48,6 @@
 #define CTRL_SET_GUEST_ONLY(val, h)	(val |= ((h & 1) << 8))
 
 static unsigned long reset_value[NUM_COUNTERS];
-DECLARE_PER_CPU(int, switch_index);
 
 #ifdef CONFIG_OPROFILE_IBS
 
@@ -134,17 +130,15 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
 	int i;
 
 	for (i = 0; i < NUM_COUNTERS; i++) {
-		int hw_counter = i % NUM_HARDWARE_COUNTERS;
-		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + hw_counter))
-			msrs->counters[i].addr = MSR_K7_PERFCTR0 + hw_counter;
+		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
+			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
 		else
 			msrs->counters[i].addr = 0;
 	}
 
 	for (i = 0; i < NUM_CONTROLS; i++) {
-		int hw_control = i % NUM_HARDWARE_CONTROLS;
-		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + hw_control))
-			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + hw_control;
+		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
+			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
 		else
 			msrs->controls[i].addr = 0;
 	}
@@ -156,16 +150,8 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs)
 	unsigned int low, high;
 	int i;
 
-	for (i = 0; i < NUM_HARDWARE_CONTROLS; ++i) {
-		int offset = i + __get_cpu_var(switch_index);
-		if (counter_config[offset].enabled)
-			reset_value[offset] = counter_config[offset].count;
-		else
-			reset_value[offset] = 0;
-	}
-
 	/* clear all counters */
-	for (i = 0 ; i < NUM_HARDWARE_CONTROLS; ++i) {
+	for (i = 0 ; i < NUM_CONTROLS; ++i) {
 		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
 			continue;
 		CTRL_READ(low, high, msrs, i);
@@ -175,31 +161,34 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs)
 	}
 
 	/* avoid a false detection of ctr overflows in NMI handler */
-	for (i = 0; i < NUM_HARDWARE_COUNTERS; ++i) {
+	for (i = 0; i < NUM_COUNTERS; ++i) {
 		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
 			continue;
 		CTR_WRITE(1, msrs, i);
 	}
 
 	/* enable active counters */
-	for (i = 0; i < NUM_HARDWARE_COUNTERS; ++i) {
-		int offset = i + __get_cpu_var(switch_index);
-		if ((counter_config[offset].enabled) && (CTR_IS_RESERVED(msrs, i))) {
-			CTR_WRITE(counter_config[offset].count, msrs, i);
+	for (i = 0; i < NUM_COUNTERS; ++i) {
+		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
+			reset_value[i] = counter_config[i].count;
+
+			CTR_WRITE(counter_config[i].count, msrs, i);
 
 			CTRL_READ(low, high, msrs, i);
 			CTRL_CLEAR_LO(low);
 			CTRL_CLEAR_HI(high);
 			CTRL_SET_ENABLE(low);
-			CTRL_SET_USR(low, counter_config[offset].user);
-			CTRL_SET_KERN(low, counter_config[offset].kernel);
-			CTRL_SET_UM(low, counter_config[offset].unit_mask);
-			CTRL_SET_EVENT_LOW(low, counter_config[offset].event);
-			CTRL_SET_EVENT_HIGH(high, counter_config[offset].event);
+			CTRL_SET_USR(low, counter_config[i].user);
+			CTRL_SET_KERN(low, counter_config[i].kernel);
+			CTRL_SET_UM(low, counter_config[i].unit_mask);
+			CTRL_SET_EVENT_LOW(low, counter_config[i].event);
+			CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
 			CTRL_SET_HOST_ONLY(high, 0);
 			CTRL_SET_GUEST_ONLY(high, 0);
 
 			CTRL_WRITE(low, high, msrs, i);
+		} else {
+			reset_value[i] = 0;
 		}
 	}
 }
@@ -287,14 +276,13 @@ static int op_amd_check_ctrs(struct pt_regs * const regs,
 	unsigned int low, high;
 	int i;
 
-	for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) {
-		int offset = i + __get_cpu_var(switch_index);
-		if (!reset_value[offset])
+	for (i = 0 ; i < NUM_COUNTERS; ++i) {
+		if (!reset_value[i])
 			continue;
 		CTR_READ(low, high, msrs, i);
 		if (CTR_OVERFLOWED(low)) {
-			oprofile_add_sample(regs, offset);
-			CTR_WRITE(reset_value[offset], msrs, i);
+			oprofile_add_sample(regs, i);
+			CTR_WRITE(reset_value[i], msrs, i);
 		}
 	}
 
@@ -310,10 +298,8 @@ static void op_amd_start(struct op_msrs const * const msrs)
 {
 	unsigned int low, high;
 	int i;
-
-	for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) {
-		int offset = i + __get_cpu_var(switch_index);
-		if (reset_value[offset]) {
+	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
+		if (reset_value[i]) {
 			CTRL_READ(low, high, msrs, i);
 			CTRL_SET_ACTIVE(low);
 			CTRL_WRITE(low, high, msrs, i);
@@ -343,8 +329,8 @@ static void op_amd_stop(struct op_msrs const * const msrs)
 	/* Subtle: stop on all counters to avoid race with
 	 * setting our pm callback */
-	for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) {
-		if (!reset_value[i + per_cpu(switch_index, smp_processor_id())])
+	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
+		if (!reset_value[i])
 			continue;
 		CTRL_READ(low, high, msrs, i);
 		CTRL_SET_INACTIVE(low);
 		CTRL_WRITE(low, high, msrs, i);
@@ -370,11 +356,11 @@ static void op_amd_shutdown(struct op_msrs const * const msrs)
 {
 	int i;
 
-	for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) {
+	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
 		if (CTR_IS_RESERVED(msrs, i))
 			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
 	}
-	for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) {
+	for (i = 0 ; i < NUM_CONTROLS ; ++i) {
 		if (CTRL_IS_RESERVED(msrs, i))
 			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
 	}
@@ -548,8 +534,6 @@ struct op_x86_model_spec const op_amd_spec = {
 	.exit			= op_amd_exit,
 	.num_counters		= NUM_COUNTERS,
 	.num_controls		= NUM_CONTROLS,
-	.num_hardware_counters	= NUM_HARDWARE_COUNTERS,
-	.num_hardware_controls	= NUM_HARDWARE_CONTROLS,
 	.fill_in_addresses	= &op_amd_fill_in_addresses,
 	.setup_ctrs		= &op_amd_setup_ctrs,
 	.check_ctrs		= &op_amd_check_ctrs,
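Note (context, not part of the patch): the mechanism this file loses is the modulo folding of 32 virtual counters onto the four K7 MSR pairs, so virtual counters 0, 4, 8, ... all shared MSR_K7_PERFCTR0 and were serviced one set at a time. A self-contained sketch of the removed mapping (the MSR_K7_PERFCTR0 value matches the kernel's msr-index.h):

	#define MSR_K7_PERFCTR0		0xc0010004
	#define NUM_COUNTERS		32	/* virtual, pre-revert */
	#define NUM_HARDWARE_COUNTERS	4	/* physical K7 perfctrs */

	/* pre-revert: which physical counter MSR backs virtual counter i */
	static unsigned int hw_perfctr_msr(unsigned int i)
	{
		return MSR_K7_PERFCTR0 + (i % NUM_HARDWARE_COUNTERS);
	}

After the revert the mapping is simply the identity, i -> MSR_K7_PERFCTR0 + i, and only NUM_COUNTERS == 4 counters exist.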
arch/x86/oprofile/op_model_p4.c  (+0 −4)

@@ -700,8 +700,6 @@ static void p4_shutdown(struct op_msrs const * const msrs)
 struct op_x86_model_spec const op_p4_ht2_spec = {
 	.num_counters		= NUM_COUNTERS_HT2,
 	.num_controls		= NUM_CONTROLS_HT2,
-	.num_hardware_counters	= NUM_COUNTERS_HT2,
-	.num_hardware_controls	= NUM_CONTROLS_HT2,
 	.fill_in_addresses	= &p4_fill_in_addresses,
 	.setup_ctrs		= &p4_setup_ctrs,
 	.check_ctrs		= &p4_check_ctrs,
@@ -714,8 +712,6 @@ struct op_x86_model_spec const op_p4_ht2_spec = {
 struct op_x86_model_spec const op_p4_spec = {
 	.num_counters		= NUM_COUNTERS_NON_HT,
 	.num_controls		= NUM_CONTROLS_NON_HT,
-	.num_hardware_counters	= NUM_COUNTERS_NON_HT,
-	.num_hardware_controls	= NUM_CONTROLS_NON_HT,
 	.fill_in_addresses	= &p4_fill_in_addresses,
 	.setup_ctrs		= &p4_setup_ctrs,
 	.check_ctrs		= &p4_check_ctrs,
arch/x86/oprofile/op_model_ppro.c  (+0 −2)

@@ -183,8 +183,6 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
 struct op_x86_model_spec const op_ppro_spec = {
 	.num_counters		= NUM_COUNTERS,
 	.num_controls		= NUM_CONTROLS,
-	.num_hardware_counters	= NUM_COUNTERS,
-	.num_hardware_controls	= NUM_CONTROLS,
 	.fill_in_addresses	= &ppro_fill_in_addresses,
 	.setup_ctrs		= &ppro_setup_ctrs,
 	.check_ctrs		= &ppro_check_ctrs,
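Note (context, not part of the patch): with the num_hardware_* members gone from all three model specs, struct op_x86_model_spec is back to a single counter/control count per model. A trimmed sketch of the resulting shape, limited to members visible in this diff; the real op_x86_model.h carries further callbacks (start, stop, shutdown, init, exit):

	struct op_msrs;
	struct pt_regs;

	struct op_x86_model_spec {
		unsigned int num_counters;
		unsigned int num_controls;
		void (*fill_in_addresses)(struct op_msrs * const msrs);
		void (*setup_ctrs)(struct op_msrs const * const msrs);
		int (*check_ctrs)(struct pt_regs * const regs,
				  struct op_msrs const * const msrs);
		/* ... remaining callbacks omitted ... */
	};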