arch/s390/include/asm/perf_event.h (+1 −1)

@@ -21,7 +21,7 @@
 #define PMU_F_ERR_LSDA			0x0200
 #define PMU_F_ERR_MASK			(PMU_F_ERR_IBE|PMU_F_ERR_LSDA)
 
-/* Perf defintions for PMU event attributes in sysfs */
+/* Perf definitions for PMU event attributes in sysfs */
 extern __init const struct attribute_group **cpumf_cf_event_group(void);
 extern ssize_t cpumf_events_sysfs_show(struct device *dev,
 				       struct device_attribute *attr,
arch/s390/include/asm/rwsem.h (+1 −1)

@@ -31,7 +31,7 @@
  * This should be totally fair - if anything is waiting, a process that wants a
  * lock will go to the back of the queue. When the currently active lock is
  * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consequtive readers at the
+ * that will be woken up; if there's a bunch of consecutive readers at the
  * front, then they'll all be woken up, but no other readers will be.
  */
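The comment fixed above describes the rw-semaphore wakeup policy: a writer at the front of the wait queue is woken alone, while a run of consecutive readers at the front is woken together. A toy userspace sketch of that policy (illustrative only; the queue structure and function names here are hypothetical, not the kernel's):

#include <stdbool.h>
#include <stddef.h>

struct waiter {
	struct waiter *next;
	bool is_writer;
};

/* Wake the front of the queue per the policy in the comment: a writer
 * is woken alone; consecutive readers are woken as a batch, stopping
 * at the first writer. Returns the new queue head. */
static struct waiter *wake_front(struct waiter *head,
				 void (*wake)(struct waiter *))
{
	if (!head)
		return NULL;
	if (head->is_writer) {
		struct waiter *rest = head->next;
		wake(head);		/* exclusive: wake only the writer */
		return rest;
	}
	while (head && !head->is_writer) {
		struct waiter *rest = head->next;
		wake(head);		/* shared: wake the whole reader run */
		head = rest;
	}
	return head;
}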
arch/s390/kernel/perf_cpum_cf.c (+1 −1)

@@ -383,7 +383,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 	/* Validate the counter that is assigned to this event.
 	 * Because the counter facility can use numerous counters at the
-	 * same time without constraints, it is not necessary to explicity
+	 * same time without constraints, it is not necessary to explicitly
 	 * validate event groups (event->group_leader != event).
 	 */
 	err = validate_event(hwc);
arch/s390/kernel/perf_event.c (+1 −1)

@@ -238,7 +238,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
 	dump_trace(__perf_callchain_kernel, entry, NULL, regs->gprs[15]);
 }
 
-/* Perf defintions for PMU event attributes in sysfs */
+/* Perf definitions for PMU event attributes in sysfs */
 ssize_t cpumf_events_sysfs_show(struct device *dev,
 				struct device_attribute *attr, char *page)
 {
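For context on what cpumf_events_sysfs_show() does: show handlers of this kind conventionally print the raw event code in a form the perf tooling can parse from sysfs. A minimal sketch of that common pattern (the exact format string emitted by the s390 code may differ):

#include <linux/device.h>
#include <linux/perf_event.h>

ssize_t cpumf_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	/* perf parses "event=<code>" from this sysfs attribute */
	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}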
arch/s390/kvm/guestdbg.c (+2 −2)

@@ -17,7 +17,7 @@
 /*
  * Extends the address range given by *start and *stop to include the address
  * range starting with estart and the length len. Takes care of overflowing
- * intervals and tries to minimize the overall intervall size.
+ * intervals and tries to minimize the overall interval size.
  */
 static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
 {
@@ -72,7 +72,7 @@ static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
 		return;
 
 	/*
-	 * If the guest is not interrested in branching events, we can savely
+	 * If the guest is not interested in branching events, we can safely
 	 * limit them to the PER address range.
 	 */
 	if (!(*cr9 & PER_EVENT_BRANCH))
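To make the corrected comment concrete: extend_address_range() grows the interval [*start, *stop] so that it also covers [estart, estart + len - 1]. A simplified userspace sketch of the non-wrapping case (the real function additionally handles intervals that wrap around the end of the address space, which this sketch deliberately ignores):

#include <stdint.h>

/* Simplified sketch, assuming len >= 1 and no address wraparound. */
static void extend_range_simple(uint64_t *start, uint64_t *stop,
				uint64_t estart, int len)
{
	uint64_t estop = estart + len - 1;

	if (estart < *start)
		*start = estart;	/* extend downward */
	if (estop > *stop)
		*stop = estop;		/* extend upward */
}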