Commit af54faab authored by Jakub Kicinski
Browse files
Alexei Starovoitov says:

====================
pull-request: bpf-next 2021-09-17

We've added 63 non-merge commits during the last 12 day(s) which contain
a total of 65 files changed, 2653 insertions(+), 751 deletions(-).

The main changes are:

1) Streamline internal BPF program sections handling and
   bpf_program__set_attach_target() in libbpf, from Andrii.

2) Add support for new btf kind BTF_KIND_TAG, from Yonghong.

3) Introduce bpf_get_branch_snapshot() to capture LBR, from Song.

4) IMUL optimization for x86-64 JIT, from Jie.

5) xsk selftest improvements, from Magnus.

6) Introduce legacy kprobe events support in libbpf, from Rafael.

7) Access hw timestamp through BPF's __sk_buff, from Vadim.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (63 commits)
  selftests/bpf: Fix a few compiler warnings
  libbpf: Constify all high-level program attach APIs
  libbpf: Schedule open_opts.attach_prog_fd deprecation since v0.7
  selftests/bpf: Switch fexit_bpf2bpf selftest to set_attach_target() API
  libbpf: Allow skipping attach_func_name in bpf_program__set_attach_target()
  libbpf: Deprecated bpf_object_open_opts.relaxed_core_relocs
  selftests/bpf: Stop using relaxed_core_relocs which has no effect
  libbpf: Use pre-setup sec_def in libbpf_find_attach_btf_id()
  bpf: Update bpf_get_smp_processor_id() documentation
  libbpf: Add sphinx code documentation comments
  selftests/bpf: Skip btf_tag test if btf_tag attribute not supported
  docs/bpf: Add documentation for BTF_KIND_TAG
  selftests/bpf: Add a test with a bpf program with btf_tag attributes
  selftests/bpf: Test BTF_KIND_TAG for deduplication
  selftests/bpf: Add BTF_KIND_TAG unit tests
  selftests/bpf: Change NAME_NTH/IS_NAME_NTH for BTF_KIND_TAG format
  selftests/bpf: Test libbpf API function btf__add_tag()
  bpftool: Add support for BTF_KIND_TAG
  libbpf: Add support for BTF_KIND_TAG
  libbpf: Rename btf_{hash,equal}_int to btf_{hash,equal}_int_tag
  ...
====================

Link: https://lore.kernel.org/r/20210917173738.3397064-1-ast@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents f68d08c4 ca21a3e5
Loading
Loading
Loading
Loading
+28 −1
Original line number Diff line number Diff line
@@ -85,6 +85,7 @@ sequentially and type id is assigned to each recognized type starting from id
    #define BTF_KIND_VAR            14      /* Variable     */
    #define BTF_KIND_DATASEC        15      /* Section      */
    #define BTF_KIND_FLOAT          16      /* Floating point       */
    #define BTF_KIND_TAG            17      /* Tag          */

Note that the type section encodes debug info, not just pure types.
``BTF_KIND_FUNC`` is not a type, and it represents a defined subprogram.
@@ -106,7 +107,7 @@ Each type contains the following common data::
         * "size" tells the size of the type it is describing.
         *
         * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
         * FUNC and FUNC_PROTO.
         * FUNC, FUNC_PROTO and TAG.
         * "type" is a type_id referring to another type.
         */
        union {
@@ -465,6 +466,32 @@ map definition.

No additional type data follow ``btf_type``.

2.2.17 BTF_KIND_TAG
~~~~~~~~~~~~~~~~~~~

``struct btf_type`` encoding requirement:
 * ``name_off``: offset to a non-empty string
 * ``info.kind_flag``: 0
 * ``info.kind``: BTF_KIND_TAG
 * ``info.vlen``: 0
 * ``type``: ``struct``, ``union``, ``func`` or ``var``

``btf_type`` is followed by ``struct btf_tag``.::

    struct btf_tag {
        __u32   component_idx;
    };

The ``name_off`` encodes btf_tag attribute string.
The ``type`` should be ``struct``, ``union``, ``func`` or ``var``.
For ``var`` type, ``btf_tag.component_idx`` must be ``-1``.
For the other three types, if the btf_tag attribute is
applied to the ``struct``, ``union`` or ``func`` itself,
``btf_tag.component_idx`` must be ``-1``. Otherwise,
the attribute is applied to a ``struct``/``union`` member or
a ``func`` argument, and ``btf_tag.component_idx`` should be a
valid index (starting from 0) pointing to a member or an argument.

3. BTF Kernel API
*****************

+61 −6
Original line number Diff line number Diff line
@@ -2143,19 +2143,19 @@ static __initconst const u64 knl_hw_cache_extra_regs
 * However, there are some cases which may change PEBS status, e.g. PMI
 * throttle. The PEBS_ENABLE should be updated where the status changes.
 */
static void __intel_pmu_disable_all(void)
static __always_inline void __intel_pmu_disable_all(bool bts)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
	if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
}

static void intel_pmu_disable_all(void)
static __always_inline void intel_pmu_disable_all(void)
{
	__intel_pmu_disable_all();
	__intel_pmu_disable_all(true);
	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}
@@ -2186,6 +2186,49 @@ static void intel_pmu_enable_all(int added)
	__intel_pmu_enable_all(added, false);
}

static noinline int
__intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries,
				  unsigned int cnt, unsigned long flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	intel_pmu_lbr_read();
	cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr);

	memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);
	intel_pmu_enable_all(0);
	local_irq_restore(flags);
	return cnt;
}

static int
intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
{
	unsigned long flags;

	/* must not have branches... */
	local_irq_save(flags);
	__intel_pmu_disable_all(false); /* we don't care about BTS */
	__intel_pmu_pebs_disable_all();
	__intel_pmu_lbr_disable();
	/*            ... until here */
	return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
}

static int
intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
{
	unsigned long flags;

	/* must not have branches... */
	local_irq_save(flags);
	__intel_pmu_disable_all(false); /* we don't care about BTS */
	__intel_pmu_pebs_disable_all();
	__intel_pmu_arch_lbr_disable();
	/*            ... until here */
	return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
@@ -2929,7 +2972,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	intel_bts_disable_local();
	cpuc->enabled = 0;
	__intel_pmu_disable_all();
	__intel_pmu_disable_all(true);
	handled = intel_pmu_drain_bts_buffer();
	handled += intel_bts_interrupt();
	status = intel_pmu_get_status();
@@ -6283,9 +6326,21 @@ __init int intel_pmu_init(void)
			x86_pmu.lbr_nr = 0;
	}

	if (x86_pmu.lbr_nr)
	if (x86_pmu.lbr_nr) {
		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);

		/* only support branch_stack snapshot for perfmon >= v2 */
		if (x86_pmu.disable_all == intel_pmu_disable_all) {
			if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_arch_branch_stack);
			} else {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_branch_stack);
			}
		}
	}

	intel_pmu_check_extra_regs(x86_pmu.extra_regs);

	/* Support full width counters using alternative MSR range */
+1 −1
Original line number Diff line number Diff line
@@ -1301,7 +1301,7 @@ void intel_pmu_pebs_disable_all(void)
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
		__intel_pmu_pebs_disable_all();
}

static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
+5 −15
Original line number Diff line number Diff line
@@ -228,20 +228,6 @@ static void __intel_pmu_lbr_enable(bool pmi)
		wrmsrl(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN);
}

static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
		wrmsrl(MSR_ARCH_LBR_CTL, 0);
		return;
	}

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

void intel_pmu_lbr_reset_32(void)
{
	int i;
@@ -779,9 +765,13 @@ void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users && !vlbr_exclude_host())
	if (cpuc->lbr_users && !vlbr_exclude_host()) {
		if (static_cpu_has(X86_FEATURE_ARCH_LBR))
			return __intel_pmu_arch_lbr_disable();

		__intel_pmu_lbr_disable();
	}
}

void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
+19 −0
Original line number Diff line number Diff line
@@ -1240,6 +1240,25 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
	return intel_pmu_has_bts_period(event, hwc->sample_period);
}

static __always_inline void __intel_pmu_pebs_disable_all(void)
{
	wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}

static __always_inline void __intel_pmu_arch_lbr_disable(void)
{
	wrmsrl(MSR_ARCH_LBR_CTL, 0);
}

static __always_inline void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
Loading