Commit 06fca841 authored by Ilya Leoshkevich, committed by Andrii Nakryiko
Browse files

selftests/bpf: Use __BYTE_ORDER__



Use the compiler-defined __BYTE_ORDER__ instead of the libc-defined
__BYTE_ORDER for consistency.

Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20211026010831.748682-4-iii@linux.ibm.com
parent 3930198d
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -7,12 +7,12 @@
#include <bpf/btf.h>

void test_btf_endian() {
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	enum btf_endianness endian = BTF_LITTLE_ENDIAN;
#elif __BYTE_ORDER == __BIG_ENDIAN
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	enum btf_endianness endian = BTF_BIG_ENDIAN;
#else
#error "Unrecognized __BYTE_ORDER"
#error "Unrecognized __BYTE_ORDER__"
#endif
	enum btf_endianness swap_endian = 1 - endian;
	struct btf *btf = NULL, *swap_btf = NULL;
+2 −2
Original line number Diff line number Diff line
@@ -124,7 +124,7 @@ static struct sysctl_test tests[] = {
		.descr = "ctx:write sysctl:write read ok narrow",
		.insns = {
			/* u64 w = (u16)write & 1; */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
			BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1,
				    offsetof(struct bpf_sysctl, write)),
#else
@@ -184,7 +184,7 @@ static struct sysctl_test tests[] = {
		.descr = "ctx:file_pos sysctl:read read ok narrow",
		.insns = {
			/* If (file_pos == X) */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
			BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
				    offsetof(struct bpf_sysctl, file_pos)),
#else
+7 −7
Original line number Diff line number Diff line
@@ -502,7 +502,7 @@
	"check skb->hash byte load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#else
@@ -537,7 +537,7 @@
	"check skb->hash byte load permitted 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#else
@@ -646,7 +646,7 @@
	"check skb->hash half load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#else
@@ -661,7 +661,7 @@
	"check skb->hash half load permitted 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 2),
#else
@@ -676,7 +676,7 @@
	"check skb->hash half load not permitted, unaligned 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 1),
#else
@@ -693,7 +693,7 @@
	"check skb->hash half load not permitted, unaligned 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#else
@@ -951,7 +951,7 @@
	"check skb->data half load not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
#else
+1 −1
Original line number Diff line number Diff line
@@ -174,7 +174,7 @@
	"check skb->tc_classid half load not permitted for lwt prog",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_classid)),
#else
+3 −3
Original line number Diff line number Diff line
@@ -2,7 +2,7 @@
	"check bpf_perf_event_data->sample_period byte load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period)),
#else
@@ -18,7 +18,7 @@
	"check bpf_perf_event_data->sample_period half load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period)),
#else
@@ -34,7 +34,7 @@
	"check bpf_perf_event_data->sample_period word load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period)),
#else