[RFC PATCH v2 2/7] bpf: Add vnet_hash members to __sk_buff

From: Akihiko Odaki
Date: Sun Oct 15 2023 - 10:17:35 EST


They will be used only by BPF_PROG_TYPE_VNET_HASH programs to tell which
queues should deliver packets, and to report the hash values and hash
types carried in virtio-net headers.

Signed-off-by: Akihiko Odaki <akihiko.odaki@xxxxxxxxxx>
---
include/linux/filter.h | 7 ++++
net/core/filter.c | 77 +++++++++++++++++++++++++++++++++-
tools/include/uapi/linux/bpf.h | 4 ++
3 files changed, 86 insertions(+), 2 deletions(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index bf7ad887943c..d10afe92ee45 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -643,6 +643,13 @@ struct bpf_skb_data_end {
void *data_end;
};

+struct bpf_skb_vnet_hash_end {
+ struct qdisc_skb_cb qdisc_cb;
+ u32 hash_value;
+ u16 hash_report;
+ u16 rss_queue;
+};
+
struct bpf_nh_params {
u32 nh_family;
union {
diff --git a/net/core/filter.c b/net/core/filter.c
index 867edbc628de..35bc60b71722 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8435,9 +8435,15 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
case bpf_ctx_range(struct __sk_buff, data):
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range(struct __sk_buff, data_end):
+ case bpf_ctx_range(struct __sk_buff, vnet_hash_value):
if (size != size_default)
return false;
break;
+ case bpf_ctx_range(struct __sk_buff, vnet_hash_report):
+ case bpf_ctx_range(struct __sk_buff, vnet_rss_queue):
+ if (size != sizeof(__u16))
+ return false;
+ break;
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
return false;
case bpf_ctx_range(struct __sk_buff, hwtstamp):
@@ -8473,7 +8479,7 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
return true;
}

-static bool sk_filter_is_valid_access(int off, int size,
+static bool vnet_hash_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
@@ -8493,6 +8499,9 @@ static bool sk_filter_is_valid_access(int off, int size,
if (type == BPF_WRITE) {
switch (off) {
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
+ case bpf_ctx_range(struct __sk_buff, vnet_hash_value):
+ case bpf_ctx_range(struct __sk_buff, vnet_hash_report):
+ case bpf_ctx_range(struct __sk_buff, vnet_rss_queue):
break;
default:
return false;
@@ -8502,6 +8511,21 @@ static bool sk_filter_is_valid_access(int off, int size,
return bpf_skb_is_valid_access(off, size, type, prog, info);
}

+static bool sk_filter_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ switch (off) {
+ case bpf_ctx_range(struct __sk_buff, vnet_hash_value):
+ case bpf_ctx_range(struct __sk_buff, vnet_hash_report):
+ case bpf_ctx_range(struct __sk_buff, vnet_rss_queue):
+ return false;
+ }
+
+ return vnet_hash_is_valid_access(off, size, type, prog, info);
+}
+
static bool cg_skb_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
@@ -8511,6 +8535,9 @@ static bool cg_skb_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range(struct __sk_buff, wire_len):
+ case bpf_ctx_range(struct __sk_buff, vnet_hash_value):
+ case bpf_ctx_range(struct __sk_buff, vnet_hash_report):
+ case bpf_ctx_range(struct __sk_buff, vnet_rss_queue):
return false;
case bpf_ctx_range(struct __sk_buff, data):
case bpf_ctx_range(struct __sk_buff, data_end):
@@ -8558,6 +8585,9 @@ static bool lwt_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, tstamp):
case bpf_ctx_range(struct __sk_buff, wire_len):
case bpf_ctx_range(struct __sk_buff, hwtstamp):
+ case bpf_ctx_range(struct __sk_buff, vnet_hash_value):
+ case bpf_ctx_range(struct __sk_buff, vnet_hash_report):
+ case bpf_ctx_range(struct __sk_buff, vnet_rss_queue):
return false;
}

@@ -8799,6 +8829,10 @@ static bool tc_cls_act_is_valid_access(int off, int size,
}

switch (off) {
+ case bpf_ctx_range(struct __sk_buff, vnet_hash_value):
+ case bpf_ctx_range(struct __sk_buff, vnet_hash_report):
+ case bpf_ctx_range(struct __sk_buff, vnet_rss_queue):
+ return false;
case bpf_ctx_range(struct __sk_buff, data):
info->reg_type = PTR_TO_PACKET;
break;
@@ -9117,6 +9151,9 @@ static bool sk_skb_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, tstamp):
case bpf_ctx_range(struct __sk_buff, wire_len):
case bpf_ctx_range(struct __sk_buff, hwtstamp):
+ case bpf_ctx_range(struct __sk_buff, vnet_hash_value):
+ case bpf_ctx_range(struct __sk_buff, vnet_hash_report):
+ case bpf_ctx_range(struct __sk_buff, vnet_rss_queue):
return false;
}

@@ -9727,6 +9764,42 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
hwtstamps, 8,
target_size));
break;
+
+ case offsetof(struct __sk_buff, vnet_hash_value):
+ BUILD_BUG_ON(sizeof_field(struct bpf_skb_vnet_hash_end, hash_value) != 4);
+
+ off = offsetof(struct sk_buff, cb) +
+ offsetof(struct bpf_skb_vnet_hash_end, hash_value);
+
+ if (type == BPF_WRITE)
+ *insn++ = BPF_EMIT_STORE(BPF_W, si, off);
+ else
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
+ break;
+
+ case offsetof(struct __sk_buff, vnet_hash_report):
+ BUILD_BUG_ON(sizeof_field(struct bpf_skb_vnet_hash_end, hash_report) != 2);
+
+ off = offsetof(struct sk_buff, cb) +
+ offsetof(struct bpf_skb_vnet_hash_end, hash_report);
+
+ if (type == BPF_WRITE)
+ *insn++ = BPF_EMIT_STORE(BPF_H, si, off);
+ else
+ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, off);
+ break;
+
+ case offsetof(struct __sk_buff, vnet_rss_queue):
+ BUILD_BUG_ON(sizeof_field(struct bpf_skb_vnet_hash_end, rss_queue) != 2);
+
+ off = offsetof(struct sk_buff, cb) +
+ offsetof(struct bpf_skb_vnet_hash_end, rss_queue);
+
+ if (type == BPF_WRITE)
+ *insn++ = BPF_EMIT_STORE(BPF_H, si, off);
+ else
+ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, off);
+ break;
}

return insn - insn_buf;
@@ -10969,7 +11042,7 @@ const struct bpf_prog_ops flow_dissector_prog_ops = {

const struct bpf_verifier_ops vnet_hash_verifier_ops = {
.get_func_proto = sk_filter_func_proto,
- .is_valid_access = sk_filter_is_valid_access,
+ .is_valid_access = vnet_hash_is_valid_access,
.convert_ctx_access = bpf_convert_ctx_access,
.gen_ld_abs = bpf_gen_ld_abs,
};
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 60976fe86247..298634556fab 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -6112,6 +6112,10 @@ struct __sk_buff {
__u8 tstamp_type;
__u32 :24; /* Padding, future use. */
__u64 hwtstamp;
+
+ __u32 vnet_hash_value;
+ __u16 vnet_hash_report;
+ __u16 vnet_rss_queue;
};

struct bpf_tunnel_key {
--
2.42.0