Support eliding map lookup nullness #4814

Closed
139 changes: 106 additions & 33 deletions kernel/bpf/verifier.c
@@ -287,6 +287,7 @@ struct bpf_call_arg_meta {
 	u32 ret_btf_id;
 	u32 subprogno;
 	struct btf_field *kptr_field;
+	s64 const_map_key;
 };
 
 struct bpf_kfunc_call_arg_meta {
@@ -5303,7 +5304,7 @@ enum bpf_access_src {
 static int check_stack_range_initialized(struct bpf_verifier_env *env,
 					 int regno, int off, int access_size,
 					 bool zero_size_allowed,
-					 enum bpf_access_src type,
+					 enum bpf_access_type type,
 					 struct bpf_call_arg_meta *meta);
 
 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
@@ -5336,7 +5337,7 @@ static int check_stack_read_var_off(struct bpf_verifier_env *env,
 	/* Note that we pass a NULL meta, so raw access will not be permitted.
 	 */
 	err = check_stack_range_initialized(env, ptr_regno, off, size,
-					    false, ACCESS_DIRECT, NULL);
+					    false, BPF_READ, NULL);
 	if (err)
 		return err;
 
@@ -7190,7 +7191,7 @@ static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
 static int check_stack_access_within_bounds(
 		struct bpf_verifier_env *env,
 		int regno, int off, int access_size,
-		enum bpf_access_src src, enum bpf_access_type type)
+		enum bpf_access_type type)
 {
 	struct bpf_reg_state *regs = cur_regs(env);
 	struct bpf_reg_state *reg = regs + regno;
@@ -7199,10 +7200,7 @@ static int check_stack_access_within_bounds(
 	int err;
 	char *err_extra;
 
-	if (src == ACCESS_HELPER)
-		/* We don't know if helpers are reading or writing (or both). */
-		err_extra = " indirect access to";
-	else if (type == BPF_READ)
+	if (type == BPF_READ)
 		err_extra = " read from";
 	else
 		err_extra = " write to";
@@ -7420,7 +7418,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 
 	} else if (reg->type == PTR_TO_STACK) {
 		/* Basic bounds checks. */
-		err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
+		err = check_stack_access_within_bounds(env, regno, off, size, t);
 		if (err)
 			return err;
 
@@ -7640,13 +7638,11 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
 static int check_stack_range_initialized(
 		struct bpf_verifier_env *env, int regno, int off,
 		int access_size, bool zero_size_allowed,
-		enum bpf_access_src type, struct bpf_call_arg_meta *meta)
+		enum bpf_access_type type, struct bpf_call_arg_meta *meta)
 {
 	struct bpf_reg_state *reg = reg_state(env, regno);
 	struct bpf_func_state *state = func(env, reg);
 	int err, min_off, max_off, i, j, slot, spi;
-	char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
-	enum bpf_access_type bounds_check_type;
 	/* Some accesses can write anything into the stack, others are
 	 * read-only.
 	 */
@@ -7657,18 +7653,10 @@ static int check_stack_range_initialized(
 		return -EACCES;
 	}
 
-	if (type == ACCESS_HELPER) {
-		/* The bounds checks for writes are more permissive than for
-		 * reads. However, if raw_mode is not set, we'll do extra
-		 * checks below.
-		 */
-		bounds_check_type = BPF_WRITE;
+	if (type == BPF_WRITE)
 		clobber = true;
-	} else {
-		bounds_check_type = BPF_READ;
-	}
-	err = check_stack_access_within_bounds(env, regno, off, access_size,
-					       type, bounds_check_type);
+
+	err = check_stack_access_within_bounds(env, regno, off, access_size, type);
 	if (err)
 		return err;
 
@@ -7685,8 +7673,8 @@ static int check_stack_range_initialized(
 		char tn_buf[48];
 
 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
-		verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
-			regno, err_extra, tn_buf);
+		verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
+			regno, tn_buf);
 		return -EACCES;
 	}
 	/* Only initialized buffer on stack is allowed to be accessed
@@ -7739,7 +7727,7 @@ static int check_stack_range_initialized(
 		slot = -i - 1;
 		spi = slot / BPF_REG_SIZE;
 		if (state->allocated_stack <= slot) {
-			verbose(env, "verifier bug: allocated_stack too small");
+			verbose(env, "verifier bug: allocated_stack too small\n");
 			return -EFAULT;
 		}
 
Expand Down Expand Up @@ -7767,14 +7755,14 @@ static int check_stack_range_initialized(
}

if (tnum_is_const(reg->var_off)) {
verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
err_extra, regno, min_off, i - min_off, access_size);
verbose(env, "invalid read from stack R%d off %d+%d size %d\n",
regno, min_off, i - min_off, access_size);
} else {
char tn_buf[48];

tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
err_extra, regno, tn_buf, i - min_off, access_size);
verbose(env, "invalid read from stack R%d var_off %s+%d size %d\n",
regno, tn_buf, i - min_off, access_size);
}
return -EACCES;
mark:
@@ -7849,7 +7837,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
 		return check_stack_range_initialized(
 				env,
 				regno, reg->off, access_size,
-				zero_size_allowed, ACCESS_HELPER, meta);
+				zero_size_allowed, access_type, meta);
 	case PTR_TO_BTF_ID:
 		return check_ptr_to_btf_access(env, regs, regno, reg->off,
 					       access_size, BPF_READ, -1);
@@ -9161,6 +9149,63 @@ static int check_reg_const_str(struct bpf_verifier_env *env,
 	return 0;
 }
 
+/* Returns constant key value if possible, else negative error */
+static s64 get_constant_map_key(struct bpf_verifier_env *env,
+				struct bpf_reg_state *key,
+				u32 key_size)
+{
+	struct bpf_func_state *state = func(env, key);
+	struct bpf_reg_state *reg;
+	int slot, spi, off;
+	int spill_size = 0;
+	int zero_size = 0;
+	int stack_off;
+	int i, err;
+	u8 *stype;
+
+	if (!env->bpf_capable)
+		return -EOPNOTSUPP;
+	if (key->type != PTR_TO_STACK)
+		return -EOPNOTSUPP;
+	if (!tnum_is_const(key->var_off))
+		return -EOPNOTSUPP;
+
+	stack_off = key->off + key->var_off.value;
+	slot = -stack_off - 1;
+	spi = slot / BPF_REG_SIZE;
+	off = slot % BPF_REG_SIZE;
+	stype = state->stack[spi].slot_type;
+
+	/* First handle precisely tracked STACK_ZERO */
+	for (i = off; i >= 0 && stype[i] == STACK_ZERO; i--)
+		zero_size++;
+	if (zero_size >= key_size)
+		return 0;
+
+	/* Check that stack contains a scalar spill of expected size */
+	if (!is_spilled_scalar_reg(&state->stack[spi]))
+		return -EOPNOTSUPP;
+	for (i = off; i >= 0 && stype[i] == STACK_SPILL; i--)
+		spill_size++;
+	if (spill_size != key_size)
+		return -EOPNOTSUPP;
+
+	reg = &state->stack[spi].spilled_ptr;
+	if (!tnum_is_const(reg->var_off))
+		/* Stack value not statically known */
+		return -EOPNOTSUPP;
+
+	/* We are relying on a constant value. So mark as precise
+	 * to prevent pruning on it.
+	 */
+	bt_set_frame_slot(&env->bt, env->cur_state->curframe, spi);
+	err = mark_chain_precision_batch(env);
+	if (err < 0)
+		return err;
+
+	return reg->var_off.value;
+}
+
 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
 			  struct bpf_call_arg_meta *meta,
 			  const struct bpf_func_proto *fn,
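For illustration, a minimal BPF C sketch of the key patterns this helper does and does not recognize (assumes linux/bpf.h and bpf/bpf_helpers.h; map and function names are hypothetical, not from this patch):

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, __u32);
	__type(value, __u64);
	__uint(max_entries, 16);
} illustrative_arr SEC(".maps");

SEC("?raw_tp")
int const_key_patterns(void *ctx)
{
	__u32 zero_key = 0;	/* zero-filled slot: found by the STACK_ZERO scan */
	__u32 const_key = 7;	/* constant spill: found via is_spilled_scalar_reg() */
	__u32 var_key = bpf_get_prandom_u32() & 0xf;	/* not constant: -EOPNOTSUPP */

	bpf_map_lookup_elem(&illustrative_arr, &zero_key);
	bpf_map_lookup_elem(&illustrative_arr, &const_key);
	bpf_map_lookup_elem(&illustrative_arr, &var_key);
	return 0;
}

Both stack encodings are handled because whether a constant key lands as STACK_ZERO slots or as a spilled known scalar depends on how the compiler materializes it.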
Expand All @@ -9171,6 +9216,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
enum bpf_arg_type arg_type = fn->arg_type[arg];
enum bpf_reg_type type = reg->type;
u32 *arg_btf_id = NULL;
u32 key_size;
int err = 0;

if (arg_type == ARG_DONTCARE)
Expand Down Expand Up @@ -9304,8 +9350,13 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
verbose(env, "invalid map_ptr to access map->key\n");
return -EACCES;
}
err = check_helper_mem_access(env, regno, meta->map_ptr->key_size,
BPF_READ, false, NULL);
key_size = meta->map_ptr->key_size;
err = check_helper_mem_access(env, regno, key_size, BPF_READ, false, NULL);
if (err)
return err;
meta->const_map_key = get_constant_map_key(env, reg, key_size);
if (meta->const_map_key < 0 && meta->const_map_key != -EOPNOTSUPP)
return meta->const_map_key;
break;
case ARG_PTR_TO_MAP_VALUE:
if (type_may_be_null(arg_type) && register_is_null(reg))
@@ -10829,6 +10880,21 @@ static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno
 		 state->callback_subprogno == subprogno);
 }
 
+/* Returns whether or not the given map type can potentially elide
+ * lookup return value nullness check. This is possible if the key
+ * is statically known.
+ */
+static bool can_elide_value_nullness(enum bpf_map_type type)
+{
+	switch (type) {
+	case BPF_MAP_TYPE_ARRAY:
+	case BPF_MAP_TYPE_PERCPU_ARRAY:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static int get_helper_proto(struct bpf_verifier_env *env, int func_id,
 			    const struct bpf_func_proto **ptr)
 {
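The elision is restricted to these array types because an in-bounds index always has a value; a hash lookup can miss even with a constant key, so it must keep PTR_MAYBE_NULL. A sketch of the non-elidable case (map and function names hypothetical):

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, __u32);
	__type(value, __u64);
	__uint(max_entries, 16);
} illustrative_hash SEC(".maps");

SEC("?raw_tp")
int hash_still_nullable(void *ctx)
{
	__u32 key = 3;
	__u64 *v = bpf_map_lookup_elem(&illustrative_hash, &key);

	if (!v)		/* still required: entry 3 may simply not exist */
		return 0;
	*v = 42;
	return 0;
}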
@@ -11195,10 +11261,17 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 				"kernel subsystem misconfigured verifier\n");
 			return -EINVAL;
 		}
+
+		if (func_id == BPF_FUNC_map_lookup_elem &&
+		    can_elide_value_nullness(meta.map_ptr->map_type) &&
+		    meta.const_map_key >= 0 &&
+		    meta.const_map_key < meta.map_ptr->max_entries)
+			ret_flag &= ~PTR_MAYBE_NULL;
+
 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
 		regs[BPF_REG_0].map_uid = meta.map_uid;
 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
-		if (!type_may_be_null(ret_type) &&
+		if (!type_may_be_null(ret_flag) &&
 		    btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK)) {
 			regs[BPF_REG_0].id = ++env->id_gen;
 		}
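Net effect: a bpf_map_lookup_elem() on an array map with a constant, in-bounds key now verifies without a NULL check. A minimal sketch (assumes bpf/bpf_helpers.h; names are illustrative):

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, __u32);
	__type(value, __u64);
	__uint(max_entries, 16);
} illustrative_arr SEC(".maps");

SEC("?raw_tp")
int elide_null_check(void *ctx)
{
	__u32 key = 3;	/* constant and < max_entries */
	__u64 *val = bpf_map_lookup_elem(&illustrative_arr, &key);

	*val = 42;	/* accepted: R0 no longer carries PTR_MAYBE_NULL */
	return 0;
}

A variable key, or a constant key >= max_entries, keeps PTR_MAYBE_NULL on the return value, and an unchecked dereference is rejected as before.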
2 changes: 1 addition & 1 deletion net/core/filter.c
@@ -7643,7 +7643,7 @@ static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = {
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
-	.arg2_type	= ARG_PTR_TO_MEM,
+	.arg2_type	= ARG_PTR_TO_MEM | MEM_WRITE,
 	.arg3_type	= ARG_CONST_SIZE,
 	.arg4_type	= ARG_ANYTHING,
 };
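bpf_sock_ops_load_hdr_opt() writes the located TCP header option back into the caller's buffer, so arg2 is tagged MEM_WRITE now that helper memory accesses carry an explicit read/write type. Typical usage, as a sketch (the option kind and buffer size are illustrative):

SEC("sockops")
int load_opt(struct bpf_sock_ops *skops)
{
	__u8 opt[4] = { 2 };	/* opt[0] = kind byte to search for, e.g. TCPOPT_MAXSEG */

	bpf_sock_ops_load_hdr_opt(skops, opt, sizeof(opt), 0);
	return 1;
}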
6 changes: 3 additions & 3 deletions tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -192,7 +192,7 @@ int ringbuf_invalid_api(void *ctx)
 
 /* Can't add a dynptr to a map */
 SEC("?raw_tp")
-__failure __msg("invalid indirect read from stack")
+__failure __msg("invalid read from stack")
 int add_dynptr_to_map1(void *ctx)
 {
 	struct bpf_dynptr ptr;
@@ -210,7 +210,7 @@ int add_dynptr_to_map1(void *ctx)
 
 /* Can't add a struct with an embedded dynptr to a map */
 SEC("?raw_tp")
-__failure __msg("invalid indirect read from stack")
+__failure __msg("invalid read from stack")
 int add_dynptr_to_map2(void *ctx)
 {
 	struct test_info x;
@@ -398,7 +398,7 @@ int data_slice_missing_null_check2(void *ctx)
  * dynptr argument
  */
 SEC("?raw_tp")
-__failure __msg("invalid indirect read from stack")
+__failure __msg("invalid read from stack")
 int invalid_helper1(void *ctx)
 {
 	struct bpf_dynptr ptr;
14 changes: 7 additions & 7 deletions tools/testing/selftests/bpf/progs/iters.c
@@ -524,11 +524,11 @@ int iter_subprog_iters(const void *ctx)
 }
 
 struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(type, BPF_MAP_TYPE_HASH);
 	__type(key, int);
 	__type(value, int);
 	__uint(max_entries, 1000);
-} arr_map SEC(".maps");
+} hash_map SEC(".maps");
 
 SEC("?raw_tp")
 __failure __msg("invalid mem access 'scalar'")
@@ -539,7 +539,7 @@ int iter_err_too_permissive1(const void *ctx)
 
 	MY_PID_GUARD();
 
-	map_val = bpf_map_lookup_elem(&arr_map, &key);
+	map_val = bpf_map_lookup_elem(&hash_map, &key);
 	if (!map_val)
 		return 0;
 
@@ -561,12 +561,12 @@ int iter_err_too_permissive2(const void *ctx)
 
 	MY_PID_GUARD();
 
-	map_val = bpf_map_lookup_elem(&arr_map, &key);
+	map_val = bpf_map_lookup_elem(&hash_map, &key);
 	if (!map_val)
 		return 0;
 
 	bpf_repeat(1000000) {
-		map_val = bpf_map_lookup_elem(&arr_map, &key);
+		map_val = bpf_map_lookup_elem(&hash_map, &key);
 	}
 
 	*map_val = 123;
@@ -585,7 +585,7 @@ int iter_err_too_permissive3(const void *ctx)
 	MY_PID_GUARD();
 
 	bpf_repeat(1000000) {
-		map_val = bpf_map_lookup_elem(&arr_map, &key);
+		map_val = bpf_map_lookup_elem(&hash_map, &key);
 		found = true;
 	}
 
@@ -606,7 +606,7 @@ int iter_tricky_but_fine(const void *ctx)
 	MY_PID_GUARD();
 
 	bpf_repeat(1000000) {
-		map_val = bpf_map_lookup_elem(&arr_map, &key);
+		map_val = bpf_map_lookup_elem(&hash_map, &key);
 		if (map_val) {
 			found = true;
 			break;
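The switch from an array to a hash map keeps these "too permissive" tests meaningful: with a constant key 0 into a 1000-entry array, the nullness elision above would prove the lookup non-NULL and the possibly-NULL paths the tests probe would disappear. A hash map stays nullable regardless of the key, e.g. (sketch, not part of the patch):

	int key = 0, *map_val;

	map_val = bpf_map_lookup_elem(&hash_map, &key);
	if (!map_val)	/* still reachable: entry 0 may not exist in a hash map */
		return 0;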
2 changes: 1 addition & 1 deletion tools/testing/selftests/bpf/progs/map_kptr_fail.c
@@ -345,7 +345,7 @@ int reject_indirect_global_func_access(struct __sk_buff *ctx)
 }
 
 SEC("?tc")
-__failure __msg("Unreleased reference id=5 alloc_insn=")
+__failure __msg("Unreleased reference id=4 alloc_insn=")
 int kptr_xchg_ref_state(struct __sk_buff *ctx)
 {
 	struct prog_test_ref_kfunc *p;
2 changes: 1 addition & 1 deletion tools/testing/selftests/bpf/progs/test_global_func10.c
@@ -26,7 +26,7 @@ __noinline int foo(const struct Big *big)
 }
 
 SEC("cgroup_skb/ingress")
-__failure __msg("invalid indirect access to stack")
+__failure __msg("invalid read from stack")
 int global_func10(struct __sk_buff *skb)
 {
 	const struct Small small = {.x = skb->len };
5 changes: 3 additions & 2 deletions tools/testing/selftests/bpf/progs/uninit_stack.c
@@ -70,7 +70,8 @@ __naked int helper_uninit_to_misc(void *ctx)
 		r1 = r10;			\
 		r1 += -128;			\
 		r2 = 32;			\
-		call %[bpf_trace_printk];	\
+		r3 = 0;				\
+		call %[bpf_probe_read_user];	\
 	/* Call to dummy() forces print_verifier_state(..., true),	\
 	 * thus showing the stack state, matched by __msg().		\
 	 */								\
@@ -79,7 +80,7 @@ __naked int helper_uninit_to_misc(void *ctx)
 		exit;				\
 "
 	:
-	: __imm(bpf_trace_printk),
+	: __imm(bpf_probe_read_user),
 	  __imm(dummy)
 	: __clobber_all);
 }
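bpf_trace_printk() only reads its buffer argument, and with helper accesses now typed, a helper read from uninitialized stack is rejected; this test needs the call to be accepted so the slots become STACK_MISC. bpf_probe_read_user() writes to the buffer instead, with r3 = 0 as the (NULL) user pointer, which may fail at runtime but verifies fine. A rough C equivalent of the new asm, as a sketch:

	char buf[32];	/* deliberately uninitialized stack */

	bpf_probe_read_user(buf, sizeof(buf), NULL);	/* destination-only access: uninit stack is OK */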