Skip to content

Commit 6c3319d

Browse files
joannekoong authored and Nobody committed
bpf: Add dynptr data slices
This patch adds a new helper function void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len); which returns a pointer to the underlying data of a dynptr. *len* must be a statically known value. The bpf program may access the returned data slice as a normal buffer (eg can do direct reads and writes), since the verifier associates the length with the returned pointer, and enforces that no out of bounds accesses occur. This requires a few additions to the verifier. For every referenced-tracked dynptr that is initialized, we associate an id with it and attach any data slices to that id. When a release function is called on a dynptr (eg bpf_free), we invalidate all slices that correspond to that dynptr. This ensures the slice can't be used after its dynptr has been invalidated. Signed-off-by: Joanne Koong <[email protected]>
1 parent 10ca6ef commit 6c3319d

File tree

5 files changed

+122
-2
lines changed

5 files changed

+122
-2
lines changed

include/linux/bpf_verifier.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,8 @@ struct bpf_reg_state {
100100
* for the purpose of tracking that it's freed.
101101
* For PTR_TO_SOCKET this is used to share which pointers retain the
102102
* same reference to the socket, to determine proper reference freeing.
103+
* For stack slots that are dynptrs, this is used to track references to
104+
* the dynptr to enforce proper reference freeing.
103105
*/
104106
u32 id;
105107
/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned

include/uapi/linux/bpf.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5191,6 +5191,17 @@ union bpf_attr {
51915191
* Return
51925192
* 0 on success, -EINVAL if *offset* + *len* exceeds the length
51935193
* of *dst*'s data or if *dst* is not writeable.
5194+
*
5195+
* void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)
5196+
* Description
5197+
* Get a pointer to the underlying dynptr data.
5198+
*
5199+
* *len* must be a statically known value. The returned data slice
5200+
* is invalidated whenever the dynptr is invalidated.
5201+
* Return
5202+
* Pointer to the underlying dynptr data, NULL if the ptr is
5203+
* read-only, if the dynptr is invalid, or if the offset and length
5204+
* are out of bounds.
51945205
*/
51955206
#define __BPF_FUNC_MAPPER(FN) \
51965207
FN(unspec), \
@@ -5392,6 +5403,7 @@ union bpf_attr {
53925403
FN(free), \
53935404
FN(dynptr_read), \
53945405
FN(dynptr_write), \
5406+
FN(dynptr_data), \
53955407
/* */
53965408

53975409
/* integer value in 'imm' field of BPF_CALL instruction selects which helper

kernel/bpf/helpers.c

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1412,6 +1412,32 @@ const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
14121412
.arg3_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT,
14131413
};
14141414

1415+
/*
 * bpf_dynptr_data - return a direct pointer into a dynptr's payload.
 *
 * Returns 0 (NULL to the bpf program) when the dynptr has no backing
 * data (invalid/invalidated), when [offset, offset + len) falls outside
 * the dynptr's data, or when the dynptr is read-only.  Otherwise returns
 * the address of the requested data slice.  The verifier associates the
 * statically known *len* with the returned pointer and bounds-checks
 * all accesses through it.
 */
BPF_CALL_3(bpf_dynptr_data, struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
{
	/* An uninitialized or invalidated dynptr has a NULL data pointer. */
	if (!ptr->data)
		return 0;

	if (bpf_dynptr_check_off_len(ptr, offset, len))
		return 0;

	/* Slices permit direct writes, so none are handed out for
	 * read-only dynptrs.
	 */
	if (bpf_dynptr_is_rdonly(ptr))
		return 0;

	return (unsigned long)(ptr->data + ptr->offset + offset);
}
1432+
const struct bpf_func_proto bpf_dynptr_data_proto = {
1433+
.func = bpf_dynptr_data,
1434+
.gpl_only = false,
1435+
.ret_type = RET_PTR_TO_ALLOC_MEM_OR_NULL,
1436+
.arg1_type = ARG_PTR_TO_DYNPTR,
1437+
.arg2_type = ARG_ANYTHING,
1438+
.arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO,
1439+
};
1440+
14151441
BPF_CALL_4(bpf_dynptr_read, void *, dst, u32, len, struct bpf_dynptr_kern *, src, u32, offset)
14161442
{
14171443
int err;
@@ -1570,6 +1596,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
15701596
return &bpf_dynptr_read_proto;
15711597
case BPF_FUNC_dynptr_write:
15721598
return &bpf_dynptr_write_proto;
1599+
case BPF_FUNC_dynptr_data:
1600+
return &bpf_dynptr_data_proto;
15731601
default:
15741602
break;
15751603
}

kernel/bpf/verifier.c

Lines changed: 68 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -187,6 +187,11 @@ struct bpf_verifier_stack_elem {
187187
POISON_POINTER_DELTA))
188188
#define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
189189

190+
/* forward declarations */
191+
static void release_reg_references(struct bpf_verifier_env *env,
192+
struct bpf_func_state *state,
193+
int ref_obj_id);
194+
190195
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
191196
{
192197
return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
@@ -523,6 +528,11 @@ static bool is_ptr_cast_function(enum bpf_func_id func_id)
523528
func_id == BPF_FUNC_skc_to_tcp_request_sock;
524529
}
525530

531+
static inline bool is_dynptr_ref_function(enum bpf_func_id func_id)
532+
{
533+
return func_id == BPF_FUNC_dynptr_data;
534+
}
535+
526536
static bool is_cmpxchg_insn(const struct bpf_insn *insn)
527537
{
528538
return BPF_CLASS(insn->code) == BPF_STX &&
@@ -700,7 +710,7 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
700710
{
701711
struct bpf_func_state *state = cur_func(env);
702712
enum bpf_dynptr_type type;
703-
int spi, i, err;
713+
int spi, id, i, err;
704714

705715
spi = get_spi(reg->off);
706716

@@ -721,19 +731,36 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
721731

722732
state->stack[spi].spilled_ptr.dynptr_first_slot = true;
723733

734+
/* Generate an id for the dynptr if the dynptr type can be
735+
* acquired/released.
736+
*
737+
* This is used to associate data slices with dynptrs, so that
738+
* if a dynptr gets invalidated, its data slices will also be
739+
* invalidated.
740+
*/
741+
if (dynptr_type_refcounted(state, spi)) {
742+
id = ++env->id_gen;
743+
state->stack[spi].spilled_ptr.id = id;
744+
state->stack[spi - 1].spilled_ptr.id = id;
745+
}
746+
724747
return 0;
725748
}
726749

727750
static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
728751
{
752+
struct bpf_verifier_state *vstate = env->cur_state;
729753
struct bpf_func_state *state = func(env, reg);
754+
bool refcounted;
730755
int spi, i;
731756

732757
spi = get_spi(reg->off);
733758

734759
if (!check_spi_bounds(state, spi, BPF_DYNPTR_NR_SLOTS))
735760
return -EINVAL;
736761

762+
refcounted = dynptr_type_refcounted(state, spi);
763+
737764
for (i = 0; i < BPF_REG_SIZE; i++) {
738765
state->stack[spi].slot_type[i] = STACK_INVALID;
739766
state->stack[spi - 1].slot_type[i] = STACK_INVALID;
@@ -743,6 +770,15 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re
743770
state->stack[spi].spilled_ptr.dynptr_first_slot = 0;
744771
state->stack[spi - 1].spilled_ptr.dynptr_type = 0;
745772

773+
/* Invalidate any slices associated with this dynptr */
774+
if (refcounted) {
775+
for (i = 0; i <= vstate->curframe; i++)
776+
release_reg_references(env, vstate->frame[i],
777+
state->stack[spi].spilled_ptr.id);
778+
state->stack[spi].spilled_ptr.id = 0;
779+
state->stack[spi - 1].spilled_ptr.id = 0;
780+
}
781+
746782
return 0;
747783
}
748784

@@ -780,6 +816,19 @@ static bool check_dynptr_init(struct bpf_verifier_env *env, struct bpf_reg_state
780816
return state->stack[spi].spilled_ptr.dynptr_type == expected_type;
781817
}
782818

819+
/* Does the given id belong to a dynptr stored in this function's stack?
 * Used to distinguish dynptr-backed slice references from other tracked
 * references (e.g. sockets) when deciding how to release them.
 */
static bool is_ref_obj_id_dynptr(struct bpf_func_state *state, u32 id)
{
	int nr_slots = state->allocated_stack / BPF_REG_SIZE;
	int slot;

	for (slot = 0; slot < nr_slots; slot++)
		if (state->stack[slot].slot_type[0] == STACK_DYNPTR &&
		    state->stack[slot].spilled_ptr.id == id)
			return true;

	return false;
}
783832
static bool stack_access_into_dynptr(struct bpf_func_state *state, int spi, int size)
784833
{
785834
int nr_slots, i;
@@ -5585,6 +5634,14 @@ static bool id_in_stack_slot(enum bpf_arg_type arg_type)
55855634
return arg_type_is_dynptr(arg_type);
55865635
}
55875636

5637+
static inline u32 stack_slot_get_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
5638+
{
5639+
struct bpf_func_state *state = func(env, reg);
5640+
int spi = get_spi(reg->off);
5641+
5642+
return state->stack[spi].spilled_ptr.id;
5643+
}
5644+
55885645
static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
55895646
struct bpf_call_arg_meta *meta,
55905647
const struct bpf_func_proto *fn)
@@ -7114,6 +7171,14 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
71147171
regs[BPF_REG_0].id = id;
71157172
/* For release_reference() */
71167173
regs[BPF_REG_0].ref_obj_id = id;
7174+
} else if (is_dynptr_ref_function(func_id)) {
7175+
/* Retrieve the id of the associated dynptr. */
7176+
int id = stack_slot_get_id(env, &regs[BPF_REG_1]);
7177+
7178+
if (id < 0)
7179+
return id;
7180+
regs[BPF_REG_0].id = id;
7181+
regs[BPF_REG_0].ref_obj_id = id;
71177182
}
71187183

71197184
do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
@@ -9545,7 +9610,8 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
95459610
u32 id = regs[regno].id;
95469611
int i;
95479612

9548-
if (ref_obj_id && ref_obj_id == id && is_null)
9613+
if (ref_obj_id && ref_obj_id == id && is_null &&
9614+
!is_ref_obj_id_dynptr(state, id))
95499615
/* regs[regno] is in the " == NULL" branch.
95509616
* No one could have freed the reference state before
95519617
* doing the NULL check.

tools/include/uapi/linux/bpf.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5191,6 +5191,17 @@ union bpf_attr {
51915191
* Return
51925192
* 0 on success, -EINVAL if *offset* + *len* exceeds the length
51935193
* of *dst*'s data or if *dst* is not writeable.
5194+
*
5195+
* void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)
5196+
* Description
5197+
* Get a pointer to the underlying dynptr data.
5198+
*
5199+
* *len* must be a statically known value. The returned data slice
5200+
* is invalidated whenever the dynptr is invalidated.
5201+
* Return
5202+
* Pointer to the underlying dynptr data, NULL if the ptr is
5203+
* read-only, if the dynptr is invalid, or if the offset and length
5204+
* are out of bounds.
51945205
*/
51955206
#define __BPF_FUNC_MAPPER(FN) \
51965207
FN(unspec), \
@@ -5392,6 +5403,7 @@ union bpf_attr {
53925403
FN(free), \
53935404
FN(dynptr_read), \
53945405
FN(dynptr_write), \
5406+
FN(dynptr_data), \
53955407
/* */
53965408

53975409
/* integer value in 'imm' field of BPF_CALL instruction selects which helper

0 commit comments

Comments
 (0)