@@ -187,6 +187,11 @@ struct bpf_verifier_stack_elem {
 					  POISON_POINTER_DELTA))
 #define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
 
+/* forward declarations */
+static void release_reg_references(struct bpf_verifier_env *env,
+				   struct bpf_func_state *state,
+				   int ref_obj_id);
+
 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
 {
 	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
@@ -523,6 +528,11 @@ static bool is_ptr_cast_function(enum bpf_func_id func_id)
 		func_id == BPF_FUNC_skc_to_tcp_request_sock;
 }
 
+static inline bool is_dynptr_ref_function(enum bpf_func_id func_id)
+{
+	return func_id == BPF_FUNC_dynptr_data;
+}
+
 static bool is_cmpxchg_insn(const struct bpf_insn *insn)
 {
 	return BPF_CLASS(insn->code) == BPF_STX &&
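
Note: is_dynptr_ref_function() classifies helpers that hand out a pointer into a dynptr's underlying memory; so far that is only bpf_dynptr_data(). A minimal sketch of the program-side pattern this covers, assuming the bpf_dynptr_data() signature from this series; the surrounding lines are illustrative, not part of the patch:

	struct bpf_dynptr ptr;
	__u8 *data;

	/* ptr is assumed to have been initialized by one of the
	 * dynptr-acquiring helpers from this series.
	 */
	data = bpf_dynptr_data(&ptr, 0, 8);
	if (!data)
		return 0;
	data[0] = 0xff;	/* "data" is a slice tied to ptr's lifetime */
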
@@ -700,7 +710,7 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
 {
 	struct bpf_func_state *state = cur_func(env);
 	enum bpf_dynptr_type type;
-	int spi, i, err;
+	int spi, id, i, err;
 
 	spi = get_spi(reg->off);
 
@@ -721,19 +731,36 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
 
 	state->stack[spi].spilled_ptr.dynptr_first_slot = true;
 
+	/* Generate an id for the dynptr if the dynptr type can be
+	 * acquired/released.
+	 *
+	 * This is used to associate data slices with dynptrs, so that
+	 * if a dynptr gets invalidated, its data slices will also be
+	 * invalidated.
+	 */
+	if (dynptr_type_refcounted(state, spi)) {
+		id = ++env->id_gen;
+		state->stack[spi].spilled_ptr.id = id;
+		state->stack[spi - 1].spilled_ptr.id = id;
+	}
+
 	return 0;
 }
 
 static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
 {
+	struct bpf_verifier_state *vstate = env->cur_state;
 	struct bpf_func_state *state = func(env, reg);
+	bool refcounted;
 	int spi, i;
 
 	spi = get_spi(reg->off);
 
 	if (!check_spi_bounds(state, spi, BPF_DYNPTR_NR_SLOTS))
 		return -EINVAL;
 
+	refcounted = dynptr_type_refcounted(state, spi);
+
 	for (i = 0; i < BPF_REG_SIZE; i++) {
 		state->stack[spi].slot_type[i] = STACK_INVALID;
 		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
@@ -743,6 +770,15 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re
 	state->stack[spi].spilled_ptr.dynptr_first_slot = 0;
 	state->stack[spi - 1].spilled_ptr.dynptr_type = 0;
 
+	/* Invalidate any slices associated with this dynptr */
+	if (refcounted) {
+		for (i = 0; i <= vstate->curframe; i++)
+			release_reg_references(env, vstate->frame[i],
+					       state->stack[spi].spilled_ptr.id);
+		state->stack[spi].spilled_ptr.id = 0;
+		state->stack[spi - 1].spilled_ptr.id = 0;
+	}
+
 	return 0;
 }
 
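
Taken together with the id assignment in mark_stack_slots_dynptr(), this gives use-after-release detection for data slices. A sketch of a program the verifier should now reject, assuming the ringbuf dynptr helpers introduced earlier in this series; the map and section names are made up for the example:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf SEC(".maps");

SEC("tp/syscalls/sys_enter_nanosleep")
int use_slice_after_release(void *ctx)
{
	struct bpf_dynptr ptr;
	__u64 *data;

	/* Acquires a refcounted dynptr; both of its stack slots get
	 * the same verifier id.
	 */
	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*data), 0, &ptr);

	/* The slice inherits the dynptr's id as its ref_obj_id. */
	data = bpf_dynptr_data(&ptr, 0, sizeof(*data));

	/* Releasing the dynptr walks all frames and invalidates every
	 * register whose ref_obj_id matches, including "data".
	 */
	bpf_ringbuf_discard_dynptr(&ptr, 0);

	if (data)
		*data = 123;	/* rejected: slice outlived its dynptr */

	return 0;
}
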
@@ -780,6 +816,19 @@ static bool check_dynptr_init(struct bpf_verifier_env *env, struct bpf_reg_state
 	return state->stack[spi].spilled_ptr.dynptr_type == expected_type;
 }
 
+static bool is_ref_obj_id_dynptr(struct bpf_func_state *state, u32 id)
+{
+	int i;
+
+	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+		if (state->stack[i].slot_type[0] == STACK_DYNPTR &&
+		    state->stack[i].spilled_ptr.id == id)
+			return true;
+	}
+
+	return false;
+}
+
 static bool stack_access_into_dynptr(struct bpf_func_state *state, int spi, int size)
 {
 	int nr_slots, i;
@@ -5585,6 +5634,14 @@ static bool id_in_stack_slot(enum bpf_arg_type arg_type)
 	return arg_type_is_dynptr(arg_type);
 }
 
+static inline u32 stack_slot_get_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+	struct bpf_func_state *state = func(env, reg);
+	int spi = get_spi(reg->off);
+
+	return state->stack[spi].spilled_ptr.id;
+}
+
 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
			  struct bpf_call_arg_meta *meta,
			  const struct bpf_func_proto *fn)
@@ -7114,6 +7171,14 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		regs[BPF_REG_0].id = id;
 		/* For release_reference() */
 		regs[BPF_REG_0].ref_obj_id = id;
+	} else if (is_dynptr_ref_function(func_id)) {
+		/* Retrieve the id of the associated dynptr. */
+		int id = stack_slot_get_id(env, &regs[BPF_REG_1]);
+
+		if (id < 0)
+			return id;
+		regs[BPF_REG_0].id = id;
+		regs[BPF_REG_0].ref_obj_id = id;
 	}
 
 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
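
R1 is the dynptr argument of bpf_dynptr_data(), so the slice returned in R0 inherits the dynptr's id instead of getting a fresh one the way acquiring helpers do. One consequence: multiple slices of the same dynptr share one id and die together. A sketch under the same assumptions as above:

	struct bpf_dynptr ptr;
	__u8 *a, *b;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);

	/* Both slices get ref_obj_id == the dynptr's id. */
	a = bpf_dynptr_data(&ptr, 0, 8);
	b = bpf_dynptr_data(&ptr, 8, 8);

	bpf_ringbuf_submit_dynptr(&ptr, 0);
	/* Both "a" and "b" are now invalid, in one release pass. */
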
@@ -9545,7 +9610,8 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
 	u32 id = regs[regno].id;
 	int i;
 
-	if (ref_obj_id && ref_obj_id == id && is_null)
+	if (ref_obj_id && ref_obj_id == id && is_null &&
+	    !is_ref_obj_id_dynptr(state, id))
 		/* regs[regno] is in the " == NULL" branch.
 		 * No one could have freed the reference state before
		 * doing the NULL check.
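
The dynptr exception matters because a slice's ref_obj_id is a dynptr id, not a reference the helper itself acquired: bpf_dynptr_data() returning NULL only means the slice request failed, not that the underlying object is gone. Without this check, the NULL branch would drop the dynptr's tracking state even though the dynptr still has to be released. A sketch of the pattern that must keep verifying, same assumptions as above:

	struct bpf_dynptr ptr;
	__u64 *data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*data), 0, &ptr);

	data = bpf_dynptr_data(&ptr, 0, sizeof(*data));
	if (!data) {
		/* A NULL slice must not free the dynptr's state: the
		 * dynptr itself is still live and still has to be
		 * discarded.
		 */
		bpf_ringbuf_discard_dynptr(&ptr, 0);
		return 0;
	}

	*data = 0;
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;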