Skip to content

Commit a3ce685

Browse files
Alexei Starovoitov authored and Daniel Borkmann committed
bpf: fix precision tracking
When equivalent state is found the current state needs to propagate precision marks. Otherwise the verifier will prune the search incorrectly.

There is a price for correctness:

                      before  before  broken   fixed
                        cnst   spill precise precise
bpf_lb-DLB_L3.o         1923    8128    1863    1898
bpf_lb-DLB_L4.o         3077    6707    2468    2666
bpf_lb-DUNKNOWN.o       1062    1062     544     544
bpf_lxc-DDROP_ALL.o   166729  380712   22629   36823
bpf_lxc-DUNKNOWN.o    174607  440652   28805   45325
bpf_netdev.o            8407   31904    6801    7002
bpf_overlay.o           5420   23569    4754    4858
bpf_lxc_jit.o          39389  359445   50925   69631

Overall precision tracking is still very effective.

Fixes: b5dc016 ("bpf: precise scalar_value tracking")
Reported-by: Lawrence Brakmo <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Tested-by: Lawrence Brakmo <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
1 parent 8daed76 commit a3ce685

File tree

1 file changed

+107
-14
lines changed

1 file changed

+107
-14
lines changed

kernel/bpf/verifier.c

Lines changed: 107 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1659,35 +1659,62 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
16591659
}
16601660
}
16611661

1662-
static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
1662+
static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
1663+
int spi)
16631664
{
16641665
struct bpf_verifier_state *st = env->cur_state;
16651666
int first_idx = st->first_insn_idx;
16661667
int last_idx = env->insn_idx;
16671668
struct bpf_func_state *func;
16681669
struct bpf_reg_state *reg;
1669-
u32 reg_mask = 1u << regno;
1670-
u64 stack_mask = 0;
1670+
u32 reg_mask = regno >= 0 ? 1u << regno : 0;
1671+
u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
16711672
bool skip_first = true;
1673+
bool new_marks = false;
16721674
int i, err;
16731675

16741676
if (!env->allow_ptr_leaks)
16751677
/* backtracking is root only for now */
16761678
return 0;
16771679

16781680
func = st->frame[st->curframe];
1679-
reg = &func->regs[regno];
1680-
if (reg->type != SCALAR_VALUE) {
1681-
WARN_ONCE(1, "backtracing misuse");
1682-
return -EFAULT;
1681+
if (regno >= 0) {
1682+
reg = &func->regs[regno];
1683+
if (reg->type != SCALAR_VALUE) {
1684+
WARN_ONCE(1, "backtracing misuse");
1685+
return -EFAULT;
1686+
}
1687+
if (!reg->precise)
1688+
new_marks = true;
1689+
else
1690+
reg_mask = 0;
1691+
reg->precise = true;
16831692
}
1684-
if (reg->precise)
1685-
return 0;
1686-
func->regs[regno].precise = true;
16871693

1694+
while (spi >= 0) {
1695+
if (func->stack[spi].slot_type[0] != STACK_SPILL) {
1696+
stack_mask = 0;
1697+
break;
1698+
}
1699+
reg = &func->stack[spi].spilled_ptr;
1700+
if (reg->type != SCALAR_VALUE) {
1701+
stack_mask = 0;
1702+
break;
1703+
}
1704+
if (!reg->precise)
1705+
new_marks = true;
1706+
else
1707+
stack_mask = 0;
1708+
reg->precise = true;
1709+
break;
1710+
}
1711+
1712+
if (!new_marks)
1713+
return 0;
1714+
if (!reg_mask && !stack_mask)
1715+
return 0;
16881716
for (;;) {
16891717
DECLARE_BITMAP(mask, 64);
1690-
bool new_marks = false;
16911718
u32 history = st->jmp_history_cnt;
16921719

16931720
if (env->log.level & BPF_LOG_LEVEL)
@@ -1730,12 +1757,15 @@ static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
17301757
if (!st)
17311758
break;
17321759

1760+
new_marks = false;
17331761
func = st->frame[st->curframe];
17341762
bitmap_from_u64(mask, reg_mask);
17351763
for_each_set_bit(i, mask, 32) {
17361764
reg = &func->regs[i];
1737-
if (reg->type != SCALAR_VALUE)
1765+
if (reg->type != SCALAR_VALUE) {
1766+
reg_mask &= ~(1u << i);
17381767
continue;
1768+
}
17391769
if (!reg->precise)
17401770
new_marks = true;
17411771
reg->precise = true;
@@ -1756,11 +1786,15 @@ static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
17561786
return -EFAULT;
17571787
}
17581788

1759-
if (func->stack[i].slot_type[0] != STACK_SPILL)
1789+
if (func->stack[i].slot_type[0] != STACK_SPILL) {
1790+
stack_mask &= ~(1ull << i);
17601791
continue;
1792+
}
17611793
reg = &func->stack[i].spilled_ptr;
1762-
if (reg->type != SCALAR_VALUE)
1794+
if (reg->type != SCALAR_VALUE) {
1795+
stack_mask &= ~(1ull << i);
17631796
continue;
1797+
}
17641798
if (!reg->precise)
17651799
new_marks = true;
17661800
reg->precise = true;
@@ -1772,6 +1806,8 @@ static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
17721806
reg_mask, stack_mask);
17731807
}
17741808

1809+
if (!reg_mask && !stack_mask)
1810+
break;
17751811
if (!new_marks)
17761812
break;
17771813

@@ -1781,6 +1817,15 @@ static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
17811817
return 0;
17821818
}
17831819

1820+
/* Request precision tracking for register @regno only; passes spi == -1
 * so that __mark_chain_precision() skips its stack-slot handling.
 */
static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
{
	return __mark_chain_precision(env, regno, -1);
}
1824+
1825+
/* Request precision tracking for stack slot @spi only; passes regno == -1
 * so that __mark_chain_precision() skips its register handling.
 */
static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
{
	return __mark_chain_precision(env, -1, spi);
}
17841829

17851830
static bool is_spillable_regtype(enum bpf_reg_type type)
17861831
{
@@ -7111,6 +7156,46 @@ static int propagate_liveness(struct bpf_verifier_env *env,
71117156
return 0;
71127157
}
71137158

7159+
/* find precise scalars in the previous equivalent state and
 * propagate them into the current state
 */
static int propagate_precision(struct bpf_verifier_env *env,
			       const struct bpf_verifier_state *old)
{
	struct bpf_reg_state *state_reg;
	struct bpf_func_state *state;
	int i, err = 0;

	/* Only the current frame of the old (equivalent) state is scanned. */
	state = old->frame[old->curframe];

	/* Pass 1: registers r0..r9 — the loop stops before BPF_REG_FP.
	 * Every register that is a precise scalar in @old is re-marked in
	 * the current state through the backtracking chain.
	 */
	state_reg = state->regs;
	for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
		if (state_reg->type != SCALAR_VALUE ||
		    !state_reg->precise)
			continue;
		if (env->log.level & BPF_LOG_LEVEL2)
			verbose(env, "propagating r%d\n", i);
		err = mark_chain_precision(env, i);
		if (err < 0)
			return err;
	}

	/* Pass 2: spilled scalars on the stack, one slot per BPF_REG_SIZE
	 * bytes; only slots whose first byte is marked STACK_SPILL are
	 * considered.  The fp%d offset logged below converts the slot
	 * index to the negative frame-pointer offset of that slot.
	 */
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		if (state->stack[i].slot_type[0] != STACK_SPILL)
			continue;
		state_reg = &state->stack[i].spilled_ptr;
		if (state_reg->type != SCALAR_VALUE ||
		    !state_reg->precise)
			continue;
		if (env->log.level & BPF_LOG_LEVEL2)
			verbose(env, "propagating fp%d\n",
				(-i - 1) * BPF_REG_SIZE);
		err = mark_chain_precision_stack(env, i);
		if (err < 0)
			return err;
	}
	return 0;
}
7198+
71147199
static bool states_maybe_looping(struct bpf_verifier_state *old,
71157200
struct bpf_verifier_state *cur)
71167201
{
@@ -7203,6 +7288,14 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
72037288
* this state and will pop a new one.
72047289
*/
72057290
err = propagate_liveness(env, &sl->state, cur);
7291+
7292+
/* if previous state reached the exit with precision and
7293+
* current state is equivalent to it (except precision marks)
7294+
* the precision needs to be propagated back in
7295+
* the current state.
7296+
*/
7297+
err = err ? : push_jmp_history(env, cur);
7298+
err = err ? : propagate_precision(env, &sl->state);
72067299
if (err)
72077300
return err;
72087301
return 1;

0 commit comments

Comments
 (0)