Skip to content

Commit fa0d976

Browse files
dverbeir authored and kernel-patches-bot committed
bpf: zero-fill re-used per-cpu map element
Zero-fill element values for all other cpus than current, just as when not using prealloc. This is the only way the bpf program can ensure known initial values for all cpus ('onallcpus' cannot be set when coming from the bpf program). The scenario is: bpf program inserts some elements in a per-cpu map, then deletes some (or userspace does). When later adding new elements using bpf_map_update_elem(), the bpf program can only set the value of the new elements for the current cpu. When prealloc is enabled, previously deleted elements are re-used. Without the fix, values for other cpus remain whatever they were when the re-used entry was previously freed. Fixes: 6c90598 ("bpf: pre-allocate hash map elements") Acked-by: Matthieu Baerts <[email protected]> Signed-off-by: David Verbeiren <[email protected]>
1 parent 68ed5ea commit fa0d976

File tree

1 file changed

+28
-2
lines changed

1 file changed

+28
-2
lines changed

kernel/bpf/hashtab.c

Lines changed: 28 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -821,6 +821,32 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
821821
}
822822
}
823823

824+
static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
825+
void *value, bool onallcpus)
826+
{
827+
/* When using prealloc and not setting the initial value on all cpus,
828+
* zero-fill element values for other cpus (just as what happens when
829+
* not using prealloc). Otherwise, bpf program has no way to ensure
830+
* known initial values for cpus other than current one
831+
* (onallcpus=false always when coming from bpf prog).
832+
*/
833+
if (htab_is_prealloc(htab) && !onallcpus) {
834+
u32 size = round_up(htab->map.value_size, 8);
835+
int current_cpu = raw_smp_processor_id();
836+
int cpu;
837+
838+
for_each_possible_cpu(cpu) {
839+
if (cpu == current_cpu)
840+
bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,
841+
size);
842+
else
843+
memset(per_cpu_ptr(pptr, cpu), 0, size);
844+
}
845+
} else {
846+
pcpu_copy_value(htab, pptr, value, onallcpus);
847+
}
848+
}
849+
824850
static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
825851
{
826852
return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
@@ -891,7 +917,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
891917
}
892918
}
893919

894-
pcpu_copy_value(htab, pptr, value, onallcpus);
920+
pcpu_init_value(htab, pptr, value, onallcpus);
895921

896922
if (!prealloc)
897923
htab_elem_set_ptr(l_new, key_size, pptr);
@@ -1183,7 +1209,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
11831209
pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
11841210
value, onallcpus);
11851211
} else {
1186-
pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
1212+
pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
11871213
value, onallcpus);
11881214
hlist_nulls_add_head_rcu(&l_new->hash_node, head);
11891215
l_new = NULL;

0 commit comments

Comments
 (0)