33
33
#include <linux/extable.h>
34
34
#include <linux/log2.h>
35
35
#include <linux/bpf_verifier.h>
36
+ #include <linux/nodemask.h>
36
37
37
38
#include <asm/barrier.h>
38
39
#include <asm/unaligned.h>
@@ -815,46 +816,66 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
815
816
* allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
816
817
* to host BPF programs.
817
818
*/
818
- #ifdef CONFIG_TRANSPARENT_HUGEPAGE
819
- #define BPF_PROG_PACK_SIZE HPAGE_PMD_SIZE
820
- #else
821
- #define BPF_PROG_PACK_SIZE PAGE_SIZE
822
- #endif
823
819
#define BPF_PROG_CHUNK_SHIFT 6
824
820
#define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
825
821
#define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
826
- #define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
827
822
828
823
/*
 * One contiguous executable region (bpf_prog_pack_size bytes, obtained
 * from module_alloc()) that hosts multiple BPF programs, carved into
 * BPF_PROG_CHUNK_SIZE chunks.
 */
struct bpf_prog_pack {
	struct list_head list;		/* node on the global pack_list */
	void *ptr;			/* base address of the region */
	unsigned long bitmap[];		/* one bit per chunk; set = in use */
};
833
828
834
- #define BPF_PROG_MAX_PACK_PROG_SIZE BPF_PROG_PACK_SIZE
835
829
#define BPF_PROG_SIZE_TO_NBITS (size ) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
836
830
831
+ static size_t bpf_prog_pack_size = -1 ;
832
+
833
+ static int bpf_prog_chunk_count (void )
834
+ {
835
+ WARN_ON_ONCE (bpf_prog_pack_size == -1 );
836
+ return bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE ;
837
+ }
838
+
837
839
static DEFINE_MUTEX (pack_mutex );
838
840
static LIST_HEAD (pack_list );
839
841
842
+ static size_t select_bpf_prog_pack_size (void )
843
+ {
844
+ size_t size ;
845
+ void * ptr ;
846
+
847
+ size = PMD_SIZE * num_online_nodes ();
848
+ ptr = module_alloc (size );
849
+
850
+ /* Test whether we can get huge pages. If not just use PAGE_SIZE
851
+ * packs.
852
+ */
853
+ if (!ptr || !is_vm_area_hugepages (ptr ))
854
+ size = PAGE_SIZE ;
855
+
856
+ vfree (ptr );
857
+ return size ;
858
+ }
859
+
840
860
static struct bpf_prog_pack * alloc_new_pack (void )
841
861
{
842
862
struct bpf_prog_pack * pack ;
843
863
844
- pack = kzalloc (sizeof (* pack ) + BITS_TO_BYTES (BPF_PROG_CHUNK_COUNT ), GFP_KERNEL );
864
+ pack = kzalloc (struct_size (pack , bitmap , BITS_TO_LONGS (bpf_prog_chunk_count ())),
865
+ GFP_KERNEL );
845
866
if (!pack )
846
867
return NULL ;
847
- pack -> ptr = module_alloc (BPF_PROG_PACK_SIZE );
868
+ pack -> ptr = module_alloc (bpf_prog_pack_size );
848
869
if (!pack -> ptr ) {
849
870
kfree (pack );
850
871
return NULL ;
851
872
}
852
- bitmap_zero (pack -> bitmap , BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE );
873
+ bitmap_zero (pack -> bitmap , bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE );
853
874
list_add_tail (& pack -> list , & pack_list );
854
875
855
876
set_vm_flush_reset_perms (pack -> ptr );
856
- set_memory_ro ((unsigned long )pack -> ptr , BPF_PROG_PACK_SIZE / PAGE_SIZE );
857
- set_memory_x ((unsigned long )pack -> ptr , BPF_PROG_PACK_SIZE / PAGE_SIZE );
877
+ set_memory_ro ((unsigned long )pack -> ptr , bpf_prog_pack_size / PAGE_SIZE );
878
+ set_memory_x ((unsigned long )pack -> ptr , bpf_prog_pack_size / PAGE_SIZE );
858
879
return pack ;
859
880
}
860
881
@@ -865,21 +886,24 @@ static void *bpf_prog_pack_alloc(u32 size)
865
886
unsigned long pos ;
866
887
void * ptr = NULL ;
867
888
868
- if (size > BPF_PROG_MAX_PACK_PROG_SIZE ) {
889
+ mutex_lock (& pack_mutex );
890
+ if (bpf_prog_pack_size == -1 )
891
+ bpf_prog_pack_size = select_bpf_prog_pack_size ();
892
+
893
+ if (size > bpf_prog_pack_size ) {
869
894
size = round_up (size , PAGE_SIZE );
870
895
ptr = module_alloc (size );
871
896
if (ptr ) {
872
897
set_vm_flush_reset_perms (ptr );
873
898
set_memory_ro ((unsigned long )ptr , size / PAGE_SIZE );
874
899
set_memory_x ((unsigned long )ptr , size / PAGE_SIZE );
875
900
}
876
- return ptr ;
901
+ goto out ;
877
902
}
878
- mutex_lock (& pack_mutex );
879
903
list_for_each_entry (pack , & pack_list , list ) {
880
- pos = bitmap_find_next_zero_area (pack -> bitmap , BPF_PROG_CHUNK_COUNT , 0 ,
904
+ pos = bitmap_find_next_zero_area (pack -> bitmap , bpf_prog_chunk_count () , 0 ,
881
905
nbits , 0 );
882
- if (pos < BPF_PROG_CHUNK_COUNT )
906
+ if (pos < bpf_prog_chunk_count () )
883
907
goto found_free_area ;
884
908
}
885
909
@@ -905,13 +929,13 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
905
929
unsigned long pos ;
906
930
void * pack_ptr ;
907
931
908
- if (hdr -> size > BPF_PROG_MAX_PACK_PROG_SIZE ) {
932
+ mutex_lock (& pack_mutex );
933
+ if (hdr -> size > bpf_prog_pack_size ) {
909
934
module_memfree (hdr );
910
- return ;
935
+ goto out ;
911
936
}
912
937
913
- pack_ptr = (void * )((unsigned long )hdr & ~(BPF_PROG_PACK_SIZE - 1 ));
914
- mutex_lock (& pack_mutex );
938
+ pack_ptr = (void * )((unsigned long )hdr & ~(bpf_prog_pack_size - 1 ));
915
939
916
940
list_for_each_entry (tmp , & pack_list , list ) {
917
941
if (tmp -> ptr == pack_ptr ) {
@@ -927,8 +951,8 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
927
951
pos = ((unsigned long )hdr - (unsigned long )pack_ptr ) >> BPF_PROG_CHUNK_SHIFT ;
928
952
929
953
bitmap_clear (pack -> bitmap , pos , nbits );
930
- if (bitmap_find_next_zero_area (pack -> bitmap , BPF_PROG_CHUNK_COUNT , 0 ,
931
- BPF_PROG_CHUNK_COUNT , 0 ) == 0 ) {
954
+ if (bitmap_find_next_zero_area (pack -> bitmap , bpf_prog_chunk_count () , 0 ,
955
+ bpf_prog_chunk_count () , 0 ) == 0 ) {
932
956
list_del (& pack -> list );
933
957
module_memfree (pack -> ptr );
934
958
kfree (pack );
0 commit comments