 #include <linux/page_owner.h>
 #include "internal.h"
 #include "hugetlb_vmemmap.h"
+#include "hugetlb_cma.h"
 #include <linux/page-isolation.h>

 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];

-#ifdef CONFIG_CMA
-static struct cma *hugetlb_cma[MAX_NUMNODES];
-static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
-#endif
-static bool hugetlb_cma_only;
-static unsigned long hugetlb_cma_size __initdata;
-
 __initdata struct list_head huge_boot_pages[MAX_NUMNODES];
 static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata;

@@ -128,14 +122,11 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma);

 static void hugetlb_free_folio(struct folio *folio)
 {
-#ifdef CONFIG_CMA
-	int nid = folio_nid(folio);
-
 	if (folio_test_hugetlb_cma(folio)) {
-		WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
+		hugetlb_cma_free_folio(folio);
 		return;
 	}
-#endif
+
 	folio_put(folio);
 }

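The free path now just calls into the new hugetlb_cma helper. A minimal sketch of what hugetlb_cma_free_folio() could look like in the new mm/hugetlb_cma.c, assuming it simply carries over the logic removed above (the actual implementation is in the file added elsewhere in this series):

void hugetlb_cma_free_folio(struct folio *folio)
{
	int nid = folio_nid(folio);

	/* Return the folio to the per-node hugetlb CMA area it came from. */
	WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
}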
@@ -1492,31 +1483,9 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 retry:
-	folio = NULL;
-#ifdef CONFIG_CMA
-	{
-		int node;
-
-		if (hugetlb_cma[nid])
-			folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);
-
-		if (!folio && !(gfp_mask & __GFP_THISNODE)) {
-			for_each_node_mask(node, *nodemask) {
-				if (node == nid || !hugetlb_cma[node])
-					continue;
-
-				folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
-				if (folio)
-					break;
-			}
-		}
-
-		if (folio)
-			folio_set_hugetlb_cma(folio);
-	}
-#endif
+	folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask);
 	if (!folio) {
-		if (hugetlb_cma_only)
+		if (hugetlb_cma_exclusive_alloc())
 			return NULL;

 		folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask);
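For reference, a hedged sketch of how hugetlb_cma_alloc_folio() might look in mm/hugetlb_cma.c if it keeps the per-node fallback logic removed above. The signature is taken from the new call site; everything else is an assumption, not the actual implementation:

struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
				      int nid, nodemask_t *nodemask)
{
	int node;
	int order = huge_page_order(h);
	struct folio *folio = NULL;

	/* Try the CMA area of the requested node first. */
	if (hugetlb_cma[nid])
		folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);

	/* Fall back to other nodes unless the caller insisted on this node. */
	if (!folio && !(gfp_mask & __GFP_THISNODE)) {
		for_each_node_mask(node, *nodemask) {
			if (node == nid || !hugetlb_cma[node])
				continue;

			folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
			if (folio)
				break;
		}
	}

	if (folio)
		folio_set_hugetlb_cma(folio);

	return folio;
}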
@@ -3191,47 +3160,14 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 	return ERR_PTR(-ENOSPC);
 }

-static bool __init hugetlb_early_cma(struct hstate *h)
-{
-	if (arch_has_huge_bootmem_alloc())
-		return false;
-
-	return (hstate_is_gigantic(h) && hugetlb_cma_only);
-}
-
 static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
 {
 	struct huge_bootmem_page *m;
-	unsigned long flags;
-	struct cma *cma;
 	int listnode = nid;

-#ifdef CONFIG_CMA
-	if (hugetlb_early_cma(h)) {
-		flags = HUGE_BOOTMEM_CMA;
-		cma = hugetlb_cma[nid];
-		m = cma_reserve_early(cma, huge_page_size(h));
-		if (!m) {
-			int node;
-
-			if (node_exact)
-				return NULL;
-			for_each_online_node(node) {
-				cma = hugetlb_cma[node];
-				if (!cma || node == nid)
-					continue;
-				m = cma_reserve_early(cma, huge_page_size(h));
-				if (m) {
-					listnode = node;
-					break;
-				}
-			}
-		}
-	} else
-#endif
-	{
-		flags = 0;
-		cma = NULL;
+	if (hugetlb_early_cma(h))
+		m = hugetlb_cma_alloc_bootmem(h, &listnode, node_exact);
+	else {
 		if (node_exact)
 			m = memblock_alloc_exact_nid_raw(huge_page_size(h),
 							 huge_page_size(h), 0,
@@ -3250,6 +3186,11 @@ static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
 			if (m)
 				listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m)));
 		}
+
+		if (m) {
+			m->flags = 0;
+			m->cma = NULL;
+		}
 	}

 	if (m) {
@@ -3264,8 +3205,6 @@ static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
 		INIT_LIST_HEAD(&m->list);
 		list_add(&m->list, &huge_boot_pages[listnode]);
 		m->hstate = h;
-		m->flags = flags;
-		m->cma = cma;
 	}

 	return m;
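Similarly, a sketch of hugetlb_cma_alloc_bootmem() under the assumption that it keeps the early-CMA bootmem path removed above. Since alloc_bootmem() no longer sets m->flags and m->cma for this case, the helper presumably does so itself, and it reports the node the reservation actually came from through *nid:

struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
						    bool node_exact)
{
	struct cma *cma;
	struct huge_bootmem_page *m;
	int node = *nid;

	/* Try the CMA area of the requested node first. */
	cma = hugetlb_cma[*nid];
	m = cma_reserve_early(cma, huge_page_size(h));
	if (!m) {
		if (node_exact)
			return NULL;

		for_each_online_node(node) {
			cma = hugetlb_cma[node];
			if (!cma || node == *nid)
				continue;
			m = cma_reserve_early(cma, huge_page_size(h));
			if (m)
				break;
		}
	}

	if (m) {
		m->flags = HUGE_BOOTMEM_CMA;
		m->cma = cma;
		*nid = node;	/* tell the caller which node's boot list to use */
	}

	return m;
}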
@@ -3715,7 +3654,8 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 	 * Skip gigantic hugepages allocation if early CMA
 	 * reservations are not available.
 	 */
-	if (hstate_is_gigantic(h) && hugetlb_cma_size && !hugetlb_early_cma(h)) {
+	if (hstate_is_gigantic(h) && hugetlb_cma_total_size() &&
+	    !hugetlb_early_cma(h)) {
 		pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
 		return;
 	}
@@ -3752,7 +3692,7 @@ static void __init hugetlb_init_hstates(void)
 		 */
 		if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 			continue;
-		if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
+		if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER)
 			continue;
 		for_each_hstate(h2) {
 			if (h2 == h)
@@ -4654,14 +4594,6 @@ static void hugetlb_register_all_nodes(void) { }

 #endif

-#ifdef CONFIG_CMA
-static void __init hugetlb_cma_check(void);
-#else
-static inline __init void hugetlb_cma_check(void)
-{
-}
-#endif
-
 static void __init hugetlb_sysfs_init(void)
 {
 	struct hstate *h;
@@ -4845,8 +4777,7 @@ static __init void hugetlb_parse_params(void)
 		hcp->setup(hcp->val);
 	}

-	if (!hugetlb_cma_size)
-		hugetlb_cma_only = false;
+	hugetlb_cma_validate_params();
 }

 /*
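hugetlb_cma_validate_params() presumably just preserves the consistency check removed above; a minimal sketch under that assumption:

void __init hugetlb_cma_validate_params(void)
{
	/* hugetlb_cma_only makes no sense without a hugetlb_cma= reservation. */
	if (!hugetlb_cma_size)
		hugetlb_cma_only = false;
}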
@@ -7916,169 +7847,3 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
 	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
 			ALIGN_DOWN(vma->vm_end, PUD_SIZE));
 }
-
-#ifdef CONFIG_CMA
-static bool cma_reserve_called __initdata;
-
-static int __init cmdline_parse_hugetlb_cma(char *p)
-{
-	int nid, count = 0;
-	unsigned long tmp;
-	char *s = p;
-
-	while (*s) {
-		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
-			break;
-
-		if (s[count] == ':') {
-			if (tmp >= MAX_NUMNODES)
-				break;
-			nid = array_index_nospec(tmp, MAX_NUMNODES);
-
-			s += count + 1;
-			tmp = memparse(s, &s);
-			hugetlb_cma_size_in_node[nid] = tmp;
-			hugetlb_cma_size += tmp;
-
-			/*
-			 * Skip the separator if have one, otherwise
-			 * break the parsing.
-			 */
-			if (*s == ',')
-				s++;
-			else
-				break;
-		} else {
-			hugetlb_cma_size = memparse(p, &p);
-			break;
-		}
-	}
-
-	return 0;
-}
-
-early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
-
-static int __init cmdline_parse_hugetlb_cma_only(char *p)
-{
-	return kstrtobool(p, &hugetlb_cma_only);
-}
-
-early_param("hugetlb_cma_only", cmdline_parse_hugetlb_cma_only);
-
-void __init hugetlb_cma_reserve(int order)
-{
-	unsigned long size, reserved, per_node;
-	bool node_specific_cma_alloc = false;
-	int nid;
-
-	/*
-	 * HugeTLB CMA reservation is required for gigantic
-	 * huge pages which could not be allocated via the
-	 * page allocator. Just warn if there is any change
-	 * breaking this assumption.
-	 */
-	VM_WARN_ON(order <= MAX_PAGE_ORDER);
-	cma_reserve_called = true;
-
-	if (!hugetlb_cma_size)
-		return;
-
-	for (nid = 0; nid < MAX_NUMNODES; nid++) {
-		if (hugetlb_cma_size_in_node[nid] == 0)
-			continue;
-
-		if (!node_online(nid)) {
-			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
-			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
-			hugetlb_cma_size_in_node[nid] = 0;
-			continue;
-		}
-
-		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
-			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
-				nid, (PAGE_SIZE << order) / SZ_1M);
-			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
-			hugetlb_cma_size_in_node[nid] = 0;
-		} else {
-			node_specific_cma_alloc = true;
-		}
-	}
-
-	/* Validate the CMA size again in case some invalid nodes specified. */
-	if (!hugetlb_cma_size)
-		return;
-
-	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
-		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
-			(PAGE_SIZE << order) / SZ_1M);
-		hugetlb_cma_size = 0;
-		return;
-	}
-
-	if (!node_specific_cma_alloc) {
-		/*
-		 * If 3 GB area is requested on a machine with 4 numa nodes,
-		 * let's allocate 1 GB on first three nodes and ignore the last one.
-		 */
-		per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
-		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
-			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
-	}
-
-	reserved = 0;
-	for_each_online_node(nid) {
-		int res;
-		char name[CMA_MAX_NAME];
-
-		if (node_specific_cma_alloc) {
-			if (hugetlb_cma_size_in_node[nid] == 0)
-				continue;
-
-			size = hugetlb_cma_size_in_node[nid];
-		} else {
-			size = min(per_node, hugetlb_cma_size - reserved);
-		}
-
-		size = round_up(size, PAGE_SIZE << order);
-
-		snprintf(name, sizeof(name), "hugetlb%d", nid);
-		/*
-		 * Note that 'order per bit' is based on smallest size that
-		 * may be returned to CMA allocator in the case of
-		 * huge page demotion.
-		 */
-		res = cma_declare_contiguous_multi(size, PAGE_SIZE << order,
-						HUGETLB_PAGE_ORDER, name,
-						&hugetlb_cma[nid], nid);
-		if (res) {
-			pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
-				res, nid);
-			continue;
-		}
-
-		reserved += size;
-		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
-			size / SZ_1M, nid);
-
-		if (reserved >= hugetlb_cma_size)
-			break;
-	}
-
-	if (!reserved)
-		/*
-		 * hugetlb_cma_size is used to determine if allocations from
-		 * cma are possible. Set to zero if no cma regions are set up.
-		 */
-		hugetlb_cma_size = 0;
-}
-
-static void __init hugetlb_cma_check(void)
-{
-	if (!hugetlb_cma_size || cma_reserve_called)
-		return;
-
-	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
-}
-
-#endif /* CONFIG_CMA */
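Taken together, the call sites above imply roughly the following interface for the new mm/hugetlb_cma.h. This is a sketch inferred from this diff, not the actual header; a CONFIG_CMA=n build would also need no-op stubs for each of these:

/* Allocation/free of gigantic folios backed by the hugetlb CMA areas. */
struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
				      int nid, nodemask_t *nodemask);
void hugetlb_cma_free_folio(struct folio *folio);

/* Boot-time reservation of gigantic pages from CMA. */
struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
						    bool node_exact);
bool hugetlb_early_cma(struct hstate *h);

/* Accessors replacing the old hugetlb_cma_size / hugetlb_cma_only globals. */
unsigned long hugetlb_cma_total_size(void);
bool hugetlb_cma_exclusive_alloc(void);
void hugetlb_cma_validate_params(void);

/* Presumably also declared here, since its forward declaration is removed above. */
void __init hugetlb_cma_check(void);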