Commit 474fe91

Frank van der Linden authored and akpm00 committed
mm/hugetlb: move hugetlb CMA code into its own file
hugetlb.c contained a number of CONFIG_CMA ifdefs, and the code inside them was large enough to merit being in its own file, so move it, cleaning up things a bit.

Hide some direct variable access behind functions to accommodate the move.

No functional change intended.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Frank van der Linden <[email protected]>
Cc: Alexander Gordeev <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Dan Carpenter <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Joao Martins <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Madhavan Srinivasan <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Roman Gushchin (Cruise) <[email protected]>
Cc: Usama Arif <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Cc: Yu Zhao <[email protected]>
Cc: Zi Yan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent d2d7867 commit 474fe91
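The new mm/hugetlb_cma.c and mm/hugetlb_cma.h are counted in the file tree below but not rendered in this view. Since hugetlb.c now includes "hugetlb_cma.h" unconditionally while the Makefile builds hugetlb_cma.o only under CONFIG_CMA, the header presumably declares the CMA helpers and supplies empty stubs otherwise. A sketch inferred purely from the call sites in this diff; the exact signatures and the stub bodies are assumptions, not the verbatim file:

/* Interface sketch for mm/hugetlb_cma.h, inferred from call sites in this diff. */
#ifndef _MM_HUGETLB_CMA_H
#define _MM_HUGETLB_CMA_H

#ifdef CONFIG_CMA
void hugetlb_cma_free_folio(struct folio *folio);
struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
				      int nid, nodemask_t *nodemask);
struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h,
						    int *listnode, bool node_exact);
bool hugetlb_cma_exclusive_alloc(void);
unsigned long hugetlb_cma_total_size(void);
void hugetlb_cma_validate_params(void);
bool hugetlb_early_cma(struct hstate *h);
void hugetlb_cma_check(void);
#else
/* !CONFIG_CMA stubs (assumed) so hugetlb.c compiles either way. */
static inline void hugetlb_cma_free_folio(struct folio *folio) { }
static inline struct folio *hugetlb_cma_alloc_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(
		struct hstate *h, int *listnode, bool node_exact)
{
	return NULL;
}
static inline bool hugetlb_cma_exclusive_alloc(void) { return false; }
static inline unsigned long hugetlb_cma_total_size(void) { return 0; }
static inline void hugetlb_cma_validate_params(void) { }
static inline bool hugetlb_early_cma(struct hstate *h) { return false; }
static inline void hugetlb_cma_check(void) { }
#endif

#endif /* _MM_HUGETLB_CMA_H */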

File tree

5 files changed (+354, -252 lines): MAINTAINERS, mm/Makefile, mm/hugetlb.c, mm/hugetlb_cma.c (new), mm/hugetlb_cma.h (new)


MAINTAINERS

Lines changed: 2 additions & 0 deletions
@@ -10708,6 +10708,8 @@ F:	fs/hugetlbfs/
 F:	include/linux/hugetlb.h
 F:	include/trace/events/hugetlbfs.h
 F:	mm/hugetlb.c
+F:	mm/hugetlb_cma.c
+F:	mm/hugetlb_cma.h
 F:	mm/hugetlb_vmemmap.c
 F:	mm/hugetlb_vmemmap.h
 F:	tools/testing/selftests/cgroup/test_hugetlb_memcg.c

mm/Makefile

Lines changed: 3 additions & 0 deletions
@@ -79,6 +79,9 @@ obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o swap_slots.o
 obj-$(CONFIG_ZSWAP) += zswap.o
 obj-$(CONFIG_HAS_DMA) += dmapool.o
 obj-$(CONFIG_HUGETLBFS) += hugetlb.o
+ifdef CONFIG_CMA
+obj-$(CONFIG_HUGETLBFS) += hugetlb_cma.o
+endif
 obj-$(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) += hugetlb_vmemmap.o
 obj-$(CONFIG_NUMA) += mempolicy.o
 obj-$(CONFIG_SPARSEMEM) += sparse.o

mm/hugetlb.c

Lines changed: 17 additions & 252 deletions
@@ -49,19 +49,13 @@
 #include <linux/page_owner.h>
 #include "internal.h"
 #include "hugetlb_vmemmap.h"
+#include "hugetlb_cma.h"
 #include <linux/page-isolation.h>
 
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
 
-#ifdef CONFIG_CMA
-static struct cma *hugetlb_cma[MAX_NUMNODES];
-static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
-#endif
-static bool hugetlb_cma_only;
-static unsigned long hugetlb_cma_size __initdata;
-
 __initdata struct list_head huge_boot_pages[MAX_NUMNODES];
 static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata;

@@ -128,14 +122,11 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
 
 static void hugetlb_free_folio(struct folio *folio)
 {
-#ifdef CONFIG_CMA
-	int nid = folio_nid(folio);
-
 	if (folio_test_hugetlb_cma(folio)) {
-		WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
+		hugetlb_cma_free_folio(folio);
 		return;
 	}
-#endif
+
 	folio_put(folio);
 }
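Judging from the removed lines above, the relocated free path presumably keeps the per-node lookup and the WARN; a sketch of what mm/hugetlb_cma.c likely contains (the body is inferred, since the new file is not shown in this view):

void hugetlb_cma_free_folio(struct folio *folio)
{
	int nid = folio_nid(folio);

	/* hugetlb_cma[] is now file-local to hugetlb_cma.c. */
	WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
}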

@@ -1492,31 +1483,9 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 retry:
-	folio = NULL;
-#ifdef CONFIG_CMA
-	{
-		int node;
-
-		if (hugetlb_cma[nid])
-			folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);
-
-		if (!folio && !(gfp_mask & __GFP_THISNODE)) {
-			for_each_node_mask(node, *nodemask) {
-				if (node == nid || !hugetlb_cma[node])
-					continue;
-
-				folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
-				if (folio)
-					break;
-			}
-		}
-
-		if (folio)
-			folio_set_hugetlb_cma(folio);
-	}
-#endif
+	folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask);
 	if (!folio) {
-		if (hugetlb_cma_only)
+		if (hugetlb_cma_exclusive_alloc())
 			return NULL;
 
 		folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask);
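The open-coded CMA loop moves behind hugetlb_cma_alloc_folio(), which now takes the hstate rather than a precomputed order. A sketch of the relocated body, assuming it derives the order via huge_page_order(h) (the new file is not shown here):

struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
				      int nid, nodemask_t *nodemask)
{
	int node;
	int order = huge_page_order(h);
	struct folio *folio = NULL;

	/* Try the requested node first. */
	if (hugetlb_cma[nid])
		folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);

	/* Fall back to other nodes unless the caller pinned this node. */
	if (!folio && !(gfp_mask & __GFP_THISNODE)) {
		for_each_node_mask(node, *nodemask) {
			if (node == nid || !hugetlb_cma[node])
				continue;

			folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
			if (folio)
				break;
		}
	}

	if (folio)
		folio_set_hugetlb_cma(folio);

	return folio;
}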
@@ -3191,47 +3160,14 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 	return ERR_PTR(-ENOSPC);
 }
 
-static bool __init hugetlb_early_cma(struct hstate *h)
-{
-	if (arch_has_huge_bootmem_alloc())
-		return false;
-
-	return (hstate_is_gigantic(h) && hugetlb_cma_only);
-}
-
 static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
 {
 	struct huge_bootmem_page *m;
-	unsigned long flags;
-	struct cma *cma;
 	int listnode = nid;
 
-#ifdef CONFIG_CMA
-	if (hugetlb_early_cma(h)) {
-		flags = HUGE_BOOTMEM_CMA;
-		cma = hugetlb_cma[nid];
-		m = cma_reserve_early(cma, huge_page_size(h));
-		if (!m) {
-			int node;
-
-			if (node_exact)
-				return NULL;
-			for_each_online_node(node) {
-				cma = hugetlb_cma[node];
-				if (!cma || node == nid)
-					continue;
-				m = cma_reserve_early(cma, huge_page_size(h));
-				if (m) {
-					listnode = node;
-					break;
-				}
-			}
-		}
-	} else
-#endif
-	{
-		flags = 0;
-		cma = NULL;
+	if (hugetlb_early_cma(h))
+		m = hugetlb_cma_alloc_bootmem(h, &listnode, node_exact);
+	else {
 		if (node_exact)
 			m = memblock_alloc_exact_nid_raw(huge_page_size(h),
 							 huge_page_size(h), 0,
@@ -3250,6 +3186,11 @@ static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
 		if (m)
 			listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m)));
 		}
+
+		if (m) {
+			m->flags = 0;
+			m->cma = NULL;
+		}
 	}
 
 	if (m) {
@@ -3264,8 +3205,6 @@ static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
 		INIT_LIST_HEAD(&m->list);
 		list_add(&m->list, &huge_boot_pages[listnode]);
 		m->hstate = h;
-		m->flags = flags;
-		m->cma = cma;
 	}
 
 	return m;
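Combining the branch removed above with the new call site, hugetlb_cma_alloc_bootmem() presumably takes over the early CMA reservation and reports the actual node back through the pointer argument; a sketch (where m->flags and m->cma get set on this path is inferred from the relocated assignments, not shown in this view):

struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h,
						    int *listnode, bool node_exact)
{
	struct huge_bootmem_page *m;
	int node, nid = *listnode;
	struct cma *cma = hugetlb_cma[nid];

	m = cma_reserve_early(cma, huge_page_size(h));
	if (!m && !node_exact) {
		/* Any other node with a CMA area will do. */
		for_each_online_node(node) {
			cma = hugetlb_cma[node];
			if (!cma || node == nid)
				continue;
			m = cma_reserve_early(cma, huge_page_size(h));
			if (m) {
				*listnode = node;
				break;
			}
		}
	}

	if (m) {
		m->flags = HUGE_BOOTMEM_CMA;
		m->cma = cma;
	}

	return m;
}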
@@ -3715,7 +3654,8 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 		 * Skip gigantic hugepages allocation if early CMA
 		 * reservations are not available.
 		 */
-		if (hstate_is_gigantic(h) && hugetlb_cma_size && !hugetlb_early_cma(h)) {
+		if (hstate_is_gigantic(h) && hugetlb_cma_total_size() &&
+		    !hugetlb_early_cma(h)) {
 			pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
 			return;
 		}
@@ -3752,7 +3692,7 @@ static void __init hugetlb_init_hstates(void)
 		 */
 		if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 			continue;
-		if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
+		if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER)
 			continue;
 		for_each_hstate(h2) {
 			if (h2 == h)
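hugetlb_cma_total_size() and hugetlb_cma_exclusive_alloc() replace the direct reads of hugetlb_cma_size and hugetlb_cma_only, which become file-local state in hugetlb_cma.c; presumably they are trivial accessors along these lines (sketch, not the verbatim file):

bool hugetlb_cma_exclusive_alloc(void)
{
	return hugetlb_cma_only;
}

unsigned long hugetlb_cma_total_size(void)
{
	return hugetlb_cma_size;
}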
@@ -4654,14 +4594,6 @@ static void hugetlb_register_all_nodes(void) { }
 
 #endif
 
-#ifdef CONFIG_CMA
-static void __init hugetlb_cma_check(void);
-#else
-static inline __init void hugetlb_cma_check(void)
-{
-}
-#endif
-
 static void __init hugetlb_sysfs_init(void)
 {
 	struct hstate *h;
@@ -4845,8 +4777,7 @@ static __init void hugetlb_parse_params(void)
 		hcp->setup(hcp->val);
 	}
 
-	if (!hugetlb_cma_size)
-		hugetlb_cma_only = false;
+	hugetlb_cma_validate_params();
 }
 
 /*
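The removed two-line check becomes hugetlb_cma_validate_params(), keeping the hugetlb_cma_only/hugetlb_cma_size dependency inside hugetlb_cma.c; likely something like (sketch, body inferred from the removed lines):

void __init hugetlb_cma_validate_params(void)
{
	/* hugetlb_cma_only is meaningless without a CMA reservation. */
	if (!hugetlb_cma_size)
		hugetlb_cma_only = false;
}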
@@ -7916,169 +7847,3 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
 	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
 			ALIGN_DOWN(vma->vm_end, PUD_SIZE));
 }
-
-#ifdef CONFIG_CMA
-static bool cma_reserve_called __initdata;
-
-static int __init cmdline_parse_hugetlb_cma(char *p)
-{
-	int nid, count = 0;
-	unsigned long tmp;
-	char *s = p;
-
-	while (*s) {
-		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
-			break;
-
-		if (s[count] == ':') {
-			if (tmp >= MAX_NUMNODES)
-				break;
-			nid = array_index_nospec(tmp, MAX_NUMNODES);
-
-			s += count + 1;
-			tmp = memparse(s, &s);
-			hugetlb_cma_size_in_node[nid] = tmp;
-			hugetlb_cma_size += tmp;
-
-			/*
-			 * Skip the separator if have one, otherwise
-			 * break the parsing.
-			 */
-			if (*s == ',')
-				s++;
-			else
-				break;
-		} else {
-			hugetlb_cma_size = memparse(p, &p);
-			break;
-		}
-	}
-
-	return 0;
-}
-
-early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
-
-static int __init cmdline_parse_hugetlb_cma_only(char *p)
-{
-	return kstrtobool(p, &hugetlb_cma_only);
-}
-
-early_param("hugetlb_cma_only", cmdline_parse_hugetlb_cma_only);
-
-void __init hugetlb_cma_reserve(int order)
-{
-	unsigned long size, reserved, per_node;
-	bool node_specific_cma_alloc = false;
-	int nid;
-
-	/*
-	 * HugeTLB CMA reservation is required for gigantic
-	 * huge pages which could not be allocated via the
-	 * page allocator. Just warn if there is any change
-	 * breaking this assumption.
-	 */
-	VM_WARN_ON(order <= MAX_PAGE_ORDER);
-	cma_reserve_called = true;
-
-	if (!hugetlb_cma_size)
-		return;
-
-	for (nid = 0; nid < MAX_NUMNODES; nid++) {
-		if (hugetlb_cma_size_in_node[nid] == 0)
-			continue;
-
-		if (!node_online(nid)) {
-			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
-			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
-			hugetlb_cma_size_in_node[nid] = 0;
-			continue;
-		}
-
-		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
-			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
-				nid, (PAGE_SIZE << order) / SZ_1M);
-			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
-			hugetlb_cma_size_in_node[nid] = 0;
-		} else {
-			node_specific_cma_alloc = true;
-		}
-	}
-
-	/* Validate the CMA size again in case some invalid nodes specified. */
-	if (!hugetlb_cma_size)
-		return;
-
-	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
-		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
-			(PAGE_SIZE << order) / SZ_1M);
-		hugetlb_cma_size = 0;
-		return;
-	}
-
-	if (!node_specific_cma_alloc) {
-		/*
-		 * If 3 GB area is requested on a machine with 4 numa nodes,
-		 * let's allocate 1 GB on first three nodes and ignore the last one.
-		 */
-		per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
-		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
-			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
-	}
-
-	reserved = 0;
-	for_each_online_node(nid) {
-		int res;
-		char name[CMA_MAX_NAME];
-
-		if (node_specific_cma_alloc) {
-			if (hugetlb_cma_size_in_node[nid] == 0)
-				continue;
-
-			size = hugetlb_cma_size_in_node[nid];
-		} else {
-			size = min(per_node, hugetlb_cma_size - reserved);
-		}
-
-		size = round_up(size, PAGE_SIZE << order);
-
-		snprintf(name, sizeof(name), "hugetlb%d", nid);
-		/*
-		 * Note that 'order per bit' is based on smallest size that
-		 * may be returned to CMA allocator in the case of
-		 * huge page demotion.
-		 */
-		res = cma_declare_contiguous_multi(size, PAGE_SIZE << order,
-					HUGETLB_PAGE_ORDER, name,
-					&hugetlb_cma[nid], nid);
-		if (res) {
-			pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
-				res, nid);
-			continue;
-		}
-
-		reserved += size;
-		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
-			size / SZ_1M, nid);
-
-		if (reserved >= hugetlb_cma_size)
-			break;
-	}
-
-	if (!reserved)
-		/*
-		 * hugetlb_cma_size is used to determine if allocations from
-		 * cma are possible. Set to zero if no cma regions are set up.
-		 */
-		hugetlb_cma_size = 0;
-}
-
-static void __init hugetlb_cma_check(void)
-{
-	if (!hugetlb_cma_size || cma_reserve_called)
-		return;
-
-	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
-}
-
-#endif /* CONFIG_CMA */
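For reference, cmdline_parse_hugetlb_cma() (moving here unchanged) accepts either one global size or comma-separated node:size pairs, and cmdline_parse_hugetlb_cma_only() takes a boolean, so typical kernel command lines look like this (sizes are example values):

hugetlb_cma=4G              (one reservation, split evenly across online nodes)
hugetlb_cma=0:1G,2:2G       (explicit per-node sizes: 1 GiB on node 0, 2 GiB on node 2)
hugetlb_cma_only=on         (allocate gigantic pages only from CMA; kstrtobool syntax)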
