
Commit 12f2478

Maxim Levitsky authored and bonzini committed
KVM: x86/mmu: Prevent installing hugepages when mem attributes are changing
JIRA: https://issues.redhat.com/browse/RHEL-95318

commit 9129633
Author: Sean Christopherson <[email protected]>
Date:   Wed Apr 30 15:09:54 2025 -0700

    KVM: x86/mmu: Prevent installing hugepages when mem attributes are changing

    When changing memory attributes on a subset of a potential hugepage, add
    the hugepage to the invalidation range tracking to prevent installing a
    hugepage until the attributes are fully updated.  Like the actual hugepage
    tracking updates in kvm_arch_post_set_memory_attributes(), process only
    the head and tail pages, as any potential hugepages that are entirely
    covered by the range will already be tracked.

    Note, only hugepage chunks whose current attributes are NOT mixed need to
    be added to the invalidation set, as mixed attributes already prevent
    installing a hugepage, and it's perfectly safe to install a smaller
    mapping for a gfn whose attributes aren't changing.

    Fixes: 8dd2eee ("KVM: x86/mmu: Handle page fault for private memory")
    Cc: [email protected]
    Reported-by: Michael Roth <[email protected]>
    Tested-by: Michael Roth <[email protected]>
    Link: https://lore.kernel.org/r/[email protected]
    Signed-off-by: Sean Christopherson <[email protected]>

Signed-off-by: Maxim Levitsky <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 6704ab0 commit 12f2478
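As a quick illustration of the rounding the fix relies on, here is a minimal userspace sketch (illustration only, not part of the patch): round_down_2m() mimics what gfn_round_for_level() does at PG_LEVEL_2M, and 512 stands in for KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) on x86 with 4KiB base pages.

/*
 * Minimal userspace sketch (not part of the patch).  round_down_2m()
 * mimics gfn_round_for_level(gfn, PG_LEVEL_2M); 512 stands in for
 * KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) on x86 with 4KiB base pages.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

#define PAGES_PER_2M 512ULL

static gfn_t round_down_2m(gfn_t gfn)
{
	return gfn & ~(PAGES_PER_2M - 1);
}

int main(void)
{
	/* Attributes change for gfns [300, 700): both the head chunk
	 * [0, 512) and the tail chunk [512, 1024) are only partially
	 * covered by the range. */
	gfn_t start = 300, end = 700;
	gfn_t head = round_down_2m(start);    /* 0   */
	gfn_t tail = round_down_2m(end - 1);  /* 512 */

	printf("head chunk: [%llu, %llu)\n", (unsigned long long)head,
	       (unsigned long long)(head + PAGES_PER_2M));
	printf("tail chunk: [%llu, %llu)\n", (unsigned long long)tail,
	       (unsigned long long)(tail + PAGES_PER_2M));
	return 0;
}

Both chunks here are only partially covered by the range, which is exactly the situation in which the patch adds them to the ongoing invalidation.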

File tree

1 file changed: +53 -16 lines changed

arch/x86/kvm/mmu/mmu.c

Lines changed: 53 additions & 16 deletions
@@ -7560,9 +7560,30 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
 }
 
 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+				int level)
+{
+	return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
+}
+
+static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+				 int level)
+{
+	lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
+}
+
+static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+			       int level)
+{
+	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
+}
+
 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
 					struct kvm_gfn_range *range)
 {
+	struct kvm_memory_slot *slot = range->slot;
+	int level;
+
 	/*
 	 * Zap SPTEs even if the slot can't be mapped PRIVATE.  KVM x86 only
 	 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
@@ -7577,6 +7598,38 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
 	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
 		return false;
 
+	if (WARN_ON_ONCE(range->end <= range->start))
+		return false;
+
+	/*
+	 * If the head and tail pages of the range currently allow a hugepage,
+	 * i.e. reside fully in the slot and don't have mixed attributes, then
+	 * add each corresponding hugepage range to the ongoing invalidation,
+	 * e.g. to prevent KVM from creating a hugepage in response to a fault
+	 * for a gfn whose attributes aren't changing.  Note, only the range
+	 * of gfns whose attributes are being modified needs to be explicitly
+	 * unmapped, as that will unmap any existing hugepages.
+	 */
+	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
+		gfn_t start = gfn_round_for_level(range->start, level);
+		gfn_t end = gfn_round_for_level(range->end - 1, level);
+		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
+
+		if ((start != range->start || start + nr_pages > range->end) &&
+		    start >= slot->base_gfn &&
+		    start + nr_pages <= slot->base_gfn + slot->npages &&
+		    !hugepage_test_mixed(slot, start, level))
+			kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
+
+		if (end == start)
+			continue;
+
+		if ((end + nr_pages) > range->end &&
+		    (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
+		    !hugepage_test_mixed(slot, end, level))
+			kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
+	}
+
 	/* Unmap the old attribute page. */
 	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
 		range->attr_filter = KVM_FILTER_SHARED;
@@ -7586,23 +7639,7 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
 	return kvm_unmap_gfn_range(kvm, range);
 }
 
-static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
-				int level)
-{
-	return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
-}
-
-static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
-				 int level)
-{
-	lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
-}
 
-static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
-			       int level)
-{
-	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
-}
 
 static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
 			       gfn_t gfn, int level, unsigned long attrs)
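
To see the new loop's conditions in isolation, here is a standalone model restricted to the 2M level. It is an assumption-laden sketch: toy_slot, test_mixed(), and invalidate() are stand-ins invented for illustration, not kernel APIs, but the head/tail conditions mirror the hunk above.

/*
 * Standalone model (illustration only) of the loop added above,
 * restricted to the 2M level.  toy_slot, test_mixed(), and
 * invalidate() are invented stand-ins for kvm_memory_slot,
 * hugepage_test_mixed(), and kvm_mmu_invalidate_range_add().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

#define NR_PAGES 512ULL /* gfns per 2M chunk on x86 with 4KiB pages */

struct toy_slot {
	gfn_t base_gfn;
	gfn_t npages;
};

/* Pretend no chunk currently has mixed attributes. */
static bool test_mixed(gfn_t gfn) { (void)gfn; return false; }

static void invalidate(gfn_t s, gfn_t e)
{
	printf("invalidate [%llu, %llu)\n", (unsigned long long)s,
	       (unsigned long long)e);
}

static void add_head_and_tail(const struct toy_slot *slot,
			      gfn_t r_start, gfn_t r_end)
{
	gfn_t start = r_start & ~(NR_PAGES - 1);
	gfn_t end = (r_end - 1) & ~(NR_PAGES - 1);

	/* Head: only if the chunk is partially covered by the range,
	 * sits fully inside the slot, and isn't already mixed. */
	if ((start != r_start || start + NR_PAGES > r_end) &&
	    start >= slot->base_gfn &&
	    start + NR_PAGES <= slot->base_gfn + slot->npages &&
	    !test_mixed(start))
		invalidate(start, start + NR_PAGES);

	if (end == start)
		return;

	/* Tail: same idea for the chunk holding the last gfn. */
	if (end + NR_PAGES > r_end &&
	    end + NR_PAGES <= slot->base_gfn + slot->npages &&
	    !test_mixed(end))
		invalidate(end, end + NR_PAGES);
}

int main(void)
{
	struct toy_slot slot = { .base_gfn = 0, .npages = 4096 };

	/* [300, 700) clips both the [0, 512) and [512, 1024) chunks. */
	add_head_and_tail(&slot, 300, 700);
	return 0;
}

Note that the head chunk is deliberately skipped when the range covers it exactly (start == range->start and start + nr_pages <= range->end), since a fully covered hugepage is already unmapped, and tracked, by the rest of the attribute update.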
