262 changes: 132 additions & 130 deletions src/policy/space.rs
@@ -127,33 +127,21 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
trace!("Pages reserved");
trace!("Polling ..");

if should_poll && self.get_gc_trigger().poll(false, Some(self.as_space())) {
// Clear the request
pr.clear_request(pages_reserved);
// Whether we have triggered GC. There are two places in this function that can trigger GC.
let mut gc_triggered = false;

// If we do not want GC on fail, just return zero.
if !alloc_options.on_fail.allow_gc() {
return Address::ZERO;
}

// Otherwise do GC here
debug!("Collection required");
assert!(allow_gc, "GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");

// Inform GC trigger about the pending allocation.
let meta_pages_reserved = self.estimate_side_meta_pages(pages_reserved);
let total_pages_reserved = pages_reserved + meta_pages_reserved;
self.get_gc_trigger()
.policy
.on_pending_allocation(total_pages_reserved);

VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is a mutator
if should_poll {
gc_triggered = self.get_gc_trigger().poll(false, Some(self.as_space()));
debug!("Polled. GC triggered? {gc_triggered}");
}

// Return zero -- the caller will handle re-attempting allocation
Address::ZERO
} else {
debug!("Collection not required");
// We should try to allocate if GC was not triggered.
// This includes the case where we didn't poll at all.
// TODO: If we allow triggering GC AND over-committing at the same time,
// we can still allocate even if polling triggers GC.
let should_try_to_allocate = !gc_triggered;

let attempted_allocation_and_failed = if should_try_to_allocate {
// We need this lock: Otherwise, it is possible that one thread acquires pages in a new chunk but has not yet
// set the SFT for it (in grow_space()), and another thread acquires pages in the same chunk, which is not
// a new chunk, so grow_space() won't be called on it. The second thread could return a result in the chunk before
@@ -162,125 +150,139 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
// See: https://github.com/mmtk/mmtk-core/issues/610
let lock = self.common().acquire_lock.lock().unwrap();

match pr.get_new_pages(self.common().descriptor, pages_reserved, pages, tls) {
Ok(res) => {
debug!(
"Got new pages {} ({} pages) for {} in chunk {}, new_chunk? {}",
res.start,
res.pages,
self.get_name(),
conversions::chunk_align_down(res.start),
res.new_chunk
);
let bytes = conversions::pages_to_bytes(res.pages);

let mmap = || {
// Mmap the pages and the side metadata, and handle errors. In case of any error,
// we will either call back to the VM for OOM, or simply panic.
if let Err(mmap_error) = self
.common()
.mmapper
.ensure_mapped(
res.start,
res.pages,
self.common().mmap_strategy(),
&memory::MmapAnnotation::Space {
name: self.get_name(),
},
)
.and(self.common().metadata.try_map_metadata_space(
res.start,
bytes,
self.get_name(),
))
{
memory::handle_mmap_error::<VM>(mmap_error, tls, res.start, bytes);
}
};
let grow_space = || {
self.grow_space(res.start, bytes, res.new_chunk);
};

// The scope of the lock is important in terms of performance when we have many allocator threads.
if SFT_MAP.get_side_metadata().is_some() {
// If the SFT map uses side metadata, we have to initialize the side metadata first.
mmap();
// then grow space, which will use the side metadata we mapped above
grow_space();
// then we can drop the lock after grow_space()
drop(lock);
} else {
// In normal cases, we can drop the lock immediately after grow_space()
grow_space();
drop(lock);
// and map side metadata without holding the lock
mmap();
if let Ok(res) = pr.get_new_pages(self.common().descriptor, pages_reserved, pages, tls)
{
debug!(
"Got new pages {} ({} pages) for {} in chunk {}, new_chunk? {}",
res.start,
res.pages,
self.get_name(),
conversions::chunk_align_down(res.start),
res.new_chunk
);
let bytes = conversions::pages_to_bytes(res.pages);

let mmap = || {
// Mmap the pages and the side metadata, and handle errors. In case of any error,
// we will either call back to the VM for OOM, or simply panic.
if let Err(mmap_error) = self
.common()
.mmapper
.ensure_mapped(
res.start,
res.pages,
self.common().mmap_strategy(),
&memory::MmapAnnotation::Space {
name: self.get_name(),
},
)
.and(self.common().metadata.try_map_metadata_space(
res.start,
bytes,
self.get_name(),
))
{
memory::handle_mmap_error::<VM>(mmap_error, tls, res.start, bytes);
}
};
let grow_space = || {
self.grow_space(res.start, bytes, res.new_chunk);
};

// The scope of the lock is important in terms of performance when we have many allocator threads.
if SFT_MAP.get_side_metadata().is_some() {
// If the SFT map uses side metadata, we have to initialize the side metadata first.
mmap();
// then grow space, which will use the side metadata we mapped above
grow_space();
// then we can drop the lock after grow_space()
drop(lock);
} else {
// In normal cases, we can drop the lock immediately after grow_space()
grow_space();
drop(lock);
// and map side metadata without holding the lock
mmap();
}

// TODO: Concurrent zeroing
if self.common().zeroed {
memory::zero(res.start, bytes);
}
// TODO: Concurrent zeroing
if self.common().zeroed {
memory::zero(res.start, bytes);
}

// Some assertions
{
// --- Assert the start of the allocated region ---
// The start address SFT should be correct.
debug_assert_eq!(SFT_MAP.get_checked(res.start).name(), self.get_name());
// The start address is in our space.
debug_assert!(self.address_in_space(res.start));
// The descriptor should be correct.
debug_assert_eq!(
self.common().vm_map().get_descriptor_for_address(res.start),
self.common().descriptor
);

// --- Assert the last byte in the allocated region ---
let last_byte = res.start + bytes - 1;
// The SFT for the last byte in the allocated memory should be correct.
debug_assert_eq!(SFT_MAP.get_checked(last_byte).name(), self.get_name());
// The last byte in the allocated memory should be in this space.
debug_assert!(self.address_in_space(last_byte));
// The descriptor for the last byte should be correct.
debug_assert_eq!(
self.common().vm_map().get_descriptor_for_address(last_byte),
self.common().descriptor
);
}
// Some assertions
{
// --- Assert the start of the allocated region ---
// The start address SFT should be correct.
debug_assert_eq!(SFT_MAP.get_checked(res.start).name(), self.get_name());
// The start address is in our space.
debug_assert!(self.address_in_space(res.start));
// The descriptor should be correct.
debug_assert_eq!(
self.common().vm_map().get_descriptor_for_address(res.start),
self.common().descriptor
);

debug!("Space.acquire(), returned = {}", res.start);
res.start
// --- Assert the last byte in the allocated region ---
let last_byte = res.start + bytes - 1;
// The SFT for the last byte in the allocated memory should be correct.
debug_assert_eq!(SFT_MAP.get_checked(last_byte).name(), self.get_name());
// The last byte in the allocated memory should be in this space.
debug_assert!(self.address_in_space(last_byte));
// The descriptor for the last byte should be correct.
debug_assert_eq!(
self.common().vm_map().get_descriptor_for_address(last_byte),
self.common().descriptor
);
}
Err(_) => {
drop(lock); // drop the lock immediately

// Clear the request
pr.clear_request(pages_reserved);
debug!("Space.acquire(), returned = {}", res.start);
return res.start;
}

// If we do not want GC on fail, just return zero.
if !alloc_options.on_fail.allow_gc() {
return Address::ZERO;
}
true
} else {
false
};

// We thought we had memory to allocate, but somehow failed the allocation. Will force a GC.
assert!(
allow_gc,
"Physical allocation failed when GC is not allowed!"
);
// We didn't successfully allocate memory.

let gc_performed = self.get_gc_trigger().poll(true, Some(self.as_space()));
debug_assert!(gc_performed, "GC not performed when forced.");
// Clear the request
pr.clear_request(pages_reserved);

// Inform GC trigger about the pending allocation.
self.get_gc_trigger()
.policy
.on_pending_allocation(pages_reserved);
// If we do not want GC on fail, just return zero.
if !alloc_options.on_fail.allow_gc() {
return Address::ZERO;
}

VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We asserted that this is a mutator.
Address::ZERO
}
// Otherwise do GC here
assert!(
allow_gc,
"{}",
if attempted_allocation_and_failed {
// We thought we had memory to allocate, but somehow failed the allocation. Will force a GC.
"Physical allocation failed when GC is not allowed!"
} else {
"GC is not allowed here: collection is not initialized (did you call initialize_collection()?)."
}
);

if attempted_allocation_and_failed {
let gc_triggered = self.get_gc_trigger().poll(true, Some(self.as_space()));
debug!("Polled. GC triggered? {gc_triggered}");
debug_assert!(gc_triggered, "GC not triggered when forced.");
}
// Inform GC trigger about the pending allocation.
let meta_pages_reserved = self.estimate_side_meta_pages(pages_reserved);
let total_pages_reserved = pages_reserved + meta_pages_reserved;
self.get_gc_trigger()
.policy
.on_pending_allocation(total_pages_reserved);

VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is a mutator

// Return zero -- the caller will handle re-attempting allocation
Address::ZERO
}

fn address_in_space(&self, start: Address) -> bool {
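
Below is a minimal, self-contained sketch of the branching logic this change introduces in `Space::acquire`: poll the GC trigger once up front, attempt the physical allocation only if that poll did not trigger a GC, and funnel both failure paths (a poll that triggers GC, and a failed `get_new_pages`) into a single tail that honours `alloc_options.on_fail` and blocks the mutator. It deliberately omits the bookkeeping done in the real code (clearing the page request, mapping memory and side metadata, informing the trigger about the pending allocation). The `Outcome` enum, `acquire_slow_path` function, and its boolean parameters are hypothetical stand-ins for illustration only, not mmtk-core's API.

// Hypothetical stand-ins, not mmtk-core types; they only model the decision flow.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Outcome {
    /// The physical allocation succeeded (the payload stands in for the start address).
    Allocated(usize),
    /// Return a null address without GC, because alloc_options.on_fail disallows it.
    FailNoGc,
    /// Block the mutator for GC and return a null address so the caller retries.
    BlockForGc,
}

fn acquire_slow_path(
    should_poll: bool,
    poll_triggers_gc: bool,        // models gc_trigger().poll(false, ..)
    physical_alloc_succeeds: bool, // models pr.get_new_pages(..).is_ok()
    allow_gc_on_fail: bool,        // models alloc_options.on_fail.allow_gc()
) -> Outcome {
    // 1. Poll the GC trigger up front, but only if the caller asked us to.
    let gc_triggered = should_poll && poll_triggers_gc;

    // 2. Attempt the physical allocation only if the poll did not trigger a GC.
    let attempted_allocation_and_failed = if !gc_triggered {
        if physical_alloc_succeeds {
            return Outcome::Allocated(42); // fast exit on success
        }
        true // we tried, and the page resource could not satisfy the request
    } else {
        false // we never tried, because the poll already demanded a GC
    };

    // 3. Either way, a GC is now needed; bail out first if the caller forbids it.
    if !allow_gc_on_fail {
        return Outcome::FailNoGc;
    }

    // 4. A failed physical allocation forces a second poll (poll(true, ..) in the
    //    real code); the up-front poll already made its own decision.
    if attempted_allocation_and_failed {
        println!("forcing a GC decision after a failed physical allocation");
    }
    Outcome::BlockForGc
}

fn main() {
    // The up-front poll triggers GC: skip the allocation attempt and block.
    assert_eq!(acquire_slow_path(true, true, true, true), Outcome::BlockForGc);
    // The poll passes and pages are available: allocate.
    assert_eq!(acquire_slow_path(true, false, true, true), Outcome::Allocated(42));
    // Pages run out and GC on failure is disallowed: return null to the caller.
    assert_eq!(acquire_slow_path(false, false, false, false), Outcome::FailNoGc);
}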