diff --git a/Cargo.toml b/Cargo.toml index 4e2433a65b..3945cd3023 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,6 +45,7 @@ itertools = "0.10.5" sys-info = "0.9" regex = "1.7.0" static_assertions = "1.1.0" +delegate = "0.9.0" [dev-dependencies] rand = "0.8.5" diff --git a/docs/tutorial/code/mygc_semispace/global.rs b/docs/tutorial/code/mygc_semispace/global.rs index 9338bb5152..677c821a87 100644 --- a/docs/tutorial/code/mygc_semispace/global.rs +++ b/docs/tutorial/code/mygc_semispace/global.rs @@ -154,6 +154,10 @@ impl Plan for MyGC { fn base(&self) -> &BasePlan { &self.common.base } + + fn base_mut(&mut self) -> &mut BasePlan { + &mut self.common.base + } // ANCHOR_END: plan_base // Add diff --git a/src/memory_manager.rs b/src/memory_manager.rs index f6bc4a1b5d..04d2d69c5f 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -85,6 +85,11 @@ pub fn mmtk_init(builder: &MMTKBuilder) -> Box> { Box::new(mmtk) } +#[cfg(feature = "vm_space")] +pub fn lazy_init_vm_space(mmtk: &'static mut MMTK, start: Address, size: usize) { + mmtk.plan.base_mut().vm_space.lazy_initialize(start, size); +} + /// Request MMTk to create a mutator for the given thread. The ownership /// of returned boxed mutator is transferred to the binding, and the binding needs to take care of its /// lifetime. For performance reasons, A VM should store the returned mutator in a thread local storage diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs index 2d5fe600bd..7c7f2409bc 100644 --- a/src/plan/generational/copying/global.rs +++ b/src/plan/generational/copying/global.rs @@ -148,6 +148,10 @@ impl Plan for GenCopy { &self.gen.common.base } + fn base_mut(&mut self) -> &mut BasePlan { + &mut self.gen.common.base + } + fn common(&self) -> &CommonPlan { &self.gen.common } diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index d732cac364..9c7ea1fa22 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -178,6 +178,10 @@ impl Plan for GenImmix { &self.gen.common.base } + fn base_mut(&mut self) -> &mut BasePlan { + &mut self.gen.common.base + } + fn common(&self) -> &CommonPlan { &self.gen.common } diff --git a/src/plan/global.rs b/src/plan/global.rs index c80c021608..0a1dbb5f45 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -8,6 +8,8 @@ use crate::plan::Mutator; use crate::policy::immortalspace::ImmortalSpace; use crate::policy::largeobjectspace::LargeObjectSpace; use crate::policy::space::{PlanCreateSpaceArgs, Space}; +#[cfg(feature = "vm_space")] +use crate::policy::vmspace::VMSpace; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; #[cfg(feature = "analysis")] @@ -176,6 +178,7 @@ pub trait Plan: 'static + Sync + Downcast { } fn base(&self) -> &BasePlan; + fn base_mut(&mut self) -> &mut BasePlan; fn schedule_collection(&'static self, _scheduler: &GCWorkScheduler); fn common(&self) -> &CommonPlan { panic!("Common Plan not handled!") @@ -423,29 +426,7 @@ pub struct BasePlan { /// the VM space. 
#[cfg(feature = "vm_space")] #[trace] - pub vm_space: ImmortalSpace, -} - -#[cfg(feature = "vm_space")] -pub fn create_vm_space(args: &mut CreateSpecificPlanArgs) -> ImmortalSpace { - use crate::util::constants::LOG_BYTES_IN_MBYTE; - let boot_segment_bytes = *args.global_args.options.vm_space_size; - debug_assert!(boot_segment_bytes > 0); - - use crate::util::conversions::raw_align_up; - use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; - let boot_segment_mb = raw_align_up(boot_segment_bytes, BYTES_IN_CHUNK) >> LOG_BYTES_IN_MBYTE; - - let space = ImmortalSpace::new_vm_space(args.get_space_args( - "boot", - false, - VMRequest::fixed_size(boot_segment_mb), - )); - - // The space is mapped externally by the VM. We need to update our mmapper to mark the range as mapped. - space.ensure_mapped(); - - space + pub vm_space: VMSpace, } /// Args needed for creating any plan. This includes a set of contexts from MMTK or global. This @@ -520,7 +501,7 @@ impl BasePlan { VMRequest::discontiguous(), )), #[cfg(feature = "vm_space")] - vm_space: create_vm_space(&mut args), + vm_space: VMSpace::new(&mut args), initialized: AtomicBool::new(false), trigger_gc_when_heap_is_full: AtomicBool::new(true), diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 1a7e797e9e..ec3b2a4b71 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -117,6 +117,10 @@ impl Plan for Immix { &self.common.base } + fn base_mut(&mut self) -> &mut BasePlan { + &mut self.common.base + } + fn common(&self) -> &CommonPlan { &self.common } diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 70aafde211..96117e6a39 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -61,6 +61,10 @@ impl Plan for MarkCompact { &self.common.base } + fn base_mut(&mut self) -> &mut BasePlan { + &mut self.common.base + } + fn common(&self) -> &CommonPlan { &self.common } diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 6e5b9e40aa..cbb31a5284 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -87,6 +87,10 @@ impl Plan for MarkSweep { &self.common.base } + fn base_mut(&mut self) -> &mut BasePlan { + &mut self.common.base + } + fn common(&self) -> &CommonPlan { &self.common } diff --git a/src/plan/mod.rs b/src/plan/mod.rs index b2c415464d..15c16182fa 100644 --- a/src/plan/mod.rs +++ b/src/plan/mod.rs @@ -26,6 +26,8 @@ pub use global::AllocationSemantics; pub(crate) use global::GcStatus; pub use global::Plan; pub(crate) use global::PlanTraceObject; +#[cfg(feature = "vm_space")] // This is used for creating VM space +pub(crate) use global::{CreateGeneralPlanArgs, CreateSpecificPlanArgs}; mod mutator_context; pub use mutator_context::Mutator; diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs index e09cca562a..ed0fd5266a 100644 --- a/src/plan/nogc/global.rs +++ b/src/plan/nogc/global.rs @@ -53,6 +53,10 @@ impl Plan for NoGC { &self.base } + fn base_mut(&mut self) -> &mut BasePlan { + &mut self.base + } + fn prepare(&mut self, _tls: VMWorkerThread) { unreachable!() } diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index 2bcd583cf9..5c812c14e9 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -77,6 +77,10 @@ impl Plan for PageProtect { &self.common.base } + fn base_mut(&mut self) -> &mut BasePlan { + &mut self.common.base + } + fn common(&self) -> &CommonPlan { &self.common } diff --git a/src/plan/semispace/global.rs 
b/src/plan/semispace/global.rs index f8fa78ef8d..2485fcb9ea 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -129,6 +129,10 @@ impl Plan for SemiSpace { &self.common.base } + fn base_mut(&mut self) -> &mut BasePlan { + &mut self.common.base + } + fn common(&self) -> &CommonPlan { &self.common } diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs index 295c402367..bbe83ec259 100644 --- a/src/plan/sticky/immix/global.rs +++ b/src/plan/sticky/immix/global.rs @@ -67,6 +67,10 @@ impl Plan for StickyImmix { self.immix.base() } + fn base_mut(&mut self) -> &mut crate::plan::global::BasePlan { + self.immix.base_mut() + } + fn generational( &self, ) -> Option<&dyn crate::plan::generational::global::GenerationalPlan> { diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index e041648156..a131750366 100644 --- a/src/policy/immortalspace.rs +++ b/src/policy/immortalspace.rs @@ -120,15 +120,6 @@ impl crate::policy::gc_work::PolicyTraceObject for ImmortalSp impl ImmortalSpace { pub fn new(args: crate::policy::space::PlanCreateSpaceArgs) -> Self { - Self::new_inner(args, false) - } - - #[cfg(feature = "vm_space")] - pub fn new_vm_space(args: crate::policy::space::PlanCreateSpaceArgs) -> Self { - Self::new_inner(args, true) - } - - pub fn new_inner(args: crate::policy::space::PlanCreateSpaceArgs, vm_space: bool) -> Self { let vm_map = args.vm_map; let is_discontiguous = args.vmrequest.is_discontiguous(); let common = CommonSpace::new(args.into_policy_args( @@ -144,7 +135,26 @@ impl ImmortalSpace { MonotonePageResource::new_contiguous(common.start, common.extent, vm_map) }, common, - vm_space, + vm_space: false, + } + } + + #[cfg(feature = "vm_space")] + pub fn new_vm_space( + args: crate::policy::space::PlanCreateSpaceArgs, + start: Address, + size: usize, + ) -> Self { + assert!(!args.vmrequest.is_discontiguous()); + ImmortalSpace { + mark_state: MarkState::new(), + pr: MonotonePageResource::new_contiguous(start, size, args.vm_map), + common: CommonSpace::new(args.into_policy_args( + false, + true, + metadata::extract_side_metadata(&[*VM::VMObjectModel::LOCAL_MARK_BIT_SPEC]), + )), + vm_space: true, } } diff --git a/src/policy/mod.rs b/src/policy/mod.rs index 2e459db095..f5214b273d 100644 --- a/src/policy/mod.rs +++ b/src/policy/mod.rs @@ -27,3 +27,5 @@ pub mod largeobjectspace; pub mod lockfreeimmortalspace; pub mod markcompactspace; pub mod marksweepspace; +#[cfg(feature = "vm_space")] +pub mod vmspace; diff --git a/src/policy/sft_map.rs b/src/policy/sft_map.rs index 12def71549..dbe934221f 100644 --- a/src/policy/sft_map.rs +++ b/src/policy/sft_map.rs @@ -120,8 +120,13 @@ mod space_map { assert!(old.name() == EMPTY_SFT_NAME || old.name() == space.name()); // Make sure the range is in the space let space_start = Self::index_to_space_start(index); - assert!(start >= space_start); - assert!(start + bytes <= space_start + MAX_SPACE_EXTENT); + // FIXME: Curerntly skip the check for the last space. The following works fine for MMTk internal spaces, + // but the VM space is an exception. Any address after the last space is considered as the last space, + // based on our indexing function. In that case, we cannot assume the end of the region is within the last space (with MAX_SPACE_EXTENT). 
+ if index != Self::TABLE_SIZE - 1 { + assert!(start >= space_start); + assert!(start + bytes <= space_start + MAX_SPACE_EXTENT); + } } *mut_self.sft.get_unchecked_mut(index) = space; } @@ -512,8 +517,9 @@ mod sparse_chunk_map { // in which case, we still set SFT map again. debug_assert!( old == EMPTY_SFT_NAME || new == EMPTY_SFT_NAME || old == new, - "attempt to overwrite a non-empty chunk {} in SFT map (from {} to {})", + "attempt to overwrite a non-empty chunk {} ({}) in SFT map (from {} to {})", chunk, + crate::util::conversions::chunk_index_to_address(chunk), old, new ); diff --git a/src/policy/space.rs b/src/policy/space.rs index 50395eb7a0..e1f89cfdc1 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -490,10 +490,8 @@ impl CommonSpace { top: _top, } => (_extent, _top), VMRequest::Fixed { - extent: _extent, - top: _top, - .. - } => (_extent, _top), + extent: _extent, .. + } => (_extent, false), _ => unreachable!(), }; @@ -524,7 +522,22 @@ impl CommonSpace { // FIXME rtn.descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent); // VM.memory.setHeapRange(index, start, start.plus(extent)); - args.plan_args.vm_map.insert(start, extent, rtn.descriptor); + + // We only initialize our vm map if the range of the space is in our available heap range. For normally spaces, + // they are definitely in our heap range. But for VM space, a runtime could give us an arbitrary range. We only + // insert into our vm map if the range overlaps with our heap. + { + use crate::util::heap::layout; + let overlap = + Address::range_intersection(&(start..start + extent), &layout::available_range()); + if !overlap.is_empty() { + args.plan_args.vm_map.insert( + overlap.start, + overlap.end - overlap.start, + rtn.descriptor, + ); + } + } // For contiguous space, we know its address range so we reserve metadata memory for its range. if rtn diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs new file mode 100644 index 0000000000..bbf0044040 --- /dev/null +++ b/src/policy/vmspace.rs @@ -0,0 +1,229 @@ +use crate::plan::{CreateGeneralPlanArgs, CreateSpecificPlanArgs}; +use crate::plan::{ObjectQueue, VectorObjectQueue}; +use crate::policy::immortalspace::ImmortalSpace; +use crate::policy::sft::GCWorkerMutRef; +use crate::policy::sft::SFT; +use crate::policy::space::{CommonSpace, Space}; +use crate::util::address::Address; +use crate::util::heap::HeapMeta; +use crate::util::heap::PageResource; +use crate::util::heap::VMRequest; +use crate::util::metadata::side_metadata::SideMetadataContext; +use crate::util::metadata::side_metadata::SideMetadataSanity; +use crate::util::ObjectReference; +use crate::vm::VMBinding; + +use delegate::delegate; + +pub struct VMSpace { + inner: Option>, + // Save it + args: CreateSpecificPlanArgs, +} + +impl SFT for VMSpace { + delegate! { + // Delegate every call to the inner space. Given that we have acquired SFT, we can assume there are objects in the space and the space is initialized. 
+ to self.space() { + fn name(&self) -> &str; + fn is_live(&self, object: ObjectReference) -> bool; + fn is_reachable(&self, object: ObjectReference) -> bool; + #[cfg(feature = "object_pinning")] + fn pin_object(&self, object: ObjectReference) -> bool; + #[cfg(feature = "object_pinning")] + fn unpin_object(&self, object: ObjectReference) -> bool; + #[cfg(feature = "object_pinning")] + fn is_object_pinned(&self, object: ObjectReference) -> bool; + fn is_movable(&self) -> bool; + #[cfg(feature = "sanity")] + fn is_sane(&self) -> bool; + fn initialize_object_metadata(&self, object: ObjectReference, alloc: bool); + #[cfg(feature = "is_mmtk_object")] + fn is_mmtk_object(&self, addr: Address) -> bool; + fn sft_trace_object( + &self, + queue: &mut VectorObjectQueue, + object: ObjectReference, + worker: GCWorkerMutRef, + ) -> ObjectReference; + } + } +} + +impl Space for VMSpace { + fn as_space(&self) -> &dyn Space { + self + } + fn as_sft(&self) -> &(dyn SFT + Sync + 'static) { + self + } + fn get_page_resource(&self) -> &dyn PageResource { + self.space().get_page_resource() + } + fn common(&self) -> &CommonSpace { + self.space().common() + } + + fn initialize_sft(&self) { + if self.inner.is_some() { + self.common().initialize_sft(self.as_sft()) + } + } + + fn release_multiple_pages(&mut self, _start: Address) { + panic!("immortalspace only releases pages enmasse") + } + + fn verify_side_metadata_sanity(&self, side_metadata_sanity_checker: &mut SideMetadataSanity) { + side_metadata_sanity_checker.verify_metadata_context( + std::any::type_name::(), + &SideMetadataContext { + global: self.args.global_side_metadata_specs.clone(), + local: vec![], + }, + ) + } + + fn address_in_space(&self, start: Address) -> bool { + if let Some(space) = self.space_maybe() { + space.address_in_space(start) + } else { + false + } + } +} + +use crate::scheduler::GCWorker; +use crate::util::copy::CopySemantics; + +impl crate::policy::gc_work::PolicyTraceObject for VMSpace { + fn trace_object( + &self, + queue: &mut Q, + object: ObjectReference, + _copy: Option, + _worker: &mut GCWorker, + ) -> ObjectReference { + self.trace_object(queue, object) + } + fn may_move_objects() -> bool { + false + } +} + +impl VMSpace { + pub fn new(args: &mut CreateSpecificPlanArgs) -> Self { + let args_clone = CreateSpecificPlanArgs { + global_args: CreateGeneralPlanArgs { + vm_map: args.global_args.vm_map, + mmapper: args.global_args.mmapper, + heap: HeapMeta::new(), // we do not use this + options: args.global_args.options.clone(), + gc_trigger: args.global_args.gc_trigger.clone(), + scheduler: args.global_args.scheduler.clone(), + }, + constraints: args.constraints, + global_side_metadata_specs: args.global_side_metadata_specs.clone(), + }; + // Create the space if the VM space start/size is set. Otherwise, use None. 
+ let inner = (!args.global_args.options.vm_space_start.is_zero()) + .then(|| Self::create_space(args, None)); + Self { + inner, + args: args_clone, + } + } + + pub fn lazy_initialize(&mut self, start: Address, size: usize) { + assert!(self.inner.is_none(), "VM space has been initialized"); + self.inner = Some(Self::create_space(&mut self.args, Some((start, size)))); + + self.common().initialize_sft(self.as_sft()); + } + + fn create_space( + args: &mut CreateSpecificPlanArgs, + location: Option<(Address, usize)>, + ) -> ImmortalSpace { + use crate::util::conversions::raw_align_up; + use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; + + let (vm_space_start, vm_space_bytes) = if let Some((start, size)) = location { + (start, size) + } else { + ( + *args.global_args.options.vm_space_start, + *args.global_args.options.vm_space_size, + ) + }; + + assert!(!vm_space_start.is_zero()); + assert!(vm_space_bytes > 0); + + // For simplicity, VMSpace has to be outside our available heap range. + // TODO: Allow VMSpace in our available heap range. + assert!(Address::range_intersection( + &(vm_space_start..vm_space_start + vm_space_bytes), + &crate::util::heap::layout::available_range() + ) + .is_empty()); + + let (vm_space_start_aligned, vm_space_bytes_aligned) = ( + vm_space_start.align_down(BYTES_IN_CHUNK), + raw_align_up(vm_space_bytes, BYTES_IN_CHUNK), + ); + debug!( + "start {} is aligned to {}, bytes = {}", + vm_space_start, vm_space_start_aligned, vm_space_bytes_aligned + ); + + let space_args = args.get_space_args( + "vm_space", + false, + VMRequest::fixed(vm_space_start_aligned, vm_space_bytes_aligned), + ); + let space = + ImmortalSpace::new_vm_space(space_args, vm_space_start_aligned, vm_space_bytes_aligned); + + // The space is mapped externally by the VM. We need to update our mmapper to mark the range as mapped. + space.ensure_mapped(); + + space + } + + fn space_maybe(&self) -> Option<&ImmortalSpace> { + self.inner.as_ref() + } + + fn space(&self) -> &ImmortalSpace { + self.inner.as_ref().unwrap() + } + + // fn space_mut(&mut self) -> &mut ImmortalSpace { + // self.inner.as_mut().unwrap() + // } + + pub fn prepare(&mut self) { + if let Some(ref mut space) = &mut self.inner { + space.prepare() + } + } + + pub fn release(&mut self) { + if let Some(ref mut space) = &mut self.inner { + space.release() + } + } + + pub fn trace_object( + &self, + queue: &mut Q, + object: ObjectReference, + ) -> ObjectReference { + if let Some(ref space) = &self.inner { + space.trace_object(queue, object) + } else { + panic!("We haven't initialized vm space, but we tried to trace the object {} and thought it was in vm space?", object) + } + } +} diff --git a/src/util/address.rs b/src/util/address.rs index 00c895d374..e263e182a2 100644 --- a/src/util/address.rs +++ b/src/util/address.rs @@ -318,6 +318,12 @@ impl Address { MMAPPER.is_mapped_address(self) } } + + /// Returns the intersection of the two address ranges. The returned range could + /// be empty if there is no intersection between the ranges. + pub fn range_intersection(r1: &Range
<Address>, r2: &Range<Address>) -> Range<Address>
{ + r1.start.max(r2.start)..r1.end.min(r2.end) + } } /// allows print Address as upper-case hex value @@ -348,6 +354,15 @@ impl fmt::Debug for Address { } } +impl std::str::FromStr for Address { + type Err = std::num::ParseIntError; + + fn from_str(s: &str) -> Result { + let raw: usize = s.parse()?; + Ok(Address(raw)) + } +} + #[cfg(test)] mod tests { use crate::util::Address; diff --git a/src/util/alloc/allocator.rs b/src/util/alloc/allocator.rs index e88347e56f..9cc235609e 100644 --- a/src/util/alloc/allocator.rs +++ b/src/util/alloc/allocator.rs @@ -63,10 +63,12 @@ pub fn align_allocation_inner( // May require an alignment let region_isize = region.as_usize() as isize; - let mask = (alignment - 1) as isize; // fromIntSignExtend let neg_off = -offset; // fromIntSignExtend - let delta = (neg_off - region_isize) & mask; + + // TODO: Consider using neg_off.wrapping_sub_unsigned(region.as_usize()), and we can remove region_isize. + // This requires Rust 1.66.0+. + let delta = neg_off.wrapping_sub(region_isize) & mask; // Use wrapping_sub to avoid overflow if fillalignmentgap && (VM::ALIGNMENT_VALUE != 0) { fill_alignment_gap::(region, region + delta); diff --git a/src/util/heap/layout/mod.rs b/src/util/heap/layout/mod.rs index bf348f8e46..f8fd768dbe 100644 --- a/src/util/heap/layout/mod.rs +++ b/src/util/heap/layout/mod.rs @@ -34,3 +34,20 @@ pub fn create_mmapper() -> Box { // TODO: ByteMapMmapper for 39-bit or less virtual space Box::new(fragmented_mapper::FragmentedMapper::new()) } + +use crate::util::Address; +use std::ops::Range; + +/// The heap range between HEAP_START and HEAP_END +/// Heap range include the availble range, but may include some address ranges +/// that we count as part of the heap but we do not allocate into, such as +/// VM spaces. However, currently, heap range is the same as available range. +pub const fn heap_range() -> Range
<Address> { vm_layout_constants::HEAP_START..vm_layout_constants::HEAP_END } /// The available heap range between AVAILABLE_START and AVAILABLE_END. /// Available range is what MMTk may allocate into. pub const fn available_range() -> Range<Address>
{ vm_layout_constants::AVAILABLE_START..vm_layout_constants::AVAILABLE_END } diff --git a/src/util/heap/layout/vm_layout_constants.rs b/src/util/heap/layout/vm_layout_constants.rs index 1d42e67b1e..f1735885e9 100644 --- a/src/util/heap/layout/vm_layout_constants.rs +++ b/src/util/heap/layout/vm_layout_constants.rs @@ -53,14 +53,14 @@ pub const MAX_SPACE_EXTENT: usize = 1 << LOG_SPACE_EXTENT; // FIXME: HEAP_START, HEAP_END are VM-dependent /** Lowest virtual address used by the virtual machine */ #[cfg(target_pointer_width = "32")] -pub const HEAP_START: Address = chunk_align_down(unsafe { Address::from_usize(0x6000_0000) }); +pub const HEAP_START: Address = chunk_align_down(unsafe { Address::from_usize(0x8000_0000) }); #[cfg(target_pointer_width = "64")] pub const HEAP_START: Address = chunk_align_down(unsafe { Address::from_usize(0x0000_0200_0000_0000usize) }); /** Highest virtual address used by the virtual machine */ #[cfg(target_pointer_width = "32")] -pub const HEAP_END: Address = chunk_align_up(unsafe { Address::from_usize(0xb000_0000) }); +pub const HEAP_END: Address = chunk_align_up(unsafe { Address::from_usize(0xd000_0000) }); #[cfg(target_pointer_width = "64")] pub const HEAP_END: Address = HEAP_START.add(1 << (LOG_MAX_SPACES + LOG_SPACE_EXTENT)); @@ -86,20 +86,23 @@ pub const VM_SPACE_SIZE: usize = pub const VM_SPACE_SIZE: usize = chunk_align_up(unsafe { Address::from_usize(0xdc0_0000) }).as_usize(); +// In Java MMTk, the virtual memory between HEAP_START and AVAILABLE_START, and between AVAILABLE_END +// and HEAP_END, are VM spaces. +// For us, the address ranges for VM spaces are set by the runtime, and we do not know them +// as constants. At this point, our AVAILABLE_START is the same as HEAP_START, and our AVAILABLE_END +// is the same as HEAP_END. +// TODO: We should decide if VM space is considered as part of our heap range, and remove either AVAILABLE_START/END, or HEAP_START/END. +// We can do either: +// 1. Our heap is what we use for MMTk. So VM spaces are not in our heap. Or +// 2. Our heap includes VM spaces, so its address range depends on the VM space range. + /** - * Lowest virtual address available for MMTk to manage. The address space between - * HEAP_START and AVAILABLE_START comprises memory directly managed by the VM, - * and not available to MMTk. + * Lowest virtual address available for MMTk to manage. */ -#[cfg(feature = "vm_space")] -pub const AVAILABLE_START: Address = HEAP_START.add(VM_SPACE_SIZE); -#[cfg(not(feature = "vm_space"))] pub const AVAILABLE_START: Address = HEAP_START; /** - * Highest virtual address available for MMTk to manage. The address space between - * HEAP_END and AVAILABLE_END comprises memory directly managed by the VM, - * and not available to MMTk. + * Highest virtual address available for MMTk to manage.
*/ pub const AVAILABLE_END: Address = HEAP_END; diff --git a/src/util/heap/vmrequest.rs b/src/util/heap/vmrequest.rs index c2c34a4634..e8adbd69d6 100644 --- a/src/util/heap/vmrequest.rs +++ b/src/util/heap/vmrequest.rs @@ -5,19 +5,9 @@ use crate::util::Address; #[derive(Clone, Copy, Debug)] pub enum VMRequest { Discontiguous, - Fixed { - start: Address, - extent: usize, - top: bool, - }, - Extent { - extent: usize, - top: bool, - }, - Fraction { - frac: f32, - top: bool, - }, + Fixed { start: Address, extent: usize }, + Extent { extent: usize, top: bool }, + Fraction { frac: f32, top: bool }, } impl VMRequest { @@ -72,4 +62,8 @@ impl VMRequest { } VMRequest::Extent { extent, top } } + + pub fn fixed(start: Address, extent: usize) -> Self { + VMRequest::Fixed { start, extent } + } } diff --git a/src/util/options.rs b/src/util/options.rs index 20a0adf71e..88d9fd58ca 100644 --- a/src/util/options.rs +++ b/src/util/options.rs @@ -1,6 +1,7 @@ use crate::scheduler::affinity::{get_total_num_cpus, CoreId}; use crate::util::constants::DEFAULT_STRESS_FACTOR; use crate::util::constants::LOG_BYTES_IN_MBYTE; +use crate::util::Address; use std::default::Default; use std::fmt::Debug; use std::str::FromStr; @@ -681,10 +682,10 @@ options! { // But this should have no obvious mutator overhead, and can be used to test GC performance along with a larger stress // factor (e.g. tens of metabytes). precise_stress: bool [env_var: true, command_line: true] [always_valid] = true, + // The start of vmspace. + vm_space_start: Address [env_var: true, command_line: true] [always_valid] = Address::ZERO, // The size of vmspace. - // FIXME: This value is set for JikesRVM. We need a proper way to set options. - // We need to set these values programmatically in VM specific code. - vm_space_size: usize [env_var: true, command_line: true] [|v: &usize| *v > 0] = 0x7cc_cccc, + vm_space_size: usize [env_var: true, command_line: true] [|v: &usize| *v > 0] = usize::MAX, // Perf events to measure // Semicolons are used to separate events // Each event is in the format of event_name,pid,cpu (see man perf_event_open for what pid and cpu mean).
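For binding developers, a minimal usage sketch of the new VM-space API follows. It assumes a binding crate with the `vm_space` feature enabled and only relies on the entry points touched by this diff (`vm_space_start`, `vm_space_size`, `lazy_init_vm_space`, `mmtk_init`) plus the existing `memory_manager::process` option hook; the generic `VM` parameter and the wrapper functions are illustrative, not part of the patch.

```rust
// Binding-side sketch (not part of this diff): how a runtime might use the new
// VM-space options and the lazy initialization entry point.
use mmtk::util::Address;
use mmtk::vm::VMBinding;
use mmtk::{memory_manager, MMTKBuilder, MMTK};

/// Case 1: the VM-space (e.g. boot image) location is known before `mmtk_init`.
fn init_with_vm_space<VM: VMBinding>(boot_start: Address, boot_bytes: usize) -> Box<MMTK<VM>> {
    let mut builder = MMTKBuilder::new();
    // Options are parsed via `FromStr`; the new `FromStr for Address`
    // parses a plain decimal usize, so pass decimal strings here.
    memory_manager::process(
        &mut builder,
        "vm_space_start",
        &boot_start.as_usize().to_string(),
    );
    memory_manager::process(&mut builder, "vm_space_size", &boot_bytes.to_string());
    memory_manager::mmtk_init::<VM>(&builder)
}

/// Case 2: the location is only known after MMTk is created, e.g. once the
/// boot image has been mapped. The VM space is then created lazily.
fn init_vm_space_lazily<VM: VMBinding>(
    mmtk: &'static mut MMTK<VM>,
    boot_start: Address,
    boot_bytes: usize,
) {
    // Requires the `vm_space` feature, matching the cfg on `lazy_init_vm_space`.
    memory_manager::lazy_init_vm_space(mmtk, boot_start, boot_bytes);
}
```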