From 24b7e47fc1647efec38f3d41e9419de619f70d99 Mon Sep 17 00:00:00 2001
From: Kunshan Wang
Date: Wed, 30 Mar 2022 11:29:43 +0800
Subject: [PATCH 1/4] Edge trait

We make the representation of edges VM-specific. The MMTk core only
depends on the abstract Edge trait. This allows VM bindings to
customise the behaviour of loading and storing edges, and to support
features like compressed OOPs.

Binding migration advice:

- The type of edges is now defined by the VM, and is no longer limited
  to Address. To transition from the old API, the VM can start by
  defining its VM-specific edge type as an alias of Address, like:

      type OpenJDKEdge = Address;

  and gradually transition to mmtk::vm::edge_shape::SimpleEdge, or
  define a more appropriate type for the VM.

- The VMBinding trait now requires a VMEdge member type. Implement it
  with the VM-specific edge type, like:

      type VMEdge = OpenJDKEdge;

- EdgeVisitor and RootsWorkFactory now become EdgeVisitor<ES: Edge> and
  RootsWorkFactory<ES: Edge>. If an old API function passes an
  EdgeVisitor or RootsWorkFactory object, add the VM's edge type as the
  type argument. For example, change

      fn scan_object<EV: EdgeVisitor>(...) { ... }

  to

      fn scan_object<EV: EdgeVisitor<VM::VMEdge>>(...) { ... }
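As a concrete sketch of the first two steps (the `MyVM` binding type below is
illustrative, not part of this patch), a binding can migrate without changing
its edge representation at all:

    use mmtk::util::Address;
    use mmtk::vm::VMBinding;

    // Step 1: name the VM-specific edge type. Aliasing Address keeps the
    // old representation, because mmtk-core still implements Edge for
    // Address (see src/vm/edge_shape.rs below).
    pub type MyVMEdge = Address;

    struct MyVM;

    impl VMBinding for MyVM {
        // Step 2: declare the edge type required by the VMBinding trait.
        type VMEdge = MyVMEdge;
        // ... the other associated types are unchanged and omitted here ...
    }

Once this compiles, the binding can replace the alias with SimpleEdge, or a
bespoke Edge implementation, at its own pace.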
---
 docs/tutorial/code/mygc_semispace/gc_work.rs |   2 +-
 src/mmtk.rs                                  |   9 +-
 src/plan/generational/gc_work.rs             |  11 +-
 src/plan/markcompact/gc_work.rs              |   2 +-
 src/plan/tracing.rs                          |  10 +-
 src/scheduler/gc_work.rs                     |  35 ++--
 src/util/edge_logger.rs                      |  62 ++++---
 src/util/sanity/sanity_checker.rs            |  15 +-
 src/vm/edge_shape.rs                         | 115 ++++++++++++++
 src/vm/mod.rs                                |   4 +
 src/vm/scanning.rs                           |  23 +--
 tests/test_roots_work_factory.rs             |   4 +-
 vmbindings/dummyvm/Cargo.toml                |   1 +
 vmbindings/dummyvm/src/edges.rs              | 157 ++++++++++++++++++
 vmbindings/dummyvm/src/lib.rs                |   2 +
 vmbindings/dummyvm/src/scanning.rs           |   9 +-
 vmbindings/dummyvm/src/tests/edges_test.rs   | 169 +++++++++++++++++++
 vmbindings/dummyvm/src/tests/fixtures/mod.rs |  34 ++++
 vmbindings/dummyvm/src/tests/mod.rs          |   1 +
 19 files changed, 589 insertions(+), 76 deletions(-)
 create mode 100644 src/vm/edge_shape.rs
 create mode 100644 vmbindings/dummyvm/src/edges.rs
 create mode 100644 vmbindings/dummyvm/src/tests/edges_test.rs

diff --git a/docs/tutorial/code/mygc_semispace/gc_work.rs b/docs/tutorial/code/mygc_semispace/gc_work.rs
index 4efa345e35..9ad75cabe6 100644
--- a/docs/tutorial/code/mygc_semispace/gc_work.rs
+++ b/docs/tutorial/code/mygc_semispace/gc_work.rs
@@ -42,7 +42,7 @@ impl<VM: VMBinding> ProcessEdgesWork for MyGCProcessEdges<VM> {
     type VM = VM;
     type ScanObjectsWorkType = ScanObjects<Self>;
 
-    fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
+    fn new(edges: Vec<EdgeOf<Self>>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
         let base = ProcessEdgesBase::new(edges, roots, mmtk);
         let plan = base.plan().downcast_ref::<MyGC<VM>>().unwrap();
         Self { base, plan }
diff --git a/src/mmtk.rs b/src/mmtk.rs
index c3199aaded..265790e5a8 100644
--- a/src/mmtk.rs
+++ b/src/mmtk.rs
@@ -2,6 +2,9 @@
 use crate::plan::Plan;
 use crate::policy::space::SFTMap;
 use crate::scheduler::GCWorkScheduler;
+
+#[cfg(feature = "extreme_assertions")]
+use crate::util::edge_logger::EdgeLogger;
 use crate::util::finalizable_processor::FinalizableProcessor;
 use crate::util::heap::layout::heap_layout::Mmapper;
 use crate::util::heap::layout::heap_layout::VMMap;
@@ -86,7 +89,9 @@ pub struct MMTK<VM: VMBinding> {
         Mutex<FinalizableProcessor<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType>>,
     pub(crate) scheduler: Arc<GCWorkScheduler<VM>>,
     #[cfg(feature = "sanity")]
-    pub(crate) sanity_checker: Mutex<SanityChecker>,
+    pub(crate) sanity_checker: Mutex<SanityChecker<VM::VMEdge>>,
+    #[cfg(feature = "extreme_assertions")]
+    pub(crate) edge_logger: EdgeLogger<VM::VMEdge>,
     inside_harness: AtomicBool,
 }
 
@@ -130,6 +135,8 @@ impl<VM: VMBinding> MMTK<VM> {
             #[cfg(feature = "sanity")]
             sanity_checker: Mutex::new(SanityChecker::new()),
             inside_harness: AtomicBool::new(false),
+            #[cfg(feature = "extreme_assertions")]
+            edge_logger: EdgeLogger::new(),
         }
     }
 
diff --git a/src/plan/generational/gc_work.rs b/src/plan/generational/gc_work.rs
index 081cdaf5ab..27d8b65e2c 100644
--- a/src/plan/generational/gc_work.rs
+++ b/src/plan/generational/gc_work.rs
@@ -1,7 +1,8 @@
 use crate::plan::generational::global::Gen;
 use crate::policy::space::Space;
 use crate::scheduler::gc_work::*;
-use crate::util::{Address, ObjectReference};
+use crate::util::ObjectReference;
+use crate::vm::edge_shape::Edge;
 use crate::vm::*;
 use crate::MMTK;
 use std::ops::{Deref, DerefMut};
@@ -18,7 +19,7 @@ impl<VM: VMBinding> ProcessEdgesWork for GenNurseryProcessEdges<VM> {
     type VM = VM;
     type ScanObjectsWorkType = ScanObjects<Self>;
 
-    fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
+    fn new(edges: Vec<EdgeOf<Self>>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
         let base = ProcessEdgesBase::new(edges, roots, mmtk);
         let gen = base.plan().generational();
         Self { gen, base }
@@ -34,11 +35,11 @@ impl<VM: VMBinding> ProcessEdgesWork for GenNurseryProcessEdges<VM> {
             .trace_object_nursery(&mut self.base.nodes, object, worker)
     }
     #[inline]
-    fn process_edge(&mut self, slot: Address) {
-        let object = unsafe { slot.load::<ObjectReference>() };
+    fn process_edge(&mut self, slot: EdgeOf<Self>) {
+        let object = slot.load();
         let new_object = self.trace_object(object);
         debug_assert!(!self.gen.nursery.in_space(new_object));
-        unsafe { slot.store(new_object) };
+        slot.store(new_object);
     }
 
     #[inline(always)]
diff --git a/src/plan/markcompact/gc_work.rs b/src/plan/markcompact/gc_work.rs
index d7f76df34e..1fad94f322 100644
--- a/src/plan/markcompact/gc_work.rs
+++ b/src/plan/markcompact/gc_work.rs
@@ -43,7 +43,7 @@ impl<VM: VMBinding> GCWork<VM> for UpdateReferences<VM> {
         VM::VMScanning::prepare_for_roots_re_scanning();
         mmtk.plan.base().prepare_for_stack_scanning();
         #[cfg(feature = "extreme_assertions")]
-        crate::util::edge_logger::reset();
+        mmtk.edge_logger.reset();
 
         // TODO investigate why the following will create duplicate edges
         // scheduler.work_buckets[WorkBucketStage::RefForwarding]
diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs
index ebfed8ebd4..ad859598fe 100644
--- a/src/plan/tracing.rs
+++ b/src/plan/tracing.rs
@@ -3,9 +3,9 @@
 
 use std::mem;
 
-use crate::scheduler::gc_work::ProcessEdgesWork;
+use crate::scheduler::gc_work::{EdgeOf, ProcessEdgesWork};
 use crate::scheduler::{GCWorker, WorkBucketStage};
-use crate::util::{Address, ObjectReference};
+use crate::util::ObjectReference;
 use crate::vm::EdgeVisitor;
 
 /// This trait represents an object queue to enqueue objects during tracing.
@@ -63,7 +63,7 @@ impl ObjectQueue for VectorObjectQueue {
 
 /// A transitive closure visitor to collect all the edges of an object.
 pub struct ObjectsClosure<'a, E: ProcessEdgesWork> {
-    buffer: Vec<Address>,
+    buffer: Vec<EdgeOf<E>>,
     worker: &'a mut GCWorker<E::VM>,
 }
 
@@ -85,9 +85,9 @@ impl<'a, E: ProcessEdgesWork> ObjectsClosure<'a, E> {
     }
 }
 
-impl<'a, E: ProcessEdgesWork> EdgeVisitor for ObjectsClosure<'a, E> {
+impl<'a, E: ProcessEdgesWork> EdgeVisitor<EdgeOf<E>> for ObjectsClosure<'a, E> {
     #[inline(always)]
-    fn visit_edge(&mut self, slot: Address) {
+    fn visit_edge(&mut self, slot: EdgeOf<E>) {
         if self.buffer.is_empty() {
             self.buffer.reserve(E::CAPACITY);
         }
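Note that `ObjectsClosure` is only one implementor of the new `EdgeVisitor<ES>`
trait; the trait also gets a blanket implementation for closures (see the
src/vm/scanning.rs hunk later in this patch), so edges can be consumed with a
plain `FnMut`. A minimal sketch under that assumption (`collect_edges` and its
inputs are hypothetical helpers, not part of the patch), using `Address` as the
edge type since this patch keeps `Address: Edge` for backward compatibility:

    use mmtk::util::Address;
    use mmtk::vm::EdgeVisitor;

    // Feed each edge in `slots` to any visitor, closure or struct alike.
    fn collect_edges<EV: EdgeVisitor<Address>>(slots: &[Address], visitor: &mut EV) {
        for slot in slots {
            visitor.visit_edge(*slot);
        }
    }

    fn example(slots: &[Address]) -> Vec<Address> {
        let mut buffer = Vec::new();
        // The closure satisfies EdgeVisitor<Address> via the blanket impl.
        collect_edges(slots, &mut |edge| buffer.push(edge));
        buffer
    }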
diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs
index cc5bfdfa94..5ae3c33f95 100644
--- a/src/scheduler/gc_work.rs
+++ b/src/scheduler/gc_work.rs
@@ -5,6 +5,7 @@
 use crate::plan::ObjectsClosure;
 use crate::plan::VectorObjectQueue;
 use crate::util::metadata::*;
 use crate::util::*;
+use crate::vm::edge_shape::Edge;
 use crate::vm::*;
 use crate::*;
 use std::marker::PhantomData;
@@ -227,7 +228,7 @@ impl<VM: VMBinding> GCWork<VM> for EndOfGC {
         #[cfg(feature = "extreme_assertions")]
         if crate::util::edge_logger::should_check_duplicate_edges(&*mmtk.plan) {
             // reset the logging info at the end of each GC
-            crate::util::edge_logger::reset();
+            mmtk.edge_logger.reset();
         }
 
         if <VM as VMBinding>::VMCollection::COORDINATOR_ONLY_STW {
@@ -331,7 +332,7 @@ impl<VM: VMBinding> GCWork<VM> for ScanVMSpecificRoots<VM> {
 }
 
 pub struct ProcessEdgesBase<VM: VMBinding> {
-    pub edges: Vec<Address>,
+    pub edges: Vec<VM::VMEdge>,
     pub nodes: VectorObjectQueue,
     mmtk: &'static MMTK<VM>,
     // Use raw pointer for fast pointer dereferencing, instead of using `Option<&'static mut GCWorker>`.
@@ -345,12 +346,12 @@ unsafe impl<VM: VMBinding> Send for ProcessEdgesBase<VM> {}
 impl<VM: VMBinding> ProcessEdgesBase<VM> {
     // Requires an MMTk reference. Each plan-specific type that uses ProcessEdgesBase can get a static plan reference
     // at creation. This avoids overhead for dynamic dispatch or downcasting plan for each object traced.
-    pub fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
+    pub fn new(edges: Vec<VM::VMEdge>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
         #[cfg(feature = "extreme_assertions")]
         if crate::util::edge_logger::should_check_duplicate_edges(&*mmtk.plan) {
             for edge in &edges {
                 // log edge, panic if already logged
-                crate::util::edge_logger::log_edge(*edge);
+                mmtk.edge_logger.log_edge(*edge);
             }
         }
         Self {
@@ -388,6 +389,9 @@ impl<VM: VMBinding> ProcessEdgesBase<VM> {
     }
 }
 
+/// A short-hand for `<E::VM as VMBinding>::VMEdge`.
+pub type EdgeOf<E> = <<E as ProcessEdgesWork>::VM as VMBinding>::VMEdge;
+
 /// Scan & update a list of object slots
 //
 // Note: be very careful when using this trait. process_node() will push objects
@@ -407,7 +411,8 @@ pub trait ProcessEdgesWork:
     const CAPACITY: usize = 4096;
     const OVERWRITE_REFERENCE: bool = true;
     const SCAN_OBJECTS_IMMEDIATELY: bool = true;
-    fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<Self::VM>) -> Self;
+
+    fn new(edges: Vec<EdgeOf<Self>>, roots: bool, mmtk: &'static MMTK<Self::VM>) -> Self;
 
     /// Trace an MMTk object. The implementation should forward this call to the policy-specific
     /// `trace_object()` methods, depending on which space this object is in.
@@ -463,11 +468,11 @@ pub trait ProcessEdgesWork:
     }
 
     #[inline]
-    fn process_edge(&mut self, slot: Address) {
-        let object = unsafe { slot.load::<ObjectReference>() };
+    fn process_edge(&mut self, slot: EdgeOf<Self>) {
+        let object = slot.load();
         let new_object = self.trace_object(object);
         if Self::OVERWRITE_REFERENCE {
-            unsafe { slot.store(new_object) };
+            slot.store(new_object);
         }
     }
 
@@ -511,7 +516,7 @@ impl<VM: VMBinding> ProcessEdgesWork for SFTProcessEdges<VM> {
     type VM = VM;
     type ScanObjectsWorkType = ScanObjects<Self>;
 
-    fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
+    fn new(edges: Vec<EdgeOf<Self>>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
         let base = ProcessEdgesBase::new(edges, roots, mmtk);
         Self { base }
     }
@@ -552,8 +557,8 @@ impl<E: ProcessEdgesWork> Clone for ProcessEdgesWorkRootsWorkFactory<E> {
     }
 }
 
-impl<E: ProcessEdgesWork> RootsWorkFactory for ProcessEdgesWorkRootsWorkFactory<E> {
-    fn create_process_edge_roots_work(&mut self, edges: Vec<Address>) {
+impl<E: ProcessEdgesWork> RootsWorkFactory<EdgeOf<E>> for ProcessEdgesWorkRootsWorkFactory<E> {
+    fn create_process_edge_roots_work(&mut self, edges: Vec<EdgeOf<E>>) {
         crate::memory_manager::add_work_packet(
             self.mmtk,
             WorkBucketStage::Closure,
@@ -827,7 +832,7 @@ impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> ProcessEdgesWork
     type VM = VM;
     type ScanObjectsWorkType = PlanScanObjects<Self, P>;
 
-    fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
+    fn new(edges: Vec<EdgeOf<Self>>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
         let base = ProcessEdgesBase::new(edges, roots, mmtk);
         let plan = base.plan().downcast_ref::<P>().unwrap();
         Self { plan, base }
@@ -854,11 +859,11 @@ impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> ProcessEdgesWork
     }
 
     #[inline]
-    fn process_edge(&mut self, slot: Address) {
-        let object = unsafe { slot.load::<ObjectReference>() };
+    fn process_edge(&mut self, slot: EdgeOf<Self>) {
+        let object = slot.load();
         let new_object = self.trace_object(object);
         if P::may_move_objects::<KIND>() {
-            unsafe { slot.store(new_object) };
+            slot.store(new_object);
         }
     }
 }
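The payoff of the `EdgeOf` alias is visible in `process_edge` above: the same
load/trace/store pattern now works for any edge shape, with no unsafe raw
memory access. A standalone sketch of the idiom (the `forward` parameter
stands in for `trace_object` and is not part of the patch):

    use mmtk::util::ObjectReference;
    use mmtk::vm::edge_shape::Edge;

    fn update_slot<ES: Edge>(slot: ES, forward: impl Fn(ObjectReference) -> ObjectReference) {
        let object = slot.load();         // decodes the VM-specific representation
        let new_object = forward(object); // e.g. moves the object, returns its new location
        slot.store(new_object);           // re-encodes compressed bits, tags, or offsets
    }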
diff --git a/src/util/edge_logger.rs b/src/util/edge_logger.rs
index 86062936b3..7c4bb08e9a 100644
--- a/src/util/edge_logger.rs
+++ b/src/util/edge_logger.rs
@@ -5,14 +5,49 @@
 //!
 use crate::plan::Plan;
-use crate::util::Address;
+use crate::vm::edge_shape::Edge;
 use crate::vm::VMBinding;
 use std::collections::HashSet;
 use std::sync::RwLock;
 
-lazy_static! {
+pub struct EdgeLogger<ES: Edge> {
     // A private hash-set to keep track of edges.
-    static ref EDGE_LOG: RwLock<HashSet<Address>> = RwLock::new(HashSet::new());
+    edge_log: RwLock<HashSet<ES>>,
+}
+
+unsafe impl<ES: Edge> Sync for EdgeLogger<ES> {}
+
+impl<ES: Edge> EdgeLogger<ES> {
+    pub fn new() -> Self {
+        Self {
+            edge_log: Default::default(),
+        }
+    }
+
+    /// Logs an edge.
+    /// Panics if the edge was already logged.
+    ///
+    /// # Arguments
+    ///
+    /// * `edge` - The edge to log.
+    ///
+    pub fn log_edge(&self, edge: ES) {
+        trace!("log_edge({:?})", edge);
+        let mut edge_log = self.edge_log.write().unwrap();
+        assert!(
+            edge_log.insert(edge),
+            "duplicate edge ({:?}) detected",
+            edge
+        );
+    }
+
+    /// Reset the edge logger by clearing the hash-set of edges.
+    /// This function is called at the end of each GC iteration.
+    ///
+    pub fn reset(&self) {
+        let mut edge_log = self.edge_log.write().unwrap();
+        edge_log.clear();
+    }
 }
 
 /// Whether we should check duplicate edges. This depends on the actual plan.
@@ -20,24 +55,3 @@
 pub fn should_check_duplicate_edges<VM: VMBinding>(plan: &dyn Plan<VM = VM>) -> bool {
     // If a plan allows tracing duplicate edges, we should not run this check.
     !plan.constraints().may_trace_duplicate_edges
 }
-
-/// Logs an edge.
-/// Panics if the edge was already logged.
-///
-/// # Arguments
-///
-/// * `edge` - The edge to log.
-///
-pub fn log_edge(edge: Address) {
-    trace!("log_edge({})", edge);
-    let mut edge_log = EDGE_LOG.write().unwrap();
-    assert!(edge_log.insert(edge), "duplicate edge ({}) detected", edge);
-}
-
-/// Reset the edge logger by clearing the hash-set of edges.
-/// This function is called at the end of each GC iteration.
-///
-pub fn reset() {
-    let mut edge_log = EDGE_LOG.write().unwrap();
-    edge_log.clear();
-}
diff --git a/src/util/sanity/sanity_checker.rs b/src/util/sanity/sanity_checker.rs
index 78093dd3cb..7afbdcf98b 100644
--- a/src/util/sanity/sanity_checker.rs
+++ b/src/util/sanity/sanity_checker.rs
@@ -1,6 +1,7 @@
 use crate::plan::Plan;
 use crate::scheduler::gc_work::*;
-use crate::util::{Address, ObjectReference};
+use crate::util::ObjectReference;
+use crate::vm::edge_shape::Edge;
 use crate::vm::*;
 use crate::MMTK;
 use crate::{scheduler::*, ObjectQueue};
@@ -9,20 +10,20 @@
 use std::ops::{Deref, DerefMut};
 use std::sync::atomic::Ordering;
 
 #[allow(dead_code)]
-pub struct SanityChecker {
+pub struct SanityChecker<ES: Edge> {
     /// Visited objects
     refs: HashSet<ObjectReference>,
     /// Cached root edges for sanity root scanning
-    roots: Vec<Vec<Address>>,
+    roots: Vec<Vec<ES>>,
 }
 
-impl Default for SanityChecker {
+impl<ES: Edge> Default for SanityChecker<ES> {
     fn default() -> Self {
         Self::new()
     }
 }
 
-impl SanityChecker {
+impl<ES: Edge> SanityChecker<ES> {
     pub fn new() -> Self {
         Self {
             refs: HashSet::new(),
@@ -31,7 +32,7 @@ impl<ES: Edge> SanityChecker<ES> {
     }
 
     /// Cache a list of root edges to the sanity checker.
-    pub fn add_roots(&mut self, roots: Vec<Address>) {
+    pub fn add_roots(&mut self, roots: Vec<ES>) {
         self.roots.push(roots)
     }
 
@@ -164,7 +165,7 @@ impl<VM: VMBinding> ProcessEdgesWork for SanityGCProcessEdges<VM> {
     type ScanObjectsWorkType = ScanObjects<Self>;
 
     const OVERWRITE_REFERENCE: bool = false;
-    fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
+    fn new(edges: Vec<EdgeOf<Self>>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
         Self {
             base: ProcessEdgesBase::new(edges, roots, mmtk),
             // ..Default::default()
diff --git a/src/vm/edge_shape.rs b/src/vm/edge_shape.rs
new file mode 100644
index 0000000000..d931475b1d
--- /dev/null
+++ b/src/vm/edge_shape.rs
@@ -0,0 +1,115 @@
+use std::fmt::Debug;
+use std::hash::Hash;
+
+use atomic::Atomic;
+
+use crate::util::{Address, ObjectReference};
+
+/// An abstract edge. An edge holds an object reference. When we load from it, we get an
+/// ObjectReference; we can also store an ObjectReference into it.
+///
+/// This intends to abstract out the differences of reference field representation among different
+/// VMs. If the VM represents a reference field as a word that holds the pointer to the object, it
+/// can use the default `SimpleEdge` we provide. In some cases, the VM needs to implement its own
+/// `Edge` instances.
+///
+/// For example:
+/// - The VM uses compressed pointers (Compressed OOPs in OpenJDK's terminology), where the heap
+///   size is limited, and a 64-bit pointer is stored in a 32-bit slot.
+/// - The VM uses tagged pointers, where some bits of a word are used as metadata while the rest
+///   are used as a pointer.
+/// - A field holds a pointer to the middle of an object (an object field, or an array element,
+///   or some arbitrary offset) for some reason.
+///
+/// When loading, `Edge::load` shall decode its internal representation to a "regular"
+/// `ObjectReference` which is applicable to `ObjectModel::object_start_ref`. The implementation
+/// can do this with any appropriate operations, usually shifting and masking bits or subtracting
+/// an offset from the address. By doing this conversion, MMTk can implement GC algorithms in a
+/// VM-neutral way, knowing only `ObjectReference`.
+///
+/// When GC moves an object, `Edge::store` shall convert the updated `ObjectReference` back to the
+/// edge-specific representation. Compressed pointers remain compressed; tagged pointers preserve
+/// their tag bits; and offsetted pointers keep their offsets.
+///
+/// The methods of this trait are called on hot paths. Please ensure they have high performance.
+/// Use inlining when appropriate.
+///
+/// Note: this trait only concerns the representation (i.e. the shape) of the edge, not its
+/// semantics, such as whether it holds strong or weak references. If a VM holds a weak reference
+/// in a word as a pointer, it can also use `SimpleEdge` for weak reference fields.
+pub trait Edge: Copy + Send + Debug + PartialEq + Eq + Hash {
+    /// Load object reference from the edge.
+    fn load(&self) -> ObjectReference;
+
+    /// Store the object reference `object` into the edge.
+    fn store(&self, object: ObjectReference);
+
+    /// Prefetch the edge so that a subsequent `load` will be faster.
+    #[inline(always)]
+    fn prefetch_load(&self) {
+        // no-op by default
+    }
+
+    /// Prefetch the edge so that a subsequent `store` will be faster.
+    #[inline(always)]
+    fn prefetch_store(&self) {
+        // no-op by default
+    }
+}
+
+/// A simple edge implementation that represents a word-sized slot where an ObjectReference value
+/// is stored as is. It is the default edge type, and should be suitable for most VMs.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub struct SimpleEdge {
+    slot_addr: *mut Atomic<ObjectReference>,
+}
+
+impl SimpleEdge {
+    /// Create a simple edge from an address.
+    ///
+    /// Arguments:
+    /// * `address`: The address in memory where an `ObjectReference` is stored.
+    #[inline(always)]
+    pub fn from_address(address: Address) -> Self {
+        Self {
+            slot_addr: address.to_mut_ptr(),
+        }
+    }
+
+    /// Get the address of the edge.
+    ///
+    /// Return the address at which the `ObjectReference` is stored.
+    #[inline(always)]
+    pub fn as_address(&self) -> Address {
+        Address::from_mut_ptr(self.slot_addr)
+    }
+}
+
+unsafe impl Send for SimpleEdge {}
+
+impl Edge for SimpleEdge {
+    #[inline(always)]
+    fn load(&self) -> ObjectReference {
+        unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }
+    }
+
+    #[inline(always)]
+    fn store(&self, object: ObjectReference) {
+        unsafe { (*self.slot_addr).store(object, atomic::Ordering::Relaxed) }
+    }
+}
+
+/// For backward compatibility.
+/// We let Address implement Edge so existing bindings that use `Address` to represent an edge can
+/// continue to work.
+impl Edge for Address {
+    #[inline(always)]
+    fn load(&self) -> ObjectReference {
+        unsafe { Address::load(*self) }
+    }
+
+    #[inline(always)]
+    fn store(&self, object: ObjectReference) {
+        unsafe { Address::store(*self, object) }
+    }
+}
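For a plain word-sized reference field, wrapping the field's address in a
`SimpleEdge` is all a binding has to do. A small usage sketch (the `read_field`
helper and its slot parameter are illustrative):

    use atomic::Atomic;
    use mmtk::util::{Address, ObjectReference};
    use mmtk::vm::edge_shape::{Edge, SimpleEdge};

    fn read_field(slot: &Atomic<ObjectReference>) -> ObjectReference {
        // The edge wraps the slot's address; no copy of the field is made,
        // so a later store through the edge updates the object in place.
        let edge = SimpleEdge::from_address(Address::from_ref(slot));
        edge.load()
    }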
diff --git a/src/vm/mod.rs b/src/vm/mod.rs
index 8b4d5935ca..5b9763e92c 100644
--- a/src/vm/mod.rs
+++ b/src/vm/mod.rs
@@ -19,6 +19,7 @@
 use crate::util::constants::*;
 
 mod active_plan;
 mod collection;
+pub mod edge_shape;
 mod object_model;
 mod reference_glue;
 mod scanning;
@@ -45,6 +46,9 @@ where
     type VMActivePlan: ActivePlan<Self>;
     type VMReferenceGlue: ReferenceGlue<Self>;
 
+    /// The type of edges in this VM.
+    type VMEdge: edge_shape::Edge;
+
     /// A value to fill in alignment gaps. This value can be used for debugging.
     const ALIGNMENT_VALUE: usize = 0xdead_beef;
     /// Allowed minimal alignment.
diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs
index 20431e0d81..4c55db608e 100644
--- a/src/vm/scanning.rs
+++ b/src/vm/scanning.rs
@@ -1,17 +1,18 @@
 use crate::plan::Mutator;
+use crate::util::ObjectReference;
 use crate::util::VMWorkerThread;
-use crate::util::{Address, ObjectReference};
+use crate::vm::edge_shape::Edge;
 use crate::vm::VMBinding;
 
 /// Callback trait of scanning functions that report edges.
-pub trait EdgeVisitor {
+pub trait EdgeVisitor<ES: Edge> {
     /// Call this function for each edge.
-    fn visit_edge(&mut self, edge: Address);
+    fn visit_edge(&mut self, edge: ES);
 }
 
 /// This lets us use closures as EdgeVisitor.
-impl<F: FnMut(Address)> EdgeVisitor for F {
-    fn visit_edge(&mut self, edge: Address) {
+impl<ES: Edge, F: FnMut(ES)> EdgeVisitor<ES> for F {
+    fn visit_edge(&mut self, edge: ES) {
         self(edge)
     }
 }
@@ -47,14 +48,14 @@ impl<F: FnMut(ObjectReference) -> ObjectReference> ObjectTracer for F {
 /// it needs `Send` to be sent between threads. `'static` means it must not have
 /// references to variables with limited lifetime (such as local variables), because
 /// it needs to be moved between threads.
-pub trait RootsWorkFactory: Clone + Send + 'static {
+pub trait RootsWorkFactory<ES: Edge>: Clone + Send + 'static {
     /// Create work packets to handle root edges.
     ///
     /// The work packet may update the edges.
     ///
     /// Arguments:
     /// * `edges`: A vector of edges.
-    fn create_process_edge_roots_work(&mut self, edges: Vec<Address>);
+    fn create_process_edge_roots_work(&mut self, edges: Vec<ES>);
 
     /// Create work packets to handle nodes pointed by root edges.
     ///
@@ -109,7 +110,7 @@ pub trait Scanning<VM: VMBinding> {
     /// * `tls`: The VM-specific thread-local storage for the current worker.
     /// * `object`: The object to be scanned.
     /// * `edge_visitor`: Called back for each edge.
-    fn scan_object<EV: EdgeVisitor>(
+    fn scan_object<EV: EdgeVisitor<VM::VMEdge>>(
         tls: VMWorkerThread,
         object: ObjectReference,
         edge_visitor: &mut EV,
@@ -152,7 +153,7 @@ pub trait Scanning<VM: VMBinding> {
     /// Arguments:
     /// * `tls`: The GC thread that is performing this scanning.
     /// * `factory`: The VM uses it to create work packets for scanning roots.
-    fn scan_thread_roots(tls: VMWorkerThread, factory: impl RootsWorkFactory);
+    fn scan_thread_roots(tls: VMWorkerThread, factory: impl RootsWorkFactory<VM::VMEdge>);
 
     /// Scan one mutator for roots.
     ///
@@ -163,7 +164,7 @@ pub trait Scanning<VM: VMBinding> {
     fn scan_thread_root(
         tls: VMWorkerThread,
         mutator: &'static mut Mutator<VM>,
-        factory: impl RootsWorkFactory,
+        factory: impl RootsWorkFactory<VM::VMEdge>,
     );
 
     /// Scan VM-specific roots. The creation of all root scan tasks (except thread scanning)
@@ -172,7 +173,7 @@ pub trait Scanning<VM: VMBinding> {
     /// Arguments:
     /// * `tls`: The GC thread that is performing this scanning.
     /// * `factory`: The VM uses it to create work packets for scanning roots.
-    fn scan_vm_specific_roots(tls: VMWorkerThread, factory: impl RootsWorkFactory);
+    fn scan_vm_specific_roots(tls: VMWorkerThread, factory: impl RootsWorkFactory<VM::VMEdge>);
 
     /// Return whether the VM supports return barriers. This is unused at the moment.
     fn supports_return_barrier() -> bool;
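Under these signatures, a binding's `scan_object` reports VM-specific edges
rather than raw addresses. A sketch of the shape of an implementation,
assuming a VM whose objects are simply arrays of word-sized reference slots
(the object layout here is an assumption for illustration, not real mmtk API):

    use mmtk::util::{Address, ObjectReference};
    use mmtk::vm::edge_shape::SimpleEdge;
    use mmtk::vm::EdgeVisitor;

    fn scan_object_sketch<EV: EdgeVisitor<SimpleEdge>>(
        object: ObjectReference,
        num_slots: usize,
        edge_visitor: &mut EV,
    ) {
        let first_slot: Address = object.to_address();
        for i in 0..num_slots {
            // Report each reference field as an edge; the core decides
            // when and how to load from and store to it.
            let slot = first_slot + i * std::mem::size_of::<usize>();
            edge_visitor.visit_edge(SimpleEdge::from_address(slot));
        }
    }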
diff --git a/tests/test_roots_work_factory.rs b/tests/test_roots_work_factory.rs
index 1d2eabf0cc..15a95ca227 100644
--- a/tests/test_roots_work_factory.rs
+++ b/tests/test_roots_work_factory.rs
@@ -20,7 +20,7 @@ impl MockScanning {
         self.roots.extend(roots);
     }
 
-    fn mock_scan_roots(&self, mut factory: impl mmtk::vm::RootsWorkFactory) {
+    fn mock_scan_roots(&self, mut factory: impl mmtk::vm::RootsWorkFactory<Address>) {
         factory.create_process_edge_roots_work(self.roots.clone());
     }
 }
@@ -41,7 +41,7 @@ struct MockFactory {
     a: Arc<Mutex<Vec<String>>>,
 }
 
-impl RootsWorkFactory for MockFactory {
+impl RootsWorkFactory<Address> for MockFactory {
     fn create_process_edge_roots_work(&mut self, edges: Vec<Address>) {
         assert_eq!(edges, EDGES);
         match self.round {
diff --git a/vmbindings/dummyvm/Cargo.toml b/vmbindings/dummyvm/Cargo.toml
index 963420d014..00f4541948 100644
--- a/vmbindings/dummyvm/Cargo.toml
+++ b/vmbindings/dummyvm/Cargo.toml
@@ -17,6 +17,7 @@ mmtk = { path = "../../", version = "*" }
 libc = "0.2"
 lazy_static = "1.1"
 atomic_refcell = "0.1.7"
+atomic = "0.4.6"
 
 [features]
 default = []
diff --git a/vmbindings/dummyvm/src/edges.rs b/vmbindings/dummyvm/src/edges.rs
new file mode 100644
index 0000000000..31bd2b815e
--- /dev/null
+++ b/vmbindings/dummyvm/src/edges.rs
@@ -0,0 +1,157 @@
+use atomic::Atomic;
+use mmtk::{
+    util::{Address, ObjectReference},
+    vm::edge_shape::{Edge, SimpleEdge},
+};
+
+/// If a VM supports multiple kinds of edges, we can use a tagged union to represent all of them.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub enum DummyVMEdge {
+    Simple(SimpleEdge),
+    #[cfg(target_pointer_width = "64")]
+    Compressed(only_64_bit::CompressedOopEdge),
+    Offset(OffsetEdge),
+    Value(ValueEdge),
+}
+
+unsafe impl Send for DummyVMEdge {}
+
+impl Edge for DummyVMEdge {
+    fn load(&self) -> ObjectReference {
+        match self {
+            DummyVMEdge::Simple(e) => e.load(),
+            #[cfg(target_pointer_width = "64")]
+            DummyVMEdge::Compressed(e) => e.load(),
+            DummyVMEdge::Offset(e) => e.load(),
+            DummyVMEdge::Value(e) => e.load(),
+        }
+    }
+
+    fn store(&self, object: ObjectReference) {
+        match self {
+            DummyVMEdge::Simple(e) => e.store(object),
+            #[cfg(target_pointer_width = "64")]
+            DummyVMEdge::Compressed(e) => e.store(object),
+            DummyVMEdge::Offset(e) => e.store(object),
+            DummyVMEdge::Value(e) => e.store(object),
+        }
+    }
+}
+
+/// Compressed OOP edges only make sense on 64-bit architectures.
+#[cfg(target_pointer_width = "64")]
+pub mod only_64_bit {
+    use super::*;
+
+    /// This represents a location that holds a 32-bit pointer on a 64-bit machine.
+    ///
+    /// OpenJDK uses this kind of edge to store compressed OOPs on 64-bit machines.
+    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+    pub struct CompressedOopEdge {
+        slot_addr: *mut Atomic<u32>,
+    }
+
+    unsafe impl Send for CompressedOopEdge {}
+
+    impl CompressedOopEdge {
+        pub fn from_address(address: Address) -> Self {
+            Self {
+                slot_addr: address.to_mut_ptr(),
+            }
+        }
+        pub fn as_address(&self) -> Address {
+            Address::from_mut_ptr(self.slot_addr)
+        }
+    }
+
+    impl Edge for CompressedOopEdge {
+        fn load(&self) -> ObjectReference {
+            let compressed = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) };
+            let expanded = (compressed as usize) << 3;
+            unsafe { Address::from_usize(expanded).to_object_reference() }
+        }
+
+        fn store(&self, object: ObjectReference) {
+            let expanded = object.to_address().as_usize();
+            let compressed = (expanded >> 3) as u32;
+            unsafe { (*self.slot_addr).store(compressed, atomic::Ordering::Relaxed) }
+        }
+    }
+}
+
+/// This represents an edge that holds a pointer to the *middle* of an object, and the offset is known.
+///
+/// Julia uses this trick to facilitate deleting array elements from the front.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub struct OffsetEdge {
+    slot_addr: *mut Atomic<Address>,
+    offset: usize,
+}
+
+unsafe impl Send for OffsetEdge {}
+
+impl OffsetEdge {
+    pub fn new_no_offset(address: Address) -> Self {
+        Self {
+            slot_addr: address.to_mut_ptr(),
+            offset: 0,
+        }
+    }
+
+    pub fn new_with_offset(address: Address, offset: usize) -> Self {
+        Self {
+            slot_addr: address.to_mut_ptr(),
+            offset,
+        }
+    }
+
+    pub fn slot_address(&self) -> Address {
+        Address::from_mut_ptr(self.slot_addr)
+    }
+
+    pub fn offset(&self) -> usize {
+        self.offset
+    }
+}
+
+impl Edge for OffsetEdge {
+    fn load(&self) -> ObjectReference {
+        let middle = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) };
+        let begin = middle - self.offset;
+        unsafe { begin.to_object_reference() }
+    }
+
+    fn store(&self, object: ObjectReference) {
+        let begin = object.to_address();
+        let middle = begin + self.offset;
+        unsafe { (*self.slot_addr).store(middle, atomic::Ordering::Relaxed) }
+    }
+}
+
+/// This edge presents the object reference itself to mmtk-core.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub struct ValueEdge {
+    value: ObjectReference,
+}
+
+unsafe impl Send for ValueEdge {}
+
+impl ValueEdge {
+    pub fn new(value: ObjectReference) -> Self {
+        Self { value }
+    }
+
+    pub fn value(&self) -> ObjectReference {
+        self.value
+    }
+}
+
+impl Edge for ValueEdge {
+    fn load(&self) -> ObjectReference {
+        self.value
+    }
+
+    fn store(&self, _object: ObjectReference) {
+        // No-op. Value edges are immutable.
+    }
+}
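The `CompressedOopEdge` arithmetic above works because objects are at least
8-byte aligned: the low three bits of an object address are always zero, so a
35-bit address range fits into a 32-bit slot. A worked round trip, using one
of the test constants that appear later in this patch:

    // A 35-bit, 8-byte-aligned address (lowest three bits are zero):
    let expanded: usize = 0b101_10111011_11011111_01111110_11111000;
    let compressed = (expanded >> 3) as u32;          // what the 32-bit slot stores
    assert_eq!((compressed as usize) << 3, expanded); // load recovers it exactly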
diff --git a/vmbindings/dummyvm/src/lib.rs b/vmbindings/dummyvm/src/lib.rs
index fb68fcd16e..da58262e82 100644
--- a/vmbindings/dummyvm/src/lib.rs
+++ b/vmbindings/dummyvm/src/lib.rs
@@ -16,6 +16,7 @@ pub mod api;
 
 #[cfg(test)]
 mod tests;
+mod edges;
 
 #[derive(Default)]
 pub struct DummyVM;
@@ -26,6 +27,7 @@ impl VMBinding for DummyVM {
     type VMCollection = collection::VMCollection;
     type VMActivePlan = active_plan::VMActivePlan;
     type VMReferenceGlue = reference_glue::VMReferenceGlue;
+    type VMEdge = edges::DummyVMEdge;
 
     /// Allowed maximum alignment as shift by min alignment.
     const MAX_ALIGNMENT_SHIFT: usize = 6_usize - Self::LOG_MIN_ALIGNMENT as usize;
diff --git a/vmbindings/dummyvm/src/scanning.rs b/vmbindings/dummyvm/src/scanning.rs
index 31697a5de3..c6e46f14e5 100644
--- a/vmbindings/dummyvm/src/scanning.rs
+++ b/vmbindings/dummyvm/src/scanning.rs
@@ -1,4 +1,5 @@
 use crate::DummyVM;
+use crate::edges::DummyVMEdge;
 use mmtk::util::opaque_pointer::*;
 use mmtk::util::ObjectReference;
 use mmtk::vm::EdgeVisitor;
@@ -9,20 +10,20 @@ use mmtk::Mutator;
 pub struct VMScanning {}
 
 impl Scanning<DummyVM> for VMScanning {
-    fn scan_thread_roots(_tls: VMWorkerThread, _factory: impl RootsWorkFactory) {
+    fn scan_thread_roots(_tls: VMWorkerThread, _factory: impl RootsWorkFactory<DummyVMEdge>) {
         unimplemented!()
     }
     fn scan_thread_root(
         _tls: VMWorkerThread,
         _mutator: &'static mut Mutator<DummyVM>,
-        _factory: impl RootsWorkFactory,
+        _factory: impl RootsWorkFactory<DummyVMEdge>,
     ) {
         unimplemented!()
     }
-    fn scan_vm_specific_roots(_tls: VMWorkerThread, _factory: impl RootsWorkFactory) {
+    fn scan_vm_specific_roots(_tls: VMWorkerThread, _factory: impl RootsWorkFactory<DummyVMEdge>) {
         unimplemented!()
     }
-    fn scan_object<EV: EdgeVisitor>(
+    fn scan_object<EV: EdgeVisitor<DummyVMEdge>>(
         _tls: VMWorkerThread,
         _object: ObjectReference,
         _edge_visitor: &mut EV,
diff --git a/vmbindings/dummyvm/src/tests/edges_test.rs b/vmbindings/dummyvm/src/tests/edges_test.rs
new file mode 100644
index 0000000000..ed66c6c387
--- /dev/null
+++ b/vmbindings/dummyvm/src/tests/edges_test.rs
@@ -0,0 +1,169 @@
+// GITHUB-CI: MMTK_PLAN=NoGC
+
+use atomic::{Atomic, Ordering};
+use mmtk::{
+    util::{Address, ObjectReference},
+    vm::edge_shape::{Edge, SimpleEdge},
+};
+
+use crate::{
+    edges::{DummyVMEdge, OffsetEdge, ValueEdge},
+    tests::fixtures::{Fixture, TwoObjects},
+};
+
+#[cfg(target_pointer_width = "64")]
+use crate::edges::only_64_bit::CompressedOopEdge;
+
+lazy_static! {
+    static ref FIXTURE: Fixture<TwoObjects> = Fixture::new();
+}
+
+#[test]
+pub fn load_simple() {
+    FIXTURE.with_fixture(|fixture| {
+        let mut slot: Atomic<ObjectReference> = Atomic::new(fixture.objref1);
+
+        let edge = SimpleEdge::from_address(Address::from_ref(&mut slot));
+        let objref = edge.load();
+
+        assert_eq!(objref, fixture.objref1);
+    });
+}
+
+#[test]
+pub fn store_simple() {
+    FIXTURE.with_fixture(|fixture| {
+        let mut slot: Atomic<ObjectReference> = Atomic::new(fixture.objref1);
+
+        let edge = SimpleEdge::from_address(Address::from_ref(&mut slot));
+        edge.store(fixture.objref2);
+        assert_eq!(slot.load(Ordering::SeqCst), fixture.objref2);
+
+        let objref = edge.load();
+        assert_eq!(objref, fixture.objref2);
+    });
+}
+
+#[cfg(target_pointer_width = "64")]
+mod only_64_bit {
+    use super::*;
+
+    // Two 35-bit addresses aligned to 8 bytes (3 zeros in the lowest bits).
+    const COMPRESSABLE_ADDR1: usize = 0b101_10111011_11011111_01111110_11111000usize;
+    const COMPRESSABLE_ADDR2: usize = 0b110_11110111_01101010_11011101_11101000usize;
+
+    #[test]
+    pub fn load_compressed() {
+        // Note: We cannot guarantee GC will allocate an object in the low address region.
+        // So we make up addresses just for testing the bit operations of compressed OOP edges.
+        let compressed1 = (COMPRESSABLE_ADDR1 >> 3) as u32;
+        let objref1 = unsafe { Address::from_usize(COMPRESSABLE_ADDR1).to_object_reference() };
+
+        let mut slot: Atomic<u32> = Atomic::new(compressed1);
+
+        let edge = CompressedOopEdge::from_address(Address::from_ref(&mut slot));
+        let objref = edge.load();
+
+        assert_eq!(objref, objref1);
+    }
+
+    #[test]
+    pub fn store_compressed() {
+        // Note: We cannot guarantee GC will allocate an object in the low address region.
+        // So we make up addresses just for testing the bit operations of compressed OOP edges.
+        let compressed1 = (COMPRESSABLE_ADDR1 >> 3) as u32;
+        let compressed2 = (COMPRESSABLE_ADDR2 >> 3) as u32;
+        let objref2 = unsafe { Address::from_usize(COMPRESSABLE_ADDR2).to_object_reference() };
+
+        let mut slot: Atomic<u32> = Atomic::new(compressed1);
+
+        let edge = CompressedOopEdge::from_address(Address::from_ref(&mut slot));
+        edge.store(objref2);
+        assert_eq!(slot.load(Ordering::SeqCst), compressed2);
+
+        let objref = edge.load();
+        assert_eq!(objref, objref2);
+    }
+}
+
+#[test]
+pub fn load_offset() {
+    const OFFSET: usize = 48;
+    FIXTURE.with_fixture(|fixture| {
+        let addr1 = fixture.objref1.to_address();
+        let mut slot: Atomic<Address> = Atomic::new(addr1 + OFFSET);
+
+        let edge = OffsetEdge::new_with_offset(Address::from_ref(&mut slot), OFFSET);
+        let objref = edge.load();
+
+        assert_eq!(objref, fixture.objref1);
+    });
+}
+
+#[test]
+pub fn store_offset() {
+    const OFFSET: usize = 48;
+    FIXTURE.with_fixture(|fixture| {
+        let addr1 = fixture.objref1.to_address();
+        let addr2 = fixture.objref2.to_address();
+        let mut slot: Atomic<Address> = Atomic::new(addr1 + OFFSET);
+
+        let edge = OffsetEdge::new_with_offset(Address::from_ref(&mut slot), OFFSET);
+        edge.store(fixture.objref2);
+        assert_eq!(slot.load(Ordering::SeqCst), addr2 + OFFSET);
+
+        let objref = edge.load();
+        assert_eq!(objref, fixture.objref2);
+    });
+}
+
+#[test]
+pub fn load_value() {
+    FIXTURE.with_fixture(|fixture| {
+        let edge = ValueEdge::new(fixture.objref1);
+        let objref = edge.load();
+
+        assert_eq!(objref, fixture.objref1);
+    });
+}
+
+#[test]
+pub fn mixed() {
+    const OFFSET: usize = 48;
+
+    FIXTURE.with_fixture(|fixture| {
+        let addr1 = fixture.objref1.to_address();
+        let addr2 = fixture.objref2.to_address();
+
+        let mut slot1: Atomic<ObjectReference> = Atomic::new(fixture.objref1);
+        let mut slot3: Atomic<Address> = Atomic::new(addr1 + OFFSET);
+
+        let edge1 = SimpleEdge::from_address(Address::from_ref(&mut slot1));
+        let edge3 = OffsetEdge::new_with_offset(Address::from_ref(&mut slot3), OFFSET);
+        let edge4 = ValueEdge::new(fixture.objref1);
+
+        let de1 = DummyVMEdge::Simple(edge1);
+        let de3 = DummyVMEdge::Offset(edge3);
+        let de4 = DummyVMEdge::Value(edge4);
+
+        let edges = vec![de1, de3, de4];
+        for (i, edge) in edges.iter().enumerate() {
+            let objref = edge.load();
+            assert_eq!(objref, fixture.objref1, "Edge {} is not properly loaded", i);
+        }
+
+        let mutable_edges = vec![de1, de3];
+        for (i, edge) in mutable_edges.iter().enumerate() {
+            edge.store(fixture.objref2);
+            let objref = edge.load();
+            assert_eq!(
+                objref, fixture.objref2,
+                "Edge {} is not properly loaded after store",
+                i
+            );
+        }
+
+        assert_eq!(slot1.load(Ordering::SeqCst), fixture.objref2);
+        assert_eq!(slot3.load(Ordering::SeqCst), addr2 + OFFSET);
+    });
+}
diff --git a/vmbindings/dummyvm/src/tests/fixtures/mod.rs b/vmbindings/dummyvm/src/tests/fixtures/mod.rs
index a433f231b7..4d70d3b242 100644
--- a/vmbindings/dummyvm/src/tests/fixtures/mod.rs
+++ b/vmbindings/dummyvm/src/tests/fixtures/mod.rs
@@ -109,3 +109,37 @@ impl FixtureContent for MMTKSingleton {
         }
     }
 }
+
+pub struct TwoObjects {
+    pub objref1: ObjectReference,
+    pub objref2: ObjectReference,
+}
+
+impl FixtureContent for TwoObjects {
+    fn create() -> Self {
+        const MB: usize = 1024 * 1024;
+        // 1MB heap
+        mmtk_init(MB);
+        mmtk_initialize_collection(VMThread::UNINITIALIZED);
+        // Make sure GC does not run during test.
+        mmtk_disable_collection();
+        let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));
+
+        let size = 128;
+        let semantics = AllocationSemantics::Default;
+
+        let addr = mmtk_alloc(handle, size, 8, 0, semantics);
+        assert!(!addr.is_zero());
+
+        let objref1 = unsafe { addr.add(OBJECT_REF_OFFSET).to_object_reference() };
+        mmtk_post_alloc(handle, objref1, size, semantics);
+
+        // Allocate a second object so that objref2 is distinct from objref1.
+        let addr2 = mmtk_alloc(handle, size, 8, 0, semantics);
+        assert!(!addr2.is_zero());
+
+        let objref2 = unsafe { addr2.add(OBJECT_REF_OFFSET).to_object_reference() };
+        mmtk_post_alloc(handle, objref2, size, semantics);
+
+        TwoObjects { objref1, objref2 }
+    }
+}
diff --git a/vmbindings/dummyvm/src/tests/mod.rs b/vmbindings/dummyvm/src/tests/mod.rs
index 095ff11dc2..353156d856 100644
--- a/vmbindings/dummyvm/src/tests/mod.rs
+++ b/vmbindings/dummyvm/src/tests/mod.rs
@@ -20,3 +20,4 @@ mod malloc_ms;
 mod conservatism;
 mod is_in_mmtk_spaces;
 mod fixtures;
+mod edges_test;

From 55ef8564553f1bce6804f619799933e6f9b730f1 Mon Sep 17 00:00:00 2001
From: Kunshan Wang
Date: Fri, 26 Aug 2022 14:08:34 +0800
Subject: [PATCH 2/4] Comments and representation.

More comments on why bindings should use `SimpleEdge` instead of
`Address`.

Add `#[repr(transparent)]` to `SimpleEdge`.
---
 src/vm/edge_shape.rs | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/src/vm/edge_shape.rs b/src/vm/edge_shape.rs
index d931475b1d..a68743285a 100644
--- a/src/vm/edge_shape.rs
+++ b/src/vm/edge_shape.rs
@@ -60,6 +60,7 @@ pub trait Edge: Copy + Send + Debug + PartialEq + Eq + Hash {
 /// A simple edge implementation that represents a word-sized slot where an ObjectReference value
 /// is stored as is. It is the default edge type, and should be suitable for most VMs.
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+#[repr(transparent)]
 pub struct SimpleEdge {
     slot_addr: *mut Atomic<ObjectReference>,
 }
@@ -99,9 +100,16 @@ impl Edge for SimpleEdge {
     }
 }
 
-/// For backward compatibility.
-/// We let Address implement Edge so existing bindings that use `Address` to represent an edge can
-/// continue to work.
+/// For backward compatibility, we let `Address` implement `Edge` so that existing bindings that
+/// use `Address` to represent an edge can continue to work.
+///
+/// However, we should use `SimpleEdge` directly instead of using `Address`. The purpose of the
+/// `Address` type is to represent an address in memory. It is not directly related to fields
+/// that hold references to other objects. Calling `load()` and `store()` on an `Address` does
+/// not indicate how many bytes to load or store, or how to interpret those bytes. On the other
+/// hand, `SimpleEdge` is all about how to access a field that holds a reference represented
+/// simply as an `ObjectReference`. The intention and the semantics are clearer with
+/// `SimpleEdge`.
 impl Edge for Address {
     #[inline(always)]
     fn load(&self) -> ObjectReference {
@@ -113,3 +121,11 @@ impl Edge for Address {
         unsafe { Address::store(*self, object) }
     }
 }
+
+#[test]
+fn a_simple_edge_should_have_the_same_size_as_a_pointer() {
+    assert_eq!(
+        std::mem::size_of::<SimpleEdge>(),
+        std::mem::size_of::<*mut libc::c_void>()
+    );
+}

From 58ac993d1ec768fa7d3d20451823ea9b048526a6 Mon Sep 17 00:00:00 2001
From: Kunshan Wang
Date: Fri, 26 Aug 2022 15:14:58 +0800
Subject: [PATCH 3/4] Replace ValueEdge example with TaggedEdge
---
 vmbindings/dummyvm/src/edges.rs            | 40 +++++++++------
 vmbindings/dummyvm/src/object_model.rs     |  5 +-
 vmbindings/dummyvm/src/tests/edges_test.rs | 57 +++++++++++++++++++---
 3 files changed, 74 insertions(+), 28 deletions(-)

diff --git a/vmbindings/dummyvm/src/edges.rs b/vmbindings/dummyvm/src/edges.rs
index 31bd2b815e..6bb6c5496d 100644
--- a/vmbindings/dummyvm/src/edges.rs
+++ b/vmbindings/dummyvm/src/edges.rs
@@ -11,7 +11,7 @@ pub enum DummyVMEdge {
     #[cfg(target_pointer_width = "64")]
     Compressed(only_64_bit::CompressedOopEdge),
     Offset(OffsetEdge),
-    Value(ValueEdge),
+    Tagged(TaggedEdge),
 }
 
 unsafe impl Send for DummyVMEdge {}
@@ -23,7 +23,7 @@ impl Edge for DummyVMEdge {
             #[cfg(target_pointer_width = "64")]
             DummyVMEdge::Compressed(e) => e.load(),
             DummyVMEdge::Offset(e) => e.load(),
-            DummyVMEdge::Value(e) => e.load(),
+            DummyVMEdge::Tagged(e) => e.load(),
         }
     }
 
@@ -33,7 +33,7 @@ impl Edge for DummyVMEdge {
             #[cfg(target_pointer_width = "64")]
             DummyVMEdge::Compressed(e) => e.store(object),
             DummyVMEdge::Offset(e) => e.store(object),
-            DummyVMEdge::Value(e) => e.store(object),
+            DummyVMEdge::Tagged(e) => e.store(object),
         }
     }
 }
@@ -130,28 +130,36 @@ impl Edge for OffsetEdge {
 
-/// This edge presents the object reference itself to mmtk-core.
+/// This edge represents a slot that holds a tagged pointer. The low two bits are the tag, and
+/// the remaining bits hold the object reference.
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
-pub struct ValueEdge {
-    value: ObjectReference,
+pub struct TaggedEdge {
+    slot_addr: *mut Atomic<usize>,
 }
 
-unsafe impl Send for ValueEdge {}
+unsafe impl Send for TaggedEdge {}
 
-impl ValueEdge {
-    pub fn new(value: ObjectReference) -> Self {
-        Self { value }
-    }
+impl TaggedEdge {
+    // The DummyVM has OBJECT_REF_OFFSET = 4.
+    // Using a two-bit tag should be safe on both 32-bit and 64-bit platforms.
+    const TAG_BITS_MASK: usize = 0b11;
 
-    pub fn value(&self) -> ObjectReference {
-        self.value
+    #[inline(always)]
+    pub fn new(address: Address) -> Self {
+        Self {
+            slot_addr: address.to_mut_ptr(),
+        }
     }
 }
 
-impl Edge for ValueEdge {
+impl Edge for TaggedEdge {
     fn load(&self) -> ObjectReference {
-        self.value
+        let tagged = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) };
+        let untagged = tagged & !Self::TAG_BITS_MASK;
+        unsafe { Address::from_usize(untagged).to_object_reference() }
     }
 
-    fn store(&self, _object: ObjectReference) {
-        // No-op. Value edges are immutable.
+    fn store(&self, object: ObjectReference) {
+        let old_tagged = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) };
+        let new_untagged = object.to_address().as_usize();
+        let new_tagged = new_untagged | (old_tagged & Self::TAG_BITS_MASK);
+        unsafe { (*self.slot_addr).store(new_tagged, atomic::Ordering::Relaxed) }
     }
 }
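The `store` above is careful to merge rather than overwrite: the pointer bits
come from the new object, while the two tag bits are carried over from the old
slot value. A worked example of the bit manipulation (the addresses are made
up for illustration):

    const TAG_BITS_MASK: usize = 0b11;
    let old_tagged: usize = 0x1000 | 0b01; // old pointer, tag 0b01
    let new_untagged: usize = 0x2000;      // new object address (aligned, low bits zero)
    let new_tagged = new_untagged | (old_tagged & TAG_BITS_MASK);
    assert_eq!(new_tagged, 0x2000 | 0b01); // pointer replaced, tag preserved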
diff --git a/vmbindings/dummyvm/src/object_model.rs b/vmbindings/dummyvm/src/object_model.rs
index 55950dcda8..c9db0be0b9 100644
--- a/vmbindings/dummyvm/src/object_model.rs
+++ b/vmbindings/dummyvm/src/object_model.rs
@@ -9,10 +9,7 @@ pub struct VMObjectModel {}
 
 // This is intentionally set to a non-zero value to see if it breaks.
 // Change this if you want to test other values.
-#[cfg(target_pointer_width = "64")]
-pub const OBJECT_REF_OFFSET: usize = 6;
-#[cfg(target_pointer_width = "32")]
-pub const OBJECT_REF_OFFSET: usize = 2;
+pub const OBJECT_REF_OFFSET: usize = 4;
 
 impl ObjectModel<DummyVM> for VMObjectModel {
     const GLOBAL_LOG_BIT_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::in_header(0);
diff --git a/vmbindings/dummyvm/src/tests/edges_test.rs b/vmbindings/dummyvm/src/tests/edges_test.rs
index ed66c6c387..00da47292f 100644
--- a/vmbindings/dummyvm/src/tests/edges_test.rs
+++ b/vmbindings/dummyvm/src/tests/edges_test.rs
@@ -7,7 +7,7 @@ use mmtk::{
 };
 
 use crate::{
-    edges::{DummyVMEdge, OffsetEdge, ValueEdge},
+    edges::{DummyVMEdge, OffsetEdge, TaggedEdge},
     tests::fixtures::{Fixture, TwoObjects},
 };
 
@@ -117,13 +117,53 @@ pub fn store_offset() {
     });
 }
 
+const TAG1: usize = 0b01;
+const TAG2: usize = 0b10;
+
 #[test]
-pub fn load_value() {
+pub fn load_tagged() {
     FIXTURE.with_fixture(|fixture| {
-        let edge = ValueEdge::new(fixture.objref1);
-        let objref = edge.load();
+        let mut slot1: Atomic<usize> = Atomic::new(fixture.objref1.to_address().as_usize() | TAG1);
+        let mut slot2: Atomic<usize> = Atomic::new(fixture.objref1.to_address().as_usize() | TAG2);
 
-        assert_eq!(objref, fixture.objref1);
+        let edge1 = TaggedEdge::new(Address::from_ref(&mut slot1));
+        let edge2 = TaggedEdge::new(Address::from_ref(&mut slot2));
+        let objref1 = edge1.load();
+        let objref2 = edge2.load();
+
+        // Tags should not affect loaded values.
+        assert_eq!(objref1, fixture.objref1);
+        assert_eq!(objref2, fixture.objref1);
+    });
+}
+
+#[test]
+pub fn store_tagged() {
+    FIXTURE.with_fixture(|fixture| {
+        let mut slot1: Atomic<usize> = Atomic::new(fixture.objref1.to_address().as_usize() | TAG1);
+        let mut slot2: Atomic<usize> = Atomic::new(fixture.objref1.to_address().as_usize() | TAG2);
+
+        let edge1 = TaggedEdge::new(Address::from_ref(&mut slot1));
+        let edge2 = TaggedEdge::new(Address::from_ref(&mut slot2));
+        edge1.store(fixture.objref2);
+        edge2.store(fixture.objref2);
+
+        // Tags should be preserved.
+        assert_eq!(
+            slot1.load(Ordering::SeqCst),
+            fixture.objref2.to_address().as_usize() | TAG1
+        );
+        assert_eq!(
+            slot2.load(Ordering::SeqCst),
+            fixture.objref2.to_address().as_usize() | TAG2
+        );
+
+        let objref1 = edge1.load();
+        let objref2 = edge2.load();
+
+        // Tags should not affect loaded values.
+        assert_eq!(objref1, fixture.objref2);
+        assert_eq!(objref2, fixture.objref2);
     });
 }
 
@@ -137,14 +177,15 @@ pub fn mixed() {
 
         let mut slot1: Atomic<ObjectReference> = Atomic::new(fixture.objref1);
         let mut slot3: Atomic<Address> = Atomic::new(addr1 + OFFSET);
+        let mut slot4: Atomic<usize> = Atomic::new(addr1.as_usize() | TAG1);
 
         let edge1 = SimpleEdge::from_address(Address::from_ref(&mut slot1));
         let edge3 = OffsetEdge::new_with_offset(Address::from_ref(&mut slot3), OFFSET);
-        let edge4 = ValueEdge::new(fixture.objref1);
+        let edge4 = TaggedEdge::new(Address::from_ref(&mut slot4));
 
         let de1 = DummyVMEdge::Simple(edge1);
         let de3 = DummyVMEdge::Offset(edge3);
-        let de4 = DummyVMEdge::Value(edge4);
+        let de4 = DummyVMEdge::Tagged(edge4);
 
         let edges = vec![de1, de3, de4];
         for (i, edge) in edges.iter().enumerate() {
@@ -152,7 +193,7 @@ pub fn mixed() {
             assert_eq!(objref, fixture.objref1, "Edge {} is not properly loaded", i);
         }
 
-        let mutable_edges = vec![de1, de3];
+        let mutable_edges = vec![de1, de3, de4];
         for (i, edge) in mutable_edges.iter().enumerate() {
             edge.store(fixture.objref2);
             let objref = edge.load();

From 9af3a936f4bff303343b669212010dc5292e874e Mon Sep 17 00:00:00 2001
From: Kunshan Wang
Date: Fri, 26 Aug 2022 17:57:21 +0800
Subject: [PATCH 4/4] Fix test for offset change.
---
 vmbindings/dummyvm/src/tests/conservatism.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vmbindings/dummyvm/src/tests/conservatism.rs b/vmbindings/dummyvm/src/tests/conservatism.rs
index 4e6d361d2b..d66de2d6fa 100644
--- a/vmbindings/dummyvm/src/tests/conservatism.rs
+++ b/vmbindings/dummyvm/src/tests/conservatism.rs
@@ -13,7 +13,7 @@ lazy_static! {
 }
 
 fn basic_filter(addr: Address) -> bool {
-    !addr.is_zero() && addr.as_usize() % ALLOC_BIT_REGION_SIZE == OBJECT_REF_OFFSET
+    !addr.is_zero() && addr.as_usize() % ALLOC_BIT_REGION_SIZE == (OBJECT_REF_OFFSET % ALLOC_BIT_REGION_SIZE)
 }
 
 fn assert_filter_pass(addr: Address) {
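A note on why this last fix is needed: `basic_filter` accepts an address only
if its remainder modulo `ALLOC_BIT_REGION_SIZE` matches where object
references land. Patch 3 changed `OBJECT_REF_OFFSET` to 4 on all platforms, so
the offset can now be equal to or larger than the region size, and the
expected remainder must itself be reduced modulo the region size. A worked
check, where the region size of 4 is an illustrative value, not the actual
constant:

    // With an illustrative ALLOC_BIT_REGION_SIZE of 4 and OBJECT_REF_OFFSET of 4:
    // old check: addr % 4 == 4          -> never true, every address rejected
    // new check: addr % 4 == (4 % 4)    -> accepts addresses with remainder 0
    assert_eq!(4usize % 4usize, 0);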