diff --git a/src/coreclr/System.Private.CoreLib/src/System/Threading/Monitor.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Threading/Monitor.CoreCLR.cs
index 9fd823ba175708..ab3e7ad42504c1 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Threading/Monitor.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Threading/Monitor.CoreCLR.cs
@@ -183,10 +183,12 @@ public static void PulseAll(object obj)
ObjPulseAll(obj);
}
+#pragma warning disable CA2252 // Opt in to preview features before using them (Lock)
/// <summary>
/// Gets the number of times there was contention upon trying to take a <see cref="Monitor"/>'s lock so far.
/// </summary>
- public static long LockContentionCount => GetLockContentionCount();
+ public static long LockContentionCount => GetLockContentionCount() + Lock.ContentionCount;
+#pragma warning restore CA2252
[LibraryImport(RuntimeHelpers.QCall, EntryPoint = "ObjectNative_GetMonitorLockContentionCount")]
private static partial long GetLockContentionCount();
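
With this change, Monitor.LockContentionCount reports the sum of the native monitor counter and the managed Lock.ContentionCount, so one counter covers both lock kinds. A minimal, illustrative probe of the aggregated counter (the observed delta depends entirely on timing and core count):

```csharp
using System;
using System.Threading;
using System.Threading.Tasks;

// Illustrative only: reads the aggregated counter around a deliberately contended region.
class ContentionProbe
{
    private static readonly object s_gate = new object();

    static void Main()
    {
        long before = Monitor.LockContentionCount;

        Parallel.For(0, Environment.ProcessorCount, _ =>
        {
            for (int i = 0; i < 100_000; i++)
            {
                lock (s_gate) { /* short critical section */ }
            }
        });

        long delta = Monitor.LockContentionCount - before;
        Console.WriteLine($"Contention events observed: {delta}");
    }
}
```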
diff --git a/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifier.cs b/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifier.cs
index 237895a8c0711b..73c81ddf45797c 100644
--- a/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifier.cs
+++ b/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifier.cs
@@ -79,7 +79,7 @@ protected ConcurrentUnifier()
public V GetOrAdd(K key)
{
Debug.Assert(key != null);
- Debug.Assert(!_lock.IsAcquired, "GetOrAdd called while lock already acquired. A possible cause of this is an Equals or GetHashCode method that causes reentrancy in the table.");
+ Debug.Assert(!_lock.IsHeldByCurrentThread, "GetOrAdd called while lock already acquired. A possible cause of this is an Equals or GetHashCode method that causes reentrancy in the table.");
int hashCode = key.GetHashCode();
V value;
@@ -89,7 +89,7 @@ public V GetOrAdd(K key)
V checkedValue;
bool checkedFound;
// In debug builds, always exercise a locked TryGet (this is a good way to detect deadlock/reentrancy through Equals/GetHashCode()).
- using (LockHolder.Hold(_lock))
+ using (_lock.EnterScope())
{
_container.VerifyUnifierConsistency();
int h = key.GetHashCode();
@@ -110,7 +110,7 @@ public V GetOrAdd(K key)
value = this.Factory(key);
- using (LockHolder.Hold(_lock))
+ using (_lock.EnterScope())
{
V heyIWasHereFirst;
if (_container.TryGetValue(key, hashCode, out heyIWasHereFirst))
@@ -171,7 +171,7 @@ public bool TryGetValue(K key, int hashCode, out V value)
public void Add(K key, int hashCode, V value)
{
- Debug.Assert(_owner._lock.IsAcquired);
+ Debug.Assert(_owner._lock.IsHeldByCurrentThread);
int bucket = ComputeBucket(hashCode, _buckets.Length);
@@ -194,14 +194,14 @@ public bool HasCapacity
{
get
{
- Debug.Assert(_owner._lock.IsAcquired);
+ Debug.Assert(_owner._lock.IsHeldByCurrentThread);
return _nextFreeEntry != _entries.Length;
}
}
public void Resize()
{
- Debug.Assert(_owner._lock.IsAcquired);
+ Debug.Assert(_owner._lock.IsHeldByCurrentThread);
int newSize = HashHelpers.GetPrime(_buckets.Length * 2);
#if DEBUG
@@ -257,7 +257,7 @@ public void VerifyUnifierConsistency()
if (_nextFreeEntry >= 5000 && (0 != (_nextFreeEntry % 100)))
return;
- Debug.Assert(_owner._lock.IsAcquired);
+ Debug.Assert(_owner._lock.IsHeldByCurrentThread);
Debug.Assert(_nextFreeEntry >= 0 && _nextFreeEntry <= _entries.Length);
int numEntriesEncountered = 0;
for (int bucket = 0; bucket < _buckets.Length; bucket++)
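
Throughout this file (and the other unifiers below), `LockHolder.Hold(_lock)` becomes `_lock.EnterScope()` and the `IsAcquired` asserts become `IsHeldByCurrentThread`. For reference, a rough sketch of the same two-phase get-or-add shape written against the public System.Threading.Lock API (assuming .NET 9+); the cache type and factory here are illustrative, not the unifier itself:

```csharp
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Threading;

// Minimal sketch of the replacement pattern, assuming the public System.Threading.Lock API.
class UnifierLikeCache<TKey, TValue> where TKey : notnull
{
    private readonly Lock _lock = new Lock();
    private readonly Dictionary<TKey, TValue> _map = new();

    public TValue GetOrAdd(TKey key, Func<TKey, TValue> factory)
    {
        // The caller must not already hold the lock (guards against reentrancy via the factory).
        Debug.Assert(!_lock.IsHeldByCurrentThread);

        using (_lock.EnterScope())              // replaces using (LockHolder.Hold(_lock))
        {
            if (_map.TryGetValue(key, out TValue? existing))
                return existing;
        }

        TValue created = factory(key);          // run the factory outside the lock

        using (_lock.EnterScope())
        {
            if (_map.TryGetValue(key, out TValue? winner))
                return winner;                  // another thread added it first
            _map.Add(key, created);
            return created;
        }
    }
}
```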
diff --git a/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierW.cs b/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierW.cs
index 049ce1ee078ab2..321419e4aa5c0c 100644
--- a/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierW.cs
+++ b/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierW.cs
@@ -89,7 +89,7 @@ protected ConcurrentUnifierW()
public V GetOrAdd(K key)
{
Debug.Assert(key != null);
- Debug.Assert(!_lock.IsAcquired, "GetOrAdd called while lock already acquired. A possible cause of this is an Equals or GetHashCode method that causes reentrancy in the table.");
+ Debug.Assert(!_lock.IsHeldByCurrentThread, "GetOrAdd called while lock already acquired. A possible cause of this is an Equals or GetHashCode method that causes reentrancy in the table.");
int hashCode = key.GetHashCode();
V? value;
@@ -99,7 +99,7 @@ public V GetOrAdd(K key)
V? checkedValue;
bool checkedFound;
// In debug builds, always exercise a locked TryGet (this is a good way to detect deadlock/reentrancy through Equals/GetHashCode()).
- using (LockHolder.Hold(_lock))
+ using (_lock.EnterScope())
{
_container.VerifyUnifierConsistency();
int h = key.GetHashCode();
@@ -137,7 +137,7 @@ public V GetOrAdd(K key)
return null;
}
- using (LockHolder.Hold(_lock))
+ using (_lock.EnterScope())
{
V? heyIWasHereFirst;
if (_container.TryGetValue(key, hashCode, out heyIWasHereFirst))
@@ -201,7 +201,7 @@ public bool TryGetValue(K key, int hashCode, out V? value)
public void Add(K key, int hashCode, V value)
{
- Debug.Assert(_owner._lock.IsAcquired);
+ Debug.Assert(_owner._lock.IsHeldByCurrentThread);
int bucket = ComputeBucket(hashCode, _buckets.Length);
@@ -251,14 +251,14 @@ public bool HasCapacity
{
get
{
- Debug.Assert(_owner._lock.IsAcquired);
+ Debug.Assert(_owner._lock.IsHeldByCurrentThread);
return _nextFreeEntry != _entries.Length;
}
}
public void Resize()
{
- Debug.Assert(_owner._lock.IsAcquired);
+ Debug.Assert(_owner._lock.IsHeldByCurrentThread);
// Before we actually grow the size of the table, figure out how much we can recover just by dropping entries with
// expired weak references.
@@ -341,7 +341,7 @@ public void VerifyUnifierConsistency()
if (_nextFreeEntry >= 5000 || (0 != (_nextFreeEntry % 100)))
return;
- Debug.Assert(_owner._lock.IsAcquired);
+ Debug.Assert(_owner._lock.IsHeldByCurrentThread);
Debug.Assert(_nextFreeEntry >= 0 && _nextFreeEntry <= _entries.Length);
int numEntriesEncountered = 0;
for (int bucket = 0; bucket < _buckets.Length; bucket++)
diff --git a/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierWKeyed.cs b/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierWKeyed.cs
index 48708eac12a982..22c0fb5f680527 100644
--- a/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierWKeyed.cs
+++ b/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierWKeyed.cs
@@ -102,7 +102,7 @@ protected ConcurrentUnifierWKeyed()
public V GetOrAdd(K key)
{
Debug.Assert(key != null);
- Debug.Assert(!_lock.IsAcquired, "GetOrAdd called while lock already acquired. A possible cause of this is an Equals or GetHashCode method that causes reentrancy in the table.");
+ Debug.Assert(!_lock.IsHeldByCurrentThread, "GetOrAdd called while lock already acquired. A possible cause of this is an Equals or GetHashCode method that causes reentrancy in the table.");
int hashCode = key.GetHashCode();
V value;
@@ -112,7 +112,7 @@ public V GetOrAdd(K key)
V checkedValue;
bool checkedFound;
// In debug builds, always exercise a locked TryGet (this is a good way to detect deadlock/reentrancy through Equals/GetHashCode()).
- using (LockHolder.Hold(_lock))
+ using (_lock.EnterScope())
{
_container.VerifyUnifierConsistency();
int h = key.GetHashCode();
@@ -154,7 +154,7 @@ public V GetOrAdd(K key)
// it needs to produce the key quickly and in a deadlock-free manner once we're inside the lock.
value.PrepareKey();
- using (LockHolder.Hold(_lock))
+ using (_lock.EnterScope())
{
V heyIWasHereFirst;
if (_container.TryGetValue(key, hashCode, out heyIWasHereFirst))
@@ -220,7 +220,7 @@ public bool TryGetValue(K key, int hashCode, out V value)
public void Add(int hashCode, V value)
{
- Debug.Assert(_owner._lock.IsAcquired);
+ Debug.Assert(_owner._lock.IsHeldByCurrentThread);
int bucket = ComputeBucket(hashCode, _buckets.Length);
int newEntryIdx = _nextFreeEntry;
@@ -241,14 +241,14 @@ public bool HasCapacity
{
get
{
- Debug.Assert(_owner._lock.IsAcquired);
+ Debug.Assert(_owner._lock.IsHeldByCurrentThread);
return _nextFreeEntry != _entries.Length;
}
}
public void Resize()
{
- Debug.Assert(_owner._lock.IsAcquired);
+ Debug.Assert(_owner._lock.IsHeldByCurrentThread);
// Before we actually grow the size of the table, figure out how much we can recover just by dropping entries with
// expired weak references.
@@ -330,7 +330,7 @@ public void VerifyUnifierConsistency()
if (_nextFreeEntry >= 5000 && (0 != (_nextFreeEntry % 100)))
return;
- Debug.Assert(_owner._lock.IsAcquired);
+ Debug.Assert(_owner._lock.IsHeldByCurrentThread);
Debug.Assert(_nextFreeEntry >= 0 && _nextFreeEntry <= _entries.Length);
int numEntriesEncountered = 0;
for (int bucket = 0; bucket < _buckets.Length; bucket++)
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/CompatibilitySuppressions.xml b/src/coreclr/nativeaot/System.Private.CoreLib/src/CompatibilitySuppressions.xml
index 2347f0973c539d..50058746b67b33 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/CompatibilitySuppressions.xml
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/CompatibilitySuppressions.xml
@@ -929,14 +929,6 @@
<DiagnosticId>CP0001</DiagnosticId>
<Target>T:System.Threading.Condition</Target>
</Suppression>
- <Suppression>
- <DiagnosticId>CP0001</DiagnosticId>
- <Target>T:System.Threading.Lock</Target>
- </Suppression>
- <Suppression>
- <DiagnosticId>CP0001</DiagnosticId>
- <Target>T:System.Threading.LockHolder</Target>
- </Suppression>
<Suppression>
<DiagnosticId>CP0002</DiagnosticId>
<Target>M:System.ModuleHandle.#ctor(System.Reflection.Module)</Target>
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/CompilerHelpers/SynchronizedMethodHelpers.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/CompilerHelpers/SynchronizedMethodHelpers.cs
index 4f4db6916b511c..56701f033500b1 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/CompilerHelpers/SynchronizedMethodHelpers.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/CompilerHelpers/SynchronizedMethodHelpers.cs
@@ -14,7 +14,8 @@ internal static class SynchronizedMethodHelpers
private static void MonitorEnter(object obj, ref bool lockTaken)
{
// Inlined Monitor.Enter with a few tweaks
- int resultOrIndex = ObjectHeader.Acquire(obj);
+ int currentThreadID = ManagedThreadId.CurrentManagedThreadIdUnchecked;
+ int resultOrIndex = ObjectHeader.Acquire(obj, currentThreadID);
if (resultOrIndex < 0)
{
lockTaken = true;
@@ -25,7 +26,7 @@ private static void MonitorEnter(object obj, ref bool lockTaken)
ObjectHeader.GetLockObject(obj) :
SyncTable.GetLockObject(resultOrIndex);
- Monitor.TryAcquireSlow(lck, obj, Timeout.Infinite);
+ lck.TryEnterSlow(Timeout.Infinite, currentThreadID, obj);
lockTaken = true;
}
private static void MonitorExit(object obj, ref bool lockTaken)
@@ -42,7 +43,8 @@ private static unsafe void MonitorEnterStatic(MethodTable* pMT, ref bool lockTak
{
// Inlined Monitor.Enter with a few tweaks
object obj = GetStaticLockObject(pMT);
- int resultOrIndex = ObjectHeader.Acquire(obj);
+ int currentThreadID = ManagedThreadId.CurrentManagedThreadIdUnchecked;
+ int resultOrIndex = ObjectHeader.Acquire(obj, currentThreadID);
if (resultOrIndex < 0)
{
lockTaken = true;
@@ -53,7 +55,7 @@ private static unsafe void MonitorEnterStatic(MethodTable* pMT, ref bool lockTak
ObjectHeader.GetLockObject(obj) :
SyncTable.GetLockObject(resultOrIndex);
- Monitor.TryAcquireSlow(lck, obj, Timeout.Infinite);
+ lck.TryEnterSlow(Timeout.Infinite, currentThreadID, obj);
lockTaken = true;
}
private static unsafe void MonitorExitStatic(MethodTable* pMT, ref bool lockTaken)
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj b/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj
index a007a3ed7fbba5..12ae4d8a6b0c45 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj
@@ -3,6 +3,8 @@
true$(NoWarn);AD0001
+
+ true
@@ -231,10 +233,9 @@
-
+
-
@@ -305,9 +306,6 @@
Interop\Unix\System.Native\Interop.Exit.cs
-
- Interop\Unix\System.Native\Interop.Threading.cs
-
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/CompilerServices/ClassConstructorRunner.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/CompilerServices/ClassConstructorRunner.cs
index 8093c0f71ff147..7e8293e1a1653f 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/CompilerServices/ClassConstructorRunner.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/CompilerServices/ClassConstructorRunner.cs
@@ -111,7 +111,7 @@ public static unsafe void EnsureClassConstructorRun(StaticClassConstructionConte
cctors[cctorIndex].HoldingThread = ManagedThreadIdNone;
NoisyLog("Releasing cctor lock, context={0}, thread={1}", pContext, currentManagedThreadId);
- cctorLock.Release();
+ cctorLock.Exit();
}
}
else
@@ -142,10 +142,10 @@ private static unsafe bool DeadlockAwareAcquire(CctorHandle cctor, StaticClassCo
int cctorIndex = cctor.Index;
Cctor[] cctors = cctor.Array;
Lock lck = cctors[cctorIndex].Lock;
- if (lck.IsAcquired)
+ if (lck.IsHeldByCurrentThread)
return false; // Thread recursively triggered the same cctor.
- if (lck.TryAcquire(waitIntervalInMS))
+ if (lck.TryEnter(waitIntervalInMS))
return true;
// We couldn't acquire the lock. See if this .cctor is involved in a cross-thread deadlock. If so, break
@@ -164,7 +164,7 @@ private static unsafe bool DeadlockAwareAcquire(CctorHandle cctor, StaticClassCo
// deadlock themselves, then that's a bug in user code.
for (;;)
{
- using (LockHolder.Hold(s_cctorGlobalLock))
+ using (s_cctorGlobalLock.EnterScope())
{
// Ask the guy who holds the cctor lock we're trying to acquire who he's waiting for. Keep
// walking down that chain until we either discover a cycle or reach a non-blocking state. Note
@@ -233,7 +233,7 @@ private static unsafe bool DeadlockAwareAcquire(CctorHandle cctor, StaticClassCo
waitIntervalInMS *= 2;
// We didn't find a cycle yet, try to take the lock again.
- if (lck.TryAcquire(waitIntervalInMS))
+ if (lck.TryEnter(waitIntervalInMS))
return true;
} // infinite loop
}
@@ -283,7 +283,7 @@ public static CctorHandle GetCctor(StaticClassConstructionContext* pContext)
}
#endif // TARGET_WASM
- using (LockHolder.Hold(s_cctorGlobalLock))
+ using (s_cctorGlobalLock.EnterScope())
{
Cctor[]? resultArray = null;
int resultIndex = -1;
@@ -355,14 +355,14 @@ public static int Count
{
get
{
- Debug.Assert(s_cctorGlobalLock.IsAcquired);
+ Debug.Assert(s_cctorGlobalLock.IsHeldByCurrentThread);
return s_count;
}
}
public static void Release(CctorHandle cctor)
{
- using (LockHolder.Hold(s_cctorGlobalLock))
+ using (s_cctorGlobalLock.EnterScope())
{
Cctor[] cctors = cctor.Array;
int cctorIndex = cctor.Index;
@@ -419,7 +419,7 @@ public static int MarkThreadAsBlocked(int managedThreadId, CctorHandle blockedOn
#else
const int Grow = 10;
#endif
- using (LockHolder.Hold(s_cctorGlobalLock))
+ using (s_cctorGlobalLock.EnterScope())
{
s_blockingRecords ??= new BlockingRecord[Grow];
int found;
@@ -450,14 +450,14 @@ public static int MarkThreadAsBlocked(int managedThreadId, CctorHandle blockedOn
public static void UnmarkThreadAsBlocked(int blockRecordIndex)
{
// This method must never throw
- s_cctorGlobalLock.Acquire();
+ s_cctorGlobalLock.Enter();
s_blockingRecords[blockRecordIndex].BlockedOn = new CctorHandle(null, 0);
- s_cctorGlobalLock.Release();
+ s_cctorGlobalLock.Exit();
}
public static CctorHandle GetCctorThatThreadIsBlockedOn(int managedThreadId)
{
- Debug.Assert(s_cctorGlobalLock.IsAcquired);
+ Debug.Assert(s_cctorGlobalLock.IsHeldByCurrentThread);
for (int i = 0; i < s_nextBlockingRecordIndex; i++)
{
if (s_blockingRecords[i].ManagedThreadId == managedThreadId)
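
DeadlockAwareAcquire now polls with Lock.TryEnter(timeout), doubling the wait between deadlock scans. A sketch of that acquire/scan loop shape against the public System.Threading.Lock API (.NET 9+); `checkForDeadlock` is a hypothetical stand-in for the cctor cycle detection, and the doubling cap is an assumption for illustration:

```csharp
using System;
using System.Threading;

static class PollingAcquireSketch
{
    public static bool TryAcquireWithDeadlockChecks(Lock lck, Func<bool> checkForDeadlock)
    {
        if (lck.IsHeldByCurrentThread)
            return false;                 // same thread re-triggered the cctor: treat as recursion

        int waitIntervalMs = 1;
        while (true)
        {
            if (lck.TryEnter(waitIntervalMs))
                return true;              // acquired within this interval

            if (checkForDeadlock())
                return false;             // cross-thread cycle detected; caller breaks the deadlock

            if (waitIntervalMs < 30_000)
                waitIntervalMs *= 2;      // wait longer before the next scan
        }
    }
}
```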
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.NativeAot.cs
index e776860562ca49..0fc6330a1c88e4 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.NativeAot.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.NativeAot.cs
@@ -951,7 +951,7 @@ private unsafe bool TryGetOrCreateObjectForComInstanceInternal(
if (!flags.HasFlag(CreateObjectFlags.UniqueInstance))
{
- using (LockHolder.Hold(_lock))
+ using (_lock.EnterScope())
{
if (_rcwCache.TryGetValue(identity, out GCHandle handle))
{
@@ -1047,7 +1047,7 @@ private unsafe bool TryGetOrCreateObjectForComInstanceInternal(
return true;
}
- using (LockHolder.Hold(_lock))
+ using (_lock.EnterScope())
{
object? cachedWrapper = null;
if (_rcwCache.TryGetValue(identity, out var existingHandle))
@@ -1092,7 +1092,7 @@ private unsafe bool TryGetOrCreateObjectForComInstanceInternal(
private void RemoveRCWFromCache(IntPtr comPointer, GCHandle expectedValue)
{
- using (LockHolder.Hold(_lock))
+ using (_lock.EnterScope())
{
// TryGetOrCreateObjectForComInstanceInternal may have put a new entry into the cache
// in the time between the GC cleared the contents of the GC handle but before the
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Condition.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Condition.cs
index a567debf03708e..c8fdbb60384348 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Condition.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Condition.cs
@@ -56,7 +56,7 @@ private unsafe void AssertIsNotInList(Waiter waiter)
private unsafe void AddWaiter(Waiter waiter)
{
- Debug.Assert(_lock.IsAcquired);
+ Debug.Assert(_lock.IsHeldByCurrentThread);
AssertIsNotInList(waiter);
waiter.prev = _waitersTail;
@@ -70,7 +70,7 @@ private unsafe void AddWaiter(Waiter waiter)
private unsafe void RemoveWaiter(Waiter waiter)
{
- Debug.Assert(_lock.IsAcquired);
+ Debug.Assert(_lock.IsHeldByCurrentThread);
AssertIsInList(waiter);
if (waiter.next != null)
@@ -101,13 +101,13 @@ public unsafe bool Wait(int millisecondsTimeout)
{
ArgumentOutOfRangeException.ThrowIfLessThan(millisecondsTimeout, -1);
- if (!_lock.IsAcquired)
+ if (!_lock.IsHeldByCurrentThread)
throw new SynchronizationLockException();
Waiter waiter = GetWaiterForCurrentThread();
AddWaiter(waiter);
- uint recursionCount = _lock.ReleaseAll();
+ uint recursionCount = _lock.ExitAll();
bool success = false;
try
{
@@ -115,8 +115,8 @@ public unsafe bool Wait(int millisecondsTimeout)
}
finally
{
- _lock.Reacquire(recursionCount);
- Debug.Assert(_lock.IsAcquired);
+ _lock.Reenter(recursionCount);
+ Debug.Assert(_lock.IsHeldByCurrentThread);
if (!waiter.signalled)
{
@@ -140,7 +140,7 @@ public unsafe bool Wait(int millisecondsTimeout)
public unsafe void SignalAll()
{
- if (!_lock.IsAcquired)
+ if (!_lock.IsHeldByCurrentThread)
throw new SynchronizationLockException();
while (_waitersHead != null)
@@ -149,7 +149,7 @@ public unsafe void SignalAll()
public unsafe void SignalOne()
{
- if (!_lock.IsAcquired)
+ if (!_lock.IsHeldByCurrentThread)
throw new SynchronizationLockException();
Waiter? waiter = _waitersHead;
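
Wait now uses ExitAll/Reenter: fully release the lock (capturing the recursion count), block, then reacquire and restore the recursion count. A conceptual analogue of those semantics expressed with the public Monitor API, which does the same release-all/reacquire internally; this is an illustration, not the nativeaot Condition implementation:

```csharp
using System.Collections.Generic;
using System.Threading;

class BoundedQueueSketch<T>
{
    private readonly object _gate = new object();
    private readonly Queue<T> _items = new Queue<T>();

    public void Add(T item)
    {
        lock (_gate)
        {
            _items.Enqueue(item);
            Monitor.Pulse(_gate);          // analogous to Condition.SignalOne
        }
    }

    public T Take()
    {
        lock (_gate)
        {
            while (_items.Count == 0)
                Monitor.Wait(_gate);       // release-all + block + reacquire, like ExitAll/Reenter
            return _items.Dequeue();
        }
    }
}
```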
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Lock.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Lock.NativeAot.cs
new file mode 100644
index 00000000000000..690014f91691fa
--- /dev/null
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Lock.NativeAot.cs
@@ -0,0 +1,229 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics;
+using System.Diagnostics.Tracing;
+using System.Runtime.CompilerServices;
+
+namespace System.Threading
+{
+ public sealed partial class Lock
+ {
+ private const short SpinCountNotInitialized = short.MinValue;
+
+ // NOTE: Lock must not have a static (class) constructor, as Lock itself is used to synchronize
+ // class construction. If Lock has its own class constructor, this can lead to infinite recursion.
+ // All static data in Lock must be lazy-initialized.
+ private static int s_staticsInitializationStage;
+ private static bool s_isSingleProcessor;
+ private static short s_maxSpinCount;
+ private static short s_minSpinCount;
+
+ /// <summary>
+ /// Initializes a new instance of the <see cref="Lock"/> class.
+ /// </summary>
+ public Lock() => _spinCount = SpinCountNotInitialized;
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ internal bool TryEnterOneShot(int currentManagedThreadId)
+ {
+ Debug.Assert(currentManagedThreadId != 0);
+
+ if (State.TryLock(this))
+ {
+ Debug.Assert(_owningThreadId == 0);
+ Debug.Assert(_recursionCount == 0);
+ _owningThreadId = (uint)currentManagedThreadId;
+ return true;
+ }
+
+ return false;
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ internal void Exit(int currentManagedThreadId)
+ {
+ Debug.Assert(currentManagedThreadId != 0);
+
+ if (_owningThreadId != (uint)currentManagedThreadId)
+ {
+ ThrowHelper.ThrowSynchronizationLockException_LockExit();
+ }
+
+ ExitImpl();
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private ThreadId TryEnterSlow(int timeoutMs, ThreadId currentThreadId) =>
+ TryEnterSlow(timeoutMs, currentThreadId, this);
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ internal bool TryEnterSlow(int timeoutMs, int currentManagedThreadId, object associatedObject) =>
+ TryEnterSlow(timeoutMs, new ThreadId((uint)currentManagedThreadId), associatedObject).IsInitialized;
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ internal bool GetIsHeldByCurrentThread(int currentManagedThreadId)
+ {
+ Debug.Assert(currentManagedThreadId != 0);
+
+ bool isHeld = _owningThreadId == (uint)currentManagedThreadId;
+ Debug.Assert(!isHeld || new State(this).IsLocked);
+ return isHeld;
+ }
+
+ internal uint ExitAll()
+ {
+ Debug.Assert(IsHeldByCurrentThread);
+
+ uint recursionCount = _recursionCount;
+ _owningThreadId = 0;
+ _recursionCount = 0;
+
+ State state = State.Unlock(this);
+ if (state.HasAnyWaiters)
+ {
+ SignalWaiterIfNecessary(state);
+ }
+
+ return recursionCount;
+ }
+
+ internal void Reenter(uint previousRecursionCount)
+ {
+ Debug.Assert(!IsHeldByCurrentThread);
+
+ Enter();
+ _recursionCount = previousRecursionCount;
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private TryLockResult LazyInitializeOrEnter()
+ {
+ StaticsInitializationStage stage = (StaticsInitializationStage)Volatile.Read(ref s_staticsInitializationStage);
+ switch (stage)
+ {
+ case StaticsInitializationStage.Complete:
+ if (_spinCount == SpinCountNotInitialized)
+ {
+ _spinCount = s_maxSpinCount;
+ }
+ return TryLockResult.Spin;
+
+ case StaticsInitializationStage.Started:
+ // Spin-wait until initialization is complete or the lock is acquired to prevent class construction cycles
+ // later during a full wait
+ bool sleep = true;
+ while (true)
+ {
+ if (sleep)
+ {
+ Thread.UninterruptibleSleep0();
+ }
+ else
+ {
+ Thread.SpinWait(1);
+ }
+
+ stage = (StaticsInitializationStage)Volatile.Read(ref s_staticsInitializationStage);
+ if (stage == StaticsInitializationStage.Complete)
+ {
+ goto case StaticsInitializationStage.Complete;
+ }
+ else if (stage == StaticsInitializationStage.NotStarted)
+ {
+ goto default;
+ }
+
+ if (State.TryLock(this))
+ {
+ return TryLockResult.Locked;
+ }
+
+ sleep = !sleep;
+ }
+
+ default:
+ Debug.Assert(stage == StaticsInitializationStage.NotStarted);
+ if (TryInitializeStatics())
+ {
+ goto case StaticsInitializationStage.Complete;
+ }
+ goto case StaticsInitializationStage.Started;
+ }
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static bool TryInitializeStatics()
+ {
+ // Since Lock is used to synchronize class construction, and some of the statics initialization may involve class
+ // construction, update the stage first to avoid infinite recursion
+ switch (
+ (StaticsInitializationStage)
+ Interlocked.CompareExchange(
+ ref s_staticsInitializationStage,
+ (int)StaticsInitializationStage.Started,
+ (int)StaticsInitializationStage.NotStarted))
+ {
+ case StaticsInitializationStage.Started:
+ return false;
+ case StaticsInitializationStage.Complete:
+ return true;
+ }
+
+ try
+ {
+ s_isSingleProcessor = Environment.IsSingleProcessor;
+ s_maxSpinCount = DetermineMaxSpinCount();
+ s_minSpinCount = DetermineMinSpinCount();
+
+ // Also initialize some types that are used later to prevent potential class construction cycles
+ NativeRuntimeEventSource.Log.IsEnabled();
+ }
+ catch
+ {
+ s_staticsInitializationStage = (int)StaticsInitializationStage.NotStarted;
+ throw;
+ }
+
+ Volatile.Write(ref s_staticsInitializationStage, (int)StaticsInitializationStage.Complete);
+ return true;
+ }
+
+ // Returns false until the static variable is lazy-initialized
+ internal static bool IsSingleProcessor => s_isSingleProcessor;
+
+ // Used to transfer the state when inflating thin locks
+ internal void InitializeLocked(int managedThreadId, uint recursionCount)
+ {
+ Debug.Assert(recursionCount == 0 || managedThreadId != 0);
+
+ _state = managedThreadId == 0 ? State.InitialStateValue : State.LockedStateValue;
+ _owningThreadId = (uint)managedThreadId;
+ _recursionCount = recursionCount;
+ }
+
+ internal struct ThreadId
+ {
+ private uint _id;
+
+ public ThreadId(uint id) => _id = id;
+ public uint Id => _id;
+ public bool IsInitialized => _id != 0;
+ public static ThreadId Current_NoInitialize => new ThreadId((uint)ManagedThreadId.CurrentManagedThreadIdUnchecked);
+
+ public void InitializeForCurrentThread()
+ {
+ Debug.Assert(!IsInitialized);
+ _id = (uint)ManagedThreadId.Current;
+ Debug.Assert(IsInitialized);
+ }
+ }
+
+ private enum StaticsInitializationStage
+ {
+ NotStarted,
+ Started,
+ Complete
+ }
+ }
+}
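
The file's comments explain why Lock cannot have a static constructor and why the statics are initialized in stages (NotStarted/Started/Complete) guarded by a CompareExchange. A standalone sketch of that staged-initialization pattern under the same constraint; the names and the initialized value are illustrative, and the real code also publishes spin counts and warms up event-source types:

```csharp
using System;
using System.Threading;

static class StagedInitSketch
{
    private const int NotStarted = 0, Started = 1, Complete = 2;
    private static int s_stage;
    private static int s_value;   // stands in for s_maxSpinCount / s_minSpinCount

    public static bool TryInitialize()
    {
        // Claim the initialization, or observe another thread's progress.
        switch (Interlocked.CompareExchange(ref s_stage, Started, NotStarted))
        {
            case Started:  return false;   // someone else is initializing; the caller spin-waits
            case Complete: return true;    // already initialized
        }

        try
        {
            s_value = Environment.ProcessorCount;   // work that may itself take locks
        }
        catch
        {
            s_stage = NotStarted;                   // allow a later retry
            throw;
        }

        Volatile.Write(ref s_stage, Complete);
        return true;
    }
}
```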
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Lock.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Lock.cs
deleted file mode 100644
index 30bc946ad42546..00000000000000
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Lock.cs
+++ /dev/null
@@ -1,544 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System.Diagnostics;
-using System.IO;
-using System.Runtime;
-using System.Runtime.CompilerServices;
-
-namespace System.Threading
-{
- public sealed class Lock : IDisposable
- {
- //
- // This lock is a hybrid spinning/blocking lock with dynamically adjusted spinning.
- // On a multiprocessor machine an acquiring thread will try to acquire multiple times
- // before going to sleep. The amount of spinning is dynamically adjusted based on past
- // history of the lock and will stay in the following range.
- //
- // We use doubling-up delays with a cap while spinning (1,2,4,8,16,32,64,64,64,64, ...)
- // Thus 20 iterations is about 1000 speenwaits (20-50 ns each)
- // Context switch costs may vary and typically in 2-20 usec range
- // Even if we are the only thread trying to acquire the lock at 20-50 usec the cost of being
- // blocked+awaken may not be more than 2x of what we have already spent, so that is the max CPU time
- // that we will allow to burn while spinning.
- //
- // This may not be always optimal, but should be close enough.
- // I.E. in a system consisting of exactly 2 threads, unlimited spinning may work better, but we
- // will not optimize specifically for that.
- private const ushort MaxSpinLimit = 20;
- private const ushort MinSpinLimit = 3;
- private const ushort SpinningNotInitialized = MaxSpinLimit + 1;
- private const ushort SpinningDisabled = 0;
-
- //
- // We will use exponential backoff in rare cases when we need to change state atomically and cannot
- // make progress due to concurrent state changes by other threads.
- // While we cannot know the ideal amount of wait needed before making a successfull attempt,
- // the exponential backoff will generally be not more than 2X worse than the perfect guess and
- // will do a lot less attempts than an simple retry. On multiprocessor machine fruitless attempts
- // will cause unnecessary sharing of the contended state which may make modifying the state more expensive.
- // To protect against degenerate cases we will cap the per-iteration wait to 1024 spinwaits.
- //
- private const uint MaxExponentialBackoffBits = 10;
-
- //
- // This lock is unfair and permits acquiring a contended lock by a nonwaiter in the presence of waiters.
- // It is possible for one thread to keep holding the lock long enough that waiters go to sleep and
- // then release and reacquire fast enough that waiters have no chance to get the lock.
- // In extreme cases one thread could keep retaking the lock starving everybody else.
- // If we see woken waiters not able to take the lock for too long we will ask nonwaiters to wait.
- //
- private const uint WaiterWatchdogTicks = 100;
-
- //
- // NOTE: Lock must not have a static (class) constructor, as Lock itself is used to synchronize
- // class construction. If Lock has its own class constructor, this can lead to infinite recursion.
- // All static data in Lock must be lazy-initialized.
- //
- internal static int s_processorCount;
-
- //
- // m_state layout:
- //
- // bit 0: True if the lock is held, false otherwise.
- //
- // bit 1: True if we've set the event to wake a waiting thread. The waiter resets this to false when it
- // wakes up. This avoids the overhead of setting the event multiple times.
- //
- // bit 2: True if nonwaiters must not get ahead of waiters when acquiring a contended lock.
- //
- // everything else: A count of the number of threads waiting on the event.
- //
- private const int Uncontended = 0;
- private const int Locked = 1;
- private const int WaiterWoken = 2;
- private const int YieldToWaiters = 4;
- private const int WaiterCountIncrement = 8;
-
- // state of the lock
- private AutoResetEvent? _lazyEvent;
- private int _owningThreadId;
- private uint _recursionCount;
- private int _state;
- private ushort _spinLimit = SpinningNotInitialized;
- private short _wakeWatchDog;
-
- // used to transfer the state when inflating thin locks
- internal void InitializeLocked(int threadId, int recursionCount)
- {
- Debug.Assert(recursionCount == 0 || threadId != 0);
-
- _state = threadId == 0 ? Uncontended : Locked;
- _owningThreadId = threadId;
- _recursionCount = (uint)recursionCount;
- }
-
- private AutoResetEvent Event
- {
- get
- {
- if (_lazyEvent == null)
- Interlocked.CompareExchange(ref _lazyEvent, new AutoResetEvent(false), null);
-
- return _lazyEvent;
- }
- }
-
- public void Dispose()
- {
- _lazyEvent?.Dispose();
- }
-
- private static int CurrentThreadId => Environment.CurrentManagedThreadId;
-
- [MethodImpl(MethodImplOptions.NoInlining)]
- public void Acquire()
- {
- int currentThreadId = CurrentThreadId;
- if (TryAcquireOneShot(currentThreadId))
- return;
-
- //
- // Fall back to the slow path for contention
- //
- bool success = TryAcquireSlow(currentThreadId, Timeout.Infinite);
- Debug.Assert(success);
- }
-
- public bool TryAcquire(TimeSpan timeout)
- {
- return TryAcquire(WaitHandle.ToTimeoutMilliseconds(timeout));
- }
-
- public bool TryAcquire(int millisecondsTimeout)
- {
- ArgumentOutOfRangeException.ThrowIfLessThan(millisecondsTimeout, -1);
-
- int currentThreadId = CurrentThreadId;
- if (TryAcquireOneShot(currentThreadId))
- return true;
-
- //
- // Fall back to the slow path for contention
- //
- return TryAcquireSlow(currentThreadId, millisecondsTimeout, trackContentions: false);
- }
-
- internal bool TryAcquireNoSpin()
- {
- //
- // Make one quick attempt to acquire an uncontended lock
- //
- int currentThreadId = CurrentThreadId;
- if (TryAcquireOneShot(currentThreadId))
- return true;
-
- //
- // If we already own the lock, just increment the recursion count.
- //
- if (_owningThreadId == currentThreadId)
- {
- checked { _recursionCount++; }
- return true;
- }
-
- return false;
- }
-
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- internal bool TryAcquireOneShot(int currentThreadId)
- {
- int origState = _state;
- int expectedState = origState & ~(YieldToWaiters | Locked);
- int newState = origState | Locked;
- if (Interlocked.CompareExchange(ref _state, newState, expectedState) == expectedState)
- {
- Debug.Assert(_owningThreadId == 0);
- Debug.Assert(_recursionCount == 0);
- _owningThreadId = currentThreadId;
- return true;
- }
-
- return false;
- }
-
- private static unsafe void ExponentialBackoff(uint iteration)
- {
- if (iteration > 0)
- {
- // no need for much randomness here, we will just hash the stack address + iteration.
- uint rand = ((uint)&iteration + iteration) * 2654435769u;
- // set the highmost bit to ensure minimum number of spins is exponentialy increasing
- // that is in case some stack location results in a sequence of very low spin counts
- // it basically gurantees that we spin at least 1, 2, 4, 8, 16, times, and so on
- rand |= (1u << 31);
- uint spins = rand >> (byte)(32 - Math.Min(iteration, MaxExponentialBackoffBits));
- Thread.SpinWaitInternal((int)spins);
- }
- }
-
- internal bool TryAcquireSlow(int currentThreadId, int millisecondsTimeout, bool trackContentions = false)
- {
- //
- // If we already own the lock, just increment the recursion count.
- //
- if (_owningThreadId == currentThreadId)
- {
- checked { _recursionCount++; }
- return true;
- }
-
- //
- // We've already made one lock attempt at this point, so bail early if the timeout is zero.
- //
- if (millisecondsTimeout == 0)
- return false;
-
- // since we have just made an attempt to accuire and failed, do a small pause
- Thread.SpinWaitInternal(1);
-
- if (_spinLimit == SpinningNotInitialized)
- {
- // Use RhGetProcessCpuCount directly to avoid Environment.ProcessorCount->ClassConstructorRunner->Lock->Environment.ProcessorCount cycle
- if (s_processorCount == 0)
- s_processorCount = RuntimeImports.RhGetProcessCpuCount();
-
- _spinLimit = (s_processorCount > 1) ? MinSpinLimit : SpinningDisabled;
- }
-
- bool hasWaited = false;
- // we will retry after waking up
- while (true)
- {
- uint iteration = 0;
-
- // We will count when we failed to change the state of the lock and increase pauses
- // so that bursts of activity are better tolerated. This should not happen often.
- uint collisions = 0;
-
- // We will track the changes of ownership while we are trying to acquire the lock.
- int oldOwner = _owningThreadId;
- uint ownerChanged = 0;
-
- uint localSpinLimit = _spinLimit;
- // inner loop where we try acquiring the lock or registering as a waiter
- while (true)
- {
- //
- // Try to grab the lock. We may take the lock here even if there are existing waiters. This creates the possibility
- // of starvation of waiters, but it also prevents lock convoys and preempted waiters from destroying perf.
- // However, if we do not see _wakeWatchDog cleared for long enough, we go into YieldToWaiters mode to ensure some
- // waiter progress.
- //
- int oldState = _state;
- bool canAcquire = ((oldState & Locked) == 0) &&
- (hasWaited || ((oldState & YieldToWaiters) == 0));
-
- if (canAcquire)
- {
- int newState = oldState | Locked;
- if (hasWaited)
- newState = (newState - WaiterCountIncrement) & ~(WaiterWoken | YieldToWaiters);
-
- if (Interlocked.CompareExchange(ref _state, newState, oldState) == oldState)
- {
- // GOT THE LOCK!!
- if (hasWaited)
- _wakeWatchDog = 0;
-
- // now we can estimate how busy the lock is and adjust spinning accordingly
- ushort spinLimit = _spinLimit;
- if (ownerChanged != 0)
- {
- // The lock has changed ownership while we were trying to acquire it.
- // It is a signal that we might want to spin less next time.
- // Pursuing a lock that is being "stolen" by other threads is inefficient
- // due to cache misses and unnecessary sharing of state that keeps invalidating.
- if (spinLimit > MinSpinLimit)
- {
- _spinLimit = (ushort)(spinLimit - 1);
- }
- }
- else if (spinLimit < MaxSpinLimit && iteration > spinLimit / 2)
- {
- // we used more than 50% of allowed iterations, but the lock does not look very contested,
- // we can allow a bit more spinning.
- _spinLimit = (ushort)(spinLimit + 1);
- }
-
- Debug.Assert((_state | Locked) != 0);
- Debug.Assert(_owningThreadId == 0);
- Debug.Assert(_recursionCount == 0);
- _owningThreadId = currentThreadId;
- return true;
- }
- }
-
- if (iteration++ < localSpinLimit)
- {
- int newOwner = _owningThreadId;
- if (newOwner != 0 && newOwner != oldOwner)
- {
- ownerChanged++;
- oldOwner = newOwner;
- }
-
- if (canAcquire)
- {
- collisions++;
- }
-
- // We failed to acquire the lock and want to retry after a pause.
- // Ideally we will retry right when the lock becomes free, but we cannot know when that will happen.
- // We will use a pause that doubles up on every iteration. It will not be more than 2x worse
- // than the ideal guess, while minimizing the number of retries.
- // We will allow pauses up to 64~128 spinwaits, or more if there are collisions.
- ExponentialBackoff(Math.Min(iteration, 6) + collisions);
- continue;
- }
- else if (!canAcquire)
- {
- //
- // We reached our spin limit, and need to wait. Increment the waiter count.
- // Note that we do not do any overflow checking on this increment. In order to overflow,
- // we'd need to have about 1 billion waiting threads, which is inconceivable anytime in the
- // forseeable future.
- //
- int newState = oldState + WaiterCountIncrement;
- if (hasWaited)
- newState = (newState - WaiterCountIncrement) & ~WaiterWoken;
-
- if (Interlocked.CompareExchange(ref _state, newState, oldState) == oldState)
- break;
-
- collisions++;
- }
-
- ExponentialBackoff(collisions);
- }
-
- //
- // Now we wait.
- //
-
- if (trackContentions)
- {
- Monitor.IncrementLockContentionCount();
- }
-
- TimeoutTracker timeoutTracker = TimeoutTracker.Start(millisecondsTimeout);
- Debug.Assert(_state >= WaiterCountIncrement);
- bool waitSucceeded = Event.WaitOne(millisecondsTimeout);
- Debug.Assert(_state >= WaiterCountIncrement);
-
- if (!waitSucceeded)
- break;
-
- // we did not time out and will try acquiring the lock
- hasWaited = true;
- millisecondsTimeout = timeoutTracker.Remaining;
- }
-
- // We timed out. We're not going to wait again.
- {
- uint iteration = 0;
- while (true)
- {
- int oldState = _state;
- Debug.Assert(oldState >= WaiterCountIncrement);
-
- int newState = oldState - WaiterCountIncrement;
-
- // We could not have consumed a wake, or the wait would've succeeded.
- // If we are the last waiter though, we will clear WaiterWoken and YieldToWaiters
- // just so that lock would not look like contended.
- if (newState < WaiterCountIncrement)
- newState = newState & ~WaiterWoken & ~YieldToWaiters;
-
- if (Interlocked.CompareExchange(ref _state, newState, oldState) == oldState)
- return false;
-
- ExponentialBackoff(iteration++);
- }
- }
- }
-
- public bool IsAcquired
- {
- get
- {
- //
- // Compare the current owning thread ID with the current thread ID. We need
- // to read the current thread's ID before we read m_owningThreadId. Otherwise,
- // the following might happen:
- //
- // 1) We read m_owningThreadId, and get, say 42, which belongs to another thread.
- // 2) Thread 42 releases the lock, and exits.
- // 3) We call ManagedThreadId.Current. If this is the first time it's been called
- // on this thread, we'll go get a new ID. We may reuse thread 42's ID, since
- // that thread is dead.
- // 4) Now we're thread 42, and it looks like we own the lock, even though we don't.
- //
- // However, as long as we get this thread's ID first, we know it won't be reused,
- // because while we're doing this check the current thread is definitely still
- // alive.
- //
- int currentThreadId = CurrentThreadId;
- return IsAcquiredByThread(currentThreadId);
- }
- }
-
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- internal bool IsAcquiredByThread(int currentThreadId)
- {
- bool acquired = (currentThreadId == _owningThreadId);
- Debug.Assert(!acquired || (_state & Locked) != 0);
- return acquired;
- }
-
- [MethodImpl(MethodImplOptions.NoInlining)]
- public void Release()
- {
- ReleaseByThread(CurrentThreadId);
- }
-
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- internal void ReleaseByThread(int threadId)
- {
- if (threadId != _owningThreadId)
- throw new SynchronizationLockException();
-
- if (_recursionCount == 0)
- {
- ReleaseCore();
- return;
- }
-
- _recursionCount--;
- }
-
- internal uint ReleaseAll()
- {
- Debug.Assert(IsAcquired);
-
- uint recursionCount = _recursionCount;
- _recursionCount = 0;
-
- ReleaseCore();
-
- return recursionCount;
- }
-
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- private void ReleaseCore()
- {
- Debug.Assert(_recursionCount == 0);
- _owningThreadId = 0;
- int origState = Interlocked.Decrement(ref _state);
- if (origState < WaiterCountIncrement || (origState & WaiterWoken) != 0)
- {
- return;
- }
-
- //
- // We have waiters; take the slow path.
- //
- AwakeWaiterIfNeeded();
- }
-
- private void AwakeWaiterIfNeeded()
- {
- uint iteration = 0;
- while (true)
- {
- int oldState = _state;
- if (oldState >= WaiterCountIncrement && (oldState & WaiterWoken) == 0)
- {
- // there are waiters, and nobody has woken one.
- int newState = oldState | WaiterWoken;
-
- short lastWakeTicks = _wakeWatchDog;
- if (lastWakeTicks != 0 && (short)Environment.TickCount - lastWakeTicks > WaiterWatchdogTicks)
- {
- newState |= YieldToWaiters;
- }
-
- if (Interlocked.CompareExchange(ref _state, newState, oldState) == oldState)
- {
- if (lastWakeTicks == 0)
- {
- // nonzero timestamp of the last wake
- _wakeWatchDog = (short)(Environment.TickCount | 1);
- }
-
- Event.Set();
- return;
- }
- }
- else
- {
- // no need to wake a waiter.
- return;
- }
-
- ExponentialBackoff(iteration++);
- }
- }
-
- internal void Reacquire(uint previousRecursionCount)
- {
- Acquire();
- Debug.Assert(_recursionCount == 0);
- _recursionCount = previousRecursionCount;
- }
-
- internal struct TimeoutTracker
- {
- private int _start;
- private int _timeout;
-
- public static TimeoutTracker Start(int timeout)
- {
- TimeoutTracker tracker = new TimeoutTracker();
- tracker._timeout = timeout;
- if (timeout != Timeout.Infinite)
- tracker._start = Environment.TickCount;
- return tracker;
- }
-
- public int Remaining
- {
- get
- {
- if (_timeout == Timeout.Infinite)
- return Timeout.Infinite;
- int elapsed = Environment.TickCount - _start;
- if (elapsed > _timeout)
- return 0;
- return _timeout - elapsed;
- }
- }
- }
- }
-}
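
The removed implementation packed its entire state into a single int `_state`, as described in its comments. For reference, a small helper that restates that layout (illustrative only, not product code):

```csharp
// Decodes the _state layout documented in the removed file: bit 0 = Locked,
// bit 1 = WaiterWoken, bit 2 = YieldToWaiters, and the remaining bits count waiters
// in units of WaiterCountIncrement (8).
static class OldLockStateDecoder
{
    private const int Locked = 1, WaiterWoken = 2, YieldToWaiters = 4, WaiterCountIncrement = 8;

    public static (bool Locked, bool WaiterWoken, bool YieldToWaiters, int WaiterCount) Decode(int state) =>
        ((state & Locked) != 0,
         (state & WaiterWoken) != 0,
         (state & YieldToWaiters) != 0,
         state / WaiterCountIncrement);
}

// Example: Decode(17) => (Locked: true, WaiterWoken: false, YieldToWaiters: false, WaiterCount: 2).
```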
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/LockHolder.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/LockHolder.cs
deleted file mode 100644
index 784a5d0fe3555e..00000000000000
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/LockHolder.cs
+++ /dev/null
@@ -1,27 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System.Runtime.CompilerServices;
-
-namespace System.Threading
-{
- public struct LockHolder : IDisposable
- {
- private Lock _lock;
-
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- public static LockHolder Hold(Lock l)
- {
- LockHolder h;
- l.Acquire();
- h._lock = l;
- return h;
- }
-
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- public void Dispose()
- {
- _lock.Release();
- }
- }
-}
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Monitor.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Monitor.NativeAot.cs
index ce2e58a975abf0..89a8505663d307 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Monitor.NativeAot.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Monitor.NativeAot.cs
@@ -41,7 +41,8 @@ private static Condition GetCondition(object obj)
[MethodImpl(MethodImplOptions.NoInlining)]
public static void Enter(object obj)
{
- int resultOrIndex = ObjectHeader.Acquire(obj);
+ int currentThreadID = ManagedThreadId.CurrentManagedThreadIdUnchecked;
+ int resultOrIndex = ObjectHeader.Acquire(obj, currentThreadID);
if (resultOrIndex < 0)
return;
@@ -49,7 +50,7 @@ public static void Enter(object obj)
ObjectHeader.GetLockObject(obj) :
SyncTable.GetLockObject(resultOrIndex);
- TryAcquireSlow(lck, obj, Timeout.Infinite);
+ lck.TryEnterSlow(Timeout.Infinite, currentThreadID, obj);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
@@ -66,7 +67,8 @@ public static void Enter(object obj, ref bool lockTaken)
[MethodImpl(MethodImplOptions.NoInlining)]
public static bool TryEnter(object obj)
{
- int resultOrIndex = ObjectHeader.TryAcquire(obj);
+ int currentThreadID = ManagedThreadId.CurrentManagedThreadIdUnchecked;
+ int resultOrIndex = ObjectHeader.TryAcquire(obj, currentThreadID);
if (resultOrIndex < 0)
return true;
@@ -74,7 +76,13 @@ public static bool TryEnter(object obj)
return false;
Lock lck = SyncTable.GetLockObject(resultOrIndex);
- return lck.TryAcquire(0);
+
+ // The one-shot fast path is not covered by the slow path below for a zero timeout when the thread ID is
+ // initialized, so cover it here in case it wasn't already done
+ if (currentThreadID != 0 && lck.TryEnterOneShot(currentThreadID))
+ return true;
+
+ return lck.TryEnterSlow(0, currentThreadID, obj);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
@@ -92,7 +100,8 @@ public static bool TryEnter(object obj, int millisecondsTimeout)
{
ArgumentOutOfRangeException.ThrowIfLessThan(millisecondsTimeout, -1);
- int resultOrIndex = ObjectHeader.TryAcquire(obj);
+ int currentThreadID = ManagedThreadId.CurrentManagedThreadIdUnchecked;
+ int resultOrIndex = ObjectHeader.TryAcquire(obj, currentThreadID);
if (resultOrIndex < 0)
return true;
@@ -100,10 +109,12 @@ public static bool TryEnter(object obj, int millisecondsTimeout)
ObjectHeader.GetLockObject(obj) :
SyncTable.GetLockObject(resultOrIndex);
- if (millisecondsTimeout == 0)
- return lck.TryAcquireNoSpin();
+ // The one-shot fast path is not covered by the slow path below for a zero timeout when the thread ID is
+ // initialized, so cover it here in case it wasn't already done
+ if (millisecondsTimeout == 0 && currentThreadID != 0 && lck.TryEnterOneShot(currentThreadID))
+ return true;
- return TryAcquireSlow(lck, obj, millisecondsTimeout);
+ return lck.TryEnterSlow(millisecondsTimeout, currentThreadID, obj);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
@@ -159,18 +170,6 @@ public static void PulseAll(object obj)
#endregion
- #region Slow path for Entry/TryEnter methods.
-
- internal static bool TryAcquireSlow(Lock lck, object obj, int millisecondsTimeout)
- {
- using (new DebugBlockingScope(obj, DebugBlockingItemType.MonitorCriticalSection, millisecondsTimeout, out _))
- {
- return lck.TryAcquireSlow(Environment.CurrentManagedThreadId, millisecondsTimeout, trackContentions: true);
- }
- }
-
- #endregion
-
#region Debugger support
// The debugger binds to the fields below by name. Do not change any names or types without
@@ -185,14 +184,14 @@ internal static bool TryAcquireSlow(Lock lck, object obj, int millisecondsTimeou
// Different ways a thread can be blocked that the debugger will expose.
// Do not change or add members without updating the debugger code.
- private enum DebugBlockingItemType
+ internal enum DebugBlockingItemType
{
MonitorCriticalSection = 0,
MonitorEvent = 1
}
// Represents an item a thread is blocked on. This structure is allocated on the stack and accessed by the debugger.
- private struct DebugBlockingItem
+ internal struct DebugBlockingItem
{
// The object the thread is waiting on
public object _object;
@@ -207,7 +206,7 @@ private struct DebugBlockingItem
public IntPtr _next;
}
- private unsafe struct DebugBlockingScope : IDisposable
+ internal unsafe struct DebugBlockingScope : IDisposable
{
public DebugBlockingScope(object obj, DebugBlockingItemType blockingType, int timeout, out DebugBlockingItem blockingItem)
{
@@ -229,28 +228,10 @@ public void Dispose()
#region Metrics
- private static readonly ThreadInt64PersistentCounter s_lockContentionCounter = new ThreadInt64PersistentCounter();
-
- [ThreadStatic]
- private static object t_ContentionCountObject;
-
- [MethodImpl(MethodImplOptions.NoInlining)]
- private static object CreateThreadLocalContentionCountObject()
- {
- Debug.Assert(t_ContentionCountObject == null);
-
- object threadLocalContentionCountObject = s_lockContentionCounter.CreateThreadLocalCountObject();
- t_ContentionCountObject = threadLocalContentionCountObject;
- return threadLocalContentionCountObject;
- }
-
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- internal static void IncrementLockContentionCount() => ThreadInt64PersistentCounter.Increment(t_ContentionCountObject ?? CreateThreadLocalContentionCountObject());
-
/// <summary>
/// Gets the number of times there was contention upon trying to take a <see cref="Monitor"/>'s lock so far.
/// </summary>
- public static long LockContentionCount => s_lockContentionCounter.Count;
+ public static long LockContentionCount => Lock.ContentionCount;
#endregion
}
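
The added comments note that the one-shot fast path is attempted explicitly for a zero timeout, while positive timeouts go to the spinning/waiting slow path. A usage-level illustration of those two behaviors with the public Monitor API (illustrative only):

```csharp
using System;
using System.Threading;

class TryEnterDemo
{
    private static readonly object s_resource = new object();

    public static bool DoWorkIfIdle()
    {
        if (!Monitor.TryEnter(s_resource, 0))       // single non-blocking attempt
            return false;
        try { /* work */ return true; }
        finally { Monitor.Exit(s_resource); }
    }

    public static bool DoWorkWithin(TimeSpan budget)
    {
        if (!Monitor.TryEnter(s_resource, budget))  // waits up to the budget
            return false;
        try { /* work */ return true; }
        finally { Monitor.Exit(s_resource); }
    }
}
```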
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/ObjectHeader.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/ObjectHeader.cs
index 327586771fe074..d411f997ee11a4 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/ObjectHeader.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/ObjectHeader.cs
@@ -217,7 +217,7 @@ public static unsafe void SetSyncEntryIndex(int* pHeader, int syncIndex)
{
// Holding this lock implies there is at most one thread setting the sync entry index at
// any given time. We also require that the sync entry index has not been already set.
- Debug.Assert(SyncTable.s_lock.IsAcquired);
+ Debug.Assert(SyncTable.s_lock.IsHeldByCurrentThread);
int oldBits, newBits;
do
@@ -239,7 +239,7 @@ public static unsafe void SetSyncEntryIndex(int* pHeader, int syncIndex)
SyncTable.MoveThinLockToNewEntry(
syncIndex,
oldBits & SBLK_MASK_LOCK_THREADID,
- (oldBits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT);
+ (uint)((oldBits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT));
}
// Store the sync entry index
@@ -284,24 +284,22 @@ public static unsafe void SetSyncEntryIndex(int* pHeader, int syncIndex)
// 0 - failed
// syncIndex - retry with the Lock
[MethodImpl(MethodImplOptions.AggressiveInlining)]
- public static unsafe int Acquire(object obj)
+ public static unsafe int Acquire(object obj, int currentThreadID)
{
- return TryAcquire(obj, oneShot: false);
+ return TryAcquire(obj, currentThreadID, oneShot: false);
}
// -1 - success
// 0 - failed
// syncIndex - retry with the Lock
[MethodImpl(MethodImplOptions.AggressiveInlining)]
- public static unsafe int TryAcquire(object obj, bool oneShot = true)
+ public static unsafe int TryAcquire(object obj, int currentThreadID, bool oneShot = true)
{
ArgumentNullException.ThrowIfNull(obj);
Debug.Assert(!(obj is Lock),
"Do not use Monitor.Enter or TryEnter on a Lock instance; use Lock methods directly instead.");
- int currentThreadID = ManagedThreadId.CurrentManagedThreadIdUnchecked;
-
// if thread ID is uninitialized or too big, we do "uncommon" part.
if ((uint)(currentThreadID - 1) <= (uint)SBLK_MASK_LOCK_THREADID)
{
@@ -323,7 +321,7 @@ public static unsafe int TryAcquire(object obj, bool oneShot = true)
}
else if (GetSyncEntryIndex(oldBits, out int syncIndex))
{
- if (SyncTable.GetLockObject(syncIndex).TryAcquireOneShot(currentThreadID))
+ if (SyncTable.GetLockObject(syncIndex).TryEnterOneShot(currentThreadID))
{
return -1;
}
@@ -334,23 +332,25 @@ public static unsafe int TryAcquire(object obj, bool oneShot = true)
}
}
- return TryAcquireUncommon(obj, oneShot);
+ return TryAcquireUncommon(obj, currentThreadID, oneShot);
}
// handling uncommon cases here - recursive lock, contention, retries
// -1 - success
// 0 - failed
// syncIndex - retry with the Lock
- private static unsafe int TryAcquireUncommon(object obj, bool oneShot)
+ private static unsafe int TryAcquireUncommon(object obj, int currentThreadID, bool oneShot)
{
+ if (currentThreadID == 0)
+ currentThreadID = Environment.CurrentManagedThreadId;
+
// does thread ID fit?
- int currentThreadID = Environment.CurrentManagedThreadId;
if (currentThreadID > SBLK_MASK_LOCK_THREADID)
return GetSyncIndex(obj);
- // Lock.s_processorCount is lazy-initialized at fist contended acquire
- // untill then it is 0 and we assume we have multicore machine
- int retries = oneShot || Lock.s_processorCount == 1 ? 0 : 16;
+ // Lock.IsSingleProcessor gets a value that is lazy-initialized at the first contended acquire.
+ // Until then it is false and we assume we have a multicore machine.
+ int retries = oneShot || Lock.IsSingleProcessor ? 0 : 16;
// retry when the lock is owned by somebody else.
// this loop will spinwait between iterations.
@@ -422,9 +422,12 @@ private static unsafe int TryAcquireUncommon(object obj, bool oneShot)
}
}
- // spin a bit before retrying (1 spinwait is roughly 35 nsec)
- // the object is not pinned here
- Thread.SpinWaitInternal(i);
+ if (retries != 0)
+ {
+ // spin a bit before retrying (1 spinwait is roughly 35 nsec)
+ // the object is not pinned here
+ Thread.SpinWaitInternal(i);
+ }
}
// owned by somebody else
@@ -481,7 +484,7 @@ public static unsafe void Release(object obj)
}
}
- fatLock.ReleaseByThread(currentThreadID);
+ fatLock.Exit(currentThreadID);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
@@ -510,7 +513,7 @@ public static unsafe bool IsAcquired(object obj)
if (GetSyncEntryIndex(oldBits, out int syncIndex))
{
- return SyncTable.GetLockObject(syncIndex).IsAcquiredByThread(currentThreadID);
+ return SyncTable.GetLockObject(syncIndex).GetIsHeldByCurrentThread(currentThreadID);
}
// someone else owns or noone.
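
The hunks above manipulate the thin-lock fields packed into the object header word (thread ID and recursion level, with inflation to a SyncTable entry when they overflow). A sketch of how those fields are unpacked; the mask widths and shift below are assumptions for illustration (10 bits of thread ID with the recursion level above it), and the authoritative constants live in ObjectHeader.cs:

```csharp
static class ThinLockBitsSketch
{
    private const int SBLK_MASK_LOCK_THREADID = 0x3FF;                          // assumed: 10 bits
    private const int SBLK_RECLEVEL_SHIFT = 10;                                 // assumed shift
    private const int SBLK_MASK_LOCK_RECLEVEL = 0x3F << SBLK_RECLEVEL_SHIFT;    // assumed: 6 bits

    // Mirrors the (uint) conversion added for MoveThinLockToNewEntry above.
    public static (int ThreadId, uint RecursionLevel) Decode(int headerBits) =>
        (headerBits & SBLK_MASK_LOCK_THREADID,
         (uint)((headerBits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT));
}
```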
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/SyncTable.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/SyncTable.cs
index de07c986834ec1..02d7b4167ca6b2 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/SyncTable.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/SyncTable.cs
@@ -106,7 +106,7 @@ public static unsafe int AssignEntry(object obj, int* pHeader)
try
{
- using (LockHolder.Hold(s_lock))
+ using (s_lock.EnterScope())
{
// After acquiring the lock check whether another thread already assigned the sync entry
if (ObjectHeader.GetSyncEntryIndex(*pHeader, out int syncIndex))
@@ -172,7 +172,7 @@ public static unsafe int AssignEntry(object obj, int* pHeader)
///
private static void Grow()
{
- Debug.Assert(s_lock.IsAcquired);
+ Debug.Assert(s_lock.IsHeldByCurrentThread);
int oldSize = s_entries.Length;
int newSize = CalculateNewSize(oldSize);
@@ -242,7 +242,7 @@ public static int SetHashCode(int syncIndex, int hashCode)
// Acquire the lock to ensure we are updating the latest version of s_entries. This
// lock may be avoided if we store the hash code and Monitor synchronization data in
// the same object accessed by a reference.
- using (LockHolder.Hold(s_lock))
+ using (s_lock.EnterScope())
{
int currentHash = s_entries[syncIndex].HashCode;
if (currentHash != 0)
@@ -260,7 +260,7 @@ public static int SetHashCode(int syncIndex, int hashCode)
///
public static void MoveHashCodeToNewEntry(int syncIndex, int hashCode)
{
- Debug.Assert(s_lock.IsAcquired);
+ Debug.Assert(s_lock.IsHeldByCurrentThread);
Debug.Assert((0 < syncIndex) && (syncIndex < s_unusedEntryIndex));
s_entries[syncIndex].HashCode = hashCode;
}
@@ -269,9 +269,9 @@ public static void MoveHashCodeToNewEntry(int syncIndex, int hashCode)
/// Initializes the Lock assuming the caller holds s_lock. Use for not yet
/// published entries only.
///
- public static void MoveThinLockToNewEntry(int syncIndex, int threadId, int recursionLevel)
+ public static void MoveThinLockToNewEntry(int syncIndex, int threadId, uint recursionLevel)
{
- Debug.Assert(s_lock.IsAcquired);
+ Debug.Assert(s_lock.IsHeldByCurrentThread);
Debug.Assert((0 < syncIndex) && (syncIndex < s_unusedEntryIndex));
s_entries[syncIndex].Lock.InitializeLocked(threadId, recursionLevel);
@@ -305,7 +305,7 @@ public DeadEntryCollector()
Lock? lockToDispose = default;
DependentHandle dependentHandleToDispose = default;
- using (LockHolder.Hold(s_lock))
+ using (s_lock.EnterScope())
{
ref Entry entry = ref s_entries[_index];
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.Windows.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.Windows.cs
index 467e13cfd60338..7d8c2f2a2f290f 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.Windows.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.Windows.cs
@@ -251,7 +251,7 @@ private bool SetApartmentStateUnchecked(ApartmentState state, bool throwOnError)
if (this != CurrentThread)
{
- using (LockHolder.Hold(_lock))
+ using (_lock.EnterScope())
{
if (HasStarted())
throw new ThreadStateException();
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.cs
index cda114276746c8..ef35ed5358fafc 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.cs
@@ -230,7 +230,7 @@ public ThreadPriority Priority
}
// Prevent race condition with starting this thread
- using (LockHolder.Hold(_lock))
+ using (_lock.EnterScope())
{
if (HasStarted() && !SetPriorityLive(value))
{
@@ -358,7 +358,7 @@ public static void SpinWait(int iterations)
private void StartCore()
{
- using (LockHolder.Hold(_lock))
+ using (_lock.EnterScope())
{
if (!GetThreadStateBit(ThreadState.Unstarted))
{
diff --git a/src/coreclr/nativeaot/System.Private.DisabledReflection/src/System.Private.DisabledReflection.csproj b/src/coreclr/nativeaot/System.Private.DisabledReflection/src/System.Private.DisabledReflection.csproj
index 163f809979cc42..25af7c63691ee8 100644
--- a/src/coreclr/nativeaot/System.Private.DisabledReflection/src/System.Private.DisabledReflection.csproj
+++ b/src/coreclr/nativeaot/System.Private.DisabledReflection/src/System.Private.DisabledReflection.csproj
@@ -2,6 +2,8 @@
false
+
+ true
diff --git a/src/coreclr/nativeaot/System.Private.Reflection.Execution/src/System.Private.Reflection.Execution.csproj b/src/coreclr/nativeaot/System.Private.Reflection.Execution/src/System.Private.Reflection.Execution.csproj
index 5527d5920f6018..2610014e0d3c14 100644
--- a/src/coreclr/nativeaot/System.Private.Reflection.Execution/src/System.Private.Reflection.Execution.csproj
+++ b/src/coreclr/nativeaot/System.Private.Reflection.Execution/src/System.Private.Reflection.Execution.csproj
@@ -8,6 +8,8 @@
$(CompilerCommonPath)\Internal\NativeFormat
false
+
+ true
diff --git a/src/coreclr/nativeaot/System.Private.StackTraceMetadata/src/System.Private.StackTraceMetadata.csproj b/src/coreclr/nativeaot/System.Private.StackTraceMetadata/src/System.Private.StackTraceMetadata.csproj
index cc44031e9ac552..620a94d91c40ab 100644
--- a/src/coreclr/nativeaot/System.Private.StackTraceMetadata/src/System.Private.StackTraceMetadata.csproj
+++ b/src/coreclr/nativeaot/System.Private.StackTraceMetadata/src/System.Private.StackTraceMetadata.csproj
@@ -2,6 +2,8 @@
false
+
+ true
diff --git a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericMethodsLookup.cs b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericMethodsLookup.cs
index 9fe72daf5045db..228da851da728f 100644
--- a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericMethodsLookup.cs
+++ b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericMethodsLookup.cs
@@ -274,7 +274,7 @@ public bool TryGetGenericVirtualMethodPointer(InstantiatedMethod method, out Int
if (!TryLookupGenericMethodDictionary(new MethodDescBasedGenericMethodLookup(method), out dictionaryPointer))
{
- using (LockHolder.Hold(_typeLoaderLock))
+ using (_typeLoaderLock.EnterScope())
{
// Now that we hold the lock, we may find that existing types can now find
// their associated RuntimeTypeHandle. Flush the type builder states as a way
@@ -297,7 +297,7 @@ private bool TryGetDynamicGenericMethodDictionary(GenericMethodLookupData lookup
{
result = IntPtr.Zero;
- using (LockHolder.Hold(_dynamicGenericsLock))
+ using (_dynamicGenericsLock.EnterScope())
{
GenericMethodEntry entry;
if (!_dynamicGenericMethods.TryGetValue(lookupData, out entry))
@@ -349,7 +349,7 @@ private bool TryGetDynamicGenericMethodComponents(IntPtr methodDictionary, out R
methodNameAndSignature = null;
genericMethodArgumentHandles = null;
- using (LockHolder.Hold(_dynamicGenericsLock))
+ using (_dynamicGenericsLock.EnterScope())
{
GenericMethodEntry entry;
if (!_dynamicGenericMethodComponents.TryGetValue(methodDictionary, out entry))
diff --git a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericTypesLookup.cs b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericTypesLookup.cs
index 2db0a24b5586d9..257803bd9026ef 100644
--- a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericTypesLookup.cs
+++ b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericTypesLookup.cs
@@ -217,7 +217,7 @@ public bool TryLookupConstructedGenericTypeForComponents(RuntimeTypeHandle gener
public bool TryLookupConstructedLazyDictionaryForContext(IntPtr context, IntPtr signature, out IntPtr dictionary)
{
- Debug.Assert(_typeLoaderLock.IsAcquired);
+ Debug.Assert(_typeLoaderLock.IsHeldByCurrentThread);
return _lazyGenericDictionaries.TryGetValue(new LazyDictionaryContext { _context = context, _signature = signature }, out dictionary);
}
@@ -226,7 +226,7 @@ private unsafe bool TryGetDynamicGenericTypeForComponents(GenericTypeLookupData
{
runtimeTypeHandle = default(RuntimeTypeHandle);
- using (LockHolder.Hold(_dynamicGenericsLock))
+ using (_dynamicGenericsLock.EnterScope())
{
GenericTypeEntry entry;
if (!_dynamicGenericTypes.TryGetValue(lookupData, out entry))
diff --git a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericsRegistration.cs b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericsRegistration.cs
index 8c9922d1a29c73..cebd5d917896fe 100644
--- a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericsRegistration.cs
+++ b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericsRegistration.cs
@@ -28,7 +28,7 @@ internal struct DynamicGenericsRegistrationData
internal void RegisterDynamicGenericTypesAndMethods(DynamicGenericsRegistrationData registrationData)
{
- using (LockHolder.Hold(_dynamicGenericsLock))
+ using (_dynamicGenericsLock.EnterScope())
{
int registeredTypesCount = 0;
int registeredMethodsCount = 0;
@@ -130,7 +130,7 @@ internal void RegisterDynamicGenericTypesAndMethods(DynamicGenericsRegistrationD
public void RegisterConstructedLazyDictionaryForContext(IntPtr context, IntPtr signature, IntPtr dictionary)
{
- Debug.Assert(_typeLoaderLock.IsAcquired);
+ Debug.Assert(_typeLoaderLock.IsHeldByCurrentThread);
_lazyGenericDictionaries.Add(new LazyDictionaryContext { _context = context, _signature = signature }, dictionary);
}
}
diff --git a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.LdTokenResultLookup.cs b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.LdTokenResultLookup.cs
index 8cc5e5844218fa..ad8c68b171e5c0 100644
--- a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.LdTokenResultLookup.cs
+++ b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.LdTokenResultLookup.cs
@@ -57,7 +57,7 @@ private static unsafe string GetStringFromMemoryInNativeFormat(IntPtr pointerToD
/// </summary>
public IntPtr GetNativeFormatStringForString(string str)
{
- using (LockHolder.Hold(_typeLoaderLock))
+ using (_typeLoaderLock.EnterScope())
{
IntPtr result;
if (_nativeFormatStrings.TryGetValue(str, out result))
diff --git a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.StaticsLookup.cs b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.StaticsLookup.cs
index 4a28836cb33ce9..28cd9bf3276380 100644
--- a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.StaticsLookup.cs
+++ b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.StaticsLookup.cs
@@ -145,7 +145,7 @@ public IntPtr TryGetThreadStaticFieldData(RuntimeTypeHandle runtimeTypeHandle)
public IntPtr GetThreadStaticGCDescForDynamicType(TypeManagerHandle typeManagerHandle, uint index)
{
- using (LockHolder.Hold(_threadStaticsLock))
+ using (_threadStaticsLock.EnterScope())
{
return _dynamicGenericsThreadStaticDescs[typeManagerHandle.GetIntPtrUNSAFE()][index];
}
@@ -168,7 +168,7 @@ public void RegisterDynamicThreadStaticsInfo(RuntimeTypeHandle runtimeTypeHandle
IntPtr typeManager = runtimeTypeHandle.GetTypeManager().GetIntPtrUNSAFE();
- _threadStaticsLock.Acquire();
+ _threadStaticsLock.Enter();
try
{
if (!_dynamicGenericsThreadStaticDescs.TryGetValue(typeManager, out LowLevelDictionary<uint, IntPtr> gcDescs))
@@ -188,7 +188,7 @@ public void RegisterDynamicThreadStaticsInfo(RuntimeTypeHandle runtimeTypeHandle
}
}
- _threadStaticsLock.Release();
+ _threadStaticsLock.Exit();
}
}
#endregion
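Where a using scope is not convenient (as in RegisterDynamicThreadStaticsInfo above), the lock is entered and exited explicitly; a hedged sketch of that shape, with illustrative member names not taken from the patch:

using System.Threading;

internal sealed class ThreadStaticsRegistry
{
    private readonly Lock _threadStaticsLock = new Lock();
    private int _registeredCount;

    public void Register()                      // illustrative method, not from the patch
    {
        _threadStaticsLock.Enter();             // replaces Acquire()
        try
        {
            _registeredCount++;
        }
        finally
        {
            _threadStaticsLock.Exit();          // replaces Release(); throws SynchronizationLockException if the caller does not hold the lock
        }
    }
}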
diff --git a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.cs b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.cs
index ab037c2b41e39c..1bd3cbd3672fac 100644
--- a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.cs
+++ b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.cs
@@ -144,13 +144,13 @@ internal static void Initialize()
public void VerifyTypeLoaderLockHeld()
{
- if (!_typeLoaderLock.IsAcquired)
+ if (!_typeLoaderLock.IsHeldByCurrentThread)
Environment.FailFast("TypeLoaderLock not held");
}
public void RunUnderTypeLoaderLock(Action action)
{
- using (LockHolder.Hold(_typeLoaderLock))
+ using (_typeLoaderLock.EnterScope())
{
action();
}
@@ -160,7 +160,7 @@ public IntPtr GenericLookupFromContextAndSignature(IntPtr context, IntPtr signat
{
IntPtr result;
- using (LockHolder.Hold(_typeLoaderLock))
+ using (_typeLoaderLock.EnterScope())
{
try
{
@@ -191,7 +191,7 @@ private bool EnsureTypeHandleForType(TypeDesc type)
{
if (type.RuntimeTypeHandle.IsNull())
{
- using (LockHolder.Hold(_typeLoaderLock))
+ using (_typeLoaderLock.EnterScope())
{
// Now that we hold the lock, we may find that existing types can now find
// their associated RuntimeTypeHandle. Flush the type builder states as a way
@@ -340,7 +340,7 @@ public bool TryGetConstructedGenericTypeForComponents(RuntimeTypeHandle genericT
if (TryLookupConstructedGenericTypeForComponents(genericTypeDefinitionHandle, genericTypeArgumentHandles, out runtimeTypeHandle))
return true;
- using (LockHolder.Hold(_typeLoaderLock))
+ using (_typeLoaderLock.EnterScope())
{
return TypeBuilder.TryBuildGenericType(genericTypeDefinitionHandle, genericTypeArgumentHandles, out runtimeTypeHandle);
}
@@ -351,7 +351,7 @@ public bool TryGetFunctionPointerTypeForComponents(RuntimeTypeHandle returnTypeH
if (TryLookupFunctionPointerTypeForComponents(returnTypeHandle, parameterHandles, isUnmanaged, out runtimeTypeHandle))
return true;
- using (LockHolder.Hold(_typeLoaderLock))
+ using (_typeLoaderLock.EnterScope())
{
return TypeBuilder.TryBuildFunctionPointerType(returnTypeHandle, parameterHandles, isUnmanaged, out runtimeTypeHandle);
}
@@ -390,7 +390,7 @@ public bool TryGetArrayTypeForElementType(RuntimeTypeHandle elementTypeHandle, b
return true;
}
- using (LockHolder.Hold(_typeLoaderLock))
+ using (_typeLoaderLock.EnterScope())
{
if (isMdArray && (rank < MDArray.MinRank) && (rank > MDArray.MaxRank))
{
@@ -432,7 +432,7 @@ public bool TryGetPointerTypeForTargetType(RuntimeTypeHandle pointeeTypeHandle,
if (TryGetPointerTypeForTargetType_LookupOnly(pointeeTypeHandle, out pointerTypeHandle))
return true;
- using (LockHolder.Hold(_typeLoaderLock))
+ using (_typeLoaderLock.EnterScope())
{
if (TypeSystemContext.PointerTypesCache.TryGetValue(pointeeTypeHandle, out pointerTypeHandle))
return true;
@@ -461,7 +461,7 @@ public bool TryGetByRefTypeForTargetType(RuntimeTypeHandle pointeeTypeHandle, ou
if (TryGetByRefTypeForTargetType_LookupOnly(pointeeTypeHandle, out byRefTypeHandle))
return true;
- using (LockHolder.Hold(_typeLoaderLock))
+ using (_typeLoaderLock.EnterScope())
{
if (TypeSystemContext.ByRefTypesCache.TryGetValue(pointeeTypeHandle, out byRefTypeHandle))
return true;
@@ -525,7 +525,7 @@ public bool TryGetGenericMethodDictionaryForComponents(RuntimeTypeHandle declari
return true;
}
- using (LockHolder.Hold(_typeLoaderLock))
+ using (_typeLoaderLock.EnterScope())
{
bool success = TypeBuilder.TryBuildGenericMethod(methodBeingLoaded, out methodDictionary);
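The TypeLoaderEnvironment hunks above repeat one pattern: a lock-free lookup first, then EnterScope around the build path with a re-check. A minimal sketch of that pattern under assumed, simplified types (the real code uses the runtime's lock-free hash tables and TypeBuilder, not a ConcurrentDictionary):

using System.Collections.Concurrent;
using System.Threading;

internal sealed class HandleCache               // illustrative stand-in, not from the patch
{
    private readonly Lock _typeLoaderLock = new Lock();
    private readonly ConcurrentDictionary<string, object> _published = new ConcurrentDictionary<string, object>();

    public object GetOrBuild(string key)
    {
        // Fast path: look up an already-published result without taking the lock.
        if (_published.TryGetValue(key, out object existing))
            return existing;

        using (_typeLoaderLock.EnterScope())    // slow path: build under the type-loader lock
        {
            // Re-check after entering: another thread may have built and published it meanwhile.
            if (_published.TryGetValue(key, out existing))
                return existing;

            object built = new object();        // stands in for TypeBuilder.TryBuild*
            _published[key] = built;
            return built;
        }
    }
}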
diff --git a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeSystemContextFactory.cs b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeSystemContextFactory.cs
index 4d3fa768fdd2d6..b0175eaaa48034 100644
--- a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeSystemContextFactory.cs
+++ b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeSystemContextFactory.cs
@@ -22,7 +22,7 @@ public static class TypeSystemContextFactory
public static TypeSystemContext Create()
{
- using (LockHolder.Hold(s_lock))
+ using (s_lock.EnterScope())
{
TypeSystemContext context = (TypeSystemContext)s_cachedContext.Target;
if (context != null)
diff --git a/src/coreclr/nativeaot/System.Private.TypeLoader/src/System.Private.TypeLoader.csproj b/src/coreclr/nativeaot/System.Private.TypeLoader/src/System.Private.TypeLoader.csproj
index 61799cfc159b1f..b25a5cfcf7d364 100644
--- a/src/coreclr/nativeaot/System.Private.TypeLoader/src/System.Private.TypeLoader.csproj
+++ b/src/coreclr/nativeaot/System.Private.TypeLoader/src/System.Private.TypeLoader.csproj
@@ -9,6 +9,8 @@
GENERICS_FORCE_USG;$(DefineConstants)
false
+
+ true
diff --git a/src/libraries/Common/src/Interop/Unix/System.Native/Interop.Threading.cs b/src/libraries/Common/src/Interop/Unix/System.Native/Interop.Threading.cs
index 9fe84d213e290c..edcdaf3ee9f852 100644
--- a/src/libraries/Common/src/Interop/Unix/System.Native/Interop.Threading.cs
+++ b/src/libraries/Common/src/Interop/Unix/System.Native/Interop.Threading.cs
@@ -11,5 +11,13 @@ internal unsafe partial class Sys
[LibraryImport(Libraries.SystemNative, EntryPoint = "SystemNative_CreateThread")]
[return: MarshalAs(UnmanagedType.Bool)]
internal static unsafe partial bool CreateThread(IntPtr stackSize, delegate* unmanaged<IntPtr, IntPtr> startAddress, IntPtr parameter);
+
+#if TARGET_OSX
+ [LibraryImport(Libraries.SystemNative, EntryPoint = "SystemNative_GetUInt64OSThreadId")]
+ internal static unsafe partial ulong GetUInt64OSThreadId();
+#else
+ [LibraryImport(Libraries.SystemNative, EntryPoint = "SystemNative_TryGetUInt32OSThreadId")]
+ internal static unsafe partial uint TryGetUInt32OSThreadId();
+#endif
}
}
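The new System.Native entries expose an OS thread id with a different width per platform; a hedged sketch of a caller, assuming the same TARGET_OSX define and a hypothetical wrapper type not present in the patch:

internal static class ThreadIdHelper               // hypothetical wrapper, not part of the patch
{
    internal static ulong GetCurrentOSThreadId()
    {
#if TARGET_OSX
        return Interop.Sys.GetUInt64OSThreadId();    // 64-bit id on macOS
#else
        return Interop.Sys.TryGetUInt32OSThreadId(); // 32-bit id elsewhere; assumed to return 0 when unavailable
#endif
    }
}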
diff --git a/src/libraries/System.ComponentModel.Composition.Registration/src/System.ComponentModel.Composition.Registration.csproj b/src/libraries/System.ComponentModel.Composition.Registration/src/System.ComponentModel.Composition.Registration.csproj
index b8ae87f03d00b9..66378b9fac149a 100644
--- a/src/libraries/System.ComponentModel.Composition.Registration/src/System.ComponentModel.Composition.Registration.csproj
+++ b/src/libraries/System.ComponentModel.Composition.Registration/src/System.ComponentModel.Composition.Registration.csproj
@@ -27,8 +27,8 @@ System.ComponentModel.Composition.Registration.ExportBuilder
-
+
diff --git a/src/libraries/System.ComponentModel.Composition.Registration/src/System/ComponentModel/Composition/Registration/RegistrationBuilder.cs b/src/libraries/System.ComponentModel.Composition.Registration/src/System/ComponentModel/Composition/Registration/RegistrationBuilder.cs
index c684ababe674eb..5397b13063aa4a 100644
--- a/src/libraries/System.ComponentModel.Composition.Registration/src/System/ComponentModel/Composition/Registration/RegistrationBuilder.cs
+++ b/src/libraries/System.ComponentModel.Composition.Registration/src/System/ComponentModel/Composition/Registration/RegistrationBuilder.cs
@@ -20,7 +20,7 @@ internal sealed class InnerRC : ReflectionContext
private static readonly ReflectionContext s_inner = new InnerRC();
private static readonly List