@@ -14,6 +14,7 @@

 #include "finalizerthread.h"
 #include "dbginterface.h"
+#include <minipal/time.h>

 #define HIJACK_NONINTERRUPTIBLE_THREADS

@@ -2188,6 +2189,8 @@ void Thread::RareDisablePreemptiveGC()
 #if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX)
         ResetThreadState(Thread::TS_GCSuspendRedirected);
 #endif
+        // Make sure this is cleared, in case a signal was lost or we somehow did not act on it.
+        m_hasPendingActivation = false;

         DWORD status = GCHeapUtilities::GetGCHeap()->WaitUntilGCComplete();
         if (status != S_OK)
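Context for this backstop: `m_hasPendingActivation` is set when an activation signal is sent to the thread (see the `InjectActivation` hunk below) and is normally cleared by the handler that acts on it. Because signal delivery is not guaranteed, a stale flag is also dropped when the thread returns to cooperative mode. A minimal sketch of the idea, assuming the flag is a plain atomic (all names here are hypothetical):

```cpp
#include <atomic>

// Hypothetical stand-in for the thread's pending-activation flag: set when
// an activation signal is sent, cleared by the handler that acts on it.
static std::atomic<bool> s_hasPendingActivation{false};

// Backstop sketch: on re-entering cooperative mode after a GC suspension,
// drop any stale flag. The signal was either handled already or was lost
// and will never arrive, so no activation is outstanding either way.
static void ClearStalePendingActivation()
{
    s_hasPendingActivation.store(false, std::memory_order_relaxed);
}
```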
@@ -3207,44 +3210,6 @@ COR_PRF_SUSPEND_REASON GCSuspendReasonToProfSuspendReason(ThreadSuspend::SUSPEND
 }
 #endif // PROFILING_SUPPORTED

-static int64_t QueryPerformanceCounter()
-{
-    LARGE_INTEGER ts;
-    QueryPerformanceCounter(&ts);
-    return ts.QuadPart;
-}
-
-static int64_t QueryPerformanceFrequency()
-{
-    LARGE_INTEGER ts;
-    QueryPerformanceFrequency(&ts);
-    return ts.QuadPart;
-}
-
-// exponential spinwait with an approximate time limit for waiting in microsecond range.
-// when iteration == -1, only usecLimit is used
-void SpinWait(int iteration, int usecLimit)
-{
-    int64_t startTicks = QueryPerformanceCounter();
-    int64_t ticksPerSecond = QueryPerformanceFrequency();
-    int64_t endTicks = startTicks + (usecLimit * ticksPerSecond) / 1000000;
-
-    int l = iteration >= 0 ? min(iteration, 30) : 30;
-    for (int i = 0; i < l; i++)
-    {
-        for (int j = 0; j < (1 << i); j++)
-        {
-            System_YieldProcessor();
-        }
-
-        int64_t currentTicks = QueryPerformanceCounter();
-        if (currentTicks > endTicks)
-        {
-            break;
-        }
-    }
-}
-
 // ************************************************************************************
 //
 // SuspendRuntime is responsible for ensuring that all managed threads reach a
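The removed `SpinWait` and QPC wrappers are superseded by `minipal_microdelay` from `<minipal/time.h>`. Judging by the call sites below, it busy-waits for roughly the requested number of microseconds and accumulates the time spent into an optional counter; the real implementation lives in minipal, so the following is only a sketch of the assumed semantics (`MicrodelaySketch` is a hypothetical name):

```cpp
#include <chrono>
#include <cstdint>

// Sketch of the assumed minipal_microdelay semantics: spin for about
// `usecs` microseconds and, if `usecsSinceYield` is non-null, add the time
// actually spent to the caller's running total so it can decide when to
// yield the timeslice.
static void MicrodelaySketch(uint32_t usecs, uint32_t* usecsSinceYield)
{
    auto start = std::chrono::steady_clock::now();
    auto limit = std::chrono::microseconds(usecs);

    std::chrono::steady_clock::duration elapsed{};
    do
    {
        // A production version would issue a CPU pause hint here
        // (YieldProcessor / _mm_pause) to be polite while spinning.
        elapsed = std::chrono::steady_clock::now() - start;
    } while (elapsed < limit);

    if (usecsSinceYield != nullptr)
    {
        *usecsSinceYield += (uint32_t)std::chrono::duration_cast<
            std::chrono::microseconds>(elapsed).count();
    }
}
```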
@@ -3335,16 +3300,14 @@ void ThreadSuspend::SuspendAllThreads()
     // See VSW 475315 and 488918 for details.
     ::FlushProcessWriteBuffers();

-    int retries = 0;
-    int prevRemaining = 0;
-    int remaining = 0;
-    bool observeOnly = false;
+    int prevRemaining = INT32_MAX;
+    bool observeOnly = true;
+    uint32_t rehijackDelay = 8;
+    uint32_t usecsSinceYield = 0;

     while (true)
     {
-        prevRemaining = remaining;
-        remaining = 0;
-
+        int remaining = 0;
         Thread* pTargetThread = NULL;
         while ((pTargetThread = ThreadStore::GetThreadList(pTargetThread)) != NULL)
         {
@@ -3361,30 +3324,42 @@ void ThreadSuspend::SuspendAllThreads()
             }
         }

-        if (!remaining)
+        if (remaining == 0)
             break;

         // if we see progress or have just done a hijacking pass
         // do not hijack in the next iteration
         if (remaining < prevRemaining || !observeOnly)
         {
             // 5 usec delay, then check for more progress
-            SpinWait(-1, 5);
+            minipal_microdelay(5, &usecsSinceYield);
             observeOnly = true;
         }
         else
         {
-            SpinWait(retries++, 100);
+            minipal_microdelay(rehijackDelay, &usecsSinceYield);
             observeOnly = false;

-            // make sure our spining is not starving other threads, but not too often,
-            // this can cause a 1-15 msec delay, depending on OS, and that is a lot while
-            // very rarely needed, since threads are supposed to be releasing their CPUs
-            if ((retries & 127) == 0)
+            // Double the rehijack delay in case we are rehijacking too often,
+            // up to 100 usec, as that should be enough to make progress.
+            if (rehijackDelay < 100)
             {
-                SwitchToThread();
+                rehijackDelay *= 2;
             }
         }
+
+        prevRemaining = remaining;
+
+        // If we see 1 msec of uninterrupted wait, it is a concern.
+        // Since we are stopping threads, there should be free cores to run on. Perhaps
+        // some thread that we need to stop needs to run on the same core as ours.
+        // Let's yield the timeslice to make sure such threads can run.
+        // We will not do this often though, since this can introduce arbitrary delays.
+        if (usecsSinceYield > 1000)
+        {
+            SwitchToThread();
+            usecsSinceYield = 0;
+        }
     }

 #if defined(TARGET_ARM) || defined(TARGET_ARM64)
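Taken together, the loop now starts with an observe-only pass, rehijacks only when a pass shows no progress, doubles the rehijack delay up to a 100 usec cap, and yields the timeslice after about 1 msec of uninterrupted spinning. A condensed sketch of that pacing logic, with `CountRemainingAndMaybeHijack` and `YieldTimeslice` as hypothetical stand-ins for the thread-list walk and `SwitchToThread`:

```cpp
#include <climits>
#include <cstdint>

int  CountRemainingAndMaybeHijack(bool observeOnly); // stand-in, declared only
void YieldTimeslice();                               // stand-in for SwitchToThread
void MicrodelaySketch(uint32_t usecs, uint32_t* usecsSinceYield);

void SuspendLoopSketch()
{
    int prevRemaining = INT_MAX;   // nothing measured yet, so any count is "progress"
    bool observeOnly = true;       // the first pass only observes
    uint32_t rehijackDelay = 8;    // usec; doubled up to 100 while no progress is seen
    uint32_t usecsSinceYield = 0;  // spin time accumulated since the last yield

    while (true)
    {
        int remaining = CountRemainingAndMaybeHijack(observeOnly);
        if (remaining == 0)
            break;

        if (remaining < prevRemaining || !observeOnly)
        {
            // Progress was made, or we just hijacked: take a short 5 usec
            // breather and re-check without hijacking again.
            MicrodelaySketch(5, &usecsSinceYield);
            observeOnly = true;
        }
        else
        {
            // No progress: wait longer, then allow another hijacking pass,
            // backing off so we do not rehijack too often.
            MicrodelaySketch(rehijackDelay, &usecsSinceYield);
            observeOnly = false;
            if (rehijackDelay < 100)
                rehijackDelay *= 2;
        }

        prevRemaining = remaining;

        // After ~1 msec of uninterrupted spinning, yield the timeslice so a
        // target thread that shares this core has a chance to run.
        if (usecsSinceYield > 1000)
        {
            YieldTimeslice();
            usecsSinceYield = 0;
        }
    }
}
```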
@@ -5937,7 +5912,13 @@ bool Thread::InjectActivation(ActivationReason reason)
         if (hThread != INVALID_HANDLE_VALUE)
         {
             m_hasPendingActivation = true;
-            return ::PAL_InjectActivation(hThread);
+            BOOL success = ::PAL_InjectActivation(hThread);
+            if (!success)
+            {
+                m_hasPendingActivation = false;
+            }
+
+            return success;
         }
     }

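The fix here keeps `m_hasPendingActivation` truthful when `PAL_InjectActivation` fails: the flag is set optimistically before the signal is sent, so the handler can never observe it unset, and is rolled back if the send did not happen. A sketch of the pattern, with `InjectSignal` as a hypothetical stand-in for the PAL call:

```cpp
#include <atomic>

// Sketch of the set-then-roll-back handshake around signal injection.
struct ThreadSketch
{
    std::atomic<bool> hasPendingActivation{false};

    bool InjectSignal(); // hypothetical stand-in for ::PAL_InjectActivation

    bool InjectActivation()
    {
        // Set the flag before sending so the activation handler, which runs
        // asynchronously, can never see it unset.
        hasPendingActivation.store(true, std::memory_order_release);

        bool success = InjectSignal();
        if (!success)
        {
            // No signal is in flight; roll back so the thread does not look
            // permanently "pending" (RareDisablePreemptiveGC above also
            // clears stale flags as a backstop).
            hasPendingActivation.store(false, std::memory_order_release);
        }
        return success;
    }
};
```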