@@ -402,9 +402,69 @@ void TTableLocks::RemoveWriteLock(TLockInfo* lock) {
 
 // TLockLocker
 
+namespace {
+
+static constexpr ui64 DefaultLockLimit() {
+    // Valgrind and sanitizers are too slow
+    // Some tests cannot exhaust default limit in under 5 minutes
+    return NValgrind::PlainOrUnderValgrind(
+        NSan::PlainOrUnderSanitizer(
+            20000,
+            1000),
+        1000);
+}
+
+static constexpr ui64 DefaultLockRangesLimit() {
+    return 50000;
+}
+
+static constexpr ui64 DefaultTotalRangesLimit() {
+    return 1000000;
+}
+
+static std::atomic<ui64> g_LockLimit{ DefaultLockLimit() };
+static std::atomic<ui64> g_LockRangesLimit{ DefaultLockRangesLimit() };
+static std::atomic<ui64> g_TotalRangesLimit{ DefaultTotalRangesLimit() };
+
+} // namespace
+
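+// The limits live in process-wide atomics so tests can override them at
+// runtime; relaxed ordering suffices here, since each limit is an independent
+// scalar with no ordering implied against other shared state.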
+ui64 TLockLocker::LockLimit() {
+    return g_LockLimit.load(std::memory_order_relaxed);
+}
+
+ui64 TLockLocker::LockRangesLimit() {
+    return g_LockRangesLimit.load(std::memory_order_relaxed);
+}
+
+ui64 TLockLocker::TotalRangesLimit() {
+    return g_TotalRangesLimit.load(std::memory_order_relaxed);
+}
+
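+// Each Override* helper below swaps in a new limit and returns an opaque
+// RAII guard: the shared_ptr deleter restores the previous value once the
+// guard is dropped, so a test can scope an override to a block.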
+std::shared_ptr<void> TLockLocker::OverrideLockLimit(ui64 newLimit) {
+    ui64 oldLimit = g_LockLimit.exchange(newLimit, std::memory_order_relaxed);
+    return std::shared_ptr<void>(nullptr, [oldLimit](void*) {
+        g_LockLimit.store(oldLimit, std::memory_order_relaxed);
+    });
+}
+
+std::shared_ptr<void> TLockLocker::OverrideLockRangesLimit(ui64 newLimit) {
+    ui64 oldLimit = g_LockRangesLimit.exchange(newLimit, std::memory_order_relaxed);
+    return std::shared_ptr<void>(nullptr, [oldLimit](void*) {
+        g_LockRangesLimit.store(oldLimit, std::memory_order_relaxed);
+    });
+}
+
+std::shared_ptr<void> TLockLocker::OverrideTotalRangesLimit(ui64 newLimit) {
+    ui64 oldLimit = g_TotalRangesLimit.exchange(newLimit, std::memory_order_relaxed);
+    return std::shared_ptr<void>(nullptr, [oldLimit](void*) {
+        g_TotalRangesLimit.store(oldLimit, std::memory_order_relaxed);
+    });
+}
+
 void TLockLocker::AddPointLock(const TLockInfo::TPtr& lock, const TPointKey& key) {
     if (lock->AddPoint(key)) {
         key.Table->AddPointLock(key, lock.Get());
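+        // Keep locks that own point/range entries on an intrusive list, so
+        // ForceShardLock below can demote the oldest of them when the
+        // per-table range budget runs out.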
+        LocksWithRanges.PushBack(lock.Get());
     } else {
         key.Table->AddShardLock(lock.Get());
     }
@@ -413,21 +473,27 @@ void TLockLocker::AddPointLock(const TLockInfo::TPtr& lock, const TPointKey& key
 void TLockLocker::AddRangeLock(const TLockInfo::TPtr& lock, const TRangeKey& key) {
     if (lock->AddRange(key)) {
         key.Table->AddRangeLock(key, lock.Get());
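+        // Same bookkeeping as in AddPointLock above.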
+        LocksWithRanges.PushBack(lock.Get());
     } else {
         key.Table->AddShardLock(lock.Get());
     }
 }
 
-void TLockLocker::AddShardLock(const TLockInfo::TPtr& lock, TIntrusiveList<TTableLocks, TTableLocksReadListTag>& readTables) {
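+// Demotes a lock to a whole-shard lock: its per-range entries are dropped
+// and it is re-registered as a shard lock on every table it reads.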
482+ void TLockLocker::MakeShardLock ( TLockInfo* lock) {
422483 if (!lock->IsShardLock ()) {
423484 for (const TPathId& tableId : lock->GetReadTables ()) {
424- Tables.at (tableId)->RemoveRangeLock (lock. Get () );
485+ Tables.at (tableId)->RemoveRangeLock (lock);
425486 }
426487 lock->MakeShardLock ();
488+ LocksWithRanges.Remove (lock);
427489 for (const TPathId& tableId : lock->GetReadTables ()) {
428- Tables.at (tableId)->AddShardLock (lock. Get () );
490+ Tables.at (tableId)->AddShardLock (lock);
429491 }
430492 }
493+ }
494+
495+ void TLockLocker::AddShardLock (const TLockInfo::TPtr& lock, TIntrusiveList<TTableLocks, TTableLocksReadListTag>& readTables) {
496+ MakeShardLock (lock.Get ());
431497 for (auto & table : readTables) {
432498 const TPathId& tableId = table.GetTableId ();
433499 Y_ABORT_UNLESS (Tables.at (tableId).Get () == &table);
@@ -519,6 +585,9 @@ void TLockLocker::RemoveBrokenRanges() {
 TLockInfo::TPtr TLockLocker::GetOrAddLock(ui64 lockId, ui32 lockNodeId) {
     auto it = Locks.find(lockId);
     if (it != Locks.end()) {
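+        // On reuse, move the lock to the back of its tracking lists so that
+        // demotion and expiry order follow least recent use.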
+        if (it->second->IsInList<TLockInfoRangesListTag>()) {
+            LocksWithRanges.PushBack(it->second.Get());
+        }
         if (it->second->IsInList<TLockInfoExpireListTag>()) {
             ExpireQueue.PushBack(it->second.Get());
         }
@@ -591,6 +660,7 @@ void TLockLocker::RemoveOneLock(ui64 lockTxId, ILocksDb* db) {
     for (const TPathId& tableId : txLock->GetWriteTables()) {
         Tables.at(tableId)->RemoveWriteLock(txLock.Get());
     }
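+    // Unlink from the ranges tracking list too; for a lock that was never
+    // linked (e.g. a pure shard lock) this should be a no-op.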
+    LocksWithRanges.Remove(txLock.Get());
     txLock->CleanupConflicts();
     Locks.erase(it);
 
@@ -634,6 +704,7 @@ void TLockLocker::RemoveSchema(const TPathId& tableId, ILocksDb* db) {
     Y_ABORT_UNLESS(Tables.empty());
     Locks.clear();
     ShardLocks.clear();
+    LocksWithRanges.Clear();
     ExpireQueue.Clear();
     BrokenLocks.Clear();
     BrokenPersistentLocks.Clear();
@@ -643,21 +714,41 @@ void TLockLocker::RemoveSchema(const TPathId& tableId, ILocksDb* db) {
     PendingSubscribeLocks.clear();
 }
 
-bool TLockLocker::ForceShardLock(const TPathId& tableId) const {
-    auto it = Tables.find(tableId);
-    if (it != Tables.end()) {
-        if (it->second->RangeCount() > LockLimit()) {
-            return true;
-        }
+bool TLockLocker::ForceShardLock(
+    const TLockInfo::TPtr& lock,
+    const TIntrusiveList<TTableLocks, TTableLocksReadListTag>& readTables,
+    ui64 newRanges)
+{
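+    // Two-tier budget: a single lock may hold at most LockRangesLimit()
+    // points/ranges, and each table at most TotalRangesLimit() in total.
+    // When a table's total would overflow, the oldest range locks are
+    // demoted to whole-shard locks to make room.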
+    if (lock->NumPoints() + lock->NumRanges() + newRanges > LockRangesLimit()) {
+        // Lock has too many ranges, will never fit in
+        return true;
     }
-    return false;
-}
 
-bool TLockLocker::ForceShardLock(const TIntrusiveList<TTableLocks, TTableLocksReadListTag>& readTables) const {
     for (auto& table : readTables) {
-        if (table.RangeCount() > LockLimit())
-            return true;
+        while (table.RangeCount() + newRanges > TotalRangesLimit()) {
+            if (LocksWithRanges.Empty()) {
+                // Too many new ranges (e.g. TotalRangesLimit < LockRangesLimit)
+                return true;
+            }
+
+            // Try to reduce the number of ranges until new ranges fit in
+            TLockInfo* next = LocksWithRanges.PopFront();
+            if (next == lock.Get()) {
+                bool wasLast = LocksWithRanges.Empty();
+                LocksWithRanges.PushBack(next);
+                if (wasLast) {
+                    return true;
+                }
+                // We want to handle the newest lock last
+                continue;
+            }
+
+            // Reduce the number of ranges by making the oldest lock into a shard lock
+            MakeShardLock(next);
+            Self->IncCounter(COUNTER_LOCKS_WHOLE_SHARD);
+        }
     }
+
     return false;
 }
 
@@ -771,8 +862,6 @@ TVector<TSysLocks::TLock> TSysLocks::ApplyLocks() {
         return TVector<TLock>();
     }
 
-    bool shardLock = Locker.ForceShardLock(Update->ReadTables);
-
     TLockInfo::TPtr lock;
     ui64 counter = TLock::ErrorNotSet;
 
@@ -791,6 +880,12 @@ TVector<TSysLocks::TLock> TSysLocks::ApplyLocks() {
         } else if (lock->IsBroken()) {
             counter = TLock::ErrorBroken;
         } else {
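+            // The shard-lock decision is deferred until a usable lock is at
+            // hand: ForceShardLock may demote other range locks to make room
+            // and needs this lock plus the number of ranges the update adds.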
+            bool shardLock = (
+                lock->IsShardLock() ||
+                Locker.ForceShardLock(
+                    lock,
+                    Update->ReadTables,
+                    Update->PointLocks.size() + Update->RangeLocks.size()));
             if (shardLock) {
                 Locker.AddShardLock(lock, Update->ReadTables);
                 Self->IncCounter(COUNTER_LOCKS_WHOLE_SHARD);