@@ -402,9 +402,53 @@ void TTableLocks::RemoveWriteLock(TLockInfo* lock) {
 
 // TLockLocker
 
+namespace {
+
+static constexpr ui64 DefaultLockLimit() {
+    // Valgrind and sanitizers are too slow
+    // Some tests cannot exhaust default limit in under 5 minutes
+    return NValgrind::PlainOrUnderValgrind(
+        NSan::PlainOrUnderSanitizer(
+            20000,
+            1000),
+        1000);
+}
+
+static constexpr ui64 DefaultLockRangesLimit() {
+    return DefaultLockLimit() * 50;
+}
+
+static std::atomic<ui64> g_LockLimit{ DefaultLockLimit() };
+static std::atomic<ui64> g_LockRangesLimit{ DefaultLockRangesLimit() };
+
+} // namespace
+
+ui64 TLockLocker::LockLimit() {
+    return g_LockLimit.load(std::memory_order_relaxed);
+}
+
+ui64 TLockLocker::LockRangesLimit() {
+    return g_LockRangesLimit.load(std::memory_order_relaxed);
+}
+
+std::shared_ptr<void> TLockLocker::OverrideLockLimit(ui64 newLimit) {
+    ui64 oldLimit = g_LockLimit.exchange(newLimit, std::memory_order_relaxed);
+    return std::shared_ptr<void>(nullptr, [oldLimit](void*) {
+        g_LockLimit.store(oldLimit, std::memory_order_relaxed);
+    });
+}
+
+std::shared_ptr<void> TLockLocker::OverrideLockRangesLimit(ui64 newLimit) {
+    ui64 oldLimit = g_LockRangesLimit.exchange(newLimit, std::memory_order_relaxed);
+    return std::shared_ptr<void>(nullptr, [oldLimit](void*) {
+        g_LockRangesLimit.store(oldLimit, std::memory_order_relaxed);
+    });
+}
+
 void TLockLocker::AddPointLock(const TLockInfo::TPtr& lock, const TPointKey& key) {
     if (lock->AddPoint(key)) {
         key.Table->AddPointLock(key, lock.Get());
+        LocksWithRanges.PushBack(lock.Get());
     } else {
         key.Table->AddShardLock(lock.Get());
     }
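
Note on the new limits: they are process-wide atomics accessed with relaxed ordering, which is enough for an isolated tuning knob with no ordering relationship to other memory. With the defaults above, LockRangesLimit() = 20000 * 50 = 1,000,000 (1000 * 50 = 50,000 under Valgrind or sanitizers). The Override helpers return a std::shared_ptr<void> that owns nothing but a custom deleter; destroying the last copy writes the previous value back. A minimal usage sketch, assuming a test that wants a small limit (the test body is hypothetical):

    {
        // Scoped override: the guard restores the previous limit when it
        // goes out of scope, including on early return or exception.
        std::shared_ptr<void> guard = TLockLocker::OverrideLockLimit(100);
        RunEvictionTest(); // hypothetical test body that needs a low limit
    } // deleter runs here, the previous limit is restored
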
@@ -413,21 +457,27 @@ void TLockLocker::AddPointLock(const TLockInfo::TPtr& lock, const TPointKey& key
 void TLockLocker::AddRangeLock(const TLockInfo::TPtr& lock, const TRangeKey& key) {
     if (lock->AddRange(key)) {
         key.Table->AddRangeLock(key, lock.Get());
+        LocksWithRanges.PushBack(lock.Get());
     } else {
         key.Table->AddShardLock(lock.Get());
     }
 }
 
-void TLockLocker::AddShardLock(const TLockInfo::TPtr& lock, TIntrusiveList<TTableLocks, TTableLocksReadListTag>& readTables) {
+void TLockLocker::MakeShardLock(TLockInfo* lock) {
     if (!lock->IsShardLock()) {
         for (const TPathId& tableId : lock->GetReadTables()) {
-            Tables.at(tableId)->RemoveRangeLock(lock.Get());
+            Tables.at(tableId)->RemoveRangeLock(lock);
         }
         lock->MakeShardLock();
+        LocksWithRanges.Remove(lock);
         for (const TPathId& tableId : lock->GetReadTables()) {
-            Tables.at(tableId)->AddShardLock(lock.Get());
+            Tables.at(tableId)->AddShardLock(lock);
         }
     }
+}
+
+void TLockLocker::AddShardLock(const TLockInfo::TPtr& lock, TIntrusiveList<TTableLocks, TTableLocksReadListTag>& readTables) {
+    MakeShardLock(lock.Get());
     for (auto& table : readTables) {
         const TPathId& tableId = table.GetTableId();
         Y_ABORT_UNLESS(Tables.at(tableId).Get() == &table);
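
The conversion body is factored out of AddShardLock into MakeShardLock, taking a raw TLockInfo*, so the eviction loop in the rewritten ForceShardLock (further down) can reuse it. The new LocksWithRanges.Remove(lock) call keeps the range-eviction queue consistent: once a lock is widened to the whole shard it no longer holds individual point or range entries, so it must not remain an eviction candidate.
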
@@ -519,6 +569,9 @@ void TLockLocker::RemoveBrokenRanges() {
 TLockInfo::TPtr TLockLocker::GetOrAddLock(ui64 lockId, ui32 lockNodeId) {
     auto it = Locks.find(lockId);
     if (it != Locks.end()) {
+        if (it->second->IsInList<TLockInfoRangesListTag>()) {
+            LocksWithRanges.PushBack(it->second.Get());
+        }
         if (it->second->IsInList<TLockInfoExpireListTag>()) {
             ExpireQueue.PushBack(it->second.Get());
         }
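
GetOrAddLock now also refreshes the lock's position in LocksWithRanges on every access: relinking an already-linked node at the tail means the front of the list always holds the least recently used lock, which is exactly what the eviction loop below pops first. A standalone sketch of the same "touch on access" idea, using std::list as a stand-in for the intrusive list (the Touch helper is illustrative, not from this commit):

    #include <list>

    // Move an element to the back of an LRU list in O(1): splice relinks
    // the node in place, so nothing is copied and no iterators to other
    // elements are invalidated.
    template <typename T>
    void Touch(std::list<T>& lru, typename std::list<T>::iterator it) {
        lru.splice(lru.end(), lru, it);
    }
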
@@ -591,6 +644,7 @@ void TLockLocker::RemoveOneLock(ui64 lockTxId, ILocksDb* db) {
         for (const TPathId& tableId : txLock->GetWriteTables()) {
             Tables.at(tableId)->RemoveWriteLock(txLock.Get());
         }
+        LocksWithRanges.Remove(txLock.Get());
         txLock->CleanupConflicts();
         Locks.erase(it);
 
@@ -634,6 +688,7 @@ void TLockLocker::RemoveSchema(const TPathId& tableId, ILocksDb* db) {
     Y_ABORT_UNLESS(Tables.empty());
     Locks.clear();
     ShardLocks.clear();
+    LocksWithRanges.Clear();
     ExpireQueue.Clear();
     BrokenLocks.Clear();
     BrokenPersistentLocks.Clear();
@@ -643,21 +698,41 @@ void TLockLocker::RemoveSchema(const TPathId& tableId, ILocksDb* db) {
     PendingSubscribeLocks.clear();
 }
 
-bool TLockLocker::ForceShardLock(const TPathId& tableId) const {
-    auto it = Tables.find(tableId);
-    if (it != Tables.end()) {
-        if (it->second->RangeCount() > LockLimit()) {
-            return true;
-        }
+bool TLockLocker::ForceShardLock(
+    const TLockInfo::TPtr& lock,
+    const TIntrusiveList<TTableLocks, TTableLocksReadListTag>& readTables,
+    ui64 newRanges)
+{
+    if (lock->NumPoints() + lock->NumRanges() + newRanges > LockRangesLimit()) {
+        // Lock has too many ranges, will never fit in
+        return true;
     }
-    return false;
-}
 
-bool TLockLocker::ForceShardLock(const TIntrusiveList<TTableLocks, TTableLocksReadListTag>& readTables) const {
     for (auto& table : readTables) {
-        if (table.RangeCount() > LockLimit())
-            return true;
+        while (table.RangeCount() + newRanges > LockRangesLimit()) {
+            if (LocksWithRanges.Empty()) {
+                // Too many new ranges (should never happen)
+                return true;
+            }
+
+            // Try to reduce the number of ranges until new ranges fit in
+            TLockInfo* next = LocksWithRanges.PopFront();
+            if (next == lock.Get()) {
+                bool wasLast = LocksWithRanges.Empty();
+                LocksWithRanges.PushBack(next);
+                if (wasLast) {
+                    return true;
+                }
+                // We want to handle the newest lock last
+                continue;
+            }
+
+            // Reduce the number of ranges by making the oldest lock into a shard lock
+            MakeShardLock(next);
+            Self->IncCounter(COUNTER_LOCKS_WHOLE_SHARD);
+        }
     }
+
     return false;
 }
 
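The rewritten ForceShardLock no longer just reports that a table has accumulated too many ranges; it actively makes room by collapsing the least recently used locks (the front of LocksWithRanges) into shard locks, and only returns true when the current lock itself cannot fit. A self-contained model of the control flow, with simplified stand-in types rather than the real classes:

    #include <cstdint>
    #include <deque>

    struct TEntry {
        uint64_t Ranges = 0;
        bool Shard = false;
    };

    // Returns true when the caller must fall back to a whole-shard lock.
    bool ForceShardLockModel(std::deque<TEntry*>& lru, TEntry* current,
                             uint64_t& tableRanges, uint64_t newRanges,
                             uint64_t limit) {
        while (tableRanges + newRanges > limit) {
            if (lru.empty()) {
                return true; // nothing left to evict
            }
            TEntry* oldest = lru.front();
            lru.pop_front();
            if (oldest == current) {
                bool wasLast = lru.empty();
                lru.push_back(oldest); // requeue, handle the newest lock last
                if (wasLast) {
                    return true; // only the current lock remains
                }
                continue;
            }
            // Collapse the oldest lock into a shard lock, freeing its ranges
            tableRanges -= oldest->Ranges;
            oldest->Ranges = 0;
            oldest->Shard = true;
        }
        return false;
    }
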
@@ -771,8 +846,6 @@ TVector<TSysLocks::TLock> TSysLocks::ApplyLocks() {
         return TVector<TLock>();
     }
 
-    bool shardLock = Locker.ForceShardLock(Update->ReadTables);
-
    TLockInfo::TPtr lock;
    ui64 counter = TLock::ErrorNotSet;
 
@@ -791,6 +864,12 @@ TVector<TSysLocks::TLock> TSysLocks::ApplyLocks() {
         } else if (lock->IsBroken()) {
             counter = TLock::ErrorBroken;
         } else {
+            bool shardLock = (
+                lock->IsShardLock() ||
+                Locker.ForceShardLock(
+                    lock,
+                    Update->ReadTables,
+                    Update->PointLocks.size() + Update->RangeLocks.size()));
             if (shardLock) {
                 Locker.AddShardLock(lock, Update->ReadTables);
                 Self->IncCounter(COUNTER_LOCKS_WHOLE_SHARD);
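
This replaces the up-front computation deleted in the previous hunk: escalation is now decided per lock, only on the healthy path (after broken and expired locks are filtered out), and a lock that is already a shard lock skips the eviction attempt entirely. The last argument is the number of new entries the current update would add; for example, an update carrying three point keys and two range keys asks ForceShardLock whether five more entries still fit.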