@@ -22,24 +22,23 @@ def setup_class(cls):
def teardown_class (cls ):
    # Stop the cluster started in setup_class so its processes and ports are released.
    cls .cluster .stop ()
2424
def make_session(self):
    """Connect to node 1 of the test cluster and return a query session pool.

    Blocks up to 5 seconds for the driver to become ready (fail_fast aborts
    early on a hard connection error).
    """
    endpoint = f'grpc://localhost:{self.cluster.nodes[1].grpc_port}'
    driver = ydb.Driver(endpoint=endpoint, database='/Root')
    pool = ydb.QuerySessionPool(driver)
    driver.wait(5, fail_fast=True)
    return pool
3130
def create_test_table(self, session, table):
    """Create a column-store (STORE = COLUMN) table with an Int32 key and a Uint64 value."""
    ddl = f"""
        CREATE TABLE {table} (
            k Int32 NOT NULL,
            v Uint64,
            PRIMARY KEY (k)
        ) WITH (STORE = COLUMN)
    """
    return session.execute_with_retries(ddl)
4039
def upsert_test_chunk(self, session, table, chunk_id, retries=10):
    """Upsert one chunk of ROWS_CHUNK_SIZE rows into *table*.

    Keys are consecutive integers starting at chunk_id * ROWS_CHUNK_SIZE, so
    successive chunk_ids produce non-overlapping key ranges; every value is 42.
    """
    first_key = chunk_id * ROWS_CHUNK_SIZE
    query = f"""
        $n = {ROWS_CHUNK_SIZE};
        $values_list = ListReplicate(42ul, $n);
        $rows_list = ListFoldMap($values_list, {first_key}, ($val, $i) -> ((<|k:$i, v:$val|>, $i + 1)));

        UPSERT INTO {table}
        SELECT * FROM AS_TABLE($rows_list);
    """
    return session.execute_with_retries(query, None, ydb.retries.RetrySettings(max_retries=retries))
5049
def upsert_until_overload(self, session, table):
    """Upsert chunks with no retries until the database answers with Overloaded.

    The Overloaded error is the expected terminal condition and is swallowed
    after logging; completing all ROWS_CHUNKS_COUNT chunks also ends the loop.
    """
    try:
        for i in range(ROWS_CHUNKS_COUNT):
            result = self.upsert_test_chunk(session, table, i, retries=0)
            print(f"upsert #{i} ok, result:", result, file=sys.stderr)
    except ydb.issues.Overloaded:
        print('upsert: got overload issue', file=sys.stderr)
57+
def test(self):
    """As per https://github.com/ydb-platform/ydb/issues/13529"""
    session = self.make_session()

    # Fill a column table until the database reports overload.
    self.create_test_table(session, 'huge')
    self.upsert_until_overload(session, 'huge')

    # Drop the big table to release the space.
    session.execute_with_retries("""DROP TABLE huge""")

    # After cleanup the database must accept DDL and writes again.
    self.create_test_table(session, 'small')
    self.upsert_test_chunk(session, 'small', 0)
72+
def delete_test_chunk(self, session, table, chunk_id, retries=10):
    """Delete exactly the rows written by upsert_test_chunk for this chunk_id.

    Fix: the upper bound used to be inclusive (k <= first + ROWS_CHUNK_SIZE),
    which also deleted the first row of the NEXT chunk — upsert_test_chunk
    writes keys in [first, first + ROWS_CHUNK_SIZE - 1]. The bound is now
    exclusive so chunks no longer overlap.
    """
    first = chunk_id * ROWS_CHUNK_SIZE
    session.execute_with_retries(f"""
        DELETE FROM {table}
        WHERE {first} <= k AND k < {first + ROWS_CHUNK_SIZE}
    """, None, ydb.retries.RetrySettings(max_retries=retries))
78+
def delete_until_overload(self, session, table):
    """Delete chunks with no retries until Overloaded; return the failing chunk index.

    Fix: previously, if every chunk was deleted without ever hitting
    Overloaded, the function fell off the end and implicitly returned None;
    the caller then crashed with an unrelated TypeError when it used the
    index in arithmetic. Now an explicit error names the real problem.
    """
    for i in range(ROWS_CHUNKS_COUNT):
        try:
            self.delete_test_chunk(session, table, i, retries=0)
            print(f"delete #{i} ok", file=sys.stderr)
        except ydb.issues.Overloaded:
            print('delete: got overload issue', file=sys.stderr)
            return i
    raise RuntimeError('deleted all chunks without hitting an Overloaded error')
87+
def test_delete(self):
    """As per https://github.com/ydb-platform/ydb/issues/13653"""
    session = self.make_session()

    # Fill a column table until the database reports overload.
    self.create_test_table(session, 'huge')
    self.upsert_until_overload(session, 'huge')

    # On a full database, deletions are expected to hit the overload too.
    failed_chunk = self.delete_until_overload(session, 'huge')

    # Once compaction frees space, retrying the same deletion should succeed.
    self.delete_test_chunk(session, 'huge', failed_chunk)
0 commit comments