
Commit 56797d1

Merge branch 'mysql-8.0' into mysql-8.4
Change-Id: I3ee4034c4c0c05d851a7416a72d67c1f02219646
2 parents: 20b006e + 2d54bdb

File tree

5 files changed: 322 additions, 9 deletions

Lines changed: 71 additions & 0 deletions
@@ -0,0 +1,71 @@
# [connection default]
CALL create_tables(20);
CALL insert_row(20);
SELECT COUNT(*) FROM t1;
COUNT(*)
1
SELECT COUNT(*) FROM t20;
COUNT(*)
1
SET GLOBAL ndb_metadata_sync = 'ON';
# Start truncating
CALL truncate_list(1,10);
# Now fail all drops (1st phase of truncate)
SET @saved_debug = @@GLOBAL.debug;
SET @@GLOBAL.debug = '+d,ndb_fail_drop';
CALL truncate_list(11, 20);
ERROR HY000: Got error 761 'Unable to drop table as backup is in progress' from NDBCLUSTER
CALL truncate_list(11, 20);
ERROR HY000: Got error 761 'Unable to drop table as backup is in progress' from NDBCLUSTER
# t1->t10 must have been truncated
SELECT COUNT(*) FROM t1;
COUNT(*)
0
SELECT COUNT(*) FROM t10;
COUNT(*)
0
# t11->t20 must have failed
SELECT COUNT(*) FROM t11;
COUNT(*)
1
SELECT COUNT(*) FROM t20;
COUNT(*)
1
# [connection server1]
SET DEBUG_SYNC='truncate_stop_after_execute SIGNAL signal1 WAIT_FOR go_signal1';
CALL truncate_list(1,20);
# [connection server2]
CALL truncate_list(1,20);
# [connection default]
SET DEBUG_SYNC='now SIGNAL go_signal1';
# [connection server1]
ERROR HY000: Got error 761 'Unable to drop table as backup is in progress' from NDBCLUSTER
# [connection server2]
# [connection server1]
SET DEBUG_SYNC='truncate_stop_after_execute SIGNAL signal1 WAIT_FOR go_signal1';
CALL truncate_list(1,20);
# [connection server2]
CALL truncate_list(1,20);
# [connection default]
SET DEBUG_SYNC='now SIGNAL go_signal1';
# [connection server1]
ERROR HY000: Got error 761 'Unable to drop table as backup is in progress' from NDBCLUSTER
# [connection server2]
# [connection default]
# Re-insert data to assess that share locks were properly cleaned
CALL insert_row(20);
# All tables should have data (t1->t20 1 row, by server2)
SELECT COUNT(*) FROM t1;
COUNT(*)
1
SELECT COUNT(*) FROM t20;
COUNT(*)
1
# Cleanup
DROP PROCEDURE truncate_list;
SET GLOBAL debug = @saved_debug;
CALL drop_tables(20);
DROP PROCEDURE create_tables;
DROP PROCEDURE drop_tables;
DROP PROCEDURE insert_row;
DROP PROCEDURE truncate_list;
Lines changed: 151 additions & 0 deletions
@@ -0,0 +1,151 @@
--source include/have_ndb.inc
--source include/have_debug.inc

connect(server1,127.0.0.1,root,,test,$MASTER_MYPORT,);
connect(server2,127.0.0.1,root,,test,$MASTER_MYPORT1,);
--echo # [connection default]
--connection default

--disable_query_log
DELIMITER //;
CREATE PROCEDURE create_tables (IN ntables INT)
BEGIN
  SET @idx = 1;
  WHILE @idx <= ntables DO
    SET @pstmt = CONCAT('CREATE TABLE t', @idx, ' (a INT PRIMARY KEY AUTO_INCREMENT, c CHAR(3)) ENGINE = NDB');
    PREPARE stmt FROM @pstmt;
    EXECUTE stmt;
    DEALLOCATE PREPARE stmt;
    SET @idx = @idx + 1;
  END WHILE;
END //

CREATE PROCEDURE drop_tables (IN ntables INT)
BEGIN
  SET @idx = 1;
  WHILE @idx <= ntables DO
    SET @pstmt = CONCAT('DROP TABLE t', @idx);
    PREPARE stmt FROM @pstmt;
    EXECUTE stmt;
    DEALLOCATE PREPARE stmt;
    SET @idx = @idx + 1;
  END WHILE;
END //

CREATE PROCEDURE insert_row (IN ntables INT)
BEGIN
  SET @idx = 1;
  WHILE @idx <= ntables DO
    SET @pstmt = CONCAT('INSERT INTO t', @idx, ' (c) VALUES (\'val\')');
    PREPARE stmt FROM @pstmt;
    EXECUTE stmt;
    DEALLOCATE PREPARE stmt;
    SET @idx = @idx + 1;
  END WHILE;
END //

CREATE PROCEDURE truncate_list(IN first_tab_idx INT, IN last_tab_idx INT)
BEGIN
  SET @idx = first_tab_idx;
  WHILE @idx <= last_tab_idx DO
    SET @pstmt = CONCAT('TRUNCATE TABLE t', @idx);
    PREPARE stmt FROM @pstmt;
    EXECUTE stmt;
    DEALLOCATE PREPARE stmt;
    SET @idx = @idx + 1;
  END WHILE;
END //

--connection server2
CREATE PROCEDURE truncate_list(IN first_tab_idx INT, IN last_tab_idx INT)
BEGIN
  SET @idx = first_tab_idx;
  WHILE @idx <= last_tab_idx DO
    SET @pstmt = CONCAT('TRUNCATE TABLE t', @idx);
    PREPARE stmt FROM @pstmt;
    EXECUTE stmt;
    DEALLOCATE PREPARE stmt;
    SET @idx = @idx + 1;
  END WHILE;
END //

--connection default
DELIMITER ;//
--enable_query_log

CALL create_tables(20);
CALL insert_row(20);

# Data at t1
SELECT COUNT(*) FROM t1;
# Data at t20
SELECT COUNT(*) FROM t20;
SET GLOBAL ndb_metadata_sync = 'ON';

--echo # Start truncating
CALL truncate_list(1,10);

--echo # Now fail all drops (1st phase of truncate)
SET @saved_debug = @@GLOBAL.debug;
SET @@GLOBAL.debug = '+d,ndb_fail_drop';
--error 1296
CALL truncate_list(11, 20);
# repeat once again to stress out share locks
--error 1296
CALL truncate_list(11, 20);

--echo # t1->t10 must have been truncated
SELECT COUNT(*) FROM t1;
SELECT COUNT(*) FROM t10;
--echo # t11->t20 must have failed
SELECT COUNT(*) FROM t11;
SELECT COUNT(*) FROM t20;

# Do the same with two connections. Server1 fails all but server2 queues and is successful
--let $iter=2
while($iter)
{
  --echo # [connection server1]
  --connection server1
  SET DEBUG_SYNC='truncate_stop_after_execute SIGNAL signal1 WAIT_FOR go_signal1';
  --send CALL truncate_list(1,20)

  --echo # [connection server2]
  --connection server2
  --send CALL truncate_list(1,20)

  --echo # [connection default]
  --connection default
  SET DEBUG_SYNC='now SIGNAL go_signal1';

  --echo # [connection server1]
  --connection server1
  --error 1296
  --reap
  --echo # [connection server2]
  --connection server2
  --reap

  --dec $iter
}

--echo # [connection default]
--connection default
--echo # Re-insert data to assess that share locks were properly cleaned
CALL insert_row(20);
--echo # All tables should have data (t1->t20 1 row, by server2)
SELECT COUNT(*) FROM t1;
SELECT COUNT(*) FROM t20;

--echo # Cleanup
--connection server2
DROP PROCEDURE truncate_list;
--connection default
SET GLOBAL debug = @saved_debug;
CALL drop_tables(20);
DROP PROCEDURE create_tables;
DROP PROCEDURE drop_tables;
DROP PROCEDURE insert_row;
DROP PROCEDURE truncate_list;
--disconnect server1
--disconnect server2

storage/ndb/plugin/ha_ndbcluster.cc

Lines changed: 63 additions & 9 deletions
@@ -9397,7 +9397,12 @@ int ha_ndbcluster::create(const char *path [[maybe_unused]],
   const char *dbname = table_share->db.str;
   const char *tabname = table_share->table_name.str;
 
-  ndb_log_info("Creating table '%s.%s'", dbname, tabname);
+  {
+    const int sql_cmd = thd_sql_command(thd);
+    ndb_log_info("%s table '%s.%s'",
+                 sql_cmd == SQLCOM_TRUNCATE ? "Truncating" : "Creating", dbname,
+                 tabname);
+  }
 
   Ndb_schema_dist_client schema_dist_client(thd);
 
@@ -10395,21 +10400,40 @@ int ha_ndbcluster::truncate(dd::Table *table_def) {
   /* Fill in create_info from the open table */
   HA_CREATE_INFO create_info;
   update_create_info_from_table(&create_info, table);
-
-  // Close the table, will always return 0
-  (void)close();
+#ifndef NDEBUG
+  const NDB_SHARE *old_share_ptr_for_sanity_check = m_share;
+#endif
 
   // Call ha_ndbcluster::create which will detect that this is a
   // truncate and thus drop the table before creating it again.
   const int truncate_error =
       create(table->s->normalized_path.str, table, &create_info, table_def);
 
-  // Open the table again even if the truncate failed, the caller
-  // expect the table to be open. Report any error during open.
-  const int open_error = open(table->s->normalized_path.str, 0, 0, table_def);
+  DBUG_PRINT("debug", ("truncate res: %d", truncate_error));
+#ifndef NDEBUG
+  /**
+   * This sync point is used by tests that want to assess the
+   * concurrency of the truncate, specially the correct state of the
+   * THR_LOCK_DATA (m_lock) to avoid deadlocks.
+   */
+  if (current_thd) DEBUG_SYNC(current_thd, "truncate_stop_after_execute");
+  /**
+   * create() creates a new ndb_share, but it is NOT set as this
+   * handler's m_share, because the currently opened ndb_share is the
+   * old one. This old share will thus be released through the closing
+   * of this handler's usage of the table. Following is a sanity check
+   * that this handler's share pointer does not change despite there
+   * being a new share.
+   */
+  if (unlikely(old_share_ptr_for_sanity_check != m_share)) {
+    ndb_log_error(
+        "Fatal! Truncate table re-create modified "
+        "the handler's currently opened share pointer.");
+    abort();
+  }
+#endif
 
-  if (truncate_error) return truncate_error;
-  return open_error;
+  return truncate_error;
 }
 
 int ha_ndbcluster::prepare_inplace__add_index(THD *thd, KEY *key_info,
@@ -11129,6 +11153,11 @@ static bool drop_table_and_related(THD *thd, Ndb *ndb,
     return false;
   }
 
+  DBUG_EXECUTE_IF("ndb_fail_drop", {
+    // Simulate failure. A bogus error code will be set on the caller.
+    return false;
+  });
+
   // Drop the table
   if (dict->dropTableGlobal(*table, drop_flags) != 0) {
     const NdbError &ndb_err = dict->getNdbError();
@@ -11241,6 +11270,11 @@ int drop_table_impl(THD *thd, Ndb *ndb,
 
   Thd_ndb *thd_ndb = get_thd_ndb(thd);
   const int dict_error_code = dict->getNdbError().code;
+  DBUG_EXECUTE_IF("ndb_fail_drop", {
+    int *ec = const_cast<int *>(&dict_error_code);
+    // backup in progress (e.g.)
+    *ec = 761;
+  });
   // Check if an error has occurred. Note that if the table didn't exist in NDB
   // (denoted by error codes 709 or 723), it's considered a success
   if (dict_error_code && dict_error_code != 709 && dict_error_code != 723) {
@@ -11557,6 +11591,7 @@ int ha_ndbcluster::open(const char *path [[maybe_unused]],
     return HA_ERR_NO_CONNECTION;
   }
 
+  DBUG_EXECUTE("debug", NDB_SHARE::dbg_print_locks(m_share););
   // Init table lock structure
   thr_lock_data_init(&m_share->lock, &m_lock, (void *)nullptr);
 
@@ -11873,6 +11908,23 @@ inline void ha_ndbcluster::release_key_fields() {
   }
 }
 
+static void check_thr_lock_data_unused(const THR_LOCK_DATA *thr_lock_data) {
+  /**
+   * Check that the handler is not involved in any SQL (thr_lock) locking before
+   * ending its lifecycle.
+   */
+  if (unlikely(thr_lock_data->type > TL_UNLOCK)) {
+    ndb_log_error(
+        "Fatal! Closing handler involved in thr_lock: "
+        "thread_id %u "
+        "type %u "
+        "thr_lock %p",
+        thr_lock_data->owner ? thr_lock_data->owner->thread_id : 0,
+        thr_lock_data->type, thr_lock_data->lock);
+    abort();
+  }
+}
+
 /**
   Close an open ha_ndbcluster instance.
 
@@ -11897,6 +11949,8 @@ inline void ha_ndbcluster::release_key_fields() {
 int ha_ndbcluster::close(void) {
   DBUG_TRACE;
 
+  check_thr_lock_data_unused(&m_lock);
+
   release_key_fields();
   release_ndb_share();

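The new test exercises the error path above by setting @@GLOBAL.debug = '+d,ndb_fail_drop', which activates the DBUG_EXECUTE_IF("ndb_fail_drop", ...) blocks so that the drop phase of TRUNCATE reports NDB error 761. Below is a minimal, self-contained C++ sketch of that keyword-driven fault-injection idea; it is illustrative only (it does not use the real my_dbug.h macros, and names such as enable_debug_keyword, execute_if and drop_table_in_ndb are hypothetical stand-ins).

// Illustrative sketch only: a keyword-driven fault-injection point, loosely
// modelled on how '+d,ndb_fail_drop' triggers DBUG_EXECUTE_IF in the patch.
// All names below are hypothetical; this is not the my_dbug.h implementation.
#include <cstdio>
#include <set>
#include <string>

static std::set<std::string> g_active_debug_keywords;

// Rough analogue of SET GLOBAL debug = '+d,<keyword>'.
static void enable_debug_keyword(const std::string &keyword) {
  g_active_debug_keywords.insert(keyword);
}

// Rough analogue of DBUG_EXECUTE_IF(keyword, action): run the injected action
// only when the keyword has been switched on.
template <typename Action>
static void execute_if(const std::string &keyword, Action action) {
  if (g_active_debug_keywords.count(keyword) != 0) action();
}

// Stand-in for the NDB drop step: the injected fault makes it report
// error 761 ("backup in progress") instead of performing the drop.
static int drop_table_in_ndb(const char *name) {
  int error_code = 0;
  execute_if("ndb_fail_drop", [&] { error_code = 761; });
  if (error_code != 0) {
    std::printf("drop of %s failed with NDB error %d\n", name, error_code);
    return error_code;
  }
  std::printf("dropped %s\n", name);
  return 0;
}

int main() {
  drop_table_in_ndb("t1");              // keyword not enabled: drop succeeds
  enable_debug_keyword("ndb_fail_drop");
  return drop_table_in_ndb("t2") == 761 ? 0 : 1;  // fails as the test expects
}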
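The other debug-only addition, check_thr_lock_data_unused(), backs the behavioral change in ha_ndbcluster::truncate(): the handler is no longer closed and reopened around the re-create, so its THR_LOCK_DATA must already be back to TL_UNLOCK by the time close() eventually runs. The following sketch models that close-time invariant with hypothetical types (Handler, ThrLockData); it is not the MySQL handler code.

// Illustrative sketch only: the close-time locking invariant enforced by
// check_thr_lock_data_unused(). Handler and ThrLockData are hypothetical
// stand-ins, not the real ha_ndbcluster / THR_LOCK_DATA types.
#include <cstdio>
#include <cstdlib>

enum LockType { TL_UNLOCK = 0, TL_READ = 1, TL_WRITE = 2 };

struct ThrLockData {
  LockType type = TL_UNLOCK;  // stand-in for THR_LOCK_DATA::type
};

class Handler {
 public:
  void external_lock(LockType t) { m_lock.type = t; }  // statement takes a lock
  void external_unlock() { m_lock.type = TL_UNLOCK; }  // statement releases it

  // As in the patched truncate(): no close()/open() pair around the re-create,
  // so the existing lock state simply survives the operation.
  int truncate() { return recreate_table(); }

  void close() {
    // Equivalent of check_thr_lock_data_unused(&m_lock): a handler must not be
    // closed while it still participates in thr_lock locking.
    if (m_lock.type > TL_UNLOCK) {
      std::fprintf(stderr, "Fatal: closing handler that still holds a lock\n");
      std::abort();
    }
  }

 private:
  int recreate_table() { return 0; }  // drop + create again, details elided
  ThrLockData m_lock;
};

int main() {
  Handler h;
  h.external_lock(TL_WRITE);  // TRUNCATE runs under a write lock
  h.truncate();               // re-create happens without closing the handler
  h.external_unlock();        // lock released at end of statement
  h.close();                  // invariant holds, no abort
  return 0;
}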