@@ -394,7 +394,7 @@ def test_create_table_with_real_custom_policy(self):
         taxonomy_parent = f"projects/{Config.CLIENT.project}/locations/us"

         new_taxonomy = datacatalog_types.Taxonomy(
-            display_name="Custom test taxonomy",
+            display_name="Custom test taxonomy" + unique_resource_id(),
             description="This taxonomy is only used for a test.",
             activated_policy_types=[
                 datacatalog_types.Taxonomy.PolicyType.FINE_GRAINED_ACCESS_CONTROL
@@ -2370,6 +2370,75 @@ def test_parameterized_types_round_trip(self):

         self.assertEqual(tuple(s._key()[:2] for s in table2.schema), fields)

+    def test_table_snapshots(self):
+        from google.cloud.bigquery import CopyJobConfig
+        from google.cloud.bigquery import OperationType
+
+        client = Config.CLIENT
+
+        source_table_path = f"{client.project}.{Config.DATASET}.test_table"
+        snapshot_table_path = f"{source_table_path}_snapshot"
+
+        # Create the table before loading so that the column order is predictable.
+        schema = [
+            bigquery.SchemaField("foo", "INTEGER"),
+            bigquery.SchemaField("bar", "STRING"),
+        ]
+        source_table = helpers.retry_403(Config.CLIENT.create_table)(
+            Table(source_table_path, schema=schema)
+        )
+        self.to_delete.insert(0, source_table)
+
+        # Populate the table with initial data.
+        rows = [{"foo": 1, "bar": "one"}, {"foo": 2, "bar": "two"}]
+        load_job = Config.CLIENT.load_table_from_json(rows, source_table)
+        load_job.result()
+
+        # Now create a snapshot before modifying the original table data.
+        copy_config = CopyJobConfig()
+        copy_config.operation_type = OperationType.SNAPSHOT
+
+        copy_job = client.copy_table(
+            sources=source_table_path,
+            destination=snapshot_table_path,
+            job_config=copy_config,
+        )
+        copy_job.result()
+
+        snapshot_table = client.get_table(snapshot_table_path)
+        self.to_delete.insert(0, snapshot_table)
+
+        # Modify data in original table.
+        sql = f'INSERT INTO `{source_table_path}`(foo, bar) VALUES (3, "three")'
+        query_job = client.query(sql)
+        query_job.result()
+
+        # List rows from the source table and compare them to rows from the snapshot.
+        rows_iter = client.list_rows(source_table_path)
+        rows = sorted(row.values() for row in rows_iter)
+        assert rows == [(1, "one"), (2, "two"), (3, "three")]
+
+        rows_iter = client.list_rows(snapshot_table_path)
+        rows = sorted(row.values() for row in rows_iter)
+        assert rows == [(1, "one"), (2, "two")]
+
+        # Now restore the table from the snapshot and it should again contain the old
+        # set of rows.
+        copy_config = CopyJobConfig()
+        copy_config.operation_type = OperationType.RESTORE
+        copy_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE
+
+        copy_job = client.copy_table(
+            sources=snapshot_table_path,
+            destination=source_table_path,
+            job_config=copy_config,
+        )
+        copy_job.result()
+
+        rows_iter = client.list_rows(source_table_path)
+        rows = sorted(row.values() for row in rows_iter)
+        assert rows == [(1, "one"), (2, "two")]
+
     def temp_dataset(self, dataset_id, location=None):
         project = Config.CLIENT.project
         dataset_ref = bigquery.DatasetReference(project, dataset_id)
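For context, a minimal standalone sketch of the snapshot/restore flow this test exercises, outside the test harness. The project, dataset, and table IDs below are placeholders (assumptions, not from the commit), and the snippet assumes an installed google-cloud-bigquery version that exposes OperationType as the test above does; the CopyJobConfig usage mirrors the test.

# Sketch only: snapshot a table, then restore it from that snapshot.
from google.cloud import bigquery
from google.cloud.bigquery import CopyJobConfig, OperationType

client = bigquery.Client()

source = "my-project.my_dataset.my_table"         # hypothetical source table
snapshot = "my-project.my_dataset.my_table_snap"  # hypothetical snapshot name

# Take a snapshot: a copy job whose operation type is SNAPSHOT.
snapshot_config = CopyJobConfig(operation_type=OperationType.SNAPSHOT)
client.copy_table(source, snapshot, job_config=snapshot_config).result()

# Restore the source table from the snapshot, overwriting its current contents.
restore_config = CopyJobConfig(
    operation_type=OperationType.RESTORE,
    write_disposition=bigquery.WriteDisposition.WRITE_TRUNCATE,
)
client.copy_table(snapshot, source, job_config=restore_config).result()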