 import random
 import re
 import shutil
-import statistics
 import sys
 import uuid

 import click
+import numpy
 import pystache
 import yaml
 from google.cloud import storage

 from fireci import ci_command
 from fireci.dir_utils import chdir
+from fireci import prow_utils
+from fireci import uploader

 _logger = logging.getLogger('fireci.macrobenchmark')

@@ -55,9 +57,7 @@ async def _launch_macrobenchmark_test():
   runners = [MacrobenchmarkTest(k, v, artifact_versions) for k, v in config.items()]
   results = await asyncio.gather(*[x.run() for x in runners], return_exceptions=True)

-  if any(map(lambda x: isinstance(x, Exception), results)):
-    _logger.error(f'Exceptions: {[x for x in results if (isinstance(x, Exception))]}')
-    raise click.ClickException('Macrobenchmark test failed with above errors.')
+  await _post_processing(results)

   _logger.info('Macrobenchmark test finished.')

@@ -104,6 +104,23 @@ async def _copy_google_services():
   shutil.copyfile(src, dst)


+async def _post_processing(results):
+  # Upload successful measurements to the metric service
+  measurements = []
+  for result in results:
+    if not isinstance(result, Exception):
+      measurements.extend(result)
+
+  metrics_service_url = os.getenv('METRICS_SERVICE_URL')
+  access_token = prow_utils.gcloud_identity_token()
+  uploader.post_report(measurements, metrics_service_url, access_token, metric='macrobenchmark')
+
+  # Raise exceptions for failed measurements
+  if any(map(lambda x: isinstance(x, Exception), results)):
+    _logger.error(f'Exceptions: {[x for x in results if isinstance(x, Exception)]}')
+    raise click.ClickException('Macrobenchmark test failed with above errors.')
+
+
 class MacrobenchmarkTest:
   """Builds the test based on configurations and runs the test on FTL."""
   def __init__(
@@ -127,7 +144,7 @@ async def run(self):
     await self._create_benchmark_projects()
     await self._assemble_benchmark_apks()
     await self._execute_benchmark_tests()
-    await self._upload_benchmark_results()
+    return await self._aggregate_benchmark_results()

   async def _create_benchmark_projects(self):
     app_name = self.test_app_config['name']
@@ -205,7 +222,7 @@ async def _prepare_mustache_context(self):

     return mustache_context

-  async def _upload_benchmark_results(self):
+  async def _aggregate_benchmark_results(self):
     results = []
     blobs = self.gcs_client.list_blobs(self.test_results_bucket, prefix=self.test_results_dir)
     files = [x for x in blobs if re.search(r'artifacts/[^/]*\.json', x.name)]
@@ -222,14 +239,13 @@ async def _upload_benchmark_results(self):
           'name': f'{clazz}.{method}',
           'min': min(runs),
           'max': max(runs),
-          'mean': statistics.mean(runs),
-          'median': statistics.median(runs),
-          'stdev': statistics.stdev(runs),
+          'p50': numpy.percentile(runs, 50),
+          'p90': numpy.percentile(runs, 90),
+          'p99': numpy.percentile(runs, 99),
           'unit': 'ms',
         })
     self.logger.info(f'Benchmark results: {results}')
-
-    # TODO(yifany): upload to metric service once it is ready
+    return results

   async def _exec_subprocess(self, executable, args):
     command = " ".join([executable, *args])
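
A note on the post-processing flow, not part of the diff itself: because _launch_macrobenchmark_test gathers the runners with return_exceptions=True, each entry in results is either the list of measurements returned by run() or the exception a runner raised, which is what lets _post_processing upload the successes first and raise afterwards. A minimal sketch of that behavior, with hypothetical coroutines and values:

# Minimal sketch: return_exceptions=True mixes return values and exceptions
# in the gathered list; the coroutines and numbers here are hypothetical.
import asyncio

async def ok_runner():
  return [{'name': 'Startup.coldStart', 'p50': 120.0}]

async def failing_runner():
  raise RuntimeError('FTL run failed')

async def main():
  results = await asyncio.gather(ok_runner(), failing_runner(), return_exceptions=True)
  measurements = [m for r in results if not isinstance(r, Exception) for m in r]
  errors = [r for r in results if isinstance(r, Exception)]
  print(measurements)  # [{'name': 'Startup.coldStart', 'p50': 120.0}]
  print(errors)        # [RuntimeError('FTL run failed')]

asyncio.run(main())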
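Likewise, a quick illustration of the new aggregation in _aggregate_benchmark_results: per-run timings are now summarized with numpy.percentile (p50/p90/p99) instead of the statistics mean/median/stdev. The run durations below are made up.

# Sketch of the percentile summary produced for each benchmark method;
# the run durations are hypothetical, in milliseconds.
import numpy

runs = [118.2, 121.7, 119.5, 130.1, 122.4]
summary = {
  'min': min(runs),
  'max': max(runs),
  'p50': numpy.percentile(runs, 50),
  'p90': numpy.percentile(runs, 90),
  'p99': numpy.percentile(runs, 99),
  'unit': 'ms',
}
print(summary)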