# coding: utf8
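
"""Load test for asyncio_pool: measures the scheduling overhead of the
AioPool primitives (spawn, spawn_n, map, itermap) against an ideal
fully-parallel run.

Usage (script name assumed, adjust to the actual file name):
    python loadtest.py spawn --tasks 100000 --pool-size 1000 --task-duration 0.2
"""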

import os
import sys

# Make the parent directory importable so the local asyncio_pool package is found.
curr_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.split(curr_dir)[0])

import time
import argparse
import asyncio as aio
from asyncio_pool import AioPool, result_noraise

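# spawn: wait for a free pool slot, then schedule each task; collect the
# futures and read their results (via result_noraise) after the pool joins.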
async def loadtest_spawn(tasks, pool_size, duration):
    futures = []
    async with AioPool(size=pool_size) as pool:
        for i in range(tasks):
            fut = await pool.spawn(aio.sleep(duration))
            futures.append(fut)

    return [result_noraise(f) for f in futures]

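# spawn_n: like spawn, but returns a future immediately without waiting
# for a free slot, so all tasks are queued up front.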
async def loadtest_spawn_n(tasks, pool_size, duration):
    futures = []
    async with AioPool(size=pool_size) as pool:
        for i in range(tasks):
            fut = await pool.spawn_n(aio.sleep(duration))
            futures.append(fut)

    return [result_noraise(f) for f in futures]

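# map: apply wrk to every item and return the results in input order.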
async def loadtest_map(tasks, pool_size, duration):
    async def wrk(i):
        await aio.sleep(duration)

    async with AioPool(size=pool_size) as pool:
        return await pool.map(wrk, range(tasks))

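# itermap: consume results through an async iterator as tasks complete,
# instead of collecting them all at the end.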
async def loadtest_itermap(tasks, pool_size, duration):
    async def wrk(i):
        await aio.sleep(duration)

    results = []
    async with AioPool(size=pool_size) as pool:
        async for res in pool.itermap(wrk, range(tasks)):
            results.append(res)

    return results

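# Compare measured wall time against the ideal: `tasks` sleeps of
# `task_duration` seconds, executed `pool_size` at a time.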
def print_stats(args, exec_time):
    ideal = args.task_duration * (args.tasks / args.pool_size)

    overhead = exec_time - ideal
    overhead_perc = ((exec_time / ideal) - 1) * 100

    per_task = overhead / args.tasks
    per_task_perc = (((args.task_duration + per_task) / args.task_duration) - 1) * 100

    print(f'{ideal:15.5f}s -- ideal execution time')
    print(f'{exec_time:15.5f}s -- actual execution time')
    print(f'{overhead:15.5f}s -- total overhead')
    print(f'{overhead_perc:15.5f}% -- total overhead, percent')
    print(f'{per_task:15.5f}s -- overhead per task')
    print(f'{per_task_perc:15.5f}% -- overhead per task, percent')

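# CLI entry point: pick a load test method and report the pool's overhead.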
if __name__ == "__main__":
    methods = {
        'spawn': loadtest_spawn,
        'spawn_n': loadtest_spawn_n,
        'map': loadtest_map,
        'itermap': loadtest_itermap,
    }

    p = argparse.ArgumentParser()
    p.add_argument('method', choices=methods.keys())
    p.add_argument('--tasks', '-t', type=int, default=10**5)
    p.add_argument('--task-duration', '-d', type=float, default=0.2)
    p.add_argument('--pool-size', '-p', type=int, default=10**3)
    args = p.parse_args()

    print('>>> Running %d tasks in a pool of size %d, each task takes %.3f sec.' %
        (args.tasks, args.pool_size, args.task_duration))
    print('>>> This will take at least %.5f seconds' %
        (args.task_duration * (args.tasks / args.pool_size)))
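
    # Time the whole run, including pool startup and join.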
    ts_start = time.perf_counter()
    coro = methods[args.method](args.tasks, args.pool_size, args.task_duration)
    aio.run(coro)  # aio.run (Python 3.7+) replaces the deprecated get_event_loop().run_until_complete pattern
    exec_time = time.perf_counter() - ts_start
    print_stats(args, exec_time)