This article collects typical usage examples of the python_utils.jobset.run function in Python. If you are wondering what the run function does, how to call it, or what real-world uses look like, the curated examples below should help.
The sections below present 13 code examples of the run function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
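All thirteen examples share one pattern: build a list of jobset.JobSpec objects describing the commands to run, then hand the list to jobset.run, which executes the jobs with bounded parallelism and returns a failure count together with a resultset. Here is a minimal sketch of that pattern, inferred from the examples on this page and assuming the gRPC repo's python_utils package is on the import path; the script path, shortname, and TASK_ID environment variable are hypothetical placeholders, not real gRPC files.

import sys

import python_utils.jobset as jobset

# Describe each unit of work as a JobSpec: a command line, a short label for
# progress output, optional environment variables, and a timeout.
jobs = [
    jobset.JobSpec(
        cmdline=['tools/run_tests/my_task.sh'],  # hypothetical script
        shortname='my_task.%d' % i,
        environ={'TASK_ID': str(i)},             # hypothetical variable
        timeout_seconds=5 * 60)
    for i in range(4)
]

jobset.message('START', 'Running tasks.', do_newline=True)
# jobset.run executes the jobs (at most maxjobs concurrently) and returns
# (number_of_failures, resultset).
num_failures, resultset = jobset.run(jobs, newline_on_success=True, maxjobs=4)
if num_failures == 0:
    jobset.message('SUCCESS', 'All tasks finished.', do_newline=True)
else:
    jobset.message('FAILED', 'Some tasks failed.', do_newline=True)
    sys.exit(1)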
Example 1: prepare_remote_hosts
def prepare_remote_hosts(hosts, prepare_local=False):
    """Prepares remote hosts (and maybe prepare localhost as well)."""
    prepare_timeout = 5 * 60
    prepare_jobs = []
    for host in hosts:
        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
        prepare_jobs.append(
            jobset.JobSpec(
                cmdline=['tools/run_tests/performance/remote_host_prepare.sh'],
                shortname='remote_host_prepare.%s' % host,
                environ={'USER_AT_HOST': user_at_host},
                timeout_seconds=prepare_timeout))
    if prepare_local:
        # Prepare localhost as well
        prepare_jobs.append(
            jobset.JobSpec(
                cmdline=['tools/run_tests/performance/kill_workers.sh'],
                shortname='local_prepare',
                timeout_seconds=prepare_timeout))
    jobset.message('START', 'Preparing hosts.', do_newline=True)
    num_failures, _ = jobset.run(
        prepare_jobs, newline_on_success=True, maxjobs=10)
    if num_failures == 0:
        jobset.message('SUCCESS',
                       'Prepare step completed successfully.',
                       do_newline=True)
    else:
        jobset.message('FAILED', 'Failed to prepare remote hosts.',
                       do_newline=True)
        sys.exit(1)
Author: makdharma, Project: grpc, Lines: 30, Source: run_performance_tests.py
Example 2: build_on_remote_hosts
def build_on_remote_hosts(hosts, languages=scenario_config.LANGUAGES.keys(), build_local=False):
    """Builds performance worker on remote hosts (and maybe also locally)."""
    build_timeout = 15 * 60
    build_jobs = []
    for host in hosts:
        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
        build_jobs.append(
            jobset.JobSpec(
                cmdline=['tools/run_tests/performance/remote_host_build.sh'] + languages,
                shortname='remote_host_build.%s' % host,
                environ={'USER_AT_HOST': user_at_host, 'CONFIG': 'opt'},
                timeout_seconds=build_timeout))
    if build_local:
        # Build locally as well
        build_jobs.append(
            jobset.JobSpec(
                cmdline=['tools/run_tests/performance/build_performance.sh'] + languages,
                shortname='local_build',
                environ={'CONFIG': 'opt'},
                timeout_seconds=build_timeout))
    jobset.message('START', 'Building.', do_newline=True)
    num_failures, _ = jobset.run(
        build_jobs, newline_on_success=True, maxjobs=10)
    if num_failures == 0:
        jobset.message('SUCCESS',
                       'Built successfully.',
                       do_newline=True)
    else:
        jobset.message('FAILED', 'Build failed.',
                       do_newline=True)
        sys.exit(1)
Author: makdharma, Project: grpc, Lines: 31, Source: run_performance_tests.py
Example 3: archive_repo
def archive_repo(languages):
    """Archives local version of repo including submodules."""
    # Directory contains symlinks that can't be correctly untarred on Windows
    # so we just skip them as a workaround.
    # See https://github.com/grpc/grpc/issues/16334
    bad_symlinks_dir = '../grpc/third_party/libcxx/test/std/experimental/filesystem/Inputs/static_test_env'
    cmdline = [
        'tar', '--exclude', bad_symlinks_dir, '-cf', '../grpc.tar', '../grpc/'
    ]
    if 'java' in languages:
        cmdline.append('../grpc-java')
    if 'go' in languages:
        cmdline.append('../grpc-go')
    if 'node' in languages or 'node_purejs' in languages:
        cmdline.append('../grpc-node')
    archive_job = jobset.JobSpec(
        cmdline=cmdline, shortname='archive_repo', timeout_seconds=3 * 60)
    jobset.message('START', 'Archiving local repository.', do_newline=True)
    num_failures, _ = jobset.run(
        [archive_job], newline_on_success=True, maxjobs=1)
    if num_failures == 0:
        jobset.message(
            'SUCCESS',
            'Archive with local repository created successfully.',
            do_newline=True)
    else:
        jobset.message(
            'FAILED', 'Failed to archive local repository.', do_newline=True)
        sys.exit(1)
Author: Falco20019, Project: grpc, Lines: 31, Source: run_performance_tests.py
Example 4: run_collect_perf_profile_jobs
def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name):
    perf_report_jobs = []
    global profile_output_files
    for host_and_port in hosts_and_base_names:
        perf_base_name = hosts_and_base_names[host_and_port]
        output_filename = '%s-%s' % (scenario_name, perf_base_name)
        # from the base filename, create .svg output filename
        host = host_and_port.split(':')[0]
        profile_output_files.append('%s.svg' % output_filename)
        perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename))
    jobset.message('START', 'Collecting perf reports from qps workers', do_newline=True)
    failures, _ = jobset.run(perf_report_jobs, newline_on_success=True, maxjobs=1)
    jobset.message('END', 'Collecting perf reports from qps workers', do_newline=True)
    return failures
Author: makdharma, Project: grpc, Lines: 15, Source: run_performance_tests.py
Example 5: archive_repo
def archive_repo(languages):
    """Archives local version of repo including submodules."""
    cmdline = ['tar', '-cf', '../grpc.tar', '../grpc/']
    if 'java' in languages:
        cmdline.append('../grpc-java')
    if 'go' in languages:
        cmdline.append('../grpc-go')
    archive_job = jobset.JobSpec(
        cmdline=cmdline, shortname='archive_repo', timeout_seconds=3 * 60)
    jobset.message('START', 'Archiving local repository.', do_newline=True)
    num_failures, _ = jobset.run(
        [archive_job], newline_on_success=True, maxjobs=1)
    if num_failures == 0:
        jobset.message(
            'SUCCESS',
            'Archive with local repository created successfully.',
            do_newline=True)
    else:
        jobset.message(
            'FAILED', 'Failed to archive local repository.', do_newline=True)
        sys.exit(1)
Author: CCNITSilchar, Project: grpc, Lines: 23, Source: run_performance_tests.py
Example 6: all
# Figure out which targets to build
targets = []
for label in args.build:
    targets += _BUILD_MAP[label]

# Among targets selected by -b, filter out those that don't match the filter
targets = [t for t in targets if all(f in t.labels for f in args.filter)]
targets = sorted(set(targets))

# Execute pre-build phase
prebuild_jobs = []
for target in targets:
    prebuild_jobs += target.pre_build_jobspecs()
if prebuild_jobs:
    num_failures, _ = jobset.run(
        prebuild_jobs, newline_on_success=True, maxjobs=args.jobs)
    if num_failures != 0:
        jobset.message('FAILED', 'Pre-build phase failed.', do_newline=True)
        sys.exit(1)

build_jobs = []
for target in targets:
    build_jobs.append(target.build_jobspec())
if not build_jobs:
    print('Nothing to build.')
    sys.exit(1)

jobset.message('START', 'Building targets.', do_newline=True)
num_failures, _ = jobset.run(
    build_jobs, newline_on_success=True, maxjobs=args.jobs)
if num_failures == 0:
Author: makdharma, Project: grpc, Lines: 31, Source: task_runner.py
Example 7: print
qps_workers_killed = 0
merged_resultset = {}
perf_report_failures = 0

for scenario in scenarios:
    if args.dry_run:
        print(scenario.name)
    else:
        scenario_failures = 0
        try:
            for worker in scenario.workers:
                worker.start()
            jobs = [scenario.jobspec]
            if scenario.workers:
                jobs.append(create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host))
            scenario_failures, resultset = jobset.run(jobs, newline_on_success=True, maxjobs=1)
            total_scenario_failures += scenario_failures
            merged_resultset = dict(itertools.chain(six.iteritems(merged_resultset),
                                                    six.iteritems(resultset)))
        finally:
            # Consider qps workers that need to be killed as failures
            qps_workers_killed += finish_qps_workers(scenario.workers)

        if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
            workers_and_base_names = {}
            for worker in scenario.workers:
                if not worker.perf_file_base_name:
                    raise Exception('using perf but perf report filename is unspecified')
                workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name
            perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name)
Author: makdharma, Project: grpc, Lines: 30, Source: run_performance_tests.py
Example 8: collect_latency
def collect_latency(bm_name, args):
    """generate latency profiles"""
    benchmarks = []
    profile_analysis = []
    cleanup = []
    heading('Latency Profiles: %s' % bm_name)
    subprocess.check_call(
        ['make', bm_name,
         'CONFIG=basicprof', '-j', '%d' % multiprocessing.cpu_count()])
    for line in subprocess.check_output(['bins/basicprof/%s' % bm_name,
                                         '--benchmark_list_tests']).splitlines():
        link(line, '%s.txt' % fnize(line))
        benchmarks.append(
            jobset.JobSpec(['bins/basicprof/%s' % bm_name,
                            '--benchmark_filter=^%s$' % line,
                            '--benchmark_min_time=0.05'],
                           environ={'LATENCY_TRACE': '%s.trace' % fnize(line)}))
        profile_analysis.append(
            jobset.JobSpec([sys.executable,
                            'tools/profiling/latency_profile/profile_analyzer.py',
                            '--source', '%s.trace' % fnize(line), '--fmt', 'simple',
                            '--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=None))
        cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
        # periodically flush out the list of jobs: profile_analysis jobs at least
        # consume upwards of five gigabytes of ram in some cases, and so analysing
        # hundreds of them at once is impractical -- but we want at least some
        # concurrency or the work takes too long
        if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
            # run up to half the cpu count: each benchmark can use up to two cores
            # (one for the microbenchmark, one for the data flush)
            jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() // 2))
            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
            benchmarks = []
            profile_analysis = []
            cleanup = []
    # run the remaining benchmarks that weren't flushed
    if len(benchmarks):
        jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() // 2))
        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
Author: aaronjheng, Project: grpc, Lines: 42, Source: run_microbenchmark.py
Example 9: collect_perf
def collect_perf(bm_name, args):
    """generate flamegraphs"""
    heading('Flamegraphs: %s' % bm_name)
    subprocess.check_call(
        ['make', bm_name,
         'CONFIG=mutrace', '-j', '%d' % multiprocessing.cpu_count()])
    benchmarks = []
    profile_analysis = []
    cleanup = []
    for line in subprocess.check_output(['bins/mutrace/%s' % bm_name,
                                         '--benchmark_list_tests']).splitlines():
        link(line, '%s.svg' % fnize(line))
        benchmarks.append(
            jobset.JobSpec(['perf', 'record', '-o', '%s-perf.data' % fnize(line),
                            '-g', '-F', '997',
                            'bins/mutrace/%s' % bm_name,
                            '--benchmark_filter=^%s$' % line,
                            '--benchmark_min_time=10']))
        profile_analysis.append(
            jobset.JobSpec(['tools/run_tests/performance/process_local_perf_flamegraphs.sh'],
                           environ={
                               'PERF_BASE_NAME': fnize(line),
                               'OUTPUT_DIR': 'reports',
                               'OUTPUT_FILENAME': fnize(line),
                           }))
        cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
        cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
        # periodically flush out the list of jobs: temporary space required for this
        # processing is large
        if len(benchmarks) >= 20:
            # run benchmarks one at a time (perf recording is done serially)
            jobset.run(benchmarks, maxjobs=1)
            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
            benchmarks = []
            profile_analysis = []
            cleanup = []
    # run the remaining benchmarks that weren't flushed
    if len(benchmarks):
        jobset.run(benchmarks, maxjobs=1)
        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
Author: aaronjheng, Project: grpc, Lines: 43, Source: run_microbenchmark.py
Example 10: str
build_jobs = []
for l in languages_to_build:
    if str(l) == 'objc':
        # we don't need to build a docker image for objc
        continue
    job = build_interop_image_jobspec(l)
    docker_images[str(l)] = job.tag
    build_jobs.append(job)

if build_jobs:
    jobset.message('START', 'Building interop docker images.', do_newline=True)
    if args.verbose:
        print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
    num_failures, _ = jobset.run(
        build_jobs, newline_on_success=True, maxjobs=args.jobs)
    if num_failures == 0:
        jobset.message('SUCCESS', 'All docker images built successfully.',
                       do_newline=True)
    else:
        jobset.message('FAILED', 'Failed to build interop docker images.',
                       do_newline=True)
        for image in six.itervalues(docker_images):
            dockerjob.remove_image(image, skip_nonexistent=True)
        sys.exit(1)

server_manual_cmd_log = [] if args.manual_run else None
client_manual_cmd_log = [] if args.manual_run else None

# Start interop servers.
server_jobs = {}
Author: endobson, Project: grpc, Lines: 31, Source: run_interop_tests.py
Example 11: main
def main():
    argp = argparse.ArgumentParser(description='Run performance tests.')
    argp.add_argument('-l', '--language',
                      choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
                      nargs='+',
                      required=True,
                      help='Languages to benchmark.')
    argp.add_argument('--remote_driver_host',
                      default=None,
                      help='Run QPS driver on given host. By default, QPS driver is run locally.')
    argp.add_argument('--remote_worker_host',
                      nargs='+',
                      default=[],
                      help='Worker hosts on which to start QPS workers.')
    argp.add_argument('--dry_run',
                      default=False,
                      action='store_const',
                      const=True,
                      help='Just list scenarios to be run, but don\'t run them.')
    argp.add_argument('-r', '--regex', default='.*', type=str,
                      help='Regex to select scenarios to run.')
    argp.add_argument('--bq_result_table', default=None, type=str,
                      help='Bigquery "dataset.table" to upload results to.')
    argp.add_argument('--category',
                      choices=['smoketest', 'all', 'scalable', 'sweep'],
                      default='all',
                      help='Select a category of tests to run.')
    argp.add_argument('--netperf',
                      default=False,
                      action='store_const',
                      const=True,
                      help='Run netperf benchmark as one of the scenarios.')
    argp.add_argument('--server_cpu_load',
                      default=0, type=int,
                      help='Select a targeted server cpu load to run. 0 means ignore this flag')
    argp.add_argument('-x', '--xml_report', default='report.xml', type=str,
                      help='Name of XML report file to generate.')
    argp.add_argument('--perf_args',
                      help=('Example usage: "--perf_args=record -F 99 -g". '
                            'Wrap QPS workers in a perf command '
                            'with the arguments to perf specified here. '
                            '".svg" flame graph profiles will be '
                            'created for each QPS worker on each scenario. '
                            'Files will be output to the "<repo_root>/<args.flame_graph_reports>" '
                            'directory. Output files from running the worker '
                            'under perf are saved in the repo root where it is run. '
                            'Note that the perf "-g" flag is necessary for '
                            'flame graph generation to work (assuming the binary '
                            'being profiled uses frame pointers; otherwise check out '
                            'the "--call-graph dwarf" option, which uses libunwind). '
                            'Also note that the entire "--perf_args=<arg(s)>" must '
                            'be wrapped in quotes as in the example usage. '
                            'If "--perf_args" is unspecified, "perf" will '
                            'not be used at all. '
                            'See http://www.brendangregg.com/perf.html '
                            'for more general perf examples.'))
    argp.add_argument('--skip_generate_flamegraphs',
                      default=False,
                      action='store_const',
                      const=True,
                      help=('Turn flame graph generation off. '
                            'May be useful if "perf_args" arguments do not make sense for '
                            'generating flamegraphs (e.g., "--perf_args=stat ...")'))
    argp.add_argument('-f', '--flame_graph_reports', default='perf_reports', type=str,
                      help='Name of directory to output flame graph profiles to, if any are created.')

    args = argp.parse_args()

    languages = set(scenario_config.LANGUAGES[l]
                    for l in itertools.chain.from_iterable(
                        six.iterkeys(scenario_config.LANGUAGES) if x == 'all'
                        else [x] for x in args.language))

    # Put together set of remote hosts where to run and build
    remote_hosts = set()
    if args.remote_worker_host:
        for host in args.remote_worker_host:
            remote_hosts.add(host)
    if args.remote_driver_host:
        remote_hosts.add(args.remote_driver_host)

    if not args.dry_run:
        if remote_hosts:
            archive_repo(languages=[str(l) for l in languages])
            prepare_remote_hosts(remote_hosts, prepare_local=True)
        else:
            prepare_remote_hosts([], prepare_local=True)

    build_local = False
    if not args.remote_driver_host:
        build_local = True
    if not args.dry_run:
        build_on_remote_hosts(remote_hosts, languages=[str(l) for l in languages], build_local=build_local)

    perf_cmd = None
    if args.perf_args:
        print('Running workers under perf profiler')
        # Expect /usr/bin/perf to be installed here, as is usual
        perf_cmd = ['/usr/bin/perf']
#......... remainder of this snippet omitted .........
Author: endobson, Project: grpc, Lines: 101, Source: run_performance_tests.py
Example 12: print
print('Will run these tests:')
for job in jobs:
    if args.dry_run:
        print('  %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
    else:
        print('  %s' % job.shortname)
print()

if args.dry_run:
    print('--dry_run was used, exiting')
    sys.exit(1)

jobset.message('START', 'Running test matrix.', do_newline=True)
num_failures, resultset = jobset.run(jobs,
                                     newline_on_success=True,
                                     travis=True,
                                     maxjobs=args.jobs)
# Merge skipped tests into results to show skipped tests on report.xml
if skipped_jobs:
    ignored_num_skipped_failures, skipped_results = jobset.run(
        skipped_jobs, skip_jobs=True)
    resultset.update(skipped_results)
report_utils.render_junit_xml_report(resultset, 'report_%s' % _REPORT_SUFFIX,
                                     suite_name='aggregate_tests')

if num_failures == 0:
    jobset.message('SUCCESS', 'All run_tests.py instances finished successfully.',
                   do_newline=True)
else:
    jobset.message('FAILED', 'Some run_tests.py instances have failed.',
                   do_newline=True)
Author: makdharma, Project: grpc, Lines: 31, Source: run_tests_matrix.py
Example 13: run_one_scenario
def run_one_scenario(scenario_config):
    jobset.message('START', 'Run scenario: %s' % scenario_config['name'])
    server_jobs = {}
    server_addresses = {}
    suppress_server_logs = True
    try:
        backend_addrs = []
        fallback_ips = []
        grpclb_ips = []
        shortname_prefix = scenario_config['name']
        # Start backends
        for i in xrange(len(scenario_config['backend_configs'])):
            backend_config = scenario_config['backend_configs'][i]
            backend_shortname = shortname(shortname_prefix, 'backend_server', i)
            backend_spec = backend_server_jobspec(
                backend_config['transport_sec'], backend_shortname)
            backend_job = dockerjob.DockerJob(backend_spec)
            server_jobs[backend_shortname] = backend_job
            backend_addrs.append('%s:%d' % (backend_job.ip_address(),
                                            _BACKEND_SERVER_PORT))
        # Start fallbacks
        for i in xrange(len(scenario_config['fallback_configs'])):
            fallback_config = scenario_config['fallback_configs'][i]
            fallback_shortname = shortname(shortname_prefix, 'fallback_server',
                                           i)
            fallback_spec = fallback_server_jobspec(
                fallback_config['transport_sec'], fallback_shortname)
            fallback_job = dockerjob.DockerJob(fallback_spec)
            server_jobs[fallback_shortname] = fallback_job
            fallback_ips.append(fallback_job.ip_address())
        # Start balancers
        for i in xrange(len(scenario_config['balancer_configs'])):
            balancer_config = scenario_config['balancer_configs'][i]
            grpclb_shortname = shortname(shortname_prefix, 'grpclb_server', i)
            grpclb_spec = grpclb_jobspec(balancer_config['transport_sec'],
                                         balancer_config['short_stream'],
                                         backend_addrs, grpclb_shortname)
            grpclb_job = dockerjob.DockerJob(grpclb_spec)
            server_jobs[grpclb_shortname] = grpclb_job
            grpclb_ips.append(grpclb_job.ip_address())
        # Start DNS server
        dns_server_shortname = shortname(shortname_prefix, 'dns_server', 0)
        dns_server_spec = dns_server_in_docker_jobspec(
            grpclb_ips, fallback_ips, dns_server_shortname,
            scenario_config['cause_no_error_no_data_for_balancer_a_record'])
        dns_server_job = dockerjob.DockerJob(dns_server_spec)
        server_jobs[dns_server_shortname] = dns_server_job
        # Get the IP address of the docker container running the DNS server.
        # The DNS server is running on port 53 of that IP address. Note we will
        # point the DNS resolvers of grpc clients under test to our controlled
        # DNS server by effectively modifying the /etc/resolv.conf "nameserver"
        # lists of their docker containers.
        dns_server_ip = dns_server_job.ip_address()
        wait_until_dns_server_is_up(dns_server_ip)
        # Run clients
        jobs = []
        for lang_name in languages:
            # Skip languages that are known to not currently
            # work for this test.
            if not args.no_skips and lang_name in scenario_config.get(
                    'skip_langs', []):
                jobset.message('IDLE',
                               'Skipping scenario: %s for language: %s\n' %
                               (scenario_config['name'], lang_name))
                continue
            lang = _LANGUAGES[lang_name]
            test_job = lb_client_interop_jobspec(
                lang,
                dns_server_ip,
                docker_image=docker_images.get(lang.safename),
                transport_security=scenario_config['transport_sec'])
            jobs.append(test_job)
        jobset.message('IDLE', 'Jobs to run: \n%s\n' % '\n'.join(
            str(job) for job in jobs))
        num_failures, resultset = jobset.run(
            jobs, newline_on_success=True, maxjobs=args.jobs)
        report_utils.render_junit_xml_report(resultset, 'sponge_log.xml')
        if num_failures:
            suppress_server_logs = False
            jobset.message(
                'FAILED',
                'Scenario: %s. Some tests failed' % scenario_config['name'],
                do_newline=True)
        else:
            jobset.message(
                'SUCCESS',
                'Scenario: %s. All tests passed' % scenario_config['name'],
                do_newline=True)
        return num_failures
    finally:
        # Check if servers are still running.
        for server, job in server_jobs.items():
            if not job.is_running():
                print('Server "%s" has exited prematurely.' % server)
        suppress_failure = suppress_server_logs and not args.verbose
        dockerjob.finish_jobs(
            [j for j in six.itervalues(server_jobs)],
            suppress_failure=suppress_failure)
Author: Falco20019, Project: grpc, Lines: 98, Source: run_grpclb_interop_tests.py
Note: The python_utils.jobset.run examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, please refer to each project's license. Do not repost without permission.