本文整理汇总了Python中radical.entk.Stage类的典型用法代码示例。如果您正苦于以下问题:Python Stage类的具体用法?Python Stage怎么用?Python Stage使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Stage类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: test_wfp_check_processor
def test_wfp_check_processor():
    """check_processor() must report alive after start and dead after terminate."""

    # Minimal one-task workflow.
    pipe = Pipeline()
    stage = Stage()
    task = Task()
    task.executable = ['/bin/date']
    stage.add_tasks(task)
    pipe.add_stages(stage)

    # The app manager supplies the session id and message queues.
    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[pipe],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp.start_processor()
    assert wfp.check_processor()

    wfp.terminate_processor()
    assert not wfp.check_processor()
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:25,代码来源:test_wfp.py
示例2: generate_pipeline
def generate_pipeline(name, stages):
    """Build a named Pipeline with `stages` stages of five echo tasks each."""

    pipeline = Pipeline()
    pipeline.name = name

    for s_idx in range(stages):

        stage = Stage()
        stage.name = 'Stage %s' % s_idx

        # Five identical echo tasks per stage, distinguished only by arguments.
        for t_idx in range(5):
            task = Task()
            task.name = 'my-task'          # Assign a name to the task (optional)
            task.executable = '/bin/echo'  # Assign executable to the task
            task.arguments = ['I am task %s in %s in %s' % (t_idx, s_idx, name)]
            stage.add_tasks(task)

        pipeline.add_stages(stage)

    return pipeline
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:29,代码来源:add_pipelines.py
示例3: test_pipeline_decrement_stage
def test_pipeline_decrement_stage():
    """_decrement_stage() must step the cursor back and clear the completed flag."""

    pipe = Pipeline()

    stage_a = Stage()
    task = Task()
    task.executable = ['/bin/date']
    stage_a.tasks = task

    stage_b = Stage()
    task = Task()
    task.executable = ['/bin/date']
    stage_b.tasks = task

    pipe.add_stages([stage_a, stage_b])

    # Walk past the last stage: pipeline is considered complete.
    pipe._increment_stage()
    pipe._increment_stage()
    assert pipe._stage_count == 2
    assert pipe._cur_stage == 2
    assert pipe._completed_flag.is_set() == True

    # Each decrement moves the cursor back one and resets completion.
    pipe._decrement_stage()
    assert pipe._stage_count == 2
    assert pipe._cur_stage == 1
    assert pipe._completed_flag.is_set() == False

    pipe._decrement_stage()
    assert pipe._stage_count == 2
    assert pipe._cur_stage == 0
    assert pipe._completed_flag.is_set() == False
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:28,代码来源:test_pipeline.py
示例4: test_stage_parent_pipeline_assignment
def test_stage_parent_pipeline_assignment(l, i, b):
    """Assigning a non-Pipeline value to parent_pipeline must raise TypeError."""

    stage = Stage()

    for bad_value in (l, i, b):
        with pytest.raises(TypeError):
            stage.parent_pipeline = bad_value
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:7,代码来源:test_stage.py
示例5: test_stage_post_exec
def test_stage_post_exec():
    """Run a workflow whose stage carries a post_exec callback on localhost."""

    global p1

    p1.name = 'p1'

    stage = Stage()
    stage.name = 's1'
    for _ in range(NUM_TASKS):
        stage.add_tasks(create_single_task())

    # `condition` is evaluated once the stage finishes executing.
    stage.post_exec = condition

    p1.add_stages(stage)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 30,
        'cpus': 1,
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(rts='radical.pilot', hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:28,代码来源:test_post_exec.py
示例6: test_state_order
def test_state_order():
    """
    **Purpose**: Test if the Pipeline, Stage and Task are assigned their states in the correct order
    """

    def create_single_task():
        # Simple date task with empty staging directives.
        task = Task()
        task.name = 'simulation'
        task.executable = ['/bin/date']
        task.copy_input_data = []
        task.copy_output_data = []
        return task

    p1 = Pipeline()
    p1.name = 'p1'

    stage = Stage()
    stage.name = 's1'
    stage.tasks = create_single_task()
    stage.add_tasks(create_single_task())
    p1.add_stages(stage)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 5,
        'cpus': 1,
        'project': ''
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    os.environ['RP_ENABLE_OLD_DEFINES'] = 'True'

    appman = Amgr(hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()

    # Pipeline, stage, and every task must have walked their states in order.
    assert p1.state_history == ['DESCRIBED', 'SCHEDULING', 'DONE']

    assert p1.stages[0].state_history == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'DONE']

    for task in p1.stages[0].tasks:
        assert task.state_history == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'SUBMITTING',
                                      'SUBMITTED', 'EXECUTED', 'DEQUEUEING', 'DEQUEUED', 'DONE']
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:57,代码来源:test_amgr.py
示例7: test_stage_to_dict
def test_stage_to_dict():
    """A freshly constructed Stage serialises to the default dictionary."""

    stage = Stage()

    expected = {'uid': None,
                'name': None,
                'state': states.INITIAL,
                'state_history': [states.INITIAL],
                'parent_pipeline': {'uid': None, 'name': None}}

    assert stage.to_dict() == expected
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:10,代码来源:test_stage.py
示例8: test_wfp_workflow_incomplete
def test_wfp_workflow_incomplete():
    """Verify workflow_incomplete() flips to False once every task completes.

    Builds a one-task workflow, marks the task COMPLETED, publishes it to the
    completed queue, and lets the dequeue helper drain it.
    """

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()
    assert wfp.workflow_incomplete()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    # BUGFIX: the original used '==' (a no-op comparison) instead of '=' here,
    # so the pipeline/stage were never driven into their scheduling states.
    p.stages[0].state = states.SCHEDULING
    p.state = states.SCHEDULED

    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

        import json
        import pika

        # Publish each completed task to the session's completed queue so the
        # synchronizer can pick it up.
        task_as_dict = json.dumps(t.to_dict())
        mq_connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
        mq_channel = mq_connection.channel()
        mq_channel.basic_publish(exchange='',
                                 routing_key='%s-completedq-1' % amgr._sid,
                                 body=task_as_dict)

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_dequeue_test, name='temp-proc', args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    # All tasks drained: the workflow is now complete.
    assert not wfp.workflow_incomplete()
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:54,代码来源:test_wfp.py
示例9: test_pipeline_stage_assignment
def test_pipeline_stage_assignment():
    """Assigning a single Stage to Pipeline.stages wraps it in a list."""

    pipe = Pipeline()
    stage = Stage()
    task = Task()
    task.executable = ['/bin/date']
    stage.tasks = task

    pipe.stages = stage

    # The setter normalises to a list and updates the bookkeeping counters.
    assert type(pipe.stages) == list
    assert pipe._stage_count == 1
    assert pipe._cur_stage == 1
    assert pipe.stages[0] == stage
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:13,代码来源:test_pipeline.py
示例10: test_stage_task_assignment
def test_stage_task_assignment():
    """Assigning a single Task to Stage.tasks updates the bookkeeping attributes."""

    stage = Stage()
    task = Task()
    task.executable = ['/bin/date']

    stage.tasks = task

    # A single Task is normalised into a set of one.
    assert type(stage.tasks) == set
    assert stage._task_count == 1
    assert task in stage.tasks
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:13,代码来源:test_stage.py
示例11: test_amgr_synchronizer
def test_amgr_synchronizer():
    """Drive the Amgr synchronizer and check task/stage/pipeline state updates."""

    logger = ru.Logger('radical.entk.temp_logger')
    profiler = ru.Profiler(name='radical.entk.temp')

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    pipe = Pipeline()
    stage = Stage()

    # Create and add 100 tasks to the stage.
    for cnt in range(100):
        task = Task()
        task.executable = ['some-executable-%s' % cnt]
        stage.add_tasks(task)

    pipe.add_stages(stage)
    pipe._assign_uid(amgr._sid)
    pipe._validate()

    amgr.workflow = [pipe]

    # Everything starts out in the INITIAL state.
    for task in pipe.stages[0].tasks:
        assert task.state == states.INITIAL
    assert pipe.stages[0].state == states.INITIAL
    assert pipe.state == states.INITIAL

    # Run the synchronizer in a thread while a helper process feeds it.
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_synchronizer_test, name='temp-proc',
                   args=(amgr._sid, pipe, logger, profiler))
    proc.start()
    proc.join()

    # The helper advanced every entity to SCHEDULING.
    for task in pipe.stages[0].tasks:
        assert task.state == states.SCHEDULING
    assert pipe.stages[0].state == states.SCHEDULING
    assert pipe.state == states.SCHEDULING

    amgr._terminate_sync.set()
    sync_thread.join()
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:51,代码来源:test_amgr.py
示例12: generate_pipeline
def generate_pipeline():
    """Build an adaptive Pipeline of MAX_NEW_STAGE+1 stages.

    Each stage's post_exec hook can shuffle the order of the stages that have
    not yet run, for as long as CUR_NEW_STAGE has not passed MAX_NEW_STAGE.
    """

    def func_condition():
        # Keep adapting until the stage counter passes the configured maximum.
        global CUR_NEW_STAGE, MAX_NEW_STAGE

        if CUR_NEW_STAGE <= MAX_NEW_STAGE:
            return True

        return False

    def func_on_true():
        # Advance the counter and randomise the order of the remaining stages.
        global CUR_NEW_STAGE

        CUR_NEW_STAGE += 1
        shuffle(p.stages[CUR_NEW_STAGE:])

    def func_on_false():
        # BUGFIX: replaced the Python-2-only 'print' statement with a call so
        # the module also parses under Python 3.
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    for s in range(MAX_NEW_STAGE + 1):

        # Create a Stage object
        s1 = Stage()

        for i in range(CUR_TASKS):
            t1 = Task()
            t1.executable = '/bin/sleep'
            t1.arguments = ['30']

            # Add the Task to the Stage
            s1.add_tasks(t1)

        # Add post-exec to the Stage
        # BUGFIX: the dict keys were missing their opening quotes, which was a
        # syntax error in the original.
        s1.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }

        # Add Stage to the Pipeline
        p.add_stages(s1)

    return p
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:49,代码来源:adapt_to.py
示例13: generate_pipeline
def generate_pipeline():
    """Three-stage pipeline: generate a random file, count its characters,
    then checksum the count and download the result locally."""

    pipe = Pipeline()

    # Stage 1: write ~1 MB of random base64 data to output.txt.
    stage1 = Stage()
    task1 = Task()
    task1.executable = '/bin/bash'
    task1.arguments = ['-l', '-c', 'base64 /dev/urandom | head -c 1000000 > output.txt']
    stage1.add_tasks(task1)
    pipe.add_stages(stage1)

    # Stage 2: per-character frequency count of the stage-1 output.
    stage2 = Stage()
    task2 = Task()
    task2.executable = '/bin/bash'
    task2.arguments = ['-l', '-c', 'grep -o . output.txt | sort | uniq -c > ccount.txt']
    # Stage data from the upstream task into this task's sandbox.
    # NOTE(review): '$Pipline' looks like a typo for '$Pipeline' — confirm
    # against the entk data-staging placeholder syntax before relying on it.
    task2.copy_input_data = ['$Pipline_%s_Stage_%s_Task_%s/output.txt' % (pipe.uid, stage1.uid, task1.uid)]
    stage2.add_tasks(task2)
    pipe.add_stages(stage2)

    # Stage 3: checksum the count file and pull the result back.
    stage3 = Stage()
    task3 = Task()
    task3.executable = '/bin/bash'
    task3.arguments = ['-l', '-c', 'sha1sum ccount.txt > chksum.txt']
    task3.copy_input_data = ['$Pipline_%s_Stage_%s_Task_%s/ccount.txt' % (pipe.uid, stage2.uid, task2.uid)]
    # 'cnt' is a module-level counter distinguishing per-iteration downloads.
    task3.download_output_data = ['chksum.txt > chksum_%s.txt' % cnt]
    stage3.add_tasks(task3)
    pipe.add_stages(stage3)

    return pipe
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:54,代码来源:eop.py
示例14: test_stage_assign_uid
def test_stage_assign_uid():
    """_assign_uid() must produce the first uid in the 'stage' namespace."""

    s = Stage()

    # Best-effort cleanup of uid counters left over from previous runs so the
    # generated uid below is deterministic.
    try:
        import glob
        import os
        import shutil

        home = os.environ.get('HOME', '/home')
        for folder in glob.glob('%s/.radical/utils/test*' % home):
            shutil.rmtree(folder)
    except OSError:
        # BUGFIX: the original bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; only filesystem errors are expected here.
        pass

    s._assign_uid('test')
    assert s.uid == 'stage.0000'
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:15,代码来源:test_stage.py
示例15: on_true
def on_true():
    """Double the task count and append one more stage to the global pipeline."""

    global NUM_TASKS, CUR_STAGE

    NUM_TASKS *= 2

    next_stage = Stage()
    next_stage.name = 's%s' % CUR_STAGE

    for _ in range(NUM_TASKS):
        next_stage.add_tasks(create_single_task())

    # Chain the same condition onto the new stage so adaptation continues.
    next_stage.post_exec = condition

    p1.add_stages(next_stage)
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:15,代码来源:test_post_exec.py
示例16: test_stage_validate_entities
def test_stage_validate_entities(t, l, i, b, se):
    """_validate_entities() rejects non-Task input and normalises Tasks to a set."""

    stage = Stage()

    # Every non-Task sample must be rejected with a TypeError.
    for bad in [t, l, i, b, se]:
        with pytest.raises(TypeError):
            stage._validate_entities(bad)

    # A single Task is accepted and wrapped in a set.
    single = Task()
    assert isinstance(stage._validate_entities(single), set)

    # A list of Tasks comes back as the equivalent set.
    t1 = Task()
    t2 = Task()
    assert set([t1, t2]) == stage._validate_entities([t1, t2])
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:16,代码来源:test_stage.py
示例17: test_stage_state_assignment
def test_stage_state_assignment(t, l, i, b):
    """Stage.state rejects wrong types/values and accepts every legal state."""

    stage = Stage()

    # Non-string types are rejected outright.
    for bad in [l, i, b]:
        with pytest.raises(TypeError):
            stage.state = bad

    # An arbitrary string is the right type but an invalid state value.
    if isinstance(t, str):
        with pytest.raises(ValueError):
            stage.state = t

    # Every declared stage state must be assignable.
    for val in states._stage_state_values.keys():
        stage.state = val
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:16,代码来源:test_stage.py
示例18: create_pipeline
def create_pipeline():
    """Return a one-stage pipeline holding a single 10-second sleep task."""

    task = Task()
    task.name = 'simulation'
    task.executable = ['sleep']
    task.arguments = ['10']

    stage = Stage()
    stage.add_tasks(task)

    pipe = Pipeline()
    pipe.add_stages(stage)

    return pipe
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:16,代码来源:issue_255.py
示例19: test_wfp_enqueue
def test_wfp_enqueue():
    """Tasks enqueued through the WFprocessor must reach the SCHEDULED state."""

    pipe = Pipeline()
    stage = Stage()
    task = Task()
    task.executable = ['/bin/date']
    stage.add_tasks(task)
    pipe.add_stages(stage)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[pipe],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)
    wfp._initialize_workflow()

    amgr.workflow = [pipe]
    profiler = ru.Profiler(name='radical.entk.temp')

    # Nothing has been scheduled yet.
    for task in pipe.stages[0].tasks:
        assert task.state == states.INITIAL
    assert pipe.stages[0].state == states.INITIAL
    assert pipe.state == states.INITIAL

    # Run the synchronizer thread alongside the enqueue helper process.
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_enqueue_test, name='temp-proc', args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    # After enqueueing: tasks and stage are SCHEDULED, pipeline is SCHEDULING.
    for task in pipe.stages[0].tasks:
        assert task.state == states.SCHEDULED
    assert pipe.stages[0].state == states.SCHEDULED
    assert pipe.state == states.SCHEDULING
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:47,代码来源:test_wfp.py
示例20: get_pipeline
def get_pipeline(shared_fs=False, size=1):
    """Two-stage dd pipeline writing either to node-local storage or a shared
    filesystem; stage-2 tasks are tagged to co-locate with their stage-1 peer."""

    pipe = Pipeline()
    pipe.name = 'p'

    num_tasks = 4

    # Stage 1: each task writes `size` MB of random data.
    stage1 = Stage()
    stage1.name = 's1'
    for idx in range(num_tasks):
        task = Task()
        task.name = 't%s' % idx
        # dd if=/dev/random bs=<byte size of a chunk> count=<number of chunks> of=<output file name>
        task.executable = 'dd'
        if not shared_fs:
            task.arguments = ['if=/dev/urandom', 'bs=%sM' % size, 'count=1',
                              'of=$NODE_LFS_PATH/s1_t%s.txt' % idx]
        else:
            task.arguments = ['if=/dev/urandom', 'bs=%sM' % size, 'count=1',
                              'of=/home/vivek91/s1_t%s.txt' % idx]
        task.cpu_reqs['processes'] = 1
        task.cpu_reqs['threads_per_process'] = 24
        task.cpu_reqs['thread_type'] = ''
        task.cpu_reqs['process_type'] = ''
        task.lfs_per_process = 1024
        stage1.add_tasks(task)
    pipe.add_stages(stage1)

    # Stage 2: each task copies the file its tagged stage-1 peer produced.
    stage2 = Stage()
    stage2.name = 's2'
    for idx in range(num_tasks):
        task = Task()
        task.executable = ['dd']
        if not shared_fs:
            task.arguments = ['if=$NODE_LFS_PATH/s1_t%s.txt' % idx, 'bs=%sM' % size,
                              'count=1', 'of=$NODE_LFS_PATH/s2_t%s.txt' % idx]
        else:
            task.arguments = ['if=/home/vivek91/s1_t%s.txt' % idx, 'bs=%sM' % size,
                              'count=1', 'of=/home/vivek91/s2_t%s.txt' % idx]
        task.cpu_reqs['processes'] = 1
        task.cpu_reqs['threads_per_process'] = 24
        task.cpu_reqs['thread_type'] = ''
        task.cpu_reqs['process_type'] = ''
        # Co-locate with the matching stage-1 task.
        task.tag = 't%s' % idx
        stage2.add_tasks(task)
    pipe.add_stages(stage2)

    return pipe
开发者ID:radical-cybertools,项目名称:radical.ensemblemd,代码行数:55,代码来源:lfs_tagging_dd.py
注:本文中的radical.entk.Stage类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论