This article collects typical usage examples of the pyrsistent.thaw function in Python. If you are wondering what thaw does, how to call it, or what real-world code using it looks like, the curated examples below should help.
A total of 20 code examples of the thaw function are shown below, sorted by popularity by default.
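Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what thaw does: it recursively converts pyrsistent's immutable structures (PMap, PVector, PSet, PRecord, ...) back into plain mutable dicts, lists, and sets, while freeze is the inverse operation.

from pyrsistent import freeze, thaw

config = freeze({'server': {'name': 'foo'}, 'ports': [80, 443]})
updated = config.set('region', 'us-west-1')   # .set returns a new persistent map

plain = thaw(updated)                         # back to a plain dict/list structure
assert plain == {'server': {'name': 'foo'}, 'ports': [80, 443],
                 'region': 'us-west-1'}
assert isinstance(plain, dict) and isinstance(plain['ports'], list)
assert 'region' not in config                 # the original map is untouched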
Example 1: setUp
def setUp(self):
    self.tenant_id = 'tenant-id'
    self.group_id = 'group-id'
    self.state = GroupState(self.tenant_id, self.group_id, 'group-name',
                            {}, {}, None, {}, False,
                            ScalingGroupStatus.ACTIVE, desired=2)
    self.group = mock_group(self.state, self.tenant_id, self.group_id)
    self.lc = {'args': {'server': {'name': 'foo'}, 'loadBalancers': []}}
    self.desired_lbs = s(CLBDescription(lb_id='23', port=80))
    self.servers = (
        server('a', ServerState.ACTIVE, servicenet_address='10.0.0.1',
               desired_lbs=self.desired_lbs,
               links=freeze([{'href': 'link1', 'rel': 'self'}])),
        server('b', ServerState.ACTIVE, servicenet_address='10.0.0.2',
               desired_lbs=self.desired_lbs,
               links=freeze([{'href': 'link2', 'rel': 'self'}]))
    )
    self.state_active = {}
    self.cache = [thaw(self.servers[0].json), thaw(self.servers[1].json)]
    self.gsgi = GetScalingGroupInfo(tenant_id='tenant-id',
                                    group_id='group-id')
    self.manifest = {  # Many details elided!
        'state': self.state,
        'launchConfiguration': self.lc,
    }
    self.gsgi_result = (self.group, self.manifest)
    self.now = datetime(1970, 1, 1)
Developer: pratikmallya, Project: otter, Lines: 27, Source: test_service.py
Example 2: test_no_steps
def test_no_steps(self):
    """
    If state of world matches desired, no steps are executed, but the
    `active` servers are still updated, and SUCCESS is the return value.
    """
    for serv in self.servers:
        serv.desired_lbs = pset()
    sequence = [
        parallel_sequence([]),
        (Log('execute-convergence', mock.ANY), noop),
        (Log('execute-convergence-results',
             {'results': [], 'worst_status': 'SUCCESS'}), noop),
        (UpdateServersCache(
            "tenant-id", "group-id", self.now,
            [thaw(self.servers[0].json.set('_is_as_active', True)),
             thaw(self.servers[1].json.set("_is_as_active", True))]),
         noop)
    ]
    self.state_active = {
        'a': {'id': 'a', 'links': [{'href': 'link1', 'rel': 'self'}]},
        'b': {'id': 'b', 'links': [{'href': 'link2', 'rel': 'self'}]}
    }
    self.cache[0]["_is_as_active"] = True
    self.cache[1]["_is_as_active"] = True
    self.assertEqual(
        perform_sequence(self.get_seq() + sequence, self._invoke()),
        (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
Developer: pratikmallya, Project: otter, Lines: 27, Source: test_service.py
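A recurring pattern in these otter tests is thaw(server.json.set('_is_as_active', True)): .set produces a modified copy of the persistent server JSON without touching the original, and thaw turns that copy into the plain dict expected by the cache intent. A rough standalone sketch of the idea, using a hand-built map in place of otter's server objects:

from pyrsistent import freeze, thaw

server_json = freeze({'id': 'a', 'links': [{'href': 'link1', 'rel': 'self'}]})

cache_entry = thaw(server_json.set('_is_as_active', True))
assert cache_entry == {'id': 'a',
                       'links': [{'href': 'link1', 'rel': 'self'}],
                       '_is_as_active': True}
assert '_is_as_active' not in server_json   # the frozen original is unchanged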
Example 3: test_reactivate_group_on_success_after_steps
def test_reactivate_group_on_success_after_steps(self):
    """
    When the group started in ERROR state, and convergence succeeds, the
    group is put back into ACTIVE.
    """
    self.manifest['state'].status = ScalingGroupStatus.ERROR

    def plan(*args, **kwargs):
        return pbag([TestStep(Effect("step"))])

    sequence = [
        parallel_sequence([]),
        (Log(msg='execute-convergence', fields=mock.ANY), noop),
        parallel_sequence([
            [("step", lambda i: (StepResult.SUCCESS, []))]
        ]),
        (Log(msg='execute-convergence-results', fields=mock.ANY), noop),
        (UpdateGroupStatus(scaling_group=self.group,
                           status=ScalingGroupStatus.ACTIVE),
         noop),
        (Log('group-status-active',
             dict(cloud_feed=True, status='ACTIVE')),
         noop),
        (UpdateServersCache(
            "tenant-id", "group-id", self.now,
            [thaw(self.servers[0].json.set('_is_as_active', True)),
             thaw(self.servers[1].json.set('_is_as_active', True))]),
         noop),
    ]
    self.assertEqual(
        perform_sequence(self.get_seq() + sequence, self._invoke(plan)),
        (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
Developer: pratikmallya, Project: otter, Lines: 32, Source: test_service.py
Example 4: test_agent_yml
def test_agent_yml(self):
    """
    ``task_configure_flocker_agent`` writes a ``/etc/flocker/agent.yml``
    file which contains the backend configuration passed to it.
    """
    control_address = BASIC_AGENT_YML["control-service"]["hostname"]
    expected_pool = u"some-test-pool"
    expected_backend_configuration = dict(pool=expected_pool)
    commands = task_configure_flocker_agent(
        control_node=control_address,
        dataset_backend=backend_loader.get(
            BASIC_AGENT_YML["dataset"]["backend"]
        ),
        dataset_backend_configuration=expected_backend_configuration,
        logging_config=thaw(BASIC_AGENT_YML["logging"]),
    )
    [put_agent_yml] = list(
        effect.intent
        for effect in
        commands.intent.effects
        if isinstance(effect.intent, Put)
    )
    # Seems like transform should be usable here but I don't know how.
    expected_agent_config = BASIC_AGENT_YML.set(
        "dataset",
        BASIC_AGENT_YML["dataset"].update(expected_backend_configuration)
    )
    self.assertEqual(
        put(
            content=yaml.safe_dump(thaw(expected_agent_config)),
            path=THE_AGENT_YML_PATH,
            log_content_filter=_remove_dataset_fields,
        ).intent,
        put_agent_yml,
    )
Developer: 332054781, Project: flocker, Lines: 35, Source: test_install.py
Example 5: test_reactivate_group_on_success_with_no_steps
def test_reactivate_group_on_success_with_no_steps(self):
    """
    When the group started in ERROR state, and convergence succeeds, the
    group is put back into ACTIVE, even if there were no steps to execute.
    """
    self.manifest['state'].status = ScalingGroupStatus.ERROR
    for serv in self.servers:
        serv.desired_lbs = pset()
    sequence = [
        parallel_sequence([]),
        (Log(msg='execute-convergence', fields=mock.ANY), noop),
        (Log(msg='execute-convergence-results', fields=mock.ANY), noop),
        (UpdateGroupStatus(scaling_group=self.group,
                           status=ScalingGroupStatus.ACTIVE),
         noop),
        (Log('group-status-active',
             dict(cloud_feed=True, status='ACTIVE')),
         noop),
        (UpdateServersCache(
            "tenant-id", "group-id", self.now,
            [thaw(self.servers[0].json.set("_is_as_active", True)),
             thaw(self.servers[1].json.set("_is_as_active", True))]),
         noop)
    ]
    self.state_active = {
        'a': {'id': 'a', 'links': [{'href': 'link1', 'rel': 'self'}]},
        'b': {'id': 'b', 'links': [{'href': 'link2', 'rel': 'self'}]}
    }
    self.cache[0]["_is_as_active"] = True
    self.cache[1]["_is_as_active"] = True
    self.assertEqual(
        perform_sequence(self.get_seq() + sequence, self._invoke()),
        (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
Developer: pratikmallya, Project: otter, Lines: 33, Source: test_service.py
Example 6: test_linking
def test_linking(self, cluster):
    """
    A link from an origin container to a destination container allows the
    origin container to establish connections to the destination container
    when the containers are running on different machines, using an address
    obtained from ``<ALIAS>_PORT_<PORT>_TCP_{ADDR,PORT}``-style environment
    variables set in the origin container's environment.
    """
    _, destination_port = find_free_port()
    _, origin_port = find_free_port()
    [destination, origin] = cluster.nodes
    busybox = pmap({
        u"image": u"busybox",
    })
    destination_container = busybox.update({
        u"name": random_name(self),
        u"node_uuid": destination.uuid,
        u"ports": [{u"internal": 8080, u"external": destination_port}],
        u"command_line": BUSYBOX_HTTP,
    })
    self.addCleanup(
        cluster.remove_container, destination_container[u"name"]
    )
    origin_container = busybox.update({
        u"name": random_name(self),
        u"node_uuid": origin.uuid,
        u"links": [{u"alias": "DEST", u"local_port": 80,
                    u"remote_port": destination_port}],
        u"ports": [{u"internal": 9000, u"external": origin_port}],
        u"command_line": [
            u"sh", u"-c", u"""\
echo -n '#!/bin/sh
nc $DEST_PORT_80_TCP_ADDR $DEST_PORT_80_TCP_PORT
' > /tmp/script.sh;
chmod +x /tmp/script.sh;
nc -ll -p 9000 -e /tmp/script.sh
"""]})
    self.addCleanup(
        cluster.remove_container, origin_container[u"name"]
    )
    running = gatherResults([
        cluster.create_container(thaw(destination_container)),
        cluster.create_container(thaw(origin_container)),
        # Wait for the link target container to be accepting connections.
        verify_socket(destination.public_address, destination_port),
        # Wait for the link source container to be accepting connections.
        verify_socket(origin.public_address, origin_port),
    ])
    running.addCallback(
        lambda _: self.assert_busybox_http(
            origin.public_address, origin_port))
    return running
Developer: Azulinho, Project: flocker, Lines: 57, Source: test_api.py
Example 7: test_success
def test_success(self):
    """
    Executes the plan and returns SUCCESS when that's the most severe
    result.
    """
    dgs = get_desired_group_state(self.group_id, self.lc, 2)
    deleted = server(
        'c', ServerState.DELETED, servicenet_address='10.0.0.3',
        desired_lbs=self.desired_lbs,
        links=freeze([{'href': 'link3', 'rel': 'self'}]))
    self.servers += (deleted,)

    steps = [
        TestStep(
            Effect(
                {'dgs': dgs,
                 'servers': self.servers,
                 'lb_nodes': (),
                 'now': 0})
            .on(lambda _: (StepResult.SUCCESS, [])))]

    def plan(dgs, servers, lb_nodes, now, build_timeout):
        self.assertEqual(build_timeout, 3600)
        return steps

    sequence = [
        parallel_sequence([]),
        (Log('execute-convergence',
             dict(servers=self.servers, lb_nodes=(), steps=steps,
                  now=self.now, desired=dgs)), noop),
        parallel_sequence([
            [({'dgs': dgs, 'servers': self.servers,
               'lb_nodes': (), 'now': 0},
              noop)]
        ]),
        (Log('execute-convergence-results',
             {'results': [{'step': steps[0],
                           'result': StepResult.SUCCESS,
                           'reasons': []}],
              'worst_status': 'SUCCESS'}), noop),
        # Note that servers arg is non-deleted servers
        (UpdateServersCache(
            "tenant-id", "group-id", self.now,
            [thaw(self.servers[0].json.set("_is_as_active", True)),
             thaw(self.servers[1].json.set("_is_as_active", True))]),
         noop)
    ]
    # all the servers updated in cache in beginning
    self.cache.append(thaw(deleted.json))
    self.assertEqual(
        perform_sequence(self.get_seq() + sequence, self._invoke(plan)),
        (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
Developer: pratikmallya, Project: otter, Lines: 54, Source: test_service.py
Example 8: collapse
def collapse(self, field_spec_list, name, reducer, append=False):
    """
    Collapses this event's columns, represented by the field_spec_list,
    into a single column. The collapsing itself is done with the reducer
    function. Optionally the collapsed column can be appended to the
    existing columns, or replace them (the default).

    Parameters
    ----------
    field_spec_list : list
        List of columns to collapse. If you need to retrieve deep
        nested values, that ['can.be', 'done.with', 'this.notation'].
    name : str
        Name of new column with collapsed data.
    reducer : function
        Reducer function to apply to the collected values.
    append : bool, optional
        Set True to add the new column to the existing data dict, False to
        create a new Event with just the collapsed data.

    Returns
    -------
    Event
        New event object.
    """
    data = thaw(self.data()) if append else dict()
    vals = list()
    for i in field_spec_list:
        vals.append(self.get(i))
    data[name] = reducer(vals)
    return self.set_data(data)
Developer: esnet, Project: pypond, Lines: 34, Source: event.py
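collapse() thaws the event's persistent data only when append=True, so the new column can be added to a plain mutable copy. Below is a rough standalone sketch of the same logic on a bare pmap, without pypond's Event class (the collapse function here is illustrative, not pypond's API):

from pyrsistent import pmap, thaw

def collapse(data, field_list, name, reducer, append=False):
    # Mirrors the method above, but on a plain persistent map.
    out = thaw(data) if append else {}
    out[name] = reducer([data.get(f) for f in field_list])
    return out

event_data = pmap({'in': 5, 'out': 7, 'status': 'ok'})
assert collapse(event_data, ['in', 'out'], 'total', sum) == {'total': 12}
assert collapse(event_data, ['in', 'out'], 'total', sum, append=True) == \
    {'in': 5, 'out': 7, 'status': 'ok', 'total': 12}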
Example 9: test_sequence
def test_sequence(self):
    """
    The function generates a packer configuration file, runs packer
    build and uploads the AMI ids to a given S3 bucket.
    """
    options = PublishInstallerImagesOptions()
    options.parseOptions(
        [b'--source-ami-map', b'{"us-west-1": "ami-1234"}']
    )
    configuration_path = self.make_temporary_directory()
    ami_map = PACKER_OUTPUT_US_ALL.output
    perform_sequence(
        seq=[
            (PackerConfigure(
                build_region=options["build_region"],
                publish_regions=options["regions"],
                source_ami_map=options["source-ami-map"],
                template=options["template"],
            ), lambda intent: configuration_path),
            (PackerBuild(
                configuration_path=configuration_path,
            ), lambda intent: ami_map),
            (StandardOut(
                content=json.dumps(
                    thaw(ami_map),
                    encoding='utf-8',
                ) + b"\n",
            ), lambda intent: None),
        ],
        eff=publish_installer_images_effects(options=options)
    )
Developer: zhangwei5095, Project: flocker, Lines: 32, Source: test_images.py
Example 10: _is_valid_linear_event
def _is_valid_linear_event(self, event):
    """
    Check to see if an event has good values when doing
    linear fill, since we need to keep a completely intact
    event for the values.

    While we are inspecting the data payload, make a note if
    any of the paths are pointing at a list. Then it
    will trigger that filling code later.
    """
    valid = True
    field_path = self._field_path_to_array(self._field_spec[0])
    val = nested_get(thaw(event.data()), field_path)

    # this is pointing at a path that does not exist, issue a warning
    # can call the event valid so it will be emitted. can't fill what
    # isn't there.
    if val == 'bad_path':
        self._warn('path does not exist: {0}'.format(field_path),
                   ProcessorWarning)
        return valid

    # a tracked field path is not valid so this is
    # not a valid linear event. also, if it is not a numeric
    # value, mark it as invalid and let _interpolate_event_list()
    # complain about/skip it.
    if not is_valid(val) or not isinstance(val, numbers.Number):
        valid = False

    return valid
Developer: esnet, Project: pypond, Lines: 32, Source: filler.py
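The filler thaws event.data() before walking a dotted field path with nested_get. The sketch below shows the same kind of lookup on thawed data with a hypothetical follow_path helper; pypond's real nested_get and its 'bad_path' sentinel may behave differently:

from pyrsistent import freeze, thaw

def follow_path(data, path, missing='bad_path'):
    # Walk a list of keys through nested dicts, returning a sentinel on a miss.
    for key in path:
        if not isinstance(data, dict) or key not in data:
            return missing
        data = data[key]
    return data

event_data = freeze({'direction': {'in': 5, 'out': 7}})
assert follow_path(thaw(event_data), ['direction', 'in']) == 5
assert follow_path(thaw(event_data), ['direction', 'bogus']) == 'bad_path'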
Example 11: to_point
def to_point(self, cols=None):
    """
    Returns a flat array starting with the timestamp, followed by the values.

    Can be given an optional list of columns so the returned list will
    have the values in order. Primarily for the TimeSeries wire format.

    Parameters
    ----------
    cols : list, optional
        List of data columns to order the data points in so the
        TimeSeries wire format lines up correctly. If not specified,
        the points will be in whatever order dict.values() decides
        to return them in.

    Returns
    -------
    list
        Epoch ms followed by points.
    """
    points = [self.timerange().to_json()]
    data = thaw(self.data())
    if isinstance(cols, list):
        points += [data.get(x, None) for x in cols]
    else:
        points += [x for x in list(data.values())]
    return points
Developer: esnet, Project: pypond, Lines: 30, Source: timerange_event.py
Example 12: publish_installer_images_effects
def publish_installer_images_effects(options):
    # Create configuration directory
    configuration_path = yield Effect(
        intent=PackerConfigure(
            build_region=options["build_region"],
            publish_regions=options["regions"],
            template=options["template"],
            distribution=options["distribution"],
            source_ami=options["source_ami"],
        )
    )
    # Build the Docker images
    ami_map = yield Effect(
        intent=PackerBuild(
            configuration_path=configuration_path,
        )
    )
    # Publish the regional AMI map to S3
    yield Effect(
        intent=WriteToS3(
            content=json.dumps(thaw(ami_map), encoding="utf-8"),
            target_bucket=options['target_bucket'],
            target_key=options["template"],
        )
    )
Developer: AlexRRR, Project: flocker, Lines: 25, Source: _images.py
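The thaw(ami_map) call right before json.dumps is the important step: the standard json encoder only understands built-in dicts and lists, so persistent maps generally have to be thawed first. A quick sketch of that (the encoding= keyword in the example above is Python 2-era and is omitted here):

import json
from pyrsistent import freeze, thaw

ami_map = freeze({'us-west-1': 'ami-1234', 'us-east-1': 'ami-5678'})

try:
    json.dumps(ami_map)                      # PMap is not a dict; this fails
except TypeError:
    pass

print(json.dumps(thaw(ami_map), sort_keys=True))
# {"us-east-1": "ami-5678", "us-west-1": "ami-1234"}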
Example 13: test_sequence
def test_sequence(self):
    """
    The function generates a packer configuration file, runs packer
    build and uploads the AMI ids to a given S3 bucket.
    """
    options = default_options()
    configuration_path = self.make_temporary_directory()
    ami_map = PACKER_OUTPUT_US_ALL.output
    perform_sequence(
        seq=[
            (
                PackerConfigure(
                    build_region=options["build_region"],
                    publish_regions=options["regions"],
                    source_ami=options["source_ami"],
                    template=options["template"],
                    distribution=options["distribution"],
                ),
                lambda intent: configuration_path,
            ),
            (PackerBuild(configuration_path=configuration_path), lambda intent: ami_map),
            (
                WriteToS3(
                    content=json.dumps(thaw(ami_map), encoding="utf-8"),
                    target_bucket=options["target_bucket"],
                    target_key=options["template"],
                ),
                lambda intent: None,
            ),
        ],
        eff=publish_installer_images_effects(options=options),
    )
Developer: neconest, Project: flocker, Lines: 32, Source: test_images.py
Example 14: prepare_launch_config
def prepare_launch_config(scaling_group_uuid, launch_config):
    """
    Prepare a launch_config for the specified scaling_group.

    This is responsible for returning a copy of the launch config that
    has metadata and unique server names added.

    :param IScalingGroup scaling_group: The scaling group this server is
        getting launched for.
    :param dict launch_config: The complete launch_config args we want to build
        servers from.

    :return dict: The prepared launch config.
    """
    launch_config = freeze(launch_config)
    lb_descriptions = json_to_LBConfigs(launch_config.get('loadBalancers', []))
    launch_config = prepare_server_launch_config(
        scaling_group_uuid, launch_config, lb_descriptions)
    suffix = generate_server_name()
    launch_config = set_server_name(launch_config, suffix)
    return thaw(launch_config)
Developer: meker12, Project: otter, Lines: 25, Source: launch_server_v1.py
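prepare_launch_config freezes the incoming dict, threads the frozen copy through helpers that each return a modified version, and thaws the final result before returning it, so callers get a plain dict back and the caller's original dict is never mutated. The same freeze-modify-thaw shape, with a purely illustrative helper standing in for otter's set_server_name:

from pyrsistent import freeze, thaw

def with_server_name(frozen_config, suffix):
    # Illustrative stand-in: append a suffix to the nested server name.
    name = frozen_config['server']['name'] + '-' + suffix
    return frozen_config.set('server',
                             frozen_config['server'].set('name', name))

launch_config = {'server': {'name': 'web'}, 'loadBalancers': []}
prepared = thaw(with_server_name(freeze(launch_config), 'ab12'))

assert prepared['server']['name'] == 'web-ab12'
assert launch_config['server']['name'] == 'web'   # caller's dict is untouched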
Example 15: _freeze
def _freeze(self, action=None):
    """
    Freeze this message for logging, registering it with C{action}.

    @param action: The L{Action} which is the context for this message. If
        C{None}, the L{Action} will be deduced from the current call
        stack.

    @return: A L{PMap} with added C{timestamp}, C{task_uuid}, and
        C{task_level} entries.
    """
    if action is None:
        action = currentAction()
    if action is None:
        task_uuid = unicode(uuid4())
        task_level = [1]
    else:
        task_uuid = action._identification['task_uuid']
        task_level = thaw(action._nextTaskLevel().level)
    timestamp = self._timestamp()
    return self._contents.update({
        'timestamp': timestamp,
        'task_uuid': task_uuid,
        'task_level': task_level,
    })
Developer: carriercomm, Project: eliot, Lines: 25, Source: _message.py
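Here thaw is applied to a single value: action._nextTaskLevel().level appears to be a persistent vector inside eliot, and thaw converts it into the plain list of ints that ends up in the logged message. thaw handles any persistent type, not just maps:

from pyrsistent import pvector, thaw

task_level = pvector([1, 2, 1])
assert thaw(task_level) == [1, 2, 1]
assert type(thaw(task_level)) is list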
Example 16: test_thaw_can_handle_subclasses_of_persistent_base_types
def test_thaw_can_handle_subclasses_of_persistent_base_types():
    class R(PRecord):
        x = field()

    result = thaw(R(x=1))
    assert result == {'x': 1}
    assert type(result) is dict
Developer: ClusterHQ, Project: pyrsistent, Lines: 7, Source: freeze_test.py
Example 17: main
def main():
    from pprint import pprint
    from pyrsistent import thaw
    import sys

    with open(sys.argv[1]) as f:
        tasks = to_tasks(Message.new(x) for x in parse_json_stream(f))
        pprint(thaw(tasks))
Developer: jml, Project: trial-eliot, Lines: 7, Source: _parse.py
Example 18: update_servers_cache
def update_servers_cache(group, now, servers, lb_nodes, lbs,
                         include_deleted=True):
    """
    Updates the cache, adding servers, with a flag if autoscale is active on
    each one. All arguments after ``now`` are resources specific to the
    ``launch_server`` config that are used by that planner. Here we only cache
    servers that are in desired LBs, since that is needed by the REST API, and
    ignore ``lbs``.

    :param group: scaling group
    :param list servers: list of NovaServer objects
    :param list lb_nodes: list of CLBNode objects
    :param dict lbs: load balancer objects keyed on ID (currently ignored)
    :param include_deleted: Include deleted servers in cache. Defaults to True.
    """
    server_dicts = []
    for server in servers:
        sd = thaw(server.json)
        if is_autoscale_active(server, lb_nodes):
            sd["_is_as_active"] = True
        if server.state != ServerState.DELETED or include_deleted:
            server_dicts.append(sd)
    return Effect(
        UpdateServersCache(group.tenant_id, group.uuid, now, server_dicts))
Developer: rackerlabs, Project: otter, Lines: 25, Source: service.py
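update_servers_cache thaws each server's JSON so a transient _is_as_active flag can be attached to the cached copy without touching the persistent original, and optionally drops deleted servers. A stripped-down sketch of that loop; the plain status check below is a stand-in for otter's is_autoscale_active and ServerState:

from pyrsistent import freeze, thaw

servers = [freeze({'id': 'a', 'status': 'ACTIVE'}),
           freeze({'id': 'b', 'status': 'DELETED'})]

def cache_dicts(servers, include_deleted=True):
    out = []
    for sjson in servers:
        sd = thaw(sjson)                     # mutable copy for the cache
        if sd['status'] == 'ACTIVE':
            sd['_is_as_active'] = True
        if sd['status'] != 'DELETED' or include_deleted:
            out.append(sd)
    return out

assert len(cache_dicts(servers)) == 2
assert len(cache_dicts(servers, include_deleted=False)) == 1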
Example 19: to_point
def to_point(self, cols=None):
    """
    Returns a flat array starting with the timestamp, followed by the values.
    Doesn't include the groupByKey (key).

    Can be given an optional list of columns so the returned list will
    have the values in order. Primarily for the TimeSeries wire format.

    Parameters
    ----------
    cols : list, optional
        List of columns to order the points in so the TimeSeries
        wire format is rendered correctly.

    Returns
    -------
    list
        Epoch ms followed by points.
    """
    points = [self.index_as_string()]
    data = thaw(self.data())
    if isinstance(cols, list):
        points += [data.get(x, None) for x in cols]
    else:
        points += [x for x in list(data.values())]
    return points
Developer: esnet, Project: pypond, Lines: 29, Source: indexed_event.py
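Both to_point variants (Examples 11 and 19) thaw the event's data so its values can be emitted in a caller-specified column order for the wire format. A minimal illustration of that ordering step on a bare pmap (the timestamp string is made up):

from pyrsistent import pmap, thaw

data = thaw(pmap({'in': 5, 'out': 7}))
cols = ['out', 'in', 'missing']

point = ['2015-01-01T00:00'] + [data.get(c, None) for c in cols]
assert point == ['2015-01-01T00:00', 7, 5, None]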
Example 20: _tojson_helper
def _tojson_helper(self):
    def setup_time(time, scheduled):
        result = time.JSONable()
        result['mid'] = scheduled
        return result

    result = {'agents': thaw(self.agents),
              'times': thaw(self.times),
              'requirements': thaw({mid: {r.type: r for r in rs.values()}
                                    for mid, rs in
                                    self.requirements.iteritems()}),
              'unsatisfied': thaw(self.unsatisfied),
              'costs': thaw(self.costs),
              'meetings': {a: [setup_time(t, ts.get(t, default=-1))
                               for t in self.times]
                           for a, ts in self.forward.iteritems()}}
    return result
Developer: haberdashPI, Project: CSDscheduling, Lines: 17, Source: __init__.py
Note: The pyrsistent.thaw examples in this article were collected from source code hosted on platforms such as GitHub/MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.