This article collects typical usage examples of the Python class spinn_utilities.progress_bar.ProgressBar. If you have been wondering what ProgressBar is for and how to use it, the curated class examples below should help.
Twenty code examples of the ProgressBar class are shown below, ordered by popularity by default.
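Before the full examples, here is a minimal sketch distilling the two ProgressBar idioms that recur throughout them. It is a sketch, not library documentation: the constructor keyword names and the over()/update()/end() calls are taken from the examples below, and the work inside the loops is a placeholder.

from spinn_utilities.progress_bar import ProgressBar

items = list(range(100))

# Idiom 1: construct from a sized iterable (or a count) plus a label, then
# wrap the loop in over(), which advances the bar once per item and closes
# it when the iteration finishes.
progress = ProgressBar(items, "Processing items")
for item in progress.over(items):
    pass  # per-item work goes here

# Idiom 2: construct with an explicit count, advance the bar manually with
# update() (e.g. from helper code), and close it explicitly with end().
progress = ProgressBar(
    total_number_of_things_to_do=2,
    string_describing_what_being_progressed="Two-stage job")
progress.update()  # first stage done
progress.update()  # second stage done
progress.end()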
Example 1: __call__
def __call__(self, machine_graph, machine, plan_n_timesteps):
    """ Place a machine_graph so that each vertex is placed on a core

    :param machine_graph: The machine_graph to place
    :type machine_graph:\
        :py:class:`pacman.model.graphs.machine.MachineGraph`
    :param machine: The machine on which to place the graph
    :type machine: :py:class:`spinn_machine.Machine`
    :param plan_n_timesteps: number of timesteps to plan for
    :type plan_n_timesteps: int
    :return: A set of placements
    :rtype: :py:class:`pacman.model.placements.Placements`
    :raise pacman.exceptions.PacmanPlaceException:\
        If something goes wrong with the placement
    """
    # check that the algorithm can handle the constraints
    ResourceTracker.check_constraints(machine_graph.vertices)

    placements = Placements()
    vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

    # Iterate over vertices and generate placements
    progress = ProgressBar(vertices, "Placing graph vertices")
    resource_tracker = ResourceTracker(machine, plan_n_timesteps)
    for vertex in progress.over(vertices):
        # Create and store a new placement anywhere on the board
        (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
            vertex.resources_required, vertex.constraints, None)
        placement = Placement(vertex, x, y, p)
        placements.add_placement(placement)
    return placements
Author: SpiNNakerManchester | Project: PACMAN | Lines: 34 | Source: basic_placer.py
Example 2: __call__
def __call__(self, report_default_directory, dsg_targets, transceiver):
    """ Creates a report that states where in SDRAM each region is\
        (read from the machine)

    :param report_default_directory: the folder where reports are written
    :param dsg_targets: the map between placement and file writer
    :param transceiver: the SpiNNMan instance
    :rtype: None
    """
    directory_name = os.path.join(
        report_default_directory, MEM_MAP_SUBDIR_NAME)
    if not os.path.exists(directory_name):
        os.makedirs(directory_name)

    progress = ProgressBar(dsg_targets, "Writing memory map reports")
    for (x, y, p) in progress.over(dsg_targets):
        file_name = os.path.join(
            directory_name, MEM_MAP_FILENAME.format(x, y, p))
        try:
            with open(file_name, "w") as f:
                self._describe_mem_map(f, transceiver, x, y, p)
        except IOError:
            logger.exception("Generate_placement_reports: Can't open file"
                             " {} for writing.", file_name)
Author: SpiNNakerManchester | Project: SpiNNFrontEndCommon | Lines: 25 | Source: memory_map_on_host_chip_report.py
Example 3: __call__
def __call__(self, report_folder, connection_holder, dsg_targets):
    """ Convert the synaptic matrix for every application edge into a report.
    """
    # Update the print options to display everything
    print_opts = numpy.get_printoptions()
    # (threshold=numpy.nan in the original; modern numpy rejects it and
    # requires an integer such as sys.maxsize)
    numpy.set_printoptions(threshold=sys.maxsize)

    if dsg_targets is None:
        raise SynapticConfigurationException(
            "dsg_targets should not be None; it is used as a check that the"
            " connection holder data has been generated")

    # generate folder for synaptic reports
    top_level_folder = os.path.join(report_folder, _DIRNAME)
    if not os.path.exists(top_level_folder):
        os.mkdir(top_level_folder)

    # create progress bar
    progress = ProgressBar(connection_holder.keys(),
                           "Generating synaptic matrix reports")

    # for each application edge, write the matrix in a new file
    for edge, _ in progress.over(connection_holder.keys()):
        # only write matrices for edges which have matrices
        if isinstance(edge, ProjectionApplicationEdge):
            # figure out the new file name
            file_name = os.path.join(
                top_level_folder, _TMPL_FILENAME.format(edge.label))
            self._write_file(file_name, connection_holder, edge)

    # Reset the print options
    numpy.set_printoptions(**print_opts)
Author: SpiNNakerManchester | Project: sPyNNaker | Lines: 33 | Source: spynnaker_synaptic_matrix_report.py
Example 4: __call__
def __call__(self, router_tables):
    tables = MulticastRoutingTables()
    previous_masks = dict()

    progress = ProgressBar(
        len(router_tables.routing_tables) * 2,
        "Compressing Routing Tables")

    # Create all masks without holes
    allowed_masks = [_32_BITS - ((2 ** i) - 1) for i in range(33)]

    # Check that none of the masks have "holes", e.g. 0xFFFF0FFF has a hole
    for router_table in router_tables.routing_tables:
        for entry in router_table.multicast_routing_entries:
            if entry.mask not in allowed_masks:
                raise PacmanRoutingException(
                    "Only masks without holes are allowed in tables for"
                    " BasicRouteMerger (disallowed mask={})".format(
                        hex(entry.mask)))

    for router_table in progress.over(router_tables.routing_tables):
        new_table = self._merge_routes(router_table, previous_masks)
        tables.add_routing_table(new_table)
        n_entries = len([
            entry for entry in new_table.multicast_routing_entries
            if not entry.defaultable])
        # print("Reduced from {} to {}".format(
        #     len(router_table.multicast_routing_entries), n_entries))
        if n_entries > 1023:
            raise PacmanRoutingException(
                "Cannot make table small enough: {} entries".format(
                    n_entries))

    return tables
Author: SpiNNakerManchester | Project: PACMAN | Lines: 34 | Source: basic_route_merger.py
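A quick standalone illustration of the "no holes" rule checked in Example 4, assuming _32_BITS is 0xFFFFFFFF (which the list comprehension implies):

_32_BITS = 0xFFFFFFFF
allowed_masks = [_32_BITS - ((2 ** i) - 1) for i in range(33)]
assert 0xFFFF0000 in allowed_masks      # contiguous ones from the top: allowed
assert 0xFFFF0FFF not in allowed_masks  # a "hole" of zero bits inside: rejected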
Example 5: __call__
def __call__(self, router_tables, target_length=None):
    # build storage
    compressed_pacman_router_tables = MulticastRoutingTables()

    # create progress bar
    progress = ProgressBar(
        router_tables.routing_tables, "Compressing routing tables")

    # compress each router table
    for router_table in progress.over(router_tables.routing_tables):
        # convert to rig format
        entries = self._convert_to_mundy_format(router_table)

        # compress the router entries
        compressed_router_table_entries = \
            rigs_compressor.minimise(entries, target_length)

        # convert back to the pacman model
        compressed_pacman_table = self._convert_to_pacman_router_table(
            compressed_router_table_entries, router_table.x,
            router_table.y)

        # add to the new compressed routing tables
        compressed_pacman_router_tables.add_routing_table(
            compressed_pacman_table)

    return compressed_pacman_router_tables
Author: SpiNNakerManchester | Project: PACMAN | Lines: 28 | Source: routing_table_condenser.py
Example 6: __call__
def __call__(
        self, transceiver, tags=None, iptags=None, reverse_iptags=None):
    """
    :param transceiver: the transceiver object
    :param tags: the tags object which contains IP and reverse IP tags;
        may be None if these are being given in separate lists
    :param iptags: a list of IP tags, given when tags is None
    :param reverse_iptags: a list of reverse IP tags, given when tags is None
    """
    # clear all the tags from the Ethernet connection, as nothing should
    # be allowed to use it (no two apps should use the same Ethernet
    # connection at the same time)
    progress = ProgressBar(MAX_TAG_ID, "Clearing tags")
    for tag_id in progress.over(range(MAX_TAG_ID)):
        transceiver.clear_ip_tag(tag_id)

    # Use the tags object to supply the tag info if it is given
    if tags is not None:
        iptags = list(tags.ip_tags)
        reverse_iptags = list(tags.reverse_ip_tags)

    # Load the IP tags and the reverse IP tags
    progress = ProgressBar(
        len(iptags) + len(reverse_iptags), "Loading Tags")
    self.load_iptags(iptags, transceiver, progress)
    self.load_reverse_iptags(reverse_iptags, transceiver, progress)
    progress.end()
Author: SpiNNakerManchester | Project: SpiNNFrontEndCommon | Lines: 27 | Source: tags_loader.py
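Note how one "Loading Tags" bar is shared by both helper calls and only closed afterwards with end(). The helpers' bodies are not shown in this excerpt; a plausible sketch of load_iptags (hypothetical — it assumes the transceiver exposes set_ip_tag, as SpiNNMan's Transceiver does, and uses the two-argument over() seen in Examples 14 and 18) might be:

def load_iptags(self, iptags, transceiver, progress):
    # advance the shared bar once per tag; False keeps the bar open so the
    # caller's second helper and final progress.end() can continue it
    for iptag in progress.over(iptags, False):
        transceiver.set_ip_tag(iptag)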
Example 7: __call__
def __call__(self, txrx, app_id, all_core_subsets):
    # check that the right number of processors are in sync
    processors_completed = txrx.get_core_state_count(
        app_id, CPUState.FINISHED)
    total_processors = len(all_core_subsets)
    left_to_do_cores = total_processors - processors_completed

    progress = ProgressBar(
        left_to_do_cores,
        "Forcing error cores to generate provenance data")

    error_cores = txrx.get_cores_in_state(
        all_core_subsets, CPUState.RUN_TIME_EXCEPTION)
    watchdog_cores = txrx.get_cores_in_state(
        all_core_subsets, CPUState.WATCHDOG)
    idle_cores = txrx.get_cores_in_state(
        all_core_subsets, CPUState.IDLE)

    if error_cores or watchdog_cores or idle_cores:
        raise ConfigurationException(
            "Some cores have crashed. RTE cores {}, watch-dogged cores {},"
            " idle cores {}".format(
                error_cores.values(), watchdog_cores.values(),
                idle_cores.values()))

    # check that all cores are in the FINISHED state, which shows that
    # each core has received the message and done its provenance updating
    self._update_provenance(txrx, total_processors, processors_completed,
                            all_core_subsets, app_id, progress)
    progress.end()
Author: SpiNNakerManchester | Project: SpiNNFrontEndCommon | Lines: 30 | Source: chip_provenance_updater.py
Example 8: __call__
def __call__(
        self, live_packet_gatherer_parameters, machine, machine_graph,
        application_graph=None, graph_mapper=None):
    """ Add LPG vertices on Ethernet connected chips as required.

    :param live_packet_gatherer_parameters:\
        the Live Packet Gatherer parameters requested by the script
    :param machine: the SpiNNaker machine as discovered
    :param machine_graph: the machine graph
    :param application_graph: the application graph
    :param graph_mapper: the mapping between application and machine graphs
    :return: mapping between LPG parameters and LPG vertex
    """
    # pylint: disable=too-many-arguments

    # create progress bar
    progress = ProgressBar(
        machine.ethernet_connected_chips,
        string_describing_what_being_progressed=(
            "Adding Live Packet Gatherers to Graph"))

    # Keep track of the vertices added, by parameters and board address
    lpg_params_to_vertices = defaultdict(dict)

    # for every Ethernet connected chip, add the gatherers required
    for chip in progress.over(machine.ethernet_connected_chips):
        for params in live_packet_gatherer_parameters:
            if (params.board_address is None or
                    params.board_address == chip.ip_address):
                lpg_params_to_vertices[params][chip.x, chip.y] = \
                    self._add_lpg_vertex(application_graph, graph_mapper,
                                         machine_graph, chip, params)

    return lpg_params_to_vertices
Author: SpiNNakerManchester | Project: SpiNNFrontEndCommon | Lines: 33 | Source: insert_live_packet_gatherers_to_graphs.py
Example 9: __call__
def __call__(
        self, transceiver, placements, provenance_file_path,
        run_time_ms, machine_time_step):
    """
    :param transceiver: the SpiNNMan interface object
    :param placements: The placements of the vertices
    :param provenance_file_path: The location to store the profile data
    :param run_time_ms: runtime in ms
    :param machine_time_step: machine time step in microseconds
    """
    # pylint: disable=too-many-arguments
    machine_time_step_ms = machine_time_step // 1000

    progress = ProgressBar(
        placements.n_placements, "Getting profile data")

    # retrieve profile data from any cores that provide it
    for placement in progress.over(placements.placements):
        if isinstance(placement.vertex, AbstractHasProfileData):
            # get data
            profile_data = placement.vertex.get_profile_data(
                transceiver, placement)
            if profile_data.tags:
                self._write(placement, profile_data, run_time_ms,
                            machine_time_step_ms, provenance_file_path)
Author: SpiNNakerManchester | Project: SpiNNFrontEndCommon | Lines: 26 | Source: profile_data_gatherer.py
Example 10: get_spikes
def get_spikes(self, label, buffer_manager, region,
               placements, graph_mapper, application_vertex,
               base_key_function, machine_time_step):
    # pylint: disable=too-many-arguments
    results = list()
    missing = []
    ms_per_tick = machine_time_step / 1000.0
    vertices = graph_mapper.get_machine_vertices(application_vertex)
    progress = ProgressBar(vertices,
                           "Getting spikes for {}".format(label))

    for vertex in progress.over(vertices):
        placement = placements.get_placement_of_vertex(vertex)
        vertex_slice = graph_mapper.get_slice(vertex)

        # Read the spikes
        raw_spike_data, data_missing = \
            buffer_manager.get_data_by_placement(placement, region)
        if data_missing:
            missing.append(placement)
        self._process_spike_data(
            vertex_slice, raw_spike_data, ms_per_tick,
            base_key_function(vertex), results)

    if missing:
        missing_str = recording_utils.make_missing_string(missing)
        logger.warning(
            "Population {} is missing spike data in region {} from the"
            " following cores: {}", label, region, missing_str)

    if not results:
        return numpy.empty(shape=(0, 2))
    result = numpy.vstack(results)
    # sort by neuron id first, then by spike time
    return result[numpy.lexsort((result[:, 1], result[:, 0]))]
Author: SpiNNakerManchester | Project: sPyNNaker | Lines: 32 | Source: eieio_spike_recorder.py
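The closing lexsort call in Example 10 is easy to misread: the last key passed to numpy.lexsort is the primary one, so the spikes come back sorted by neuron id first and spike time second. A tiny standalone check with hypothetical data:

import numpy
result = numpy.array([[1, 5.0], [0, 7.0], [0, 2.0]])  # columns: neuron id, time
order = numpy.lexsort((result[:, 1], result[:, 0]))   # last key = primary key
print(result[order])  # rows ordered (0, 2.0), (0, 7.0), (1, 5.0)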
Example 11: __call__
def __call__(
        self, live_packet_gatherer_parameters, placements,
        live_packet_gatherers_to_vertex_mapping, machine,
        machine_graph, application_graph=None, graph_mapper=None):
    """
    :param live_packet_gatherer_parameters: the set of parameters
    :param placements: the placements object
    :param live_packet_gatherers_to_vertex_mapping:\
        the mapping of LPG parameters to the machine vertices associated\
        with them
    :param machine: the SpiNNaker machine
    :param machine_graph: the machine graph
    :param application_graph: the app graph
    :param graph_mapper: the graph mapper between app and machine graph
    :rtype: None
    """
    # pylint: disable=too-many-arguments
    progress = ProgressBar(
        live_packet_gatherer_parameters,
        string_describing_what_being_progressed=(
            "Adding edges to the machine graph between the vertices to "
            "which live output has been requested and its local Live "
            "Packet Gatherer"))

    for lpg_params in progress.over(live_packet_gatherer_parameters):
        # locate vertices that need to be connected to an LPG with these
        # parameters
        for vertex in live_packet_gatherer_parameters[lpg_params]:
            self._connect_lpg_vertex(
                application_graph, graph_mapper, machine,
                placements, machine_graph, vertex,
                live_packet_gatherers_to_vertex_mapping, lpg_params)
Author: SpiNNakerManchester | Project: SpiNNFrontEndCommon | Lines: 31 | Source: insert_edges_to_live_packet_gatherers.py
Example 12: __call__
def __call__(self, machine_graph, machine, plan_n_timesteps):
    """ Place each vertex in a machine graph on a core in the machine.

    :param machine_graph: The machine_graph to place
    :type machine_graph:\
        :py:class:`pacman.model.graphs.machine.MachineGraph`
    :param machine: A SpiNNaker machine object.
    :type machine: :py:class:`spinn_machine.Machine`
    :param plan_n_timesteps: number of timesteps to plan for
    :type plan_n_timesteps: int
    :return: Placements of vertices on the machine
    :rtype: :py:class:`pacman.model.placements.Placements`
    """
    # check that the algorithm can handle the constraints
    ResourceTracker.check_constraints(machine_graph.vertices)

    placements = Placements()
    vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

    # Iterate over vertices and generate placements
    progress = ProgressBar(machine_graph.n_vertices,
                           "Placing graph vertices")
    resource_tracker = ResourceTracker(
        machine, plan_n_timesteps, self._generate_random_chips(machine))
    vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph)
    vertices_placed = set()
    for vertex in progress.over(vertices):
        if vertex not in vertices_placed:
            vertices_placed.update(self._place_vertex(
                vertex, resource_tracker, machine, placements,
                vertices_on_same_chip))
    return placements
Author: SpiNNakerManchester | Project: PACMAN | Lines: 33 | Source: random_placer.py
Example 13: __call__
def __call__(self, report_default_directory, machine):
    """ Creates a report that lists, for each board, the chips on it.

    :param report_default_directory: the folder where reports are written
    :param machine: python representation of the machine
    :rtype: None
    """
    # create the file path
    file_name = os.path.join(
        report_default_directory, self.AREA_CODE_REPORT_NAME)

    # create the progress bar for end users
    progress_bar = ProgressBar(
        len(machine.ethernet_connected_chips),
        "Writing the board chip report")

    # iterate over Ethernet chips, and then the chips on each board
    with open(file_name, "w") as writer:
        for ethernet_connected_chip in \
                progress_bar.over(machine.ethernet_connected_chips):
            chips = machine.get_chips_on_board(ethernet_connected_chip)
            writer.write(
                "board with IP address : {} : has chips {}\n".format(
                    ethernet_connected_chip.ip_address, list(chips)))
Author: SpiNNakerManchester | Project: SpiNNFrontEndCommon | Lines: 25 | Source: board_chip_report.py
Example 14: __call__
def __call__(self, machine_graph, graph_mapper):
    """
    :param machine_graph: the machine graph whose edges are to be filtered
    :param graph_mapper: the graph mapper between graphs
    :return: a new graph mapper and machine graph
    """
    new_machine_graph = MachineGraph(label=machine_graph.label)
    new_graph_mapper = GraphMapper()

    # create progress bar
    progress = ProgressBar(
        machine_graph.n_vertices +
        machine_graph.n_outgoing_edge_partitions,
        "Filtering edges")

    # add the vertices directly, as they won't be pruned
    for vertex in progress.over(machine_graph.vertices, False):
        self._add_vertex_to_new_graph(
            vertex, graph_mapper, new_machine_graph, new_graph_mapper)

    # start checking edges to decide which ones need pruning
    for partition in progress.over(machine_graph.outgoing_edge_partitions):
        for edge in partition.edges:
            if self._is_filterable(edge, graph_mapper):
                logger.debug("this edge was pruned %s", edge)
                continue
            logger.debug("this edge was not pruned %s", edge)
            self._add_edge_to_new_graph(
                edge, partition, graph_mapper, new_machine_graph,
                new_graph_mapper)

    # return the pruned graph and graph mapper
    return new_machine_graph, new_graph_mapper
Author: SpiNNakerManchester | Project: sPyNNaker | Lines: 33 | Source: graph_edge_filter.py
Example 15: __call__
def __call__(
        self, placements, hostname,
        report_default_directory, write_text_specs,
        machine, graph_mapper=None, placement_order=None):
    """
    :param placements: placements of the machine graph to cores
    :param hostname: SpiNNaker machine name
    :param report_default_directory: the location where reports are stored
    :param write_text_specs:\
        True if the textual version of the specification is to be written
    :param machine: the python representation of the SpiNNaker machine
    :param graph_mapper:\
        the mapping between application and machine graph
    :param placement_order:\
        the optional order in which placements should be examined
    :return: DSG targets (map of placement tuple and filename)
    """
    # pylint: disable=too-many-arguments, too-many-locals
    # pylint: disable=attribute-defined-outside-init
    self._machine = machine
    self._hostname = hostname
    self._report_dir = report_default_directory
    self._write_text = write_text_specs

    # iterate through vertices and call generate_data_spec for each vertex
    targets = DataSpecificationTargets(machine, self._report_dir)

    if placement_order is None:
        placement_order = placements.placements

    progress = ProgressBar(
        placements.n_placements, "Generating data specifications")
    vertices_to_reset = list()
    for placement in progress.over(placement_order):
        # Try to generate the data spec for the placement
        generated = self.__generate_data_spec_for_vertices(
            placement, placement.vertex, targets)

        if generated and isinstance(
                placement.vertex, AbstractRewritesDataSpecification):
            vertices_to_reset.append(placement.vertex)

        # If the spec wasn't generated directly, and there is an
        # application vertex, try with that
        if not generated and graph_mapper is not None:
            associated_vertex = graph_mapper.get_application_vertex(
                placement.vertex)
            generated = self.__generate_data_spec_for_vertices(
                placement, associated_vertex, targets)
            if generated and isinstance(
                    associated_vertex, AbstractRewritesDataSpecification):
                vertices_to_reset.append(associated_vertex)

    # Ensure that the vertices know their regions have been reloaded
    for vertex in vertices_to_reset:
        vertex.mark_regions_reloaded()

    return targets
Author: SpiNNakerManchester | Project: SpiNNFrontEndCommon | Lines: 59 | Source: graph_data_specification_writer.py
Example 16: __call__
def __call__(self, fixed_routes, transceiver, app_id):
    progress_bar = ProgressBar(
        total_number_of_things_to_do=len(fixed_routes),
        string_describing_what_being_progressed="loading fixed routes")
    for chip_x, chip_y in progress_bar.over(fixed_routes.keys()):
        transceiver.load_fixed_route(
            chip_x, chip_y, fixed_routes[(chip_x, chip_y)], app_id)
Author: SpiNNakerManchester | Project: SpiNNFrontEndCommon | Lines: 9 | Source: load_fixed_routes.py
Example 17: __call__
def __call__(self, router_tables, app_id, transceiver, machine):
    progress = ProgressBar(router_tables.routing_tables,
                           "Loading routing data onto the machine")

    # load each router table that is needed for the application to run
    # into the chip's SDRAM
    for table in progress.over(router_tables.routing_tables):
        if (not machine.get_chip_at(table.x, table.y).virtual
                and table.multicast_routing_entries):
            transceiver.load_multicast_routes(
                table.x, table.y, table.multicast_routing_entries,
                app_id=app_id)
Author: SpiNNakerManchester | Project: SpiNNFrontEndCommon | Lines: 12 | Source: routing_table_loader.py
Example 18: __call__
def __call__(self, machine_graph, machine, plan_n_timesteps):
    """
    :param machine_graph: The machine_graph to place
    :type machine_graph:\
        :py:class:`pacman.model.graphs.machine.MachineGraph`
    :param machine: The machine on which to place the graph
    :type machine: :py:class:`spinn_machine.Machine`
    :param plan_n_timesteps: number of timesteps to plan for
    :type plan_n_timesteps: int
    :return: A set of placements
    :rtype: :py:class:`pacman.model.placements.Placements`
    :raise pacman.exceptions.PacmanPlaceException:\
        If something goes wrong with the placement
    """
    # check that the algorithm can handle the constraints
    self._check_constraints(machine_graph.vertices)

    # Sort the vertices into those with and those without
    # placement constraints
    placements = Placements()
    constrained = list()
    unconstrained = set()
    for vertex in machine_graph.vertices:
        if locate_constraints_of_type(
                vertex.constraints, AbstractPlacerConstraint):
            constrained.append(vertex)
        else:
            unconstrained.add(vertex)

    # Iterate over constrained vertices and generate placements
    progress = ProgressBar(
        machine_graph.n_vertices, "Placing graph vertices")
    resource_tracker = ResourceTracker(
        machine, plan_n_timesteps, self._generate_radial_chips(machine))
    constrained = sort_vertices_by_known_constraints(constrained)
    for vertex in progress.over(constrained, False):
        self._place_vertex(vertex, resource_tracker, machine, placements)

    while unconstrained:
        # Place the subgraph with the overall most connected vertex
        max_connected_vertex = self._find_max_connected_vertex(
            unconstrained, machine_graph)
        self._place_unconstrained_subgraph(
            max_connected_vertex, machine_graph, unconstrained,
            machine, placements, resource_tracker, progress)

    # finished, so stop the progress bar and return the placements
    progress.end()
    return placements
Author: SpiNNakerManchester | Project: PACMAN | Lines: 52 | Source: connective_based_placer.py
Example 19: __get_projection_data
def __get_projection_data(
        self, data_to_get, pre_vertex, post_vertex, connection_holder,
        handle_time_out_configuration):
    # pylint: disable=too-many-arguments, too-many-locals
    ctl = self._spinnaker_control

    # if using the extra monitor functionality, locate the extra data items
    if ctl.get_generated_output("UsingAdvancedMonitorSupport"):
        extra_monitors = ctl.get_generated_output(
            "MemoryExtraMonitorVertices")
        receivers = ctl.get_generated_output(
            "MemoryMCGatherVertexToEthernetConnectedChipMapping")
        extra_monitor_placements = ctl.get_generated_output(
            "MemoryExtraMonitorToChipMapping")
    else:
        extra_monitors = None
        receivers = None
        extra_monitor_placements = None

    edges = ctl.graph_mapper.get_machine_edges(self._projection_edge)
    progress = ProgressBar(
        edges, "Getting {}s for projection between {} and {}".format(
            data_to_get, pre_vertex.label, post_vertex.label))
    for edge in progress.over(edges):
        placement = ctl.placements.get_placement_of_vertex(
            edge.post_vertex)

        # if using the extra monitor data extractor, find the local receiver
        if extra_monitors is not None:
            receiver = helpful_functions.locate_extra_monitor_mc_receiver(
                placement_x=placement.x, placement_y=placement.y,
                machine=ctl.machine,
                packet_gather_cores_to_ethernet_connection_map=receivers)
            sender_extra_monitor_core = extra_monitor_placements[
                placement.x, placement.y]
            sender_monitor_place = ctl.placements.get_placement_of_vertex(
                sender_extra_monitor_core)
        else:
            receiver = None
            sender_monitor_place = None

        connections = post_vertex.get_connections_from_machine(
            ctl.transceiver, placement, edge, ctl.graph_mapper,
            ctl.routing_infos, self._synapse_information,
            ctl.machine_time_step, extra_monitors is not None,
            ctl.placements, receiver, sender_monitor_place,
            extra_monitors, handle_time_out_configuration,
            ctl.fixed_routes)
        if connections is not None:
            connection_holder.add_connections(connections)
    connection_holder.finish()
Author: SpiNNakerManchester | Project: sPyNNaker | Lines: 51 | Source: pynn_projection_common.py
Example 20: __call__
def __call__(self, machine_graph, placements, buffer_manager):
    # Count the regions to be read
    n_regions_to_read, recording_placements = self._count_regions(
        machine_graph, placements)

    # Read back the regions
    progress = ProgressBar(
        n_regions_to_read, "Extracting buffers from the last run")
    try:
        buffer_manager.get_data_for_placements(
            recording_placements, progress)
    finally:
        progress.end()
Author: SpiNNakerManchester | Project: SpiNNFrontEndCommon | Lines: 14 | Source: buffer_extractor.py
Note: The spinn_utilities.progress_bar.ProgressBar class examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation hosting platforms. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Refer to each project's license before redistributing or reusing the code; do not reproduce without permission.