
Python collections.OrderedDict Class Code Examples


This article collects typical usage examples of the twitter.common.collections.OrderedDict class in Python. If you are wondering how the OrderedDict class is used in practice, how to call it, or what real-world examples look like, the curated class code examples below may help.



The following presents 18 code examples of the OrderedDict class, sorted by popularity by default; each example ends with a note of the contributing developer, project, and source file.
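Before diving into the examples, this minimal sketch shows the behaviour they all rely on: twitter.common.collections exposes an OrderedDict that, on modern interpreters, behaves like the standard library's collections.OrderedDict, preserving key insertion order and keeping one entry per key. The import path comes from the examples in this article; the jar coordinates below are made up for illustration.

try:
  from twitter.common.collections import OrderedDict
except ImportError:
  # Assumption: the twitter.commons class mirrors the stdlib one, so this fallback behaves the same.
  from collections import OrderedDict

jars = OrderedDict()
jars[('org.scala-lang', 'scala-library')] = '2.9.2'  # first insertion fixes the position
jars[('com.twitter', 'util-core')] = '6.1.0'
jars[('org.scala-lang', 'scala-library')] = '2.9.3'  # re-assignment updates the value, keeps the position

# Iteration follows insertion order, so consumers (classpaths, ivy templates, ...)
# see a stable, deterministic ordering.
assert list(jars)[0] == ('org.scala-lang', 'scala-library')
print(list(jars.items()))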

Example 1: _calculate_classpath

  def _calculate_classpath(self, targets):
    jars = OrderedDict()
    excludes = set()

    # Support the ivy force concept when we sanely can for internal dep conflicts.
    # TODO(John Sirois): Consider supporting / implementing the configured ivy revision picking
    # strategy generally.
    def add_jar(jar):
      coordinate = (jar.org, jar.name)
      existing = jars.get(coordinate)
      jars[coordinate] = jar if not existing else (
        self._resolve_conflict(existing=existing, proposed=jar)
      )

    def collect_jars(target):
      if target.is_jvm or target.is_jar_library:
        for jar in target.jar_dependencies:
          if jar.rev:
            add_jar(jar)

      # Lift jvm target-level excludes up to the global excludes set
      if target.is_jvm and target.payload.excludes:
        excludes.update(target.payload.excludes)

    for target in targets:
      target.walk(collect_jars)

    return jars.values(), excludes
Developer: ankurgarg1986, Project: pants, Lines of code: 28, Source: ivy_utils.py
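Example 1 keys the OrderedDict by (org, name) so each artifact is recorded once while the first-seen ordering of coordinates is preserved, and _resolve_conflict decides which revision wins when the same coordinate appears twice. The standalone sketch below mimics that pattern with plain (org, name, rev) tuples and a hypothetical pick_highest_rev resolver in place of pants jar objects.

from collections import OrderedDict  # stand-in for twitter.common.collections.OrderedDict

def pick_highest_rev(existing, proposed):
  # Hypothetical conflict resolution: keep the jar with the higher revision string.
  return proposed if proposed[2] > existing[2] else existing

def calculate_jars(jar_deps):
  """jar_deps: iterable of (org, name, rev) tuples in dependency-walk order."""
  jars = OrderedDict()
  for jar in jar_deps:
    coordinate = (jar[0], jar[1])
    existing = jars.get(coordinate)
    jars[coordinate] = jar if existing is None else pick_highest_rev(existing, jar)
  return list(jars.values())

deps = [('com.twitter', 'util-core', '6.1.0'),
        ('org.slf4j', 'slf4j-api', '1.6.1'),
        ('com.twitter', 'util-core', '6.2.0')]  # conflicting revision, same coordinate
print(calculate_jars(deps))
# [('com.twitter', 'util-core', '6.2.0'), ('org.slf4j', 'slf4j-api', '1.6.1')]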


Example 2: execution_order

  def execution_order(phases):
    """
      Yields goals in execution order for the given phases.  Does not account for goals run
      multiple times due to grouping.
    """
    dependencies_by_goal = OrderedDict()
    def populate_dependencies(phases):
      for phase in phases:
        for goal in phase.goals():
          if goal not in dependencies_by_goal:
            populate_dependencies(goal.dependencies)
            deps = OrderedSet()
            for phasedep in goal.dependencies:
              deps.update(phasedep.goals())
            dependencies_by_goal[goal] = deps
    populate_dependencies(phases)

    while dependencies_by_goal:
      for goal, deps in dependencies_by_goal.items():
        if not deps:
          dependencies_by_goal.pop(goal)
          for _, deps in dependencies_by_goal.items():
            if goal in deps:
              deps.discard(goal)
          yield goal
Developer: kevints, Project: commons, Lines of code: 25, Source: phase.py
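Example 2 is a small topological sort: goals whose dependency sets have drained are yielded, then removed from every remaining dependency set; because dependencies_by_goal is an OrderedDict, ties between ready goals are broken by discovery order. A simplified, self-contained version of the same loop, using strings and made-up goal names, looks like this:

from collections import OrderedDict

def execution_order(dependencies_by_goal):
  """dependencies_by_goal: OrderedDict mapping goal name -> set of goal names it depends on."""
  remaining = OrderedDict((goal, set(deps)) for goal, deps in dependencies_by_goal.items())
  while remaining:
    for goal in list(remaining):     # snapshot so we can pop while iterating
      if not remaining[goal]:        # all of this goal's dependencies have been yielded
        remaining.pop(goal)
        for deps in remaining.values():
          deps.discard(goal)         # this goal no longer blocks anyone else
        yield goal

goals = OrderedDict([('gen', set()),
                     ('resolve', set()),
                     ('compile', {'gen', 'resolve'}),
                     ('test', {'compile'})])
print(list(execution_order(goals)))  # ['gen', 'resolve', 'compile', 'test']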


Example 3: write

  def write(self, target, path, confs=None):
    def as_jar(internal_target):
      jar, _, _, _ = self.get_db(internal_target).as_jar_with_version(internal_target)
      return jar

    # TODO(John Sirois): a dict is used here to de-dup codegen targets which have both the original
    # codegen target - say java_thrift_library - and the synthetic generated target (java_library)
    # Consider reworking codegen tasks to add removal of the original codegen targets when rewriting
    # the graph
    dependencies = OrderedDict()
    internal_codegen = {}
    for dep in target_internal_dependencies(target):
      jar = as_jar(dep)
      dependencies[(jar.org, jar.name)] = self.internaldep(jar, dep)
      if dep.is_codegen:
        internal_codegen[jar.name] = jar.name
    for jar in target.jar_dependencies:
      if jar.rev:
        dependencies[(jar.org, jar.name)] = self.jardep(jar)
    target_jar = self.internaldep(as_jar(target)).extend(dependencies=dependencies.values())

    template_kwargs = self.templateargs(target_jar, confs)
    with safe_open(path, 'w') as output:
      template = pkgutil.get_data(__name__, self.template_relpath)
      Generator(template, **template_kwargs).write(output)
Developer: alandge, Project: twitter-commons, Lines of code: 25, Source: jar_publish.py


Example 4: compile

  def compile(self, classpath, sources, output_dir, analysis_cache, upstream_analysis_caches, depfile):
    # To pass options to scalac simply prefix with -S.
    args = ['-S' + x for x in self._scalac_args]

    def analysis_cache_full_path(analysis_cache_product):
      # We expect the argument to be { analysis_cache_dir, [ analysis_cache_file ]}.
      if len(analysis_cache_product) != 1:
        raise TaskError('There can only be one analysis cache file per output directory')
      analysis_cache_dir, analysis_cache_files = analysis_cache_product.iteritems().next()
      if len(analysis_cache_files) != 1:
        raise TaskError('There can only be one analysis cache file per output directory')
      return os.path.join(analysis_cache_dir, analysis_cache_files[0])

    # Strings of <output dir>:<full path to analysis cache file for the classes in that dir>.
    analysis_map = \
      OrderedDict([ (k, analysis_cache_full_path(v)) for k, v in upstream_analysis_caches.itermappings() ])

    if len(analysis_map) > 0:
      args.extend([ '-analysis-map', ','.join(['%s:%s' % kv for kv in analysis_map.items()]) ])

    args.extend([
      '-analysis-cache', analysis_cache,
      '-classpath', ':'.join(self._zinc_classpath + classpath),
      '-output-products', depfile,
      '-mirror-analysis',
      '-d', output_dir
    ])
    args.extend(sources)
    return self.run_zinc(args)
Developer: forestlzj, Project: commons, Lines of code: 29, Source: zinc_utils.py
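Examples 4, 9, and 14 all turn the OrderedDict of upstream analysis caches into a single -analysis-map flag by joining its items into a dir:cache,dir:cache string; keeping the map ordered makes the generated command line deterministic from run to run. A small illustration with made-up paths:

from collections import OrderedDict

# Hypothetical upstream analysis caches: output dir -> analysis cache file.
upstream = OrderedDict([
  ('/tmp/out/lib-a', '/tmp/cache/lib-a/analysis'),
  ('/tmp/out/lib-b', '/tmp/cache/lib-b/analysis'),
])

# The same join the zinc wrappers perform: "<dir>:<cache>,<dir>:<cache>".
analysis_map_flag = ','.join('%s:%s' % kv for kv in upstream.items())
print(['-analysis-map', analysis_map_flag])
# ['-analysis-map', '/tmp/out/lib-a:/tmp/cache/lib-a/analysis,/tmp/out/lib-b:/tmp/cache/lib-b/analysis']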


Example 5: write

  def write(self, target, path, confs=None, extra_confs=None):
    # TODO(John Sirois): a dict is used here to de-dup codegen targets which have both the original
    # codegen target - say java_thrift_library - and the synthetic generated target (java_library)
    # Consider reworking codegen tasks to add removal of the original codegen targets when rewriting
    # the graph
    dependencies = OrderedDict()
    internal_codegen = {}
    configurations = set(confs or [])
    for dep in target_internal_dependencies(target):
      jar = self._as_versioned_jar(dep)
      dependencies[(jar.org, jar.name)] = self.internaldep(jar, dep)
      if dep.is_codegen:
        internal_codegen[jar.name] = jar.name
    for jar in target.jar_dependencies:
      if jar.rev:
        dependencies[(jar.org, jar.name)] = self.jardep(jar)
        configurations |= set(jar._configurations)

    target_jar = self.internaldep(self._as_versioned_jar(target),
                                  configurations=list(configurations))
    target_jar = target_jar.extend(dependencies=dependencies.values())

    template_kwargs = self.templateargs(target_jar, confs, extra_confs)
    with safe_open(path, 'w') as output:
      template = pkgutil.get_data(self.template_package_name, self.template_relpath)
      Generator(template, **template_kwargs).write(output)
Developer: amedina, Project: pants, Lines of code: 26, Source: jar_publish.py


Example 6: identify_zinc_jars

  def identify_zinc_jars(zinc_classpath):
    """Find the named jars in the zinc classpath.

    TODO: Make these mappings explicit instead of deriving them by jar name heuristics.
    """
    ret = OrderedDict()
    ret.update(ZincUtils.identify_jars(ZincUtils.ZINC_JAR_NAMES, zinc_classpath))
    return ret
Developer: cheecheeo, Project: pants, Lines of code: 8, Source: zinc_utils.py


Example 7: identify_zinc_jars

  def identify_zinc_jars(zinc_classpath):
    """Find the named jars in the zinc classpath.

    TODO: When profiles migrate to regular pants jar() deps instead of ivy.xml files we can
          make these mappings explicit instead of deriving them by jar name heuristics.
    """
    ret = OrderedDict()
    ret.update(ZincUtils.identify_jars(ZincUtils.zinc_jar_names, zinc_classpath))
    return ret
Developer: ssalevan, Project: commons, Lines of code: 9, Source: zinc_utils.py


Example 8: identify_zinc_jars

  def identify_zinc_jars(compiler_classpath, zinc_classpath):
    """Find the named jars in the compiler and zinc classpaths.

    TODO: When profiles migrate to regular pants jar() deps instead of ivy.xml files we can make these
          mappings explicit instead of deriving them by jar name heuristics.
    """
    ret = OrderedDict()
    ret.update(ScalaCompile.identify_jars(ScalaCompile.compiler_jar_names, compiler_classpath))
    ret.update(ScalaCompile.identify_jars(ScalaCompile.zinc_jar_names, zinc_classpath))
    return ret
Developer: lxwuchang, Project: commons, Lines of code: 10, Source: scala_compile.py


Example 9: compile

  def compile(self, classpath, sources, output_dir, analysis_cache, upstream_analysis_caches, depfile):
    safe_mkdir(output_dir)
    compiler_classpath = nailgun_profile_classpath(self, self._compile_profile)
    compiler_args = []

    # TODO(John Sirois): separate compiler profile from runtime profile
    compiler_args.extend([
      # Support for outputting a dependencies file of source -> class
      '-Xplugin:%s' % self.get_depemitter_plugin(),
      '-P:depemitter:file:%s' % depfile
    ])
    compiler_args.extend(self._args)

    # To pass options to scalac simply prefix with -S.
    args = ['-S' + x for x in compiler_args]

    def analysis_cache_full_path(analysis_cache_product):
      # We expect the argument to be { analysis_cache_dir, [ analysis_cache_file ]}.
      if len(analysis_cache_product) != 1:
        raise TaskError('There can only be one analysis cache file per output directory')
      analysis_cache_dir, analysis_cache_files = analysis_cache_product.iteritems().next()
      if len(analysis_cache_files) != 1:
        raise TaskError('There can only be one analysis cache file per output directory')
      return os.path.join(analysis_cache_dir, analysis_cache_files[0])

    # Strings of <output dir>:<full path to analysis cache file for the classes in that dir>.
    analysis_map = \
      OrderedDict([ (k, analysis_cache_full_path(v)) for k, v in upstream_analysis_caches.itermappings() ])

    if len(analysis_map) > 0:
      args.extend([ '-analysis-map', ','.join(['%s:%s' % kv for kv in analysis_map.items()]) ])
    upstream_classes_dirs = analysis_map.keys()

    zinc_classpath = nailgun_profile_classpath(self, self._zinc_profile)
    zinc_jars = ScalaCompile.identify_zinc_jars(compiler_classpath, zinc_classpath)
    for (name, jarpath) in zinc_jars.items():  # The zinc jar names are also the flag names.
      args.extend(['-%s' % name, jarpath])

    args.extend([
      '-analysis-cache', analysis_cache,
      '-log-level', self.context.options.log_level or 'info',
      '-classpath', ':'.join(zinc_classpath + classpath + upstream_classes_dirs),
      '-d', output_dir
    ])

    if not self._color:
      args.append('-no-color')

    args.extend(sources)

    self.context.log.debug('Executing: %s %s' % (self._main, ' '.join(args)))
    return self.runjava(self._main, classpath=zinc_classpath, args=args, jvmargs=self._jvm_args)
Developer: jdanbrown, Project: commons, Lines of code: 52, Source: scala_compile.py


Example 10: reset

 def reset(self):
   """Clear out the state of the BuildGraph, in particular Target mappings and dependencies."""
   self._addresses_already_closed = set()
   self._target_by_address = OrderedDict()
   self._target_dependencies_by_address = defaultdict(OrderedSet)
   self._target_dependees_by_address = defaultdict(set)
   self._derived_from_by_derivative_address = {}
Developer: digideskio, Project: pants, Lines of code: 7, Source: build_graph.py


Example 11: __init__

 def __init__(self,
              checkpoint_root,
              verbose=True,
              task_killer=TaskKiller,
              executor_detector=ExecutorDetector,
              task_garbage_collector=TaskGarbageCollector,
              clock=time):
   ExecutorBase.__init__(self)
   ExceptionalThread.__init__(self)
   self.daemon = True
   self._stop_event = threading.Event()
   # mapping of task_id => (TaskInfo, AdjustRetainedTasks), in the order in
   # which they were received via a launchTask.
   self._gc_task_queue = OrderedDict()
   # cache the ExecutorDriver provided by the slave, so we can use it out
   # of band from slave-initiated callbacks.  This should be supplied by
   # ExecutorBase.registered() when the executor first registers with the
   # slave.
   self._driver = None
   self._slave_id = None  # cache the slave ID provided by the slave
   self._task_id = None  # the task_id currently being executed by the ThermosGCExecutor, if any
   self._start_time = None  # the start time of a task currently being executed, if any
   self._detector = executor_detector()
   self._collector = task_garbage_collector(root=checkpoint_root)
   self._clock = clock
   self._task_killer = task_killer
   self._checkpoint_root = checkpoint_root
   self._dropped_tasks = AtomicGauge('dropped_tasks')
   self.metrics.register(self._dropped_tasks)
Developer: josephglanville, Project: incubator-aurora, Lines of code: 29, Source: gc_executor.py


Example 12: __init__

  def __init__(self, older_than=120, aggregation_depth=0):
    """
    datapoints that are `older_than` will be dropped
    if aggregation_depth > 0 then we aggregate for paths up to that depth
    """
    self._older_than = older_than
    self._aggregation_depth = aggregation_depth
    self._requests_by_timestamp = OrderedDict()
    self._lock = Lock()

    super(PerPathDatapoints, self).__init__()
Developer: Yasumoto, Project: zktraffic, Lines of code: 11, Source: per_path_datapoints.py


Example 13: test_default_maybe_list

def test_default_maybe_list():
  HELLO_WORLD = ['hello', 'world']
  assert maybe_list('hello') == ['hello']
  assert maybe_list(('hello', 'world')) == HELLO_WORLD
  assert maybe_list(['hello', 'world']) == HELLO_WORLD
  assert maybe_list(OrderedSet(['hello', 'world', 'hello'])) == HELLO_WORLD
  assert maybe_list(s for s in ('hello', 'world')) == HELLO_WORLD
  od = OrderedDict(hello=1)
  od.update(world=2)
  assert maybe_list(od) == HELLO_WORLD
  assert maybe_list([]) == []
  assert maybe_list(()) == []
  assert maybe_list(set()) == []

  with pytest.raises(ValueError):
    maybe_list(123)
  with pytest.raises(ValueError):
    maybe_list(['hello', 123])

  assert maybe_list(['hello', 123], expected_type=(str, int)) == ['hello', 123]
  assert maybe_list(['hello', 123], expected_type=(int, str)) == ['hello', 123]
Developer: BabyDuncan, Project: commons, Lines of code: 21, Source: test_maybe_list.py


Example 14: compile

  def compile(self, classpath, sources, output_dir, analysis_cache, upstream_analysis_caches, depfile):
    # To pass options to scalac simply prefix with -S.
    args = ['-S' + x for x in self._args]

    def analysis_cache_full_path(analysis_cache_product):
      # We expect the argument to be { analysis_cache_dir, [ analysis_cache_file ]}.
      if len(analysis_cache_product) != 1:
        raise TaskError('There can only be one analysis cache file per output directory')
      analysis_cache_dir, analysis_cache_files = analysis_cache_product.iteritems().next()
      if len(analysis_cache_files) != 1:
        raise TaskError('There can only be one analysis cache file per output directory')
      return os.path.join(analysis_cache_dir, analysis_cache_files[0])

    # Strings of <output dir>:<full path to analysis cache file for the classes in that dir>.
    analysis_map = \
      OrderedDict([ (k, analysis_cache_full_path(v)) for k, v in upstream_analysis_caches.itermappings() ])

    args.extend(self._zinc_jar_args)

    if len(analysis_map) > 0:
      args.extend([ '-analysis-map', ','.join(['%s:%s' % kv for kv in analysis_map.items()]) ])

    args.extend([
      '-analysis-cache', analysis_cache,
      '-log-level', self.context.options.log_level or 'info',
      '-classpath', ':'.join(self._zinc_classpath + classpath),
      '-output-products', depfile,
      '-mirror-analysis',
      '-d', output_dir
    ])

    if not self._color:
      args.append('-no-color')

    args.extend(sources)

    self.context.log.debug('Executing: %s %s' % (self._main, ' '.join(args)))
    return self.runjava(self._main, classpath=self._zinc_classpath, args=args, jvmargs=self._jvm_args)
Developer: SeungEun, Project: commons, Lines of code: 38, Source: scala_compile.py


Example 15: BuildGraph

class BuildGraph(object):
  """A directed acyclic graph of Targets and dependencies. Not necessarily connected.
  """

  class DuplicateAddressError(AddressLookupError):
    """The same address appears multiple times in a dependency list"""

  class TransitiveLookupError(AddressLookupError):
    """Used to append the current node to the error message from an AddressLookupError """

  def __init__(self, address_mapper, run_tracker=None):
    self._address_mapper = address_mapper
    self.run_tracker = run_tracker
    self.reset()

  def reset(self):
    """Clear out the state of the BuildGraph, in particular Target mappings and dependencies."""
    self._addresses_already_closed = set()
    self._target_by_address = OrderedDict()
    self._target_dependencies_by_address = defaultdict(OrderedSet)
    self._target_dependees_by_address = defaultdict(set)
    self._derived_from_by_derivative_address = {}

  def contains_address(self, address):
    return address in self._target_by_address

  def get_target_from_spec(self, spec, relative_to=''):
    """Converts `spec` into a SyntheticAddress and returns the result of `get_target`"""
    return self.get_target(SyntheticAddress.parse(spec, relative_to=relative_to))

  def get_target(self, address):
    """Returns the Target at `address` if it has been injected into the BuildGraph, otherwise None.
    """
    return self._target_by_address.get(address, None)

  def dependencies_of(self, address):
    """Returns the dependencies of the Target at `address`.

    This method asserts that the address given is actually in the BuildGraph.
    """
    assert address in self._target_by_address, (
      'Cannot retrieve dependencies of {address} because it is not in the BuildGraph.'
      .format(address=address)
    )
    return self._target_dependencies_by_address[address]

  def dependents_of(self, address):
    """Returns the Targets which depend on the target at `address`.

    This method asserts that the address given is actually in the BuildGraph.
    """
    assert address in self._target_by_address, (
      'Cannot retrieve dependents of {address} because it is not in the BuildGraph.'
      .format(address=address)
    )
    return self._target_dependees_by_address[address]

  def get_derived_from(self, address):
    """Get the target the specified target was derived from.

    If a Target was injected programmatically, e.g. from codegen, this allows us to trace its
    ancestry.  If a Target is not derived, default to returning itself.
    """
    parent_address = self._derived_from_by_derivative_address.get(address, address)
    return self.get_target(parent_address)

  def get_concrete_derived_from(self, address):
    """Get the concrete target the specified target was (directly or indirectly) derived from.

    The returned target is guaranteed to not have been derived from any other target.
    """
    current_address = address
    next_address = self._derived_from_by_derivative_address.get(current_address, current_address)
    while next_address != current_address:
      current_address = next_address
      next_address = self._derived_from_by_derivative_address.get(current_address, current_address)
    return self.get_target(current_address)

  def inject_target(self, target, dependencies=None, derived_from=None):
    """Injects a fully realized Target into the BuildGraph.

    :param Target target: The Target to inject.
    :param list<Address> dependencies: The Target addresses that `target` depends on.
    :param Target derived_from: The Target that `target` was derived from, usually as a result
      of codegen.
    """

    dependencies = dependencies or frozenset()
    address = target.address

    if address in self._target_by_address:
      raise ValueError('A Target {existing_target} already exists in the BuildGraph at address'
                       ' {address}.  Failed to insert {target}.'
                       .format(existing_target=self._target_by_address[address],
                               address=address,
                               target=target))

    if derived_from:
      if not self.contains_address(derived_from.address):
        raise ValueError('Attempted to inject synthetic {target} derived from {derived_from}'
#......... remainder of this code omitted .........
Developer: digideskio, Project: pants, Lines of code: 101, Source: build_graph.py


Example 16: PerPathDatapoints

class PerPathDatapoints(Thread):
  PURGE_SLEEP_TIME = 2  # sleep time between purging old datapoints
  DEFAULT_TOP_RESULTS = 10  # number of (top) results to show by default

  def __init__(self, older_than=120, aggregation_depth=0):
    """
    datapoints that are `older_than` will be dropped
    if aggregation_depth > 0 then we aggregate for paths up to that depth
    """
    self._older_than = older_than
    self._aggregation_depth = aggregation_depth
    self._requests_by_timestamp = OrderedDict()
    self._lock = Lock()

    super(PerPathDatapoints, self).__init__()

  def size(self):
    size = {"samples": 0, "requests_mem_usage": 0}
    with self._lock:
      samples, mem_usage = 0, 0
      for reqs in self._requests_by_timestamp.values():
        samples += len(reqs)
        mem_usage += sum(sys.getsizeof(r) for r in reqs)

    size["samples"] = samples
    size["requests_mem_usage"] = mem_usage
    size["requests_mem_usage"] = sizeof_fmt(size["requests_mem_usage"])
    size["ordered_dict_mem_usage"] = sizeof_fmt(sys.getsizeof(self._requests_by_timestamp))

    return size

  def run(self):
    """ drop samples that are too old """
    while True:
      time.sleep(self.PURGE_SLEEP_TIME)
      old_tstamp = time.time() - self._older_than
      with self._lock:
        for tstamp in self._requests_by_timestamp.keys():
          if tstamp < old_tstamp:
            del self._requests_by_timestamp[tstamp]

  def handle_request(self, request):
    if self._aggregation_depth > 0:
      request.path = intern(request.parent_path(self._aggregation_depth))

    with self._lock:
      tstamp = int(time.time())
      if tstamp not in self._requests_by_timestamp:
        self._requests_by_timestamp[tstamp] = []
      self._requests_by_timestamp[tstamp].append(request)

  def sum_minute(self, top=DEFAULT_TOP_RESULTS, order_by=Counters.WRITES,
                 display=[Counters.ALL], view=AccumulatedStats.VIEW_BY_PATH):
    now = int(time.time())
    old = now - NUMBER_OF_DATAPOINTS
    stats = AccumulatedStats(StatsConfig())

    with self._lock:
      # note that this is an OrderedDict so samples are in chronological order
      for tstamp in self._requests_by_timestamp.keys():
        if tstamp < old:
          continue

        if tstamp > now:
          break

        for r in self._requests_by_timestamp[tstamp]:
          stats.handle_request(r)

    return stats.dict(top=top,
                      order_by=order_by,
                      display_filters=display,
                      view=view)

  def datapoints_writes(self):
    return self._filter_datapoints(condition=lambda req: req.is_write)

  def datapoints_reads(self):
    return self._filter_datapoints(condition=lambda req: not req.is_write)

  def datapoints_for_op(self, op):
    return self._filter_datapoints(condition=lambda req: req.opcode == op)

  def datapoints_by_path_for_op(self, op, top):
    """ op is "writes" or "reads" or one of OpCodes.CREATE, OpCodes.SETDATA, etc.
        because why use Python if you can't abuse types?
        top is the number of results
    """
    if op == "writes":
      return self._datapoints_by_path_for_op_impl(lambda r: r.is_write, top)
    elif op == "reads":
      return self._datapoints_by_path_for_op_impl(lambda r: not r.is_write, top)
    else:
      return self._datapoints_by_path_for_op_impl(lambda r: r.opcode == op, top)

  def _datapoints_by_path_for_op_impl(self, request_filter, top):
    """ to make this moderately efficient we use a dict that
    provides a pre-populated list of datapoints.
    """
    tstamp = int(time.time()) - NUMBER_OF_DATAPOINTS
#......... remainder of this code omitted .........
Developer: Yasumoto, Project: zktraffic, Lines of code: 101, Source: per_path_datapoints.py
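Example 16 uses the OrderedDict's insertion order as a cheap time index: requests are appended under their integer timestamp, so iterating the dict walks samples chronologically and purging only has to look at the oldest buckets first. A stripped-down, hypothetical version of that bucketing and purge logic, without the thread and lock machinery:

import time
from collections import OrderedDict

class TimeBuckets(object):
  """Per-second buckets of samples; insertion order doubles as chronological order."""

  def __init__(self, older_than=120):
    self._older_than = older_than
    self._by_timestamp = OrderedDict()

  def add(self, sample, now=None):
    tstamp = int(now if now is not None else time.time())
    self._by_timestamp.setdefault(tstamp, []).append(sample)

  def purge(self, now=None):
    cutoff = (now if now is not None else time.time()) - self._older_than
    # Assumes buckets were inserted in increasing time order, so the oldest come first.
    for tstamp in list(self._by_timestamp):
      if tstamp >= cutoff:
        break
      del self._by_timestamp[tstamp]

  def samples(self):
    # Chronological walk over all retained samples.
    for tstamp, reqs in self._by_timestamp.items():
      for r in reqs:
        yield tstamp, r

buckets = TimeBuckets(older_than=60)
buckets.add('getdata /foo', now=1000)
buckets.add('create /bar', now=1030)
buckets.purge(now=1070)               # drops the bucket at t=1000 (older than 60 seconds)
print(list(buckets.samples()))        # [(1030, 'create /bar')]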


Example 17: ThermosGCExecutor

class ThermosGCExecutor(ExecutorBase, ExceptionalThread, Observable):
  """
    Thermos GC Executor, responsible for:
      - garbage collecting old tasks to make sure they don't clutter up the system
      - state reconciliation with the scheduler (in case it thinks we're running
        something we're not or vice versa.)
  """
  MAX_PID_TIME_DRIFT = Amount(10, Time.SECONDS)
  MAX_CHECKPOINT_TIME_DRIFT = Amount(1, Time.HOURS)  # maximum runner disconnection time

  # how old a task must be before we're willing to kill it, assuming that there could be
  # slight races in the following scenario:
  #    launch gc with retained_tasks={t1, t2, t3}
  #    launch task t4
  MINIMUM_KILL_AGE = Amount(10, Time.MINUTES)

  # wait time between checking for new GC events from the slave and/or cleaning orphaned tasks
  POLL_WAIT = Amount(5, Time.MINUTES)

  # maximum amount of time the executor will wait with no tasks before it exits.
  MAXIMUM_EXECUTOR_WAIT = Amount(15, Time.MINUTES)

  # maximum lifetime of this executor.  this is to prevent older GC executor binaries from
  # running forever
  MAXIMUM_EXECUTOR_LIFETIME = Amount(1, Time.DAYS)

  PERSISTENCE_WAIT = Amount(5, Time.SECONDS)

  def __init__(self,
               checkpoint_root,
               verbose=True,
               task_killer=TaskKiller,
               executor_detector=ExecutorDetector,
               task_garbage_collector=TaskGarbageCollector,
               clock=time):
    ExecutorBase.__init__(self)
    ExceptionalThread.__init__(self)
    self.daemon = True
    self._stop_event = threading.Event()
    # mapping of task_id => (TaskInfo, AdjustRetainedTasks), in the order in
    # which they were received via a launchTask.
    self._gc_task_queue = OrderedDict()
    # cache the ExecutorDriver provided by the slave, so we can use it out
    # of band from slave-initiated callbacks.  This should be supplied by
    # ExecutorBase.registered() when the executor first registers with the
    # slave.
    self._driver = None
    self._slave_id = None  # cache the slave ID provided by the slave
    self._task_id = None  # the task_id currently being executed by the ThermosGCExecutor, if any
    self._start_time = None  # the start time of a task currently being executed, if any
    self._detector = executor_detector()
    self._collector = task_garbage_collector(root=checkpoint_root)
    self._clock = clock
    self._task_killer = task_killer
    self._checkpoint_root = checkpoint_root
    self._dropped_tasks = AtomicGauge('dropped_tasks')
    self.metrics.register(self._dropped_tasks)

  def _runner_ckpt(self, task_id):
    """Return the runner checkpoint file for a given task_id."""
    return TaskPath(root=self._checkpoint_root, task_id=task_id).getpath('runner_checkpoint')

  def _terminate_task(self, task_id, kill=True):
    """Terminate a task using the associated task killer. Returns a boolean indicating success."""
    killer = self._task_killer(task_id, self._checkpoint_root)
    self.log('Terminating %s...' % task_id)
    runner_terminate = killer.kill if kill else killer.lose
    try:
      runner_terminate(force=True)
      return True
    except Exception as e:
      self.log('Could not terminate: %s' % e)
      return False

  def partition_tasks(self):
    """Return active/finished tasks as discovered from the checkpoint root."""
    detector = TaskDetector(root=self._checkpoint_root)
    active_tasks = set(t_id for _, t_id in detector.get_task_ids(state='active'))
    finished_tasks = set(t_id for _, t_id in detector.get_task_ids(state='finished'))
    return active_tasks, finished_tasks

  def get_states(self, task_id):
    """Returns the (timestamp, status) tuples of the task or [] if could not replay."""
    statuses = CheckpointDispatcher.iter_statuses(self._runner_ckpt(task_id))
    try:
      return [(state.timestamp_ms / 1000.0, state.state) for state in statuses]
    except CheckpointDispatcher.ErrorRecoveringState:
      return []

  def get_sandbox(self, task_id):
    """Returns the sandbox of the task, or None if it has not yet been initialized."""
    try:
      for update in CheckpointDispatcher.iter_updates(self._runner_ckpt(task_id)):
        if update.runner_header and update.runner_header.sandbox:
          return update.runner_header.sandbox
    except CheckpointDispatcher.ErrorRecoveringState:
      return None

  def maybe_terminate_unknown_task(self, task_id):
    """Terminate a task if we believe the scheduler doesn't know about it.
#......... remainder of this code omitted .........
Developer: josephglanville, Project: incubator-aurora, Lines of code: 101, Source: gc_executor.py


Example 18: attempt

    def attempt(self, timer, explain):
      """Executes the named phase against the current context tracking goal executions in executed.
      """

      def execute_task(goal, task, targets):
        """Execute and time a single goal that has had all of its dependencies satisfied."""
        with timer.timed(goal):
          # TODO (Senthil Kumaran):
          # Possible refactoring of the Task Execution Logic (AWESOME-1019)
          if explain:
            self._context.log.debug("Skipping execution of %s in explain mode" % goal.name)
          else:
            task.execute(targets)

      goals = self._phase.goals()
      if not goals:
        raise TaskError('No goals installed for phase %s' % self._phase)

      run_queue = []
      goals_by_group = {}
      for goal in goals:
        if goal.group:
          group_name = goal.group.name
          if group_name not in goals_by_group:
            group_goals = [goal]
            run_queue.append((group_name, group_goals))
            goals_by_group[group_name] = group_goals
          else:
            goals_by_group[group_name].append(goal)
        else:
          run_queue.append((None, [goal]))

      with self._context.new_workunit(name=self._phase.name, labels=[WorkUnit.PHASE]):
        # OrderedSet takes care of not repeating chunked task execution mentions
        execution_phases = defaultdict(OrderedSet)

        for group_name, goals in run_queue:
          if not group_name:
            goal = goals[0]
            execution_phases[self._phase].add(goal.name)
            with self._context.new_workunit(name=goal.name, labels=[WorkUnit.GOAL]):
              execute_task(goal, self._tasks_by_goal[goal], self._context.targets())
          else:
            with self._context.new_workunit(name=group_name, labels=[WorkUnit.GROUP]):
              goals_by_group_member = OrderedDict((GroupMember.from_goal(g), g) for g in goals)

              # First, divide the set of all targets to be built into compatible chunks, based
              # on their declared exclusives. Then, for each chunk of compatible exclusives, do
              # further sub-chunking. At the end, we'll have a list of chunks to be built,
              # which will go through the chunks of each exclusives-compatible group separately.

              # TODO(markcc); chunks with incompatible exclusives require separate ivy resolves.
              # Either interleave the ivy task in this group so that it runs once for each batch of
              # chunks with compatible exclusives, or make the compilation tasks do their own ivy
              # resolves for each batch of targets they're asked to compile.

              goal_chunks = []

              # We won't have exclusives calculated if stopping short for example during an explain.
              if explain:
                exclusive_chunks = [self._context.targets()]
              else:
                exclusive_chunks = ExclusivesIterator.from_context(self._context)

              for exclusive_chunk in exclusive_chunks:
                # TODO(Travis Crawford): Targets should be filtered by is_concrete rather than
                # is_internal, however, at this time python targets are not internal targets.
                group_chunks = GroupIterator(filter(lambda t: t.is_internal, exclusive_chunk),
                                             goals_by_group_member.keys())
                goal_chunks.extend(group_chunks)

              self._context.log.debug('::: created chunks(%d)' % len(goal_chunks))
              for i, (group_member, goal_chunk) in enumerate(goal_chunks):
                self._context.log.debug('  chunk(%d) [flavor=%s]:\n\t%s' % (
                    i, group_member.name, '\n\t'.join(sorted(map(str, goal_chunk)))))

              for group_member, goal_chunk in goal_chunks:
                goal = goals_by_group_member[group_member]
                execution_phases[self._phase].add((group_name, goal.name))
                with self._context.new_workunit(name=goal.name, labels=[WorkUnit.GOAL]):
                  execute_task(goal, self._tasks_by_goal[goal], goal_chunk)

        if explain:
          tasks_by_goalname = dict((goal.name, task.__class__.__name__)
                                   for goal, task in self._tasks_by_goal.items())

          def expand_goal(goal):
            if len(goal) == 2:  # goal is (group, goal)
              group_name, goal_name = goal
              task_name = tasks_by_goalname[goal_name]
              return "%s:%s->%s" % (group_name, goal_name, task_name)
            else:
              task_name = tasks_by_goalname[goal]
              return "%s->%s" % (goal, task_name)

          for phase, goals in execution_phases.items():
            goal_to_task = ", ".join(expand_goal(goal) for goal in goals)
            print("%s [%s]" % (phase, goal_to_task))
Developer: govindkabra, Project: pants, Lines of code: 98, Source: group_engine.py



Note: The twitter.common.collections.OrderedDict class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are drawn from open-source projects contributed by their respective authors, and copyright remains with those authors; please consult each project's license before redistributing or using the code. Do not reproduce without permission.

