Python internal_client.InternalClient class code examples


This article collects typical usage examples of the swift.common.internal_client.InternalClient class in Python. If you are wondering how the InternalClient class is used in practice, the curated code examples below may help.



The following presents 20 code examples of the InternalClient class, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code examples.
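Before looking at the individual examples, the following minimal sketch shows the pattern almost all of them share: build an InternalClient from an internal proxy config path, a user-agent label, and a retry count, then drive it with methods such as iter_containers() and iter_objects(). The config path and account name below are placeholders chosen for illustration, not values taken from any example on this page.

# Minimal usage sketch; the conf path and account name are hypothetical.
from swift.common.internal_client import InternalClient

# The three constructor arguments are the same ones every example below uses:
# a proxy/internal-client config file, a user-agent label, and request_tries.
swift = InternalClient('/etc/swift/internal-client.conf', 'example-client', 3)

account = 'AUTH_test'  # hypothetical account
for container in swift.iter_containers(account):
    for obj in swift.iter_objects(account, container['name']):
        # Listing entries are dicts with at least 'name' and 'bytes' keys,
        # which is what Example 1 below relies on.
        print(container['name'], obj['name'], obj['bytes'])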

Example 1: download

    def download(self, acc, container, u_agent, delay=0, request_tries=3):
        self.logger.info('Prefetching objects with InternalClient with ' + str(delay) + ' seconds of delay.')
        time.sleep(delay)
        swift = InternalClient(PROXY_PATH, u_agent, request_tries=request_tries)
        headers = {}

        prefetch_list = []
        bytes_count = 0
        for o in swift.iter_objects(acc, container):
            if bytes_count + int(o['bytes']) < self.cache_max_size:
                prefetch_list.append(o['name'])
                bytes_count += int(o['bytes'])
            else:
                break

        for name in prefetch_list:
            object_path = '/v1/' + acc + '/' + container + '/' + name
            oid = hashlib.md5(object_path).hexdigest()

            status, resp_headers, it = swift.get_object(acc, container, name, headers, ACCEPTABLE_STATUS)

            object_size = int(resp_headers.get('Content-Length'))
            object_etag = resp_headers.get('Etag')

            object_storage_policy_id = '0'  # FIXME hardcoded
            to_evict = self.cache.access_cache("PUT", oid, object_size, object_etag, object_storage_policy_id)
            for ev_object_id in to_evict:
                os.remove(os.path.join(self.cache_path, ev_object_id))

            self.logger.info('Prefetch Filter - Object ' + name + ' stored in cache with ID: ' + oid)
            with open(os.path.join(self.cache_path, oid), 'w') as f:
                for el in it:
                    f.write(el)
Developer: Crystal-SDS, project: Crystal-Filter-Samples, lines of code: 33, source: crystal_prefetch_control.py


Example 2: make_swift_request

def make_swift_request(op, account, container=None, obj=None):
    """
    Makes a swift request via a local proxy (costly)

    :param op: operation (PUT, GET, DELETE, HEAD)
    :param account: swift account
    :param container: swift container
    :param obj: swift object
    :returns: swift.common.swob.Response instance
    """
    iclient = InternalClient(LOCAL_PROXY, 'SA', 1)
    path = iclient.make_path(account, container, obj)
    resp = iclient.make_request(op, path, {'PATH_INFO': path}, [200])

    return resp
Developer: iostackproject, project: vertigo, lines of code: 15, source: utils.py
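For context, a call to the helper above might look like the following. The account and container names are made up, and LOCAL_PROXY is assumed to be the module-level path to the node-local proxy config, as in the original utils module.

# Hypothetical usage of make_swift_request(); names are illustrative only.
resp = make_swift_request('HEAD', 'AUTH_test', container='images')
print(resp.status_int, resp.headers)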


Example 3: __init__

 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='utilization-aggregator')
     self.interval = int(conf.get('interval') or 60)
     self.aggregate_account = '.utilization'
     self.sample_account = '.transfer_record'
     conf_path = conf.get('__file__') or \
                 '/etc/swift/swift-utilization-aggregator.conf'
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = InternalClient(conf_path,
                                 'Swift Utilization Aggregator',
                                 request_tries)
     self.report_interval = int(conf.get('report_interval') or 60)
     self.report_first_time = self.report_last_time = time()
     self.report_containers = 0
     self.report_objects = 0
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = join(self.recon_cache_path, 'object.recon')
     self.concurrency = int(conf.get('concurrency', 1))
     if self.concurrency < 1:
         raise ValueError("concurrency must be set to at least 1")
     self.processes = int(self.conf.get('processes', 0))
     self.process = int(self.conf.get('process', 0))
     self.container_ring = Ring('/etc/swift', ring_name='container')
     self.sample_rate = int(self.conf.get('sample_rate', 600))
     self.last_chk = iso8601_to_timestamp(self.conf.get(
         'service_start'))
     self.kinx_api_url = self.conf.get('kinx_api_url')
Developer: KoreaCloudObjectStorage, project: swift-utilization, lines of code: 29, source: aggregator.py


Example 4: __init__

 def __init__(self, conf):
     self.conf = conf
     self.container_ring = Ring('/etc/swift', ring_name='container')
     self.logger = get_logger(conf, log_route='object-restorer')
     self.logger.set_statsd_prefix('s3-object-restorer')
     self.interval = int(conf.get('interval') or 300)
     self.restoring_object_account = '.s3_restoring_objects'
     self.expiring_restored_account = '.s3_expiring_restored_objects'
     self.glacier_account_prefix = '.glacier_'
     self.todo_container = 'todo'
     self.restoring_container = 'restoring'
     conf_path = '/etc/swift/s3-object-restorer.conf'
     request_tries = int(conf.get('request_tries') or 3)
     self.glacier = self._init_glacier()
     self.glacier_tmpdir = conf.get('temp_path', '/var/cache/s3/')
     self.swift = InternalClient(conf_path,
                                 'Swift Object Restorer',
                                 request_tries)
     self.report_interval = int(conf.get('report_interval') or 300)
     self.report_first_time = self.report_last_time = time()
     self.report_objects = 0
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = join(self.recon_cache_path, 'object.recon')
     self.concurrency = int(conf.get('concurrency', 1))
     if self.concurrency < 1:
         raise ValueError("concurrency must be set to at least 1")
     self.processes = int(self.conf.get('processes', 0))
     self.process = int(self.conf.get('process', 0))
     self.client = Client(self.conf.get('sentry_sdn', ''))
Developer: KoreaCloudObjectStorage, project: swift-lifecycle-management, lines of code: 30, source: restorer.py


Example 5: __init__

 def __init__(self, conf):
     super(ObjectExpirer, self).__init__(conf)
     self.conf = conf
     self.logger = get_logger(conf, log_route='s3-object-expirer')
     self.logger.set_statsd_prefix('s3-object-expirer')
     self.interval = int(conf.get('interval') or 300)
     self.s3_expiring_objects_account = \
         (conf.get('auto_create_account_prefix') or '.') + \
         (conf.get('expiring_objects_account_name') or
          's3_expiring_objects')
     conf_path = conf.get('__file__') or '/etc/swift/s3-object-expirer.conf'
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = InternalClient(conf_path,
                                 'Swift Object Expirer',
                                 request_tries)
     self.glacier = self._init_glacier()
     self.glacier_account_prefix = '.glacier_'
     self.report_interval = int(conf.get('report_interval') or 300)
     self.report_first_time = self.report_last_time = time()
     self.report_objects = 0
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = join(self.recon_cache_path, 'object.recon')
     self.concurrency = int(conf.get('concurrency', 1))
     if self.concurrency < 1:
         raise ValueError("concurrency must be set to at least 1")
     self.processes = int(self.conf.get('processes', 0))
     self.process = int(self.conf.get('process', 0))
     self.client = Client(self.conf.get('sentry_sdn', ''))
Developer: KoreaCloudObjectStorage, project: swift-lifecycle-management, lines of code: 29, source: expirer.py


Example 6: __init__

 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route="object-expirer")
     self.interval = int(conf.get("interval") or 300)
     self.expiring_objects_account = (conf.get("auto_create_account_prefix") or ".") + "expiring_objects"
     conf_path = conf.get("__file__") or "/etc/swift/object-expirer.conf"
     request_tries = int(conf.get("request_tries") or 3)
     self.swift = InternalClient(conf_path, "Swift Object Expirer", request_tries)
     self.report_interval = int(conf.get("report_interval") or 300)
     self.report_first_time = self.report_last_time = time()
     self.report_objects = 0
     self.recon_cache_path = conf.get("recon_cache_path", "/var/cache/swift")
     self.rcache = join(self.recon_cache_path, "object.recon")
Developer: blapid, project: swift, lines of code: 13, source: expirer.py


Example 7: __init__

 def __init__(self, conf):
     self.conf = conf
     self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
     self.interval = int(conf.get('interval', 30))
     conf_path = conf.get('__file__') or \
         '/etc/swift/container-reconciler.conf'
     self.logger = get_logger(conf, log_route='container-reconciler')
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = InternalClient(conf_path,
                                 'Swift Container Reconciler',
                                 request_tries)
     self.stats = defaultdict(int)
     self.last_stat_time = time.time()
Developer: 701, project: swift, lines of code: 13, source: reconciler.py


Example 8: __init__

 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-expirer')
     self.interval = int(conf.get('interval') or 300)
     self.expiring_objects_account = \
         (conf.get('auto_create_account_prefix') or '.') + \
         'expiring_objects'
     conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = InternalClient(conf_path, 'Swift Object Expirer',
         request_tries)
     self.report_interval = int(conf.get('report_interval') or 300)
     self.report_first_time = self.report_last_time = time()
     self.report_objects = 0
Developer: bn-emailops, project: swift, lines of code: 14, source: expirer.py


Example 9: setUp

    def setUp(self):
        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise unittest.SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)
Developer: jgmerritt, project: swift, lines of code: 18, source: test_object_expirer.py


Example 10: setUp

    def setUp(self):
        if len(ENABLED_POLICIES) < 2:
            raise SkipTest("Need more than one policy")

        self.expirer = Manager(["object-expirer"])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise SkipTest("Unable to verify object-expirer service")

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, "probe-test", 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = "container-%s" % uuid.uuid4()
        self.object_name = "object-%s" % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name, self.object_name)
Developer: iloveyou416068, project: swift-1, lines of code: 20, source: test_object_expirer.py


Example 11: __init__

 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='restored-object-expirer')
     self.logger.set_statsd_prefix('s3-restored-object-expirer')
     self.interval = int(conf.get('interval') or 300)
     self.expire_restored_account = '.s3_expiring_restored_objects'
     conf_path = '/etc/swift/s3-restored-object-expirer.conf'
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = InternalClient(conf_path,
                                 'Swift Restored Object Expirer',
                                 request_tries)
     self.report_interval = int(conf.get('report_interval') or 300)
     self.report_first_time = self.report_last_time = time()
     self.report_objects = 0
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = join(self.recon_cache_path, 'object.recon')
     self.concurrency = int(conf.get('concurrency', 1))
     if self.concurrency < 1:
         raise ValueError("concurrency must be set to at least 1")
     self.processes = int(self.conf.get('processes', 0))
     self.process = int(self.conf.get('process', 0))
     self.client = Client(self.conf.get('sentry_sdn', ''))
Developer: KoreaCloudObjectStorage, project: swift-lifecycle-management, lines of code: 23, source: restoredexpirer.py


Example 12: setUp

    def setUp(self):
        if len(POLICIES) < 2:
            raise SkipTest('Need more than one policy')

        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        (self.pids, self.port2server, self.account_ring, self.container_ring,
         self.object_ring, self.policy, self.url, self.token,
         self.account, self.configs) = reset_environment()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)
Developer: 701, project: swift, lines of code: 23, source: test_object_expirer.py


Example 13: ObjectTransitor

class ObjectTransitor(Daemon):
    def __init__(self, conf):
        super(ObjectTransitor, self).__init__(conf)
        self.conf = conf
        self.logger = get_logger(conf, log_route='s3-object-transitor')
        self.logger.set_statsd_prefix('s3-object-transitor')
        self.interval = int(conf.get('interval') or 300)
        self.s3_tr_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            (conf.get('expiring_objects_account_name') or
             's3_transitioning_objects')
        conf_path = conf.get('__file__') or \
            '/etc/swift/s3-object-transitor.conf'
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path, 'Swift Object Transitor',
                                    request_tries)
        self.report_interval = int(conf.get('report_interval') or 300)
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = join(self.recon_cache_path, 'object.recon')
        self.concurrency = int(conf.get('concurrency', 1))
        if self.concurrency < 1:
            raise ValueError("concurrency must be set to at least 1")
        self.processes = int(self.conf.get('processes', 0))
        self.process = int(self.conf.get('process', 0))
        self.client = Client(self.conf.get('sentry_sdn', ''))

    def report(self, final=False):
        """
        Emits a log line report of the progress so far, or of the final
        progress if final=True.

        :param final: Set to True for the last report once the expiration pass
                      has completed.
        """
        if final:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass completed in %ds; %d objects '
                               'transitioned') %
                             (elapsed, self.report_objects))
            dump_recon_cache({'object_transition_pass': elapsed,
                              'transitioned_last_pass': self.report_objects},
                             self.rcache, self.logger)
        elif time() - self.report_last_time >= self.report_interval:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass so far %ds; %d objects transitioned') %
                             (elapsed, self.report_objects))
            self.report_last_time = time()

    def run_once(self, *args, **kwargs):
        """
        Executes a single pass, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon accepts processes and process keyword args.
                       These will override the values from the config file if
                       provided.
        """
        processes, process = self.get_process_values(kwargs)
        pool = GreenPool(self.concurrency)
        containers_to_delete = []
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        try:
            self.logger.debug(_('Run begin'))
            containers, objects = \
                self.swift.get_account_info(self.s3_tr_objects_account)
            self.logger.info(_('Pass beginning; %s possible containers; %s '
                               'possible objects') % (containers, objects))

            for c in self.swift.iter_containers(self.s3_tr_objects_account):
                container = c['name']
                timestamp = int(container)
                if timestamp > int(time()):
                    break
                containers_to_delete.append(container)
                for o in self.swift.iter_objects(self.s3_tr_objects_account,
                                                 container):
                    obj = o['name'].encode('utf8')
                    if processes > 0:
                        obj_process = int(
                            hashlib.md5('%s/%s' % (container, obj)).
                            hexdigest(), 16)
                        if obj_process % processes != process:
                            continue

                    pool.spawn_n(self.transition_object, container, obj)
            pool.waitall()
            for container in containers_to_delete:
                try:
                    self.swift.delete_container(self.s3_tr_objects_account,
                                                container, (2, 4))
                except (Exception, Timeout) as err:
                    report_exception(self.logger,
                                     _('Exception while deleting container %s %s') %
                                     (container, str(err)), self.client)
#......... (the rest of this code is omitted) .........
Developer: KoreaCloudObjectStorage, project: swift-lifecycle-management, lines of code: 101, source: transitor.py


Example 14: ContainerReconciler

class ContainerReconciler(Daemon):
    """
    Move objects that are in the wrong storage policy.
    """

    def __init__(self, conf):
        self.conf = conf
        self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
        self.interval = int(conf.get('interval', 30))
        conf_path = conf.get('__file__') or \
            '/etc/swift/container-reconciler.conf'
        self.logger = get_logger(conf, log_route='container-reconciler')
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path,
                                    'Swift Container Reconciler',
                                    request_tries)
        self.stats = defaultdict(int)
        self.last_stat_time = time.time()

    def stats_log(self, metric, msg, *args, **kwargs):
        """
        Update stats tracking for metric and emit log message.
        """
        level = kwargs.pop('level', logging.DEBUG)
        log_message = '%s: ' % metric + msg
        self.logger.log(level, log_message, *args, **kwargs)
        self.stats[metric] += 1

    def log_stats(self, force=False):
        """
        Dump stats to logger; noop when stats have already been logged
        in the last minute.
        """
        now = time.time()
        should_log = force or (now - self.last_stat_time > 60)
        if should_log:
            self.last_stat_time = now
            self.logger.info('Reconciler Stats: %r', dict(**self.stats))

    def pop_queue(self, container, obj, q_ts, q_record):
        """
        Issue a delete object request to the container for the misplaced
        object queue entry.

        :param container: the misplaced objects container
        :param obj: the misplaced object name
        :param q_ts: the timestamp of the misplaced object
        :param q_record: the timestamp of the queue entry

        N.B. q_ts will normally be the same time as q_record except when
        an object was manually re-enqueued.
        """
        q_path = '/%s/%s/%s' % (MISPLACED_OBJECTS_ACCOUNT, container, obj)
        x_timestamp = slightly_later_timestamp(max(q_record, q_ts))
        self.stats_log('pop_queue', 'remove %r (%f) from the queue (%s)',
                       q_path, q_ts, x_timestamp)
        headers = {'X-Timestamp': x_timestamp}
        direct_delete_container_entry(
            self.swift.container_ring, MISPLACED_OBJECTS_ACCOUNT,
            container, obj, headers=headers)

    def throw_tombstones(self, account, container, obj, timestamp,
                         policy_index, path):
        """
        Issue a delete object request to the given storage_policy.

        :param account: the account name
        :param container: the container name
        :param obj: the object name
        :param timestamp: the timestamp of the object to delete
        :param policy_index: the policy index to direct the request
        :param path: the path to be used for logging
        """
        x_timestamp = slightly_later_timestamp(timestamp)
        self.stats_log('cleanup_attempt', '%r (%f) from policy_index '
                       '%s (%s) will be deleted',
                       path, timestamp, policy_index, x_timestamp)
        headers = {
            'X-Timestamp': x_timestamp,
            'X-Backend-Storage-Policy-Index': policy_index,
        }
        success = False
        try:
            self.swift.delete_object(account, container, obj,
                                     acceptable_statuses=(2, 404),
                                     headers=headers)
        except UnexpectedResponse as err:
            self.stats_log('cleanup_failed', '%r (%f) was not cleaned up '
                           'in storage_policy %s (%s)', path, timestamp,
                           policy_index, err)
        else:
            success = True
            self.stats_log('cleanup_success', '%r (%f) was successfully '
                           'removed from policy_index %s', path, timestamp,
                           policy_index)
        return success

    def _reconcile_object(self, account, container, obj, q_policy_index, q_ts,
                          q_op, path, **kwargs):
        """
        Perform object reconciliation.
#......... (the rest of this code is omitted) .........
Developer: 701, project: swift, lines of code: 101, source: reconciler.py


Example 15: ObjectExpirer

class ObjectExpirer(Daemon):
    def __init__(self, conf):
        super(ObjectExpirer, self).__init__(conf)
        self.conf = conf
        self.logger = get_logger(conf, log_route='s3-object-expirer')
        self.logger.set_statsd_prefix('s3-object-expirer')
        self.interval = int(conf.get('interval') or 300)
        self.s3_expiring_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            (conf.get('expiring_objects_account_name') or
             's3_expiring_objects')
        conf_path = conf.get('__file__') or '/etc/swift/s3-object-expirer.conf'
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path,
                                    'Swift Object Expirer',
                                    request_tries)
        self.glacier = self._init_glacier()
        self.glacier_account_prefix = '.glacier_'
        self.report_interval = int(conf.get('report_interval') or 300)
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = join(self.recon_cache_path, 'object.recon')
        self.concurrency = int(conf.get('concurrency', 1))
        if self.concurrency < 1:
            raise ValueError("concurrency must be set to at least 1")
        self.processes = int(self.conf.get('processes', 0))
        self.process = int(self.conf.get('process', 0))
        self.client = Client(self.conf.get('sentry_sdn', ''))

    def _init_glacier(self):
        con = Layer2(region_name='ap-northeast-1')
        return con.get_vault('swift-s3-transition')

    def report(self, final=False):
        """
        Emits a log line report of the progress so far, or of the final
        progress if final=True.

        :param final: Set to True for the last report once the expiration pass
                      has completed.
        """
        if final:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass completed in %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            dump_recon_cache({'object_expiration_pass': elapsed,
                              'expired_last_pass': self.report_objects},
                             self.rcache, self.logger)
        elif time() - self.report_last_time >= self.report_interval:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass so far %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            self.report_last_time = time()

    def run_once(self, *args, **kwargs):
        """
        Executes a single pass, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon accepts processes and process keyword args.
                       These will override the values from the config file if
                       provided.
        """
        processes, process = self.get_process_values(kwargs)
        pool = GreenPool(self.concurrency)
        containers_to_delete = []
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        try:
            self.logger.debug(_('Run begin'))
            containers, objects = \
                self.swift.get_account_info(self.s3_expiring_objects_account)
            self.logger.info(_('Pass beginning; %s possible containers; %s '
                               'possible objects') % (containers, objects))

            for c in self.swift.iter_containers(self.
                                                s3_expiring_objects_account):
                container = c['name']
                timestamp = int(container)
                if timestamp > int(time()):
                    break
                containers_to_delete.append(container)
                for o in self.swift.iter_objects(self
                                                 .s3_expiring_objects_account,
                                                 container):
                    obj = o['name'].encode('utf8')
                    if processes > 0:
                        obj_process = int(
                            hashlib.md5('%s/%s' % (container, obj)).
                            hexdigest(), 16)
                        if obj_process % processes != process:
                            continue

                    pool.spawn_n(self.delete_object, container, obj)
            pool.waitall()
            for container in containers_to_delete:
#......... (the rest of this code is omitted) .........
Developer: KoreaCloudObjectStorage, project: swift-lifecycle-management, lines of code: 101, source: expirer.py


Example 16: TestObjectExpirer

class TestObjectExpirer(ReplProbeTest):

    def setUp(self):
        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise unittest.SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def _check_obj_in_container_listing(self):
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):

            if self.object_name == obj['name']:
                return True

        return False

    @unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy")
    def test_expirer_object_split_brain(self):
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice([p for p in ENABLED_POLICIES
                                      if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        self.get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assertIn('x-backend-timestamp', metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assertIn('x-backend-timestamp', metadata)
                self.assertGreater(Timestamp(metadata['x-backend-timestamp']),
                                   create_timestamp)

    def test_expirer_doesnt_make_async_pendings(self):
#......... (the rest of this code is omitted) .........
Developer: jgmerritt, project: swift, lines of code: 101, source: test_object_expirer.py


Example 17: ObjectExpirer

class ObjectExpirer(Daemon):
    """
    Daemon that queries the internal hidden expiring_objects_account to
    discover objects that need to be deleted.

    :param conf: The daemon configuration.
    """

    def __init__(self, conf):
        self.conf = conf
        self.logger = get_logger(conf, log_route='object-expirer')
        self.interval = int(conf.get('interval') or 300)
        self.expiring_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            'expiring_objects'
        conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path,
                                    'Swift Object Expirer',
                                    request_tries)
        self.report_interval = int(conf.get('report_interval') or 300)
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = join(self.recon_cache_path, 'object.recon')
        self.concurrency = int(conf.get('concurrency', 1))
        if self.concurrency < 1:
            raise ValueError("concurrency must be set to at least 1")
        self.processes = int(self.conf.get('processes', 0))
        self.process = int(self.conf.get('process', 0))

    def report(self, final=False):
        """
        Emits a log line report of the progress so far, or of the final
        progress if final=True.

        :param final: Set to True for the last report once the expiration pass
                      has completed.
        """
        if final:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass completed in %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            dump_recon_cache({'object_expiration_pass': elapsed,
                              'expired_last_pass': self.report_objects},
                             self.rcache, self.logger)
        elif time() - self.report_last_time >= self.report_interval:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass so far %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            self.report_last_time = time()

    def run_once(self, *args, **kwargs):
        """
        Executes a single pass, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon accepts processes and process keyword args.
                       These will override the values from the config file if
                       provided.
        """
        processes, process = self.get_process_values(kwargs)
        pool = GreenPool(self.concurrency)
        containers_to_delete = []
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        try:
            self.logger.debug(_('Run begin'))
            containers, objects = \
                self.swift.get_account_info(self.expiring_objects_account)
            self.logger.info(_('Pass beginning; %s possible containers; %s '
                               'possible objects') % (containers, objects))
            for c in self.swift.iter_containers(self.expiring_objects_account):
                container = c['name']
                timestamp = int(container)
                if timestamp > int(time()):
                    break
                containers_to_delete.append(container)
                for o in self.swift.iter_objects(self.expiring_objects_account,
                                                 container):
                    obj = o['name'].encode('utf8')
                    if processes > 0:
                        obj_process = int(
                            hashlib.md5('%s/%s' % (container, obj)).
                            hexdigest(), 16)
                        if obj_process % processes != process:
                            continue
                    timestamp, actual_obj = obj.split('-', 1)
                    timestamp = int(timestamp)
                    if timestamp > int(time()):
                        break
                    pool.spawn_n(
                        self.delete_object, actual_obj, timestamp,
                        container, obj)
            pool.waitall()
            for container in containers_to_delete:
                try:
#......... (the rest of this code is omitted) .........
Developer: Awingu, project: swift, lines of code: 101, source: expirer.py


Example 18: test_reconciler_move_object_twice

    def test_reconciler_move_object_twice(self):
        # select some policies
        old_policy = random.choice(ENABLED_POLICIES)
        new_policy = random.choice([p for p in ENABLED_POLICIES
                                    if p != old_policy])

        # setup a split brain
        self.brain.stop_handoff_half()
        # get old_policy on two primaries
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.start_handoff_half()
        self.brain.stop_primary_half()
        # force a recreate on handoffs
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.delete_container()
        self.brain.put_container(policy_index=int(new_policy))
        self.brain.put_object()  # populate memcache with new_policy
        self.brain.start_primary_half()

        # at this point two primaries have old policy
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        old_container_node_ids = [
            node['id'] for node, metadata in head_responses
            if int(old_policy) ==
            int(metadata['X-Backend-Storage-Policy-Index'])]
        self.assertEqual(2, len(old_container_node_ids))

        # hopefully memcache still has the new policy cached
        self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
                              contents='VERIFY')
        # double-check object correctly written to new policy
        conf_files = []
        for server in Manager(['container-reconciler']).servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        int_client = InternalClient(conf_file, 'probe-test', 3)
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # shutdown the containers that know about the new policy
        self.brain.stop_handoff_half()

        # and get rows enqueued from old nodes
        for server_type in ('container-replicator', 'container-updater'):
            server = Manager([server_type])
            tuple(server.once(number=n + 1) for n in old_container_node_ids)

        # verify entry in the queue for the "misplaced" new_pol 
