
Python utils.renamer Function Code Examples


This article collects typical usage examples of the Python function swift.common.utils.renamer. If you have been wondering what renamer does, how to call it, or what it looks like in real code, the hand-picked examples below should help.



Twenty code examples of the renamer function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code examples.
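Before diving into the examples, it helps to know what renamer itself does. The sketch below is a simplified stand-in for swift.common.utils.renamer, based on its generally documented behavior: create the destination's parent directory if needed, rename the temp file into place, and (in newer Swift releases, via the fsync keyword that Examples 5 and 10 pass explicitly) sync the parent directory so the rename survives a crash. This is an illustration under those assumptions, not the library source.

import errno
import os


def renamer(old, new, fsync=True):
    # Ensure the destination's parent directory exists.
    dirpath = os.path.dirname(new)
    try:
        os.makedirs(dirpath)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise
    # os.rename is atomic within a single filesystem, so readers see
    # either the old file or the new one, never a partial write.
    os.rename(old, new)
    if fsync:
        # Sync the parent directory so the new directory entry is durable.
        fd = os.open(dirpath, os.O_RDONLY)
        try:
            os.fsync(fd)
        finally:
            os.close(fd)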

Example 1: copy_put

    def copy_put(self, fd, tmppath):
        # Flush the temp file to disk before it is renamed into place.
        tpool.execute(os.fsync, fd)
        # Create any missing directory objects along the object's path.
        if self.obj_path:
            dir_objs = self.obj_path.split('/')
            tmp_path = ''
            if len(dir_objs):
                for dir_name in dir_objs:
                    if tmp_path:
                        tmp_path = tmp_path + '/' + dir_name
                    else:
                        tmp_path = dir_name
                    if not self.create_dir_object(os.path.join(self.container_path,
                            tmp_path)):
                        self.logger.error("Failed in subdir %s",
                                          os.path.join(self.container_path, tmp_path))
                        return False
        # Atomically move the temp file to its final location and fix ownership.
        renamer(tmppath, os.path.join(self.datadir, self.obj))
        do_chown(os.path.join(self.datadir, self.obj), self.uid, self.gid)
        return True
Developer: sun3shines, Project: ufo, Lines: 25, Source: DiskMeta.py


Example 2: process_object_update

    def process_object_update(self, update_path, device):
        """
        Process the object information to be updated and update.

        :param update_path: path to pickled object update file
        :param device: path to device
        """
        try:
            update = pickle.load(open(update_path, "rb"))
        except Exception:
            self.logger.exception(_("ERROR Pickle problem, quarantining %s"), update_path)
            renamer(update_path, os.path.join(device, "quarantined", "objects", os.path.basename(update_path)))
            return
        successes = update.get("successes", [])
        part, nodes = self.get_container_ring().get_nodes(update["account"], update["container"])
        obj = "/%s/%s/%s" % (update["account"], update["container"], update["obj"])
        success = True
        for node in nodes:
            if node["id"] not in successes:
                status = self.object_update(node, part, update["op"], obj, update["headers"])
                if not (200 <= status < 300) and status != 404:
                    success = False
                else:
                    successes.append(node["id"])
        if success:
            self.successes += 1
            self.logger.debug(_("Update sent for %(obj)s %(path)s"), {"obj": obj, "path": update_path})
            os.unlink(update_path)
        else:
            self.failures += 1
            self.logger.debug(_("Update failed for %(obj)s %(path)s"), {"obj": obj, "path": update_path})
            update["successes"] = successes
            write_pickle(update, update_path, os.path.join(device, "tmp"))
Developer: houseurmusic, Project: swift, Lines: 33, Source: updater.py
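A detail worth noting in this example: on partial failure, the accumulated successes list is written back into the update pickle with write_pickle, so the next updater pass retries only the nodes that failed. Swift's write_pickle is itself built on the temp-file-plus-rename idiom; here is a minimal, hedged sketch of such a helper (names follow the example, but this is an illustration, not Swift's exact source):

import os
import pickle
from tempfile import mkstemp


def write_pickle(obj, dest, tmp=None, pickle_protocol=pickle.HIGHEST_PROTOCOL):
    # Dump to a temp file in `tmp` (or next to `dest`), fsync it, then
    # rename it over `dest` so readers never see a half-written update.
    if tmp is None:
        tmp = os.path.dirname(dest)
    fd, tmppath = mkstemp(dir=tmp, suffix='.tmp')
    try:
        with os.fdopen(fd, 'wb') as fo:
            pickle.dump(obj, fo, pickle_protocol)
            fo.flush()
            os.fsync(fo.fileno())
        os.rename(tmppath, dest)  # the role renamer() plays in Swift
    except Exception:
        try:
            os.unlink(tmppath)
        except OSError:
            pass
        raise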


Example 3: put

    def put(self, fd, metadata):
        """
        Finalize writing the file on disk, and renames it from the temp file to
        the real location.  This should be called after the data has been
        written to the temp file.

        :param fd: file descriptor of the temp file
        :param metadata: dictionary of metadata to be written
        """
        assert self.tmppath is not None
        assert self._type == 0
        # wait, what?
        #metadata['name'] = self.name
        timestamp = normalize_timestamp(metadata['X-Timestamp'])
        base_path = os.path.join(self.datadir, timestamp)
        # P3
        fp = open("/tmp/dump","a")
        print >>fp, "posix put old", self.tmppath, "new", base_path
        fp.close()
        write_meta_file(base_path + '.meta', metadata)
        #if 'Content-Length' in metadata:
        #    self.drop_cache(fd, 0, int(metadata['Content-Length']))
        # XXX os.fsync maybe?
        #tpool.execute(fsync, fd)
        renamer(self.tmppath, base_path + ".data")
        # but not setting self.data_file here, is this right?
        self.metadata = metadata
Developer: kururu-lu, Project: swift-lfs, Lines: 27, Source: lfs_posix.py


Example 4: recalculate_hashes

def recalculate_hashes(partition_dir, suffixes, reclaim_age=ONE_WEEK):
    """
    Recalculates hashes for the given suffixes in the partition and updates
    them in the partition's hashes file.

    :param partition_dir: directory of the partition in which to recalculate
    :param suffixes: list of suffixes to recalculate
    :param reclaim_age: age in seconds at which tombstones should be removed
    """

    def tpool_listdir(partition_dir):
        return dict(((suff, None) for suff in os.listdir(partition_dir)
                     if len(suff) == 3 and isdir(join(partition_dir, suff))))
    hashes_file = join(partition_dir, HASH_FILE)
    with lock_path(partition_dir):
        try:
            with open(hashes_file, 'rb') as fp:
                hashes = pickle.load(fp)
        except Exception:
            hashes = tpool.execute(tpool_listdir, partition_dir)
        for suffix in suffixes:
            suffix_dir = join(partition_dir, suffix)
            if os.path.exists(suffix_dir):
                hashes[suffix] = hash_suffix(suffix_dir, reclaim_age)
            elif suffix in hashes:
                del hashes[suffix]
        with open(hashes_file + '.tmp', 'wb') as fp:
            pickle.dump(hashes, fp, PICKLE_PROTOCOL)
        renamer(hashes_file + '.tmp', hashes_file)
Developer: edwardt, Project: swift, Lines: 29, Source: replicator.py
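Two things make this rewrite safe under concurrency: the whole read-modify-write runs under lock_path(partition_dir), and the new pickle is written to hashes_file + '.tmp' and then renamer()'d over the original, so readers always see a complete file. Below is a rough, hedged sketch of a lock_path-style directory lock built on flock; the real Swift helper differs in details (it supports timeouts, among other things), so treat this purely as an illustration of the locking idea.

import fcntl
import os
from contextlib import contextmanager


@contextmanager
def lock_path(directory):
    # Take an exclusive lock on a '.lock' file inside `directory` so
    # only one process rewrites the hashes file at a time.
    fd = os.open(os.path.join(directory, '.lock'),
                 os.O_WRONLY | os.O_CREAT)
    try:
        fcntl.flock(fd, fcntl.LOCK_EX)
        yield fd
    finally:
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)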


Example 5: quarantine

    def quarantine(self, reason):
        """
        The database will be quarantined and a
        sqlite3.DatabaseError will be raised indicating the action taken.
        """
        prefix_path = os.path.dirname(self.db_dir)
        partition_path = os.path.dirname(prefix_path)
        dbs_path = os.path.dirname(partition_path)
        device_path = os.path.dirname(dbs_path)
        quar_path = os.path.join(device_path, 'quarantined',
                                 self.db_type + 's',
                                 os.path.basename(self.db_dir))
        try:
            renamer(self.db_dir, quar_path, fsync=False)
        except OSError as e:
            if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
                raise
            quar_path = "%s-%s" % (quar_path, uuid4().hex)
            renamer(self.db_dir, quar_path, fsync=False)
        detail = _('Quarantined %(db_dir)s to %(quar_path)s due to '
                   '%(reason)s') % {'db_dir': self.db_dir,
                                    'quar_path': quar_path,
                                    'reason': reason}
        self.logger.error(detail)
        raise sqlite3.DatabaseError(detail)
Developer: chenzhongtao, Project: swift, Lines: 25, Source: db.py
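Note the collision handling around renamer here: moving a directory onto an existing, non-empty destination fails with EEXIST or ENOTEMPTY, so the code appends a random uuid4 suffix and retries once. Extracted into a standalone, hedged sketch (plain os.rename stands in for renamer):

import errno
import os
from uuid import uuid4


def quarantine_rename(src, quar_path):
    # Move `src` into quarantine; if an earlier quarantine already
    # occupies the destination, retry with a unique uuid suffix.
    try:
        os.rename(src, quar_path)
    except OSError as e:
        if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
            raise
        quar_path = "%s-%s" % (quar_path, uuid4().hex)
        os.rename(src, quar_path)
    return quar_path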


Example 6: find_and_process

    def find_and_process(self):
        src_filename = time.strftime(self.filename_format)
        working_dir = os.path.join(self.target_dir, ".%s-stats_tmp" % self.stats_type)
        shutil.rmtree(working_dir, ignore_errors=True)
        mkdirs(working_dir)
        tmp_filename = os.path.join(working_dir, src_filename)
        hasher = hashlib.md5()
        try:
            with open(tmp_filename, "wb") as statfile:
                statfile.write(self.get_header())
                for device in os.listdir(self.devices):
                    if self.mount_check and not check_mount(self.devices, device):
                        self.logger.error(_("Device %s is not mounted, skipping.") % device)
                        continue
                    db_dir = os.path.join(self.devices, device, self.data_dir)
                    if not os.path.exists(db_dir):
                        self.logger.debug(_("Path %s does not exist, skipping.") % db_dir)
                        continue
                    for root, dirs, files in os.walk(db_dir, topdown=False):
                        for filename in files:
                            if filename.endswith(".db"):
                                db_path = os.path.join(root, filename)
                                try:
                                    line_data = self.get_data(db_path)
                                except sqlite3.Error, err:
                                    self.logger.info(_("Error accessing db %s: %s") % (db_path, err))
                                    continue
                                if line_data:
                                    statfile.write(line_data)
                                    hasher.update(line_data)

            src_filename += hasher.hexdigest()
            renamer(tmp_filename, os.path.join(self.target_dir, src_filename))
        finally:
            shutil.rmtree(working_dir, ignore_errors=True)
Developer: VenkataSeshadri, Project: slogging, Lines: 33, Source: db_stats_collector.py


Example 7: put

    def put(self, metadata, extension='.data'):
        """
        Finalize writing the file on disk, and renames it from the temp file
        to the real location.  This should be called after the data has been
        written to the temp file.

        :param metadata: dictionary of metadata to be written
        :param extension: extension to be used when making the file
        """
        assert self.tmppath is not None
        timestamp = normalize_timestamp(metadata['X-Timestamp'])
        metadata['name'] = self.disk_file.name
        # Write the metadata before calling fsync() so that both data and
        # metadata are flushed to disk.
        write_metadata(self.fd, metadata)
        # We call fsync() before calling drop_cache() to lower the amount of
        # redundant work the drop cache code will perform on the pages (now
        # that after fsync the pages will be all clean).
        tpool.execute(fsync, self.fd)
        # From the Department of the Redundancy Department, make sure we
        # call drop_cache() after fsync() to avoid redundant work (pages
        # all clean).
        drop_buffer_cache(self.fd, 0, self.upload_size)
        invalidate_hash(os.path.dirname(self.disk_file.datadir))
        # After the rename completes, this object will be available for other
        # requests to reference.
        renamer(self.tmppath,
                os.path.join(self.disk_file.datadir, timestamp + extension))
        self.disk_file.metadata = metadata
Developer: aswadrangnekar, Project: swift, Lines: 29, Source: server.py
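The comments in this example spell out a deliberate ordering: write the metadata, fsync so data and metadata are durable, drop the now-clean pages from the page cache, and only then renamer() the temp file into the data directory, which publishes the object. A condensed, hedged sketch of that sequence using only stdlib calls (Swift's drop_buffer_cache is implemented with posix_fadvise; names here are illustrative):

import os


def finalize_object(fd, tmppath, final_path, upload_size):
    os.fsync(fd)  # 1. make the data (and already-written metadata) durable
    try:
        # 2. hint the kernel to evict the now-clean pages from the cache
        os.posix_fadvise(fd, 0, upload_size, os.POSIX_FADV_DONTNEED)
    except (AttributeError, OSError):
        pass  # posix_fadvise is POSIX-only and needs Python 3.3+
    os.rename(tmppath, final_path)  # 3. atomically publish the object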


Example 8: possibly_quarantine

    def possibly_quarantine(self, exc_type, exc_value, exc_traceback):
        """
        Checks the exception info to see if it indicates a quarantine situation
        (malformed or corrupted database). If not, the original exception will
        be reraised. If so, the database will be quarantined and a new
        sqlite3.DatabaseError will be raised indicating the action taken.
        """
        if 'database disk image is malformed' in str(exc_value):
            exc_hint = 'malformed'
        elif 'file is encrypted or is not a database' in str(exc_value):
            exc_hint = 'corrupted'
        else:
            raise exc_type, exc_value, exc_traceback
        prefix_path = os.path.dirname(self.db_dir)
        partition_path = os.path.dirname(prefix_path)
        dbs_path = os.path.dirname(partition_path)
        device_path = os.path.dirname(dbs_path)
        quar_path = os.path.join(device_path, 'quarantined',
                                 self.db_type + 's',
                                 os.path.basename(self.db_dir))
        try:
            renamer(self.db_dir, quar_path)
        except OSError as e:
            if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
                raise
            quar_path = "%s-%s" % (quar_path, uuid4().hex)
            renamer(self.db_dir, quar_path)
        detail = _('Quarantined %s to %s due to %s database') % \
                 (self.db_dir, quar_path, exc_hint)
        self.logger.error(detail)
        raise sqlite3.DatabaseError(detail)
Developer: HoO-Group, Project: swift, Lines: 31, Source: db.py


Example 9: put

    def put(self, fd, tmppath, metadata, extension=''):
        """
        Finalize writing the file on disk, and renames it from the temp file to
        the real location.  This should be called after the data has been
        written to the temp file.

        :param fd: file descriptor of the temp file
        :param tmppath: path to the temporary file being used
        :param metadata: dictionary of metadata to be written
        :param extension: extension to be used when making the file
        """
        #Marker dir.
        if extension == '.ts':
            return True
        if extension == '.meta':
            self.put_metadata(metadata)
            return True
        else:
            extension = ''
        if metadata[X_OBJECT_TYPE] == MARKER_DIR:
            self.create_dir_object(os.path.join(self.datadir, self.obj))
            self.put_metadata(metadata)
            self.data_file = self.datadir + '/' + self.obj
            return True
        #Check if directory already exists.
        if self.is_dir:
            self.logger.error('Directory already exists %s/%s' % \
                          (self.datadir , self.obj))
            return False
        #metadata['name'] = self.name
        timestamp = normalize_timestamp(metadata[X_TIMESTAMP])
        write_metadata(tmppath, metadata)
        if X_CONTENT_LENGTH in metadata:
            self.drop_cache(fd, 0, int(metadata[X_CONTENT_LENGTH]))
        tpool.execute(os.fsync, fd)
        if self.obj_path:
            dir_objs = self.obj_path.split('/')
            tmp_path = ''
            if len(dir_objs):
                for dir_name in dir_objs:
                    if tmp_path:
                        tmp_path = tmp_path + '/' + dir_name
                    else:
                        tmp_path = dir_name
                    if not self.create_dir_object(os.path.join(self.container_path,
                            tmp_path)):
                        self.logger.error("Failed in subdir %s",\
                                        os.path.join(self.container_path,tmp_path))
                        return False

        renamer(tmppath, os.path.join(self.datadir,
                                      self.obj + extension))
        do_chown(os.path.join(self.datadir, self.obj + extension), \
              self.uid, self.gid)
        self.metadata = metadata
        #self.logger.error("Meta %s", self.metadata)
        self.data_file = self.datadir + '/' + self.obj + extension
        return True
Developer: Xarthisius, Project: glusterfs, Lines: 58, Source: DiskFile.py


Example 10: process_object_update

    def process_object_update(self, update_path, device, policy):
        """
        Process the object information to be updated and update.

        :param update_path: path to pickled object update file
        :param device: path to device
        :param policy: storage policy of object update
        """
        try:
            update = pickle.load(open(update_path, 'rb'))
        except Exception:
            self.logger.exception(
                _('ERROR Pickle problem, quarantining %s'), update_path)
            self.stats.quarantines += 1
            self.logger.increment('quarantines')
            target_path = os.path.join(device, 'quarantined', 'objects',
                                       os.path.basename(update_path))
            renamer(update_path, target_path, fsync=False)
            return
        successes = update.get('successes', [])
        part, nodes = self.get_container_ring().get_nodes(
            update['account'], update['container'])
        obj = '/%s/%s/%s' % \
              (update['account'], update['container'], update['obj'])
        headers_out = HeaderKeyDict(update['headers'])
        headers_out['user-agent'] = 'object-updater %s' % os.getpid()
        headers_out.setdefault('X-Backend-Storage-Policy-Index',
                               str(int(policy)))
        events = [spawn(self.object_update,
                        node, part, update['op'], obj, headers_out)
                  for node in nodes if node['id'] not in successes]
        success = True
        new_successes = False
        for event in events:
            event_success, node_id = event.wait()
            if event_success is True:
                successes.append(node_id)
                new_successes = True
            else:
                success = False
        if success:
            self.stats.successes += 1
            self.logger.increment('successes')
            self.logger.debug('Update sent for %(obj)s %(path)s',
                              {'obj': obj, 'path': update_path})
            self.stats.unlinks += 1
            self.logger.increment('unlinks')
            os.unlink(update_path)
        else:
            self.stats.failures += 1
            self.logger.increment('failures')
            self.logger.debug('Update failed for %(obj)s %(path)s',
                              {'obj': obj, 'path': update_path})
            if new_successes:
                update['successes'] = successes
                write_pickle(update, update_path, os.path.join(
                    device, get_tmp_dir(policy)))
Developer: chenzhongtao, Project: swift, Lines: 57, Source: updater.py


Example 11: complete_rsync

    def complete_rsync(self, drive, db_file, args):
        old_filename = os.path.join(self.root, drive, 'tmp', args[0])
        if os.path.exists(db_file):
            return HTTPNotFound()
        if not os.path.exists(old_filename):
            return HTTPNotFound()
        broker = self.broker_class(old_filename)
        broker.newid(args[0])
        renamer(old_filename, db_file)
        return HTTPNoContent()
Developer: VictorLowther, Project: swift, Lines: 10, Source: db_replicator.py


Example 12: put

    def put(self, fd, tmppath, metadata, extension=''):
        """
        Finalize writing the file on disk, and renames it from the temp file to
        the real location.  This should be called after the data has been
        written to the temp file.

        :param fd: file descriptor of the temp file
        :param tmppath: path to the temporary file being used
        :param metadata: dictionary of metadata to be written
        :param extension: extension to be used when making the file
        """
        #Marker dir.
        if metadata[X_OBJECT_TYPE] == MARKER_DIR:
            if os.path.exists(os.path.join(self.datadir, self.obj)) and \
               not os.path.isdir(os.path.join(self.datadir, self.obj)):
                os.unlink(os.path.join(self.datadir, self.obj))
            mkdirs(os.path.join(self.datadir, self.obj))
            os.chown(os.path.join(self.datadir, self.obj), self.uid, self.gid)
            self.put_metadata(metadata)
            self.data_file = self.datadir + '/' + self.obj
            return True
        #Check if directory already exists.
        if self.is_dir:
            logging.error('Directory already exists %s/%s' % \
                          (self.datadir , self.obj))
            return False
        #metadata['name'] = self.name
        timestamp = normalize_timestamp(metadata[X_TIMESTAMP])
        write_metadata(fd, metadata)
        if X_CONTENT_LENGTH in metadata:
            self.drop_cache(fd, 0, int(metadata[X_CONTENT_LENGTH]))
        tpool.execute(os.fsync, fd)
        if self.obj_path:
            dir_objs = self.obj_path.split('/')
            tmp_path = ''
            if len(dir_objs):
                for dir_name in dir_objs:
                    if tmp_path:
                        tmp_path = tmp_path + '/' + dir_name
                    else:
                        tmp_path = dir_name
                    if not self.create_dir_object(tmp_path, metadata[X_TIMESTAMP]):
                        return False
                                       
        #print 'Gaurav put tmppath', tmppath, os.path.join(self.datadir,
                                                          #self.obj+extension)
        #invalidate_hash(os.path.dirname(self.datadir))
        renamer(tmppath, os.path.join(self.datadir,
                                      self.obj + extension))
        os.chown(os.path.join(self.datadir, self.obj + extension), \
              self.uid, self.gid)
        self.metadata = metadata
        self.data_file = self.datadir + '/' + self.obj + extension
        return True
Developer: vbellur, Project: UFO, Lines: 54, Source: server.py


Example 13: process_object_update

    def process_object_update(self, update_path, device, policy_idx):
        """
        Process the object information to be updated and update.

        :param update_path: path to pickled object update file
        :param device: path to device
        :param policy_idx: storage policy index of object update
        """
        try:
            update = pickle.load(open(update_path, 'rb'))
        except Exception:
            self.logger.exception(
                _('ERROR Pickle problem, quarantining %s'), update_path)
            self.logger.increment('quarantines')
            renamer(update_path, os.path.join(
                    device, 'quarantined', 'objects',
                    os.path.basename(update_path)))
            return
        successes = update.get('successes', [])
        part, nodes = self.get_container_ring().get_nodes(
            update['account'], update['container'])
        obj = '/%s/%s/%s' % \
              (update['account'], update['container'], update['obj'])
        success = True
        new_successes = False
        for node in nodes:
            if node['id'] not in successes:
                headers = update['headers'].copy()
                headers.setdefault('X-Backend-Storage-Policy-Index',
                                   str(policy_idx))
                status = self.object_update(node, part, update['op'], obj,
                                            headers)
                if not is_success(status) and status != HTTP_NOT_FOUND:
                    success = False
                else:
                    successes.append(node['id'])
                    new_successes = True
        if success:
            self.successes += 1
            self.logger.increment('successes')
            self.logger.debug('Update sent for %(obj)s %(path)s',
                              {'obj': obj, 'path': update_path})
            self.logger.increment("unlinks")
            os.unlink(update_path)
        else:
            self.failures += 1
            self.logger.increment('failures')
            self.logger.debug('Update failed for %(obj)s %(path)s',
                              {'obj': obj, 'path': update_path})
            if new_successes:
                update['successes'] = successes
                write_pickle(update, update_path, os.path.join(
                    device, get_tmp_dir(policy_idx)))
Developer: 7yue, Project: swift, Lines: 53, Source: updater.py


Example 14: find_and_process

    def find_and_process(self):
        src_filename = time.strftime(self.filename_format)
        working_dir = os.path.join(self.target_dir, '.stats_tmp')
        shutil.rmtree(working_dir, ignore_errors=True)
        mkdirs(working_dir)
        tmp_filename = os.path.join(working_dir, src_filename)
        hasher = hashlib.md5()
        with open(tmp_filename, 'wb') as statfile:
            # csv has the following columns:
            # Account Name, Container Count, Object Count, Bytes Used
            for device in os.listdir(self.devices):
                if self.mount_check and not check_mount(self.devices, device):
                    self.logger.error(
                        _("Device %s is not mounted, skipping.") % device)
                    continue
                accounts = os.path.join(self.devices,
                                        device,
                                        account_server_data_dir)
                if not os.path.exists(accounts):
                    self.logger.debug(_("Path %s does not exist, skipping.") %
                                      accounts)
                    continue
                for root, dirs, files in os.walk(accounts, topdown=False):
                    for filename in files:
                        if filename.endswith('.db'):
                            db_path = os.path.join(root, filename)
                            broker = AccountBroker(db_path)
                            if not broker.is_deleted():
                                (account_name,
                                 _junk, _junk, _junk,
                                 container_count,
                                 object_count,
                                 bytes_used,
                                 _junk, _junk) = broker.get_info()
                                line_data = '"%s",%d,%d,%d\n' % (
                                    account_name, container_count,
                                    object_count, bytes_used)
                                statfile.write(line_data)
                                hasher.update(line_data)
        file_hash = hasher.hexdigest()
        hash_index = src_filename.find('*')
        if hash_index < 0:
            # if there is no * in the target filename, the uploader probably
            # won't work because we are crafting a filename that doesn't
            # fit the pattern
            src_filename = '_'.join([src_filename, file_hash])
        else:
            parts = src_filename[:hash_index], src_filename[hash_index + 1:]
            src_filename = ''.join([parts[0], file_hash, parts[1]])
        renamer(tmp_filename, os.path.join(self.target_dir, src_filename))
        shutil.rmtree(working_dir, ignore_errors=True)
Developer: edwardt, Project: swift, Lines: 51, Source: account_stats.py
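The tail of this example splices the MD5 of the file contents into the upload name at the '*' placeholder, or appends it when the format has no placeholder. A short worked illustration of just that splice (the filename and contents are hypothetical):

import hashlib

src_filename = 'stats-*.csv'  # hypothetical output of filename_format
file_hash = hashlib.md5(b'example file contents').hexdigest()

hash_index = src_filename.find('*')
if hash_index < 0:
    src_filename = '_'.join([src_filename, file_hash])
else:
    parts = src_filename[:hash_index], src_filename[hash_index + 1:]
    src_filename = ''.join([parts[0], file_hash, parts[1]])

print(src_filename)  # stats-<md5 hex digest>.csv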


Example 15: quarantine_db

def quarantine_db(object_file, server_type):
    """
    In the case that a corrupt file is found, move it to a quarantined area to
    allow replication to fix it.

    :param object_file: path to corrupt file
    :param server_type: type of file that is corrupt
                        ('container' or 'account')
    """
    object_dir = os.path.dirname(object_file)
    quarantine_dir = os.path.abspath(os.path.join(object_dir, '..',
        '..', '..', '..', 'quarantined', server_type + 's',
        os.path.basename(object_dir)))
    renamer(object_dir, quarantine_dir)
Developer: edwardt, Project: swift, Lines: 14, Source: db_replicator.py
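quarantine_db locates the device root by stepping four directory levels up from the database's directory, then renames the whole directory under <device>/quarantined/<server_type>s/. A small worked example of the path arithmetic, using a hypothetical on-disk layout:

import os

# Hypothetical layout: <device>/<dbs>/<partition>/<suffix>/<hash>/<hash>.db
object_file = '/srv/node/sdb1/containers/1234/abc/deadbeef/deadbeef.db'
server_type = 'container'

object_dir = os.path.dirname(object_file)
quarantine_dir = os.path.abspath(os.path.join(
    object_dir, '..', '..', '..', '..',
    'quarantined', server_type + 's', os.path.basename(object_dir)))

print(quarantine_dir)
# /srv/node/sdb1/quarantined/containers/deadbeef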


Example 16: put

    def put(self, fd, metadata, extension='.data'):
        """
        Finalize writing the file on disk, and renames it from the temp file to
        the real location.  This should be called after the data has been
        written to the temp file.

        :param fd: file descriptor of the temp file
        :param metadata: dictionary of metadata to be written
        :param extension: extension to be used when making the file
        """
        # Our caller will use '.data' here; we just ignore it since we map the
        # URL directly to the file system.
        extension = ''

        metadata = _adjust_metadata(metadata)

        if metadata[X_OBJECT_TYPE] == MARKER_DIR:
            if not self.data_file:
                self.data_file = os.path.join(self.datadir, self._obj)
                self._create_dir_object(self.data_file)
            self.put_metadata(metadata)
            return

        # Check if directory already exists.
        if self._is_dir:
            # FIXME: How can we have a directory and it not be marked as a
            # MARKER_DIR (see above)?
            msg = 'File object exists as a directory: %s' % self.data_file
            raise AlreadyExistsAsDir(msg)

        timestamp = normalize_timestamp(metadata[X_TIMESTAMP])
        write_metadata(self.tmppath, metadata)
        if X_CONTENT_LENGTH in metadata:
            self.drop_cache(fd, 0, int(metadata[X_CONTENT_LENGTH]))
        tpool.execute(os.fsync, fd)
        if self._obj_path:
            dir_objs = self._obj_path.split('/')
            assert len(dir_objs) >= 1
            tmp_path = self._container_path
            for dir_name in dir_objs:
                tmp_path = os.path.join(tmp_path, dir_name)
                self._create_dir_object(tmp_path)

        newpath = os.path.join(self.datadir, self._obj)
        renamer(self.tmppath, newpath)
        do_chown(newpath, self.uid, self.gid)
        self.metadata = metadata
        self.data_file = newpath
        self.filter_metadata()
        return
Developer: mattf, Project: glusterfs, Lines: 50, Source: DiskFile.py


Example 17: get_hashes

def get_hashes(partition_dir, do_listdir=True, reclaim_age=ONE_WEEK):
    """
    Get a list of hashes for the suffix dir.  do_listdir causes it to mistrust
    the hash cache for suffix existence at the (unexpectedly high) cost of a
    listdir.  reclaim_age is just passed on to hash_suffix.

    :param partition_dir: absolute path of partition to get hashes for
    :param do_listdir: force existence check for all hashes in the partition
    :param reclaim_age: age at which to remove tombstones

    :returns: tuple of (number of suffix dirs hashed, dictionary of hashes)
    """

    def tpool_listdir(hashes, partition_dir):
        return dict(((suff, hashes.get(suff, None))
                     for suff in os.listdir(partition_dir)
                     if len(suff) == 3 and isdir(join(partition_dir, suff))))
    hashed = 0
    hashes_file = join(partition_dir, HASH_FILE)
    with lock_path(partition_dir):
        modified = False
        hashes = {}
        try:
            with open(hashes_file, 'rb') as fp:
                hashes = pickle.load(fp)
        except Exception:
            do_listdir = True
        if do_listdir:
            hashes = tpool.execute(tpool_listdir, hashes, partition_dir)
            modified = True
        for suffix, hash_ in hashes.items():
            if not hash_:
                suffix_dir = join(partition_dir, suffix)
                if os.path.exists(suffix_dir):
                    try:
                        hashes[suffix] = hash_suffix(suffix_dir, reclaim_age)
                        hashed += 1
                    except OSError:
                        logging.exception(_('Error hashing suffix'))
                        hashes[suffix] = None
                else:
                    del hashes[suffix]
                modified = True
                sleep()
        if modified:
            with open(hashes_file + '.tmp', 'wb') as fp:
                pickle.dump(hashes, fp, PICKLE_PROTOCOL)
            renamer(hashes_file + '.tmp', hashes_file)
        return hashed, hashes
Developer: edwardt, Project: swift, Lines: 49, Source: replicator.py


Example 18: rsync_then_merge

    def rsync_then_merge(self, drive, db_file, args):
        old_filename = os.path.join(self.root, drive, 'tmp', args[0])
        if not os.path.exists(db_file) or not os.path.exists(old_filename):
            return HTTPNotFound()
        new_broker = self.broker_class(old_filename)
        existing_broker = self.broker_class(db_file)
        point = -1
        objects = existing_broker.get_items_since(point, 1000)
        while len(objects):
            new_broker.merge_items(objects)
            point = objects[-1]['ROWID']
            objects = existing_broker.get_items_since(point, 1000)
            sleep()
        new_broker.newid(args[0])
        renamer(old_filename, db_file)
        return HTTPNoContent()
Developer: VictorLowther, Project: swift, Lines: 16, Source: db_replicator.py


Example 19: finalize_put

    def finalize_put():
        # Write the metadata before calling fsync() so that both data and
        # metadata are flushed to disk.
        write_metadata(self.fd, metadata)
        # We call fsync() before calling drop_cache() to lower the amount
        # of redundant work the drop cache code will perform on the pages
        # (now that after fsync the pages will be all clean).
        fsync(self.fd)
        # From the Department of the Redundancy Department, make sure
        # we call drop_cache() after fsync() to avoid redundant work
        # (pages all clean).
        drop_buffer_cache(self.fd, 0, self.upload_size)
        invalidate_hash(os.path.dirname(self.disk_file.datadir))
        # After the rename completes, this object will be available for
        # other requests to reference.
        renamer(self.tmppath,
                os.path.join(self.disk_file.datadir, timestamp + extension))
Developer: niwa-kj, Project: swift, Lines: 16, Source: diskfile.py


Example 20: put

    def put(self, fd, tmppath, metadata, extension=''):
        
        if extension == '.ts':
            # TombStone marker (deleted)
            return True
        
        metadata[X_TYPE] = OBJECT
        
        if extension == '.meta':
            # Metadata recorded separately from the file
            self.meta_put_metadata(metadata)
            return True

        # Check if directory already exists.
        if self.is_dir:
            self.logger.error('Directory already exists %s/%s' % \
                          (self.datadir , self.obj))
            return False

        meta_write_metadata(self.metafile, metadata)
        
        if X_CONTENT_LENGTH in metadata:
            self.drop_cache(fd, 0, int(metadata[X_CONTENT_LENGTH]))
        tpool.execute(os.fsync, fd)
        
        if self.obj_path:
            dir_objs = self.obj_path.split('/')
            tmp_path = ''
            if len(dir_objs):
                for dir_name in dir_objs:
                    if tmp_path:
                        tmp_path = tmp_path + '/' + dir_name
                    else:
                        tmp_path = dir_name
                    if not self.create_dir_object(os.path.join(self.container_path,
                            tmp_path)):
                        self.logger.error("Failed in subdir %s",\
                                        os.path.join(self.container_path,tmp_path))
                        return False

        renamer(tmppath, os.path.join(self.datadir,
                                      self.obj))
        
        do_chown(os.path.join(self.datadir, self.obj), self.uid, self.gid)
        self.metadata = metadata
        
        return True
Developer: sun3shines, Project: ufo, Lines: 47, Source: DiskMeta.py



Note: The swift.common.utils.renamer examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's License before distributing or reusing the code, and do not republish without permission.

