
Python database.Connection Class Code Examples


This article collects typical usage examples of Python's s3ql.database.Connection class. If you have been wondering what exactly the Connection class does, how to use it, or what its usage looks like in practice, the curated class code examples below may help.



Twenty code examples of the Connection class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Python code examples.
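
Before diving in, here is a minimal sketch of the Connection lifecycle, distilled from the usage patterns in the examples below: execute(), rowid(), query(), get_val(), and has_val() all appear there. Connection is part of s3ql's internal database layer, so exact signatures may vary between s3ql versions, and the example snippets are excerpts that omit their imports (tempfile, os, stat, the s3ql modules, and so on). The demo table here is purely illustrative.

import tempfile

from s3ql.database import Connection

# Connection wraps an SQLite database accessed through apsw, which refers
# to the database by file name, hence the named temporary file.
dbfile = tempfile.NamedTemporaryFile()
db = Connection(dbfile.name)
try:
    # execute() runs a statement without returning rows;
    # rowid() runs an INSERT and returns the new row's id.
    db.execute('CREATE TABLE demo (id INTEGER PRIMARY KEY, name TEXT)')
    id_ = db.rowid('INSERT INTO demo (name) VALUES (?)', ('first',))

    # query() iterates over result rows; get_val() fetches a single value,
    # has_val() tests whether a matching row exists.
    for (row_id, name) in db.query('SELECT id, name FROM demo'):
        print(row_id, name)
    assert db.has_val('SELECT 1 FROM demo WHERE id=?', (id_,))
    print(db.get_val('SELECT name FROM demo WHERE id=?', (id_,)))
finally:
    db.close()
    dbfile.close()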

Example 1: db

def db():
    dbfile = tempfile.NamedTemporaryFile()
    db = Connection(dbfile.name)
    create_tables(db)
    init_tables(db)
    try:
        yield db
    finally:
        db.close()
        dbfile.close()
Developer: NickChen0113, Project: s3ql, Lines: 10, Source: t3_verify.py


Example 2: setUp

    def setUp(self):
        self.tmpfh1 = tempfile.NamedTemporaryFile()
        self.tmpfh2 = tempfile.NamedTemporaryFile()
        self.src = Connection(self.tmpfh1.name)
        self.dst = Connection(self.tmpfh2.name)
        self.fh = tempfile.TemporaryFile()

        # Disable exclusive locking for all tests
        self.src.execute('PRAGMA locking_mode = NORMAL')
        self.dst.execute('PRAGMA locking_mode = NORMAL')

        self.create_table(self.src)
        self.create_table(self.dst)
Developer: DirtYiCE, Project: s3ql, Lines: 13, Source: t1_dump.py


Example 3: main

def main(args=None):

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    # Check for cached metadata
    cachepath = get_backend_cachedir(options.storage_url, options.cachedir)
    if not os.path.exists(cachepath + '.params'):
        raise QuietError("No local metadata found.")

    with open(cachepath + '.params', 'rb') as fh:
        param = pickle.load(fh)

    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new.')

    if os.path.exists(DBNAME):
        raise QuietError('%s exists, aborting.' % DBNAME)

    log.info('Copying database...')
    dst = tempfile.NamedTemporaryFile()
    with open(cachepath + '.db', 'rb') as src:
        shutil.copyfileobj(src, dst)
    dst.flush()
    db = Connection(dst.name)

    log.info('Scrambling...')
    md5 = lambda x: hashlib.md5(x).hexdigest()
    for (id_, name) in db.query('SELECT id, name FROM names'):
        db.execute('UPDATE names SET name=? WHERE id=?',
                   (md5(name), id_))

    for (id_, name) in db.query('SELECT inode, target FROM symlink_targets'):
        db.execute('UPDATE symlink_targets SET target=? WHERE inode=?',
                   (md5(name), id_))

    for (id_, name) in db.query('SELECT rowid, value FROM ext_attributes'):
        db.execute('UPDATE ext_attributes SET value=? WHERE rowid=?',
                   (md5(name), id_))

    log.info('Saving...')
    with open(DBNAME, 'wb+') as fh:
        dump_metadata(db, fh)
Developer: NickChen0113, Project: s3ql, Lines: 49, Source: scramble_db.py


Example 4: setUp

    def setUp(self):
        self.backend_dir = tempfile.mkdtemp()
        plain_backend = local.Backend('local://' + self.backend_dir, None, None)
        self.backend_pool = BackendPool(lambda: BetterBackend(b'schwubl', 'lzma', plain_backend))
        self.backend = self.backend_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp()
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name. 
        # Therefore, we unlink the file manually in tearDown() 
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

        self.block_cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                                      self.max_obj_size * 5)
        self.server = fs.Operations(self.block_cache, self.db, self.max_obj_size,
                                    InodeCache(self.db, 0))

        self.server.init()

        # Keep track of unused filenames
        self.name_cnt = 0
Developer: thefirstwind, Project: s3qloss, Lines: 30, Source: t3_fs_api.py


Example 5: setUp

    def setUp(self):

        self.backend_dir = tempfile.mkdtemp()
        self.backend_pool = BackendPool(lambda: local.Backend('local://' + self.backend_dir, 
                                                           None, None))

        self.cachedir = tempfile.mkdtemp()
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name. 
        # Therefore, we unlink the file manually in tearDown() 
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                         os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 32))

        self.cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                                self.max_obj_size * 100)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()
Developer: thefirstwind, Project: s3qloss, Lines: 31, Source: t2_block_cache.py


Example 6: run

    def run(self):
        log.debug('MetadataDownloadThread: start')
        
        while not self.quit:
            self.event.wait(self.interval)
            self.event.clear()
            
            if self.quit:
                break
            
            with self.bucket_pool() as bucket:
                #XXX: call bucket.is_get_consistent() to verify data consistency later
                seq_no = get_seq_no(bucket)
                if seq_no > self.param['seq_no']:
                    log.info('Remote metadata is newer than local (%d vs %d), '
                             'downloading it', seq_no, self.param['seq_no'])
                elif seq_no < self.param['seq_no']:
                    log.warn('Remote metadata is older than local (%d vs %d), '
                             'ignoring the bucket until the metadata upload thread is done',
                             seq_no, self.param['seq_no'])
                    continue
                else:
                    log.info('Remote seq_no equals local (%d vs %d), skipping download',
                             seq_no, self.param['seq_no'])
                    continue
                              
                log.info("Downloading & uncompressing metadata...")
                os.close(os.open(self.cachepath + '.db.tmp',
                                 os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                                 stat.S_IRUSR | stat.S_IWUSR))

                
                db_conn = Connection(self.cachepath + '.db.tmp', fast_mode=True)
                with bucket.open_read("s3ql_metadata") as fh:
                    restore_metadata(fh, db_conn)
                db_conn.close()

                with llfuse.lock:
                    if self.quit:
                        break
                    os.rename(self.cachepath + '.db.tmp', self.cachepath + '.db')
                    self.db_mtime = os.stat(self.cachepath + '.db').st_mtime 
                    self.param['seq_no'] = seq_no

        log.debug('MetadataDownloadThread: end')    
Developer: drewlu, Project: ossql, Lines: 45, Source: mount.py


Example 7: setUp

    def setUp(self):
        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)
        self.cache = inode_cache.InodeCache(self.db, 0)
Developer: thefirstwind, Project: s3qloss, Lines: 10, Source: t3_inode_cache.py


Example 8: setUp

    def setUp(self):
        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend = local.Backend('local://' + self.backend_dir, None, None)
        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.backend,
                  { 'max_obj_size': self.max_obj_size }, self.db)
        self.fsck.expect_errors = True
Developer: rootfs, Project: s3ql, Lines: 14, Source: t3_fsck.py


Example 9: setUp

    def setUp(self):
        self.bucket_dir = tempfile.mkdtemp()
        self.bucket = local.Bucket(self.bucket_dir, None, None)
        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.bucket,
                  { 'blocksize': self.blocksize }, self.db)
        self.fsck.expect_errors = True
Developer: drewlu, Project: ossql, Lines: 14, Source: t3_fsck.py


Example 10: setUp

    def setUp(self):
        self.backend_dir = tempfile.mkdtemp()
        self.backend = local.Backend('local://' + self.backend_dir, None, None)
        self.cachedir = tempfile.mkdtemp()
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name. 
        # Therefore, we unlink the file manually in tearDown() 
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.backend,
                  { 'max_obj_size': self.max_obj_size }, self.db)
        self.fsck.expect_errors = True
Developer: thefirstwind, Project: s3qloss, Lines: 18, Source: t3_fsck.py


Example 11: setUp

    def setUp(self):

        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend_pool = BackendPool(lambda: local.Backend('local://' + self.backend_dir,
                                                           None, None))

        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        now_ns = time_ns()
        self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                         os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1, 32))

        cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                           self.max_obj_size * 100)
        self.cache = cache

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True
        cache.to_upload = DummyDistributor()

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()
Developer: DirtYiCE, Project: s3ql, Lines: 42, Source: t2_block_cache.py


Example 12: setUp

    def setUp(self):

        self.bucket_dir = tempfile.mkdtemp()
        self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))

        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute(
            "INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) " "VALUES (?,?,?,?,?,?,?,?,?)",
            (
                self.inode,
                stat.S_IFREG
                | stat.S_IRUSR
                | stat.S_IWUSR
                | stat.S_IXUSR
                | stat.S_IRGRP
                | stat.S_IXGRP
                | stat.S_IROTH
                | stat.S_IXOTH,
                os.getuid(),
                os.getgid(),
                time.time(),
                time.time(),
                time.time(),
                1,
                32,
            ),
        )

        self.cache = BlockCache(self.bucket_pool, self.db, self.cachedir, self.blocksize * 100)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()
Developer: netconstructor, Project: ossql, Lines: 42, Source: t2_block_cache.py


Example 13: setUp

    def setUp(self):
        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        plain_backend = local.Backend('local://' + self.backend_dir, None, None)
        self.backend_pool = BackendPool(lambda: ComprencBackend(b'schwubl', ('zlib', 6),
                                                              plain_backend))
        self.backend = self.backend_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

        cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                           self.max_obj_size * 5)
        self.block_cache = cache
        self.server = fs.Operations(cache, self.db, self.max_obj_size,
                                    InodeCache(self.db, 0))
        self.server.init()

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True
        cache.to_upload = DummyDistributor()

        # Keep track of unused filenames
        self.name_cnt = 0
Developer: rootfs, Project: s3ql, Lines: 40, Source: t3_fs_api.py


Example 14: setUp

    def setUp(self):
        self.bucket_dir = tempfile.mkdtemp()
        self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))
        self.bucket = self.bucket_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()
        
        self.block_cache = BlockCache(self.bucket_pool, self.db, self.cachedir,
                                      self.blocksize * 5)
        self.server = fs.Operations(self.block_cache, self.db, self.blocksize)
          
        self.server.init()

        # Keep track of unused filenames
        self.name_cnt = 0
Developer: drewlu, Project: ossql, Lines: 24, Source: t3_fs_api.py


Example 15: cache_tests

class cache_tests(TestCase):
    def setUp(self):

        self.bucket_dir = tempfile.mkdtemp()
        self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))

        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute(
            "INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) " "VALUES (?,?,?,?,?,?,?,?,?)",
            (
                self.inode,
                stat.S_IFREG
                | stat.S_IRUSR
                | stat.S_IWUSR
                | stat.S_IXUSR
                | stat.S_IRGRP
                | stat.S_IXGRP
                | stat.S_IROTH
                | stat.S_IXOTH,
                os.getuid(),
                os.getgid(),
                time.time(),
                time.time(),
                time.time(),
                1,
                32,
            ),
        )

        self.cache = BlockCache(self.bucket_pool, self.db, self.cachedir, self.blocksize * 100)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

    def tearDown(self):
        self.cache.bucket_pool = self.bucket_pool
        self.cache.destroy()
        if os.path.exists(self.cachedir):
            shutil.rmtree(self.cachedir)
        shutil.rmtree(self.bucket_dir)

        llfuse.lock.release()

    @staticmethod
    def random_data(len_):
        with open("/dev/urandom", "rb") as fh:
            return fh.read(len_)

    def test_get(self):
        inode = self.inode
        blockno = 11
        data = self.random_data(int(0.5 * self.blocksize))

        # Case 1: Object does not exist yet
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data)

        # Case 2: Object is in cache
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

        # Case 3: Object needs to be downloaded
        self.cache.clear()
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

    def test_expire(self):
        inode = self.inode

        # Define the 4 most recently accessed ones
        most_recent = [7, 11, 10, 8]
        for i in most_recent:
            time.sleep(0.2)
            with self.cache.get(inode, i) as fh:
                fh.write("%d" % i)

        # And some others
        for i in range(20):
            if i in most_recent:
                continue
            with self.cache.get(inode, i) as fh:
                fh.write("%d" % i)

        # Flush the 2 most recently accessed ones
        commit(self.cache, inode, most_recent[-2])
        commit(self.cache, inode, most_recent[-3])

#......... part of the code is omitted here .........
Developer: netconstructor, Project: ossql, Lines: 101, Source: t2_block_cache.py


Example 16: cache_tests

class cache_tests(unittest.TestCase):

    def setUp(self):

        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend_pool = BackendPool(lambda: local.Backend('local://' + self.backend_dir,
                                                           None, None))

        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                         os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 32))

        cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                           self.max_obj_size * 100)
        self.cache = cache

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True
        cache.to_upload = DummyDistributor()

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

    def tearDown(self):
        llfuse.lock.release()
        self.cache.backend_pool = self.backend_pool
        self.cache.destroy()
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        self.dbfile.close()
        os.unlink(self.dbfile.name)

    def test_thread_hang(self):
        # Make sure that we don't deadlock if uploads threads or removal
        # threads have died and we try to expire or terminate

        # Monkeypatch to avoid error messages about uncaught exceptions
        # in other threads
        upload_exc = False
        removal_exc = False
        def _upload_loop(*a, fn=self.cache._upload_loop):
            try:
                return fn(*a)
            except NotADirectoryError:
                nonlocal upload_exc
                upload_exc = True
        def _removal_loop(*a, fn=self.cache._removal_loop):
            try:
                return fn(*a)
            except NotADirectoryError:
                nonlocal removal_exc
                removal_exc = True
        self.cache._upload_loop = _upload_loop
        self.cache._removal_loop = _removal_loop

        # Start threads
        self.cache.init(threads=3)

        # Create first object (we'll try to remove that)
        with self.cache.get(self.inode, 0) as fh:
            fh.write(b'bar wurfz!')
        self.cache.commit()
        self.cache.wait()

        # Make sure that upload and removal will fail
        os.rename(self.backend_dir, self.backend_dir + '-tmp')
        open(self.backend_dir, 'w').close()

        # Create second object (we'll try to upload that)
        with self.cache.get(self.inode, 1) as fh:
            fh.write(b'bar wurfz number two!')

        # Schedule a removal
        self.cache.remove(self.inode, 0)

        try:
            # Try to clean-up (implicitly calls expire)
            with llfuse.lock_released, \
                catch_logmsg('Unable to flush cache, no upload threads left alive',
#......... part of the code is omitted here .........
Developer: rootfs, Project: s3ql, Lines: 101, Source: t2_block_cache.py


Example 17: fsck_tests

class fsck_tests(unittest.TestCase):

    def setUp(self):
        self.backend_dir = tempfile.mkdtemp()
        self.backend = local.Backend('local://' + self.backend_dir, None, None)
        self.cachedir = tempfile.mkdtemp()
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name. 
        # Therefore, we unlink the file manually in tearDown() 
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.backend,
                  { 'max_obj_size': self.max_obj_size }, self.db)
        self.fsck.expect_errors = True

    def tearDown(self):
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        os.unlink(self.dbfile.name)

    def assert_fsck(self, fn):
        '''Check that fn detects and corrects an error'''

        self.fsck.found_errors = False
        fn()
        self.assertTrue(self.fsck.found_errors)
        self.fsck.found_errors = False
        self.fsck.check()
        self.assertFalse(self.fsck.found_errors)

    def test_cache(self):
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                               | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                               os.getuid(), os.getgid(), time.time(), time.time(), time.time(),
                               1, 8))
        self._link('test-entry', inode)

        # Create new block
        fh = open(self.cachedir + '/%d-0' % inode, 'wb')
        fh.write('somedata')
        fh.close()
        self.assert_fsck(self.fsck.check_cache)
        self.assertEquals(self.backend['s3ql_data_1'], 'somedata')

        # Existing block
        self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                        (self.max_obj_size + 8, inode))
        with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
            fh.write('somedata')
        self.assert_fsck(self.fsck.check_cache)

        # Old block preserved
        with open(self.cachedir + '/%d-0' % inode, 'wb') as fh:
            fh.write('somedat2')
        self.assert_fsck(self.fsck.check_cache)

        # Old block removed
        with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
            fh.write('somedat3')
        self.assert_fsck(self.fsck.check_cache)


    def test_lof1(self):

        # Make lost+found a file
        inode = self.db.get_val("SELECT inode FROM contents_v WHERE name=? AND parent_inode=?",
                                (b"lost+found", ROOT_INODE))
        self.db.execute('DELETE FROM contents WHERE parent_inode=?', (inode,))
        self.db.execute('UPDATE inodes SET mode=?, size=? WHERE id=?',
                        (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, inode))

        def check():
            self.fsck.check_lof()
            self.fsck.check_inodes_refcount()

        self.assert_fsck(check)

    def test_lof2(self):
        # Remove lost+found
        name_id = self.db.get_val('SELECT id FROM names WHERE name=?', (b'lost+found',))
        inode = self.db.get_val('SELECT inode FROM contents WHERE name_id=? AND '
                                'parent_inode=?', (name_id, ROOT_INODE))
        self.db.execute('DELETE FROM inodes WHERE id=?', (inode,))
        self.db.execute('DELETE FROM contents WHERE name_id=? and parent_inode=?',
                        (name_id, ROOT_INODE))
        self.db.execute('UPDATE names SET refcount = refcount-1 WHERE id=?', (name_id,))

        self.assert_fsck(self.fsck.check_lof)

    def test_wrong_inode_refcount(self):

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
#......... part of the code is omitted here .........
Developer: thefirstwind, Project: s3qloss, Lines: 101, Source: t3_fsck.py


Example 18: cache_tests

class cache_tests(unittest.TestCase):

    def setUp(self):
        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name. 
        # Therefore, we unlink the file manually in tearDown() 
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)
        self.cache = inode_cache.InodeCache(self.db, 0)

    def tearDown(self):
        self.cache.destroy()
        os.unlink(self.dbfile.name)

    def test_create(self):
        attrs = {'mode': 784,
                 'refcount': 3,
                 'uid': 7,
                 'gid': 2,
                 'size': 34674,
                 'rdev': 11,
                 'atime': time.time(),
                 'ctime': time.time(),
                 'mtime': time.time() }

        inode = self.cache.create_inode(**attrs)

        for key in attrs.keys():
            self.assertEqual(attrs[key], getattr(inode, key))

        self.assertTrue(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))


    def test_del(self):
        attrs = {'mode': 784,
                'refcount': 3,
                'uid': 7,
                'gid': 2,
                'size': 34674,
                'rdev': 11,
                'atime': time.time(),
                'ctime': time.time(),
                'mtime': time.time() }
        inode = self.cache.create_inode(**attrs)
        del self.cache[inode.id]
        self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))
        self.assertRaises(KeyError, self.cache.__delitem__, inode.id)

    def test_get(self):
        attrs = {'mode': 784,
                'refcount': 3,
                'uid': 7,
                'gid': 2,
                'size': 34674,
                'rdev': 11,
                'atime': time.time(),
                'ctime': time.time(),
                'mtime': time.time() }

        inode = self.cache.create_inode(**attrs)
        for (key, val) in attrs.iteritems():
            self.assertEqual(getattr(inode, key), val)

        # Create another inode
        self.cache.create_inode(**attrs)

        self.db.execute('DELETE FROM inodes WHERE id=?', (inode.id,))
        # Entry should still be in cache
        self.assertEqual(inode, self.cache[inode.id])

        # Now it should be out of the cache
        for _ in xrange(inode_cache.CACHE_SIZE + 1):
            self.cache.create_inode(**attrs)

        self.assertRaises(KeyError, self.cache.__getitem__, inode.id)
Developer: thefirstwind, Project: s3qloss, Lines: 78, Source: t3_inode_cache.py


Example 19: DumpTests

class DumpTests(unittest.TestCase):
    def setUp(self):
        self.tmpfh1 = tempfile.NamedTemporaryFile()
        self.tmpfh2 = tempfile.NamedTemporaryFile()
        self.src = Connection(self.tmpfh1.name)
        self.dst = Connection(self.tmpfh2.name)
        self.fh = tempfile.TemporaryFile()

        # Disable exclusive locking for all tests
        self.src.execute('PRAGMA locking_mode = NORMAL')
        self.dst.execute('PRAGMA locking_mode = NORMAL')

        self.create_table(self.src)
        self.create_table(self.dst)

    def tearDown(self):
        self.src.close()
        self.dst.close()
        self.tmpfh1.close()
        self.tmpfh2.close()
        self.fh.close()

    def test_transactions(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        self.dst.execute('PRAGMA journal_mode = WAL')

        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh, trx_rows=10)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_1(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_2(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_3(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, -1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_2_buf_auto(self):
        self.fill_vals(self.src)
        self.fill_buf(self.src)
        dumpspec = (('id', deltadump.INTEGER),
                    ('buf', deltadump.BLOB))
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_2_buf_fixed(self):
        BUFLEN = 32
        self.fill_vals(self.src)
        self.fill_buf(self.src, BUFLEN)
        dumpspec = (('id', deltadump.INTEGER),
                    ('buf', deltadump.BLOB, BUFLEN))
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_1(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_2(self):
#......... part of the code is omitted here .........
Developer: DirtYiCE, Project: s3ql, Lines: 101, Source: t1_dump.py


Example 20: get_metadata

def get_metadata(bucket, cachepath, readonly=False):
    '''Retrieve metadata
    
    Checks:
    - Revision
    - Unclean mounts
    
    Locally cached metadata is used if up-to-date.
    '''
                
    seq_no = get_seq_no(bucket)

    # Check for cached metadata
    db = None
    if os.path.exists(cachepath + '.params'):
        param = pickle.load(open(cachepath + '.params', 'rb'))
        if param['seq_no'] < seq_no:
            log.info('Ignoring locally cached metadata (outdated).')
            param = bucket.lookup('s3ql_metadata')
        else:
            log.info('Using cached metadata.')
            db = Connection(cachepath + '.db')
    else:
        param = bucket.lookup('s3ql_metadata')
 
    # Check for unclean shutdown
    if param['seq_no'] < seq_no:
        if bucket.is_get_consistent():
           #raise QuietError(textwrap.fill(textwrap.dedent('''\
           #    It appears that the file system is still mounted somewhere else. If this is not
           #    the case, the file system may have not been unmounted cleanly and you should try
           #    to run fsck on the computer where the file system has been mounted most recently.
           #    ''')))
            log.warn("local seqno is smaller than bucket seqno, which implies another mountpoint but local bucket is consistent, just ignore it")
        else:                
           #raise QuietError(textwrap.fill(textwrap.dedent('''\
           #    It appears that the file system is still mounted somewhere else. If this is not the
           #    case, the file system may have not been unmounted cleanly or the data from the 
           #    most-recent mount may have not yet propagated through the backend. In the later case,
           #    waiting for a while should fix the problem, in the former case you should try to run
           #    fsck on the computer where the file system has been mounted most recently.
           #    ''')))
            log.warn("local seqno is smaller than bucket seqno, which implies another mountpoint and local bucket is inconsistent, could not ignore the error")
       
    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old, please run `s3qladm upgrade` first.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new, please update your '
                         'S3QL installation.')
        
    # Check that the fs itself is clean
    if not readonly and param['needs_fsck']:
        raise QuietError("File system damaged or not unmounted cleanly, run fsck!")        
    if (time.time() - time.timezone) - param['last_fsck'] > 60 * 60 * 24 * 31:
        log.warn('Last file system check was more than 1 month ago, '
                 'running fsck.s3ql is recommended.')
    
    # Download metadata
    if not db:
        log.info("Downloading & uncompressing metadata...")
        os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                         stat.S_IRUSR | stat.S_IWUSR)) 
        db = Connection(cachepath + '.db.tmp', fast_mode=True)
        with bucket.open_read("s3ql_metadata") as fh:
            restore_metadata(fh, db)
        db.close()
        os.rename(cachepath + '.db.tmp', cachepath + '.db')
        db = Connection(cachepath + '.db')
 
    # Increase metadata sequence no 
    param['seq_no'] += 1
    param['needs_fsck'] = True
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
    param['needs_fsck'] = False
    
    return (param, db)
Developer: drewlu, Project: ossql, Lines: 78, Source: mount.py
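
Taken together, Examples 3, 6, and 20 show the metadata round trip that pairs a Connection with dump_metadata() and restore_metadata(). The sketch below reconstructs that round trip under the same assumptions the excerpts make; the import path for the two helpers is an assumption (the excerpts call them without showing imports), and the fast_mode flag appears only in the ossql fork code of Example 6.

import tempfile

from s3ql.database import Connection
# Assumed import path; the excerpts call these helpers without importing them.
from s3ql.metadata import dump_metadata, restore_metadata

# Serialize an open database to a file object, as in Example 3.
src_file = tempfile.NamedTemporaryFile()
src = Connection(src_file.name)
# ... create and populate the metadata tables here ...
with open('metadata.dump', 'wb+') as fh:
    dump_metadata(src, fh)
src.close()

# Replay the dump into a fresh database, as in Examples 6 and 20.
dst_file = tempfile.NamedTemporaryFile()
dst = Connection(dst_file.name, fast_mode=True)  # fast_mode: ossql fork only
with open('metadata.dump', 'rb') as fh:
    restore_metadata(fh, dst)
dst.close()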
