Python config.GPDBConfig Class Code Examples


This article collects typical usage examples of the Python class mpp.lib.config.GPDBConfig. If you have been wondering what the GPDBConfig class does, how to use it, or what real-world usage looks like, the curated class examples below may help.



Twenty code examples of the GPDBConfig class are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better Python code examples.
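
Before the examples, here is a minimal orientation sketch (not taken from any single project) of how GPDBConfig is typically instantiated and queried. The method names all appear in the examples that follow; the variable names are illustrative only:

    from mpp.lib.config import GPDBConfig

    config = GPDBConfig()

    # Host and port of the primary segment with content id 0 (used in Examples 1, 4, 5, 10, 11, 17).
    host, port = config.get_hostandport_of_segment(0)

    # Master hostname (used in Examples 9 and 19).
    master_host = config.get_masterhost()

    # Despite the name, is_not_insync_segments() returns True when no segment is out of sync,
    # i.e. the whole cluster is synchronized (see Examples 3, 6, 8, 15, 16, 20).
    cluster_in_sync = config.is_not_insync_segments()

    # Host and data directory of a segment identified by its dbid (used in Examples 7, 13, 19).
    seg_host, seg_datadir = config.get_host_and_datadir_of_segment(dbid=2)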

Example 1: get_substitutions

    def get_substitutions(self):
        subst = {}
        config = GPDBConfig()
        host, _ = config.get_hostandport_of_segment(0)
        subst['@[email protected]'] = 'rh55-qavm44'
        subst['@[email protected]'] = os.path.join(self.get_sql_dir(), 'datagen.py')
        return subst
Author: 50wu, Project: gpdb, Lines: 7, Source: test_run_workload.py


Example 2: wait_for_shutdown_before_commit

    def wait_for_shutdown_before_commit(self):
        self.check_system()

        config = GPDBConfig()
        db_id = config.get_dbid(-1,'p')

        test_case_list0 = []
        test_case_list0.append(('mpp.gpdb.tests.storage.fts.fts_transitions.FtsTransitions.set_faults', ['fts_wait_for_shutdown', 'infinite_loop'], {'seg_id': db_id}))
        self.test_case_scenario.append(test_case_list0)

        test_case_list1 = []
        test_case_list1.append(('mpp.gpdb.tests.storage.fts.fts_transitions.FtsTransitions.set_faults', ['filerep_consumer', 'fault', 'primary']))
        self.test_case_scenario.append(test_case_list1)

        test_case_list2 = []
        test_case_list2.append(('mpp.gpdb.tests.storage.fts.fts_transitions.FtsTransitions.check_fault_status', ['fts_wait_for_shutdown'], {'seg_id': db_id}))
        self.test_case_scenario.append(test_case_list2)

        test_case_list3 = []
        test_case_list3.append('mpp.gpdb.tests.storage.fts.fts_transitions.FtsTransitions.restart_db_with_no_rc_check')
        self.test_case_scenario.append(test_case_list3)
    
        test_case_list4 = []
        test_case_list4.append('mpp.gpdb.tests.storage.fts.fts_transitions.FtsTransitions.cluster_state')
        self.test_case_scenario.append(test_case_list4)
Author: PengJi, Project: gpdb-comments, Lines: 25, Source: fts_transitions.py


Example 3: GpRecover

class GpRecover(GpRecoverseg):
    '''Class for gprecoverseg utility methods '''

    MAX_COUNTER=400

    def __init__(self, config=None):
        if config is not None:
            self.config = config
        else:
            self.config = GPDBConfig()
        self.gphome = os.environ.get('GPHOME')

    def incremental(self, workerPool=False):
        '''Incremental Recoverseg '''
        tinctest.logger.info('Running Incremental gprecoverseg...')
        if workerPool:
            return self.run_using_workerpool()
        else:
            return self.run()

    def full(self):
        '''Full Recoverseg '''
        tinctest.logger.info('Running Full gprecoverseg...')
        return self.run(option = '-F')

    def rebalance(self):
        '''Run gprecoverseg to rebalance the cluster '''
        tinctest.logger.info('Running gprecoverseg rebalance...')
        return self.run(option = '-r')

    def wait_till_insync_transition(self):
        '''
            Poll till all the segments transition to insync state. 
            Number of trials set to MAX_COUNTER
        '''
        counter= 1
        while(not self.config.is_not_insync_segments()):
            if counter > self.MAX_COUNTER:
                raise Exception('Segments did not come insync after 20 minutes')
            else:
                counter = counter + 1
                time.sleep(3) #Wait 3 secs before polling again
        tinctest.logger.info('Segments are synchronized ...')
        return True
        
    def recover_rebalance_segs(self):
        if not self.config.is_balanced_segments():
            # recover
            if not self.incremental():
                raise Exception('gprecoverseg failed')
            if not self.wait_till_insync_transition():
                raise Exception('Segments not in sync')
            tinctest.logger.info('Segments recovered and back in sync')

            # rebalance
            if not self.rebalance():
                raise Exception('gprecoverseg -r failed')
            if not self.wait_till_insync_transition():
                raise Exception('Segments not in sync')
            tinctest.logger.info('Segments rebalanced and back in sync')
Author: PengJi, Project: gpdb-comments, Lines: 60, Source: gprecoverseg.py
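
As a usage note (a sketch, not part of the original snippet): the GpRecover class above is normally driven by calling incremental() or full() and then polling with wait_till_insync_transition(); recover_rebalance_segs() chains these same steps together.

    recoverer = GpRecover()                  # builds its own GPDBConfig() when none is passed
    if not recoverer.incremental():          # or recoverer.full() for gprecoverseg -F
        raise Exception('gprecoverseg failed')
    recoverer.wait_till_insync_transition()  # poll until all segments report insync

    if not recoverer.config.is_balanced_segments():
        recoverer.rebalance()                # gprecoverseg -r
        recoverer.wait_till_insync_transition()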


Example 4: setUpClass

    def setUpClass(cls):
        # we need an empty db to run the tests
        tinctest.logger.info("recreate database wet using dropdb/createdb")
        cmd = Command('recreatedb', 'dropdb wet; createdb wet')
        cmd.run(validateAfter=False)

        cls.drop_roles()

        super(LegacyWETTestCase, cls).setUpClass()

        source_dir = cls.get_source_dir()
        config = GPDBConfig()
        host, _ = config.get_hostandport_of_segment(0)
        port = mppUtil.getOpenPort(8080)
        tinctest.logger.info("gpfdist host = {0}, port = {1}".format(host, port))

        cls.config = config

        data_dir = os.path.join(source_dir, 'data')
        cls.gpfdist = GPFDIST(port, host, directory=data_dir)
        cls.gpfdist.startGpfdist()

        # WET writes into this directory.
        data_out_dir = os.path.join(cls.gpfdist.getdir(), 'output')
        shutil.rmtree(data_out_dir, ignore_errors=True)
        os.mkdir(data_out_dir)
Author: HaozhouWang, Project: gpdb, Lines: 26, Source: test_write.py


Example 5: test_insert_commit_before_truncate

    def test_insert_commit_before_truncate(self):
        '''
        @description We suspend the vacuum on master after the first
                     transaction, connect to a segment, modify the
                     relation being vacuumed, and commit the segment-local
                     transaction before the truncate transaction starts.
        '''
        fault_name = 'vacuum_relation_end_of_first_round'

        gpdbconfig = GPDBConfig()
        seghost, segport = gpdbconfig.get_hostandport_of_segment(0, 'p')
        filereputil = Filerepe2e_Util()
        filereputil.inject_fault(f=fault_name, y='suspend', seg_id='1')

        # run vacuum in background, it'll be blocked.
        sql_file1, ans_file1, out_file1 = self.get_file_names('conn1')
        psql1 = PSQL(sql_file=sql_file1, out_file=out_file1)
        thread1 = threading.Thread(target=self.run_psql, args=(psql1,))
        thread1.start()

        self.check_fault_triggered(fault_name)

        sql_file2, ans_file2, out_file2 = self.get_file_names('conn2')
        # utility to seg0
        psql2 = PSQL(sql_file=sql_file2, out_file=out_file2,
                     host=seghost, port=segport,
                     PGOPTIONS='-c gp_session_role=utility')
        self.run_psql(psql2)

        # resume vacuum
        filereputil.inject_fault(f=fault_name, y='reset', seg_id='1')
        thread1.join()
        self.assertTrue(Gpdiff.are_files_equal(out_file1, ans_file1))
        self.assertTrue(Gpdiff.are_files_equal(out_file2, ans_file2))
Author: 50wu, Project: gpdb, Lines: 34, Source: test_crossexec.py


Example 6: setUpFilespaceForCTAS

def setUpFilespaceForCTAS(isForHawq):
    config = GPDBConfig()
    if isForHawq:
        filespace = HAWQGpfilespace()
    else:
        filespace = Gpfilespace()
    if config.is_not_insync_segments():
        filespace.create_filespace('tincrepo_qp_ddl_ctas')
Author: 50wu, Project: gpdb, Lines: 8, Source: test_ctas.py


Example 7: get_host_and_db_path

    def get_host_and_db_path(self, dbname, contentid=0):
        ''' Get the host and database path for the content'''
        config = GPDBConfig()
        db_oid = PSQL.run_sql_command("select oid from pg_database where datname='%s'" % dbname, flags='-q -t', dbname='postgres')
        dbid = PSQL.run_sql_command("select dbid from gp_segment_configuration where content=%s and role='p'" % contentid, flags='-q -t', dbname='postgres')
        (host, address) = config.get_host_and_datadir_of_segment(dbid= dbid.strip())

        db_path = os.path.join(address, 'base', db_oid.strip())
        return (host.strip(), db_path)
Author: 50wu, Project: gpdb, Lines: 9, Source: gp_filedump.py


Example 8: is_changetracking

    def is_changetracking(self):
        """
        @summary: return true if system is in change tracking mode
        
        @return: True if the cluster is in change tracking mode (segments not all in sync), False otherwise
        """

        config = GPDBConfig()
        return not config.is_not_insync_segments()
Author: PengJi, Project: gpdb-comments, Lines: 9, Source: genFault.py


Example 9: copy_files_to_master

def copy_files_to_master(filename, location):
    config = GPDBConfig()
    host = config.get_masterhost()
    cmd = 'gpssh -h %s -e "scp %s %s:%s/" ' % (host, filename, host, location)
    tinctest.logger.debug(cmd)
    res = {"rc": 0, "stderr": "", "stdout": ""}
    run_shell_command(cmd, "run scp", res)
    if res["rc"] > 0:
        raise Exception("Copying to host %s failed" % host)
Author: kaknikhil, Project: gpdb, Lines: 9, Source: common_utils.py


Example 10: get_substitutions

    def get_substitutions(self):
        """
        Returns substitution variables.
        """
        config = GPDBConfig()
        host, _ = config.get_hostandport_of_segment(0)
        variables = {
                'HOST': host,
                }
        return variables
Author: 50wu, Project: gpdb, Lines: 10, Source: test_externalpartition.py


Example 11: test_pg_aocsseg_corruption

    def test_pg_aocsseg_corruption(self):
        self.create_appendonly_tables(row=False)
        config = GPDBConfig()
        host, port = config.get_hostandport_of_segment()
        self.transform_sql_file(os.path.join(self.sql_dir, 'corrupt_pg_aocsseg.sql.t'), 'co1')
        out_file = os.path.join(self.output_dir, 'corrupt_pg_aocsseg.out')
        ans_file = os.path.join(self.ans_dir, 'corrupt_pg_aocsseg.ans')
        sql_file = os.path.join(self.sql_dir, 'corrupt_pg_aocsseg.sql')
        PSQL.run_sql_file_utility_mode(sql_file, out_file=out_file, host=host,
                                       port=port, dbname=os.environ['PGDATABASE'])
        if not Gpdiff.are_files_equal(out_file, ans_file, match_sub=[local_path('sql/init_file')]):
            raise Exception('Corruption test of pg_aocsseg failed for appendonly tables !')
Author: 50wu, Project: gpdb, Lines: 12, Source: test_ao_read_check.py


Example 12: test_insert_unlock_before_truncate

    def test_insert_unlock_before_truncate(self):
        '''
        @description This is rather complicated.  We suspend the vacuum on
                     master after the first transaction, and connect to
                     segment, modify the relation in question, and release the
                     lock, keep the transaction.  To release the lock, we need
                     a special UDF.  Vacuum is supposed to skip truncate if it
                     sees such in-progress transaction.  Usually this should
                     not happen, but it rather simulates catalog DDL.
        '''
        fault_name = 'vacuum_relation_end_of_first_round'

        gpdbconfig = GPDBConfig()
        seghost, segport = gpdbconfig.get_hostandport_of_segment(0, 'p')
        filereputil = Filerepe2e_Util()
        filereputil.inject_fault(f=fault_name, y='suspend', seg_id='1')

        PSQL.run_sql_command(sql_cmd='drop table if exists sync_table; create table sync_table(a int)')
        # Use pygresql to keep the connection and issue commands separately.
        # thread2 will wait on sync_table before finish its work, so we
        # can keep the transaction open until the vacuum completes its work.
        conn = pygresql.pg.connect(host=seghost, port=int(segport), opt='-c gp_session_role=utility')
        conn.query('begin')
        conn.query('lock sync_table in access exclusive mode')

        # run vacuum in background, it'll be blocked.
        sql_file1, ans_file1, out_file1 = self.get_file_names('conn1')
        psql1 = PSQL(sql_file=sql_file1, out_file=out_file1)
        thread1 = threading.Thread(target=self.run_psql, args=(psql1,))
        thread1.start()

        self.check_fault_triggered(fault_name)

        sql_file2, ans_file2, out_file2 = self.get_file_names('conn2')
        # utility to seg0
        psql2 = PSQL(sql_file=sql_file2, out_file=out_file2,
                     host=seghost, port=segport,
                     PGOPTIONS='-c gp_session_role=utility')
        thread2 = threading.Thread(target=self.run_psql, args=(psql2,))
        thread2.start()

        # resume vacuum
        filereputil.inject_fault(f=fault_name, y='reset', seg_id='1')

        # Once thread1 finishes, we can now release the lock on sync_table,
        # so that thread2 can proceed.
        thread1.join()
        conn.query('commit')
        thread2.join()

        self.assertTrue(Gpdiff.are_files_equal(out_file1, ans_file1))
        self.assertTrue(Gpdiff.are_files_equal(out_file2, ans_file2))
Author: 50wu, Project: gpdb, Lines: 52, Source: test_crossexec.py


Example 13: check_logs

def check_logs(search_string_list):
    """
    Check all the segment logs (master/primary/mirror) for keywords in the
    search_string_list
    """
    dbid_list = PSQL.run_sql_command("select dbid from gp_segment_configuration;", flags="-q -t", dbname="postgres")
    dbid_list = dbid_list.split()
    config = GPDBConfig()
    for dbid in dbid_list:
        (host, data_dir) = config.get_host_and_datadir_of_segment(dbid.strip())
        (rc, msg) = search_string(host, search_string_list, data_dir)
        if rc:
            return (False, msg)
    return (True, "No Issues found")
Author: kaknikhil, Project: gpdb, Lines: 14, Source: common_utils.py
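
A hedged usage sketch for check_logs: the keyword list below is hypothetical, and the function itself returns a (bool, message) tuple as shown above.

    ok, msg = check_logs(['PANIC', 'FATAL'])    # hypothetical search strings
    if not ok:
        raise Exception('Found issues in segment logs: %s' % msg)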


Example 14: setUpClass

    def setUpClass(cls):
        super(PreExpansionWorkloadTests, cls).setUpClass()
        # gpscp the script required for external table in create_base_workload
        scp_file = os.path.join(cls.get_sql_dir(), 'datagen.py')
        gpconfig = GPDBConfig()
        hosts = gpconfig.get_hosts()
        hosts_file = os.path.join(cls.get_out_dir(), 'hostfile')
        with open(hosts_file, 'w') as f:
            f.write('\n'.join(hosts))

        res = {'rc':0, 'stderr':'', 'stdout':''}
        run_shell_command("gpscp -f %s %s =:$GPHOME/bin" %(hosts_file, scp_file), 'gpscp script', res)
        if res['rc'] > 0:
            tinctest.logger.warning("Failed to gpscp the required script to all the segments for external table queries. The script might already exist !")
Author: 50wu, Project: gpdb, Lines: 14, Source: test_run_workload.py


Example 15: test_do_full_recovery

    def test_do_full_recovery(self):
        """
        [feature]: Performs Full Recovery
        
        """

        config = GPDBConfig()
        recoverseg = GpRecoverseg()
        tinctest.logger.info('Running Full gprecoverseg...')
        recoverseg.run(option = '-F')
        rtrycnt = 0
        while (not config.is_not_insync_segments()):
            tinctest.logger.info("Waiting [%s] for DB to recover" %rtrycnt)
            rtrycnt = rtrycnt + 1
Author: 50wu, Project: gpdb, Lines: 14, Source: fault.py


Example 16: test_full_recovery_skip_persistent_tables_check

    def test_full_recovery_skip_persistent_tables_check(self):
        """
        [feature]: Run recoverseg and verify that the persistent tables check is skipped
        
        """

        config = GPDBConfig()
        recoverseg = GpRecoverseg()
        tinctest.logger.info('Running gprecoverseg...')
        recoverseg.run(option='-F')
        self.assertNotIn('Performing persistent table check', recoverseg.stdout)
        rtrycnt = 0
        while (not config.is_not_insync_segments()):
            tinctest.logger.info("Waiting [%s] for DB to recover" %rtrycnt)
            rtrycnt = rtrycnt + 1
Author: 50wu, Project: gpdb, Lines: 15, Source: fault.py


Example 17: setUpClass

    def setUpClass(cls):
        super(other_tests, cls).setUpClass()
        source_dir = cls.get_source_dir()
        config = GPDBConfig()
        host, _ = config.get_hostandport_of_segment(0)
        port = mppUtil.getOpenPort(8080)
        tinctest.logger.info("gpfdist host = {0}, port = {1}".format(host, port))

        data_dir = os.path.join(source_dir, 'data')
        cls.gpfdist = GPFDIST(port, host, directory=data_dir)
        cls.gpfdist.startGpfdist()

        data_out_dir = os.path.join(data_dir, 'output')
        shutil.rmtree(data_out_dir, ignore_errors=True)
        os.mkdir(data_out_dir)
Author: 50wu, Project: gpdb, Lines: 15, Source: test_runsqls.py


Example 18: __init__

    def __init__(self, methodName, config=None):
        if config is not None:
            self.config = config
        else:
            self.config = GPDBConfig()
        self.gpverify = GpdbVerify(config=self.config)
        super(DbStateClass, self).__init__(methodName)
Author: PengJi, Project: gpdb-comments, Lines: 7, Source: dbstate.py


Example 19: __init__

    def __init__(self, methodName):
        self.config = GPDBConfig()
        self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
        self.seg_prefix = os.path.basename(self.mdd).split('-')[0]
        self.master_host = self.config.get_masterhost()
        self.gpinitconfig_template = local_path('configs/gpinitconfig_template')
        self.datadir_config_file = local_path('configs/datadir_config_file') 
        self.mirror_config_file = local_path('configs/mirror_config_file')
        self.gpinitconfig_file = local_path('configs/gpinitconfig')
        self.host_file = local_path('configs/hosts')
        self.hosts = self.config.get_hosts(segments = True)

        self.port_base = '40000'
        self.master_port = os.environ.get('PGPORT', '5432')
        self.primary_data_dir = self.config.get_host_and_datadir_of_segment(dbid = 2)[1]
        # initially set the mirror data dir same to primary's
        self.mirror_data_dir = os.path.join(os.path.dirname(os.path.dirname(self.primary_data_dir)), 'mirror')
        self.gpinitsystem = True
        self.number_of_segments = self.config.get_countprimarysegments()
        self.number_of_segments_per_host = self.number_of_segments / len(self.hosts)
        self.standby_enabled = False
        self.number_of_parallelism = 4
        self.fs_location = []

        super(GPAddmirrorsTestCase, self).__init__(methodName)
Author: PengJi, Project: gpdb-comments, Lines: 25, Source: test_gpaddmirrors.py


Example 20: test_recovery_with_new_loc

    def test_recovery_with_new_loc(self):
        """
        [feature]: Performs recovery by creating a configuration file with new segment locations 
        
        """

        newfault = Fault()
        config = GPDBConfig()
        hosts = newfault.get_segment_host()
        newfault.create_new_loc_config(hosts, orig_filename='recovery.conf', new_filename='recovery_new.conf')
        if not newfault.run_recovery_with_config(filename='recovery_new.conf'):
            self.fail("*** Incremental recovery with config file recovery_new.conf failed")
        rtrycnt = 0
        while (not config.is_not_insync_segments()):
            tinctest.logger.info("Waiting [%s] for DB to recover" %rtrycnt)
            rtrycnt = rtrycnt + 1
Author: 50wu, Project: gpdb, Lines: 16, Source: fault.py
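
A note on the recovery polling loops in Examples 15, 16 and 20: as written they spin on config.is_not_insync_segments() without pausing. A bounded variant with a delay, in the spirit of wait_till_insync_transition() from Example 3, might look like the sketch below (the retry limit of 600 and the 2-second delay are illustrative values, not from the original tests):

    import time

    rtrycnt = 0
    while not config.is_not_insync_segments():
        if rtrycnt > 600:
            raise Exception('Segments did not come back insync in time')
        tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
        rtrycnt += 1
        time.sleep(2)    # avoid busy-waiting between checks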



Note: the mpp.lib.config.GPDBConfig class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Please follow each project's license when redistributing or using the code, and do not republish without permission.

